From 735252b8fd3269e81d65c44bd46d30aa82fe5fc9 Mon Sep 17 00:00:00 2001 From: Jin Hai Date: Sat, 18 May 2024 16:02:58 +0800 Subject: [PATCH] LOG_ERROR message before raising recoverable error (#1217) ### What problem does this PR solve? Refactor code: log the error message (LOG_ERROR) before raising a recoverable error ### Type of change - [x] Refactoring --------- Signed-off-by: Jin Hai --- docs/references/benchmark.md | 18 +-- src/executor/explain_physical_plan.cpp | 25 +++- .../expression/expression_evaluator.cpp | 13 +- src/executor/expression/expression_state.cpp | 9 +- src/executor/fragment_builder.cpp | 4 +- src/executor/operator/physical_aggregate.cpp | 8 +- src/executor/operator/physical_command.cpp | 45 +++++-- .../operator/physical_create_index_finish.cpp | 1 + src/executor/operator/physical_explain.cpp | 10 +- src/executor/operator/physical_fusion.cpp | 5 +- src/executor/operator/physical_import.cpp | 54 +++++--- src/executor/operator/physical_insert.cpp | 6 +- src/executor/operator/physical_knn_scan.cpp | 17 ++- src/executor/operator/physical_limit.cpp | 9 +- src/executor/operator/physical_match.cpp | 20 ++- src/executor/operator/physical_merge_knn.cpp | 4 +- src/executor/operator/physical_show.cpp | 46 +++++-- src/executor/operator/physical_sink.cpp | 4 +- src/executor/operator/physical_source.cpp | 5 +- src/executor/physical_operator_type.cpp | 5 +- src/executor/physical_planner.cpp | 21 ++- src/expression/cast_expression.cpp | 5 +- src/expression/match_tensor_expression.cpp | 8 +- src/function/aggregate/avg.cpp | 29 ++++- src/function/aggregate/sum.cpp | 31 ++++- src/function/aggregate_function.cpp | 5 +- src/function/aggregate_function_set.cpp | 12 +- src/function/cast/embedding_cast.cppm | 16 ++- src/function/cast/float_cast.cppm | 6 +- src/function/cast/uuid_cast.cppm | 5 +- src/function/scalar/add.cpp | 26 +++- src/function/scalar/divide.cpp | 9 +- src/function/scalar/equals.cpp | 13 +- src/function/scalar/extract.cpp | 25 +++- src/function/scalar/greater.cpp | 13 +- src/function/scalar/inequals.cpp | 13 +- src/function/scalar_function.cpp | 5 +- src/function/scalar_function_set.cpp | 4 +- src/function/table/knn_scan_data.cpp | 10 +- src/function/table/merge_knn_data.cpp | 5 +- src/main/query_context.cpp | 2 + src/planner/bind_context.cpp | 17 ++- src/planner/binder/aggregate_binder.cpp | 5 +- src/planner/binder/bind_alias_proxy.cpp | 5 +- src/planner/binder/group_binder.cpp | 24 +++- src/planner/binder/having_binder.cpp | 13 +- src/planner/binder/insert_binder.cpp | 5 +- src/planner/binder/join_binder.cpp | 10 +- src/planner/binder/limit_binder.cpp | 21 ++- src/planner/binder/order_binder.cpp | 13 +- src/planner/binder/where_binder.cpp | 5 +- src/planner/bound_delete_statement.cpp | 5 +- src/planner/bound_update_statement.cpp | 10 +- src/planner/column_identifier.cpp | 13 +- src/planner/explain_ast.cpp | 17 ++- src/planner/expression_binder.cpp | 57 ++++++--- src/planner/logical_planner.cpp | 73 +++++++---- .../node/logical_match_tensor_scan.cpp | 5 +- src/planner/query_binder.cpp | 120 +++++++++++++----- .../correlated_expressions_detector.cpp | 9 +- .../subquery/dependent_join_flattener.cpp | 39 ++++-- .../rewrite_correlated_expressions.cpp | 9 +- src/planner/subquery/subquery_unnest.cpp | 33 +++-- .../buffer/file_worker/data_file_worker.cpp | 45 +++++-- .../buffer/file_worker/file_worker.cpp | 4 +- .../buffer/file_worker/raw_file_worker.cpp | 9 +- src/storage/column_vector/column_vector.cpp | 104 +++++++++++---- src/storage/column_vector/column_vector.cppm | 9 +-
.../operator/binary_operator.cppm | 21 ++- src/storage/column_vector/value.cpp | 10 +- src/storage/definition/index_base.cpp | 9 +- src/storage/definition/index_full_text.cpp | 15 ++- src/storage/definition/index_hnsw.cpp | 23 +++- src/storage/definition/index_ivfflat.cpp | 23 +++- src/storage/definition/index_secondary.cpp | 11 +- src/storage/invertedindex/column_inverter.cpp | 6 +- .../invertedindex/disk_segment_reader.cpp | 9 +- .../invertedindex/format/doc_list_encoder.cpp | 3 +- .../format/inmem_doc_list_decoder.cpp | 1 - .../invertedindex/format/skiplist_reader.cpp | 3 +- .../invertedindex/search/query_node.cpp | 20 ++- .../invertedindex/search/search_driver.cpp | 5 +- src/storage/io/file_reader.cpp | 13 +- src/storage/io/file_reader.cppm | 3 +- .../ann_ivf/annivfflat_index_data.cppm | 16 ++- src/storage/meta/catalog.cpp | 17 ++- .../meta/entry/segment_index_entry.cpp | 16 ++- src/storage/meta/entry/table_entry.cpp | 4 +- src/storage/meta/entry/table_index_entry.cpp | 8 +- src/storage/meta/table_index_meta.cpp | 4 +- src/storage/txn/txn_store.cpp | 4 +- src/storage/wal/wal_manager.cpp | 40 ++++-- .../function/cast/embedding_cast.cpp | 23 +++- src/unit_test/parser/search_driver.cpp | 25 +++- .../invertedindex/search/query_match.cpp | 5 +- .../dml/compact/test_compact_many_index.slt | 6 + 96 files changed, 1191 insertions(+), 402 deletions(-) diff --git a/docs/references/benchmark.md b/docs/references/benchmark.md index 58670d8e1b..383b481cbe 100644 --- a/docs/references/benchmark.md +++ b/docs/references/benchmark.md @@ -128,20 +128,12 @@ options: ### Enwiki > - 33000000 documents -> - 10000 `OR` queries generated based on the dataset. All terms are extracted from the dataset and very rare(occurrence < 100) terms are excluded. The number of terms of each query match the weight `[0.03, 0.15, 0.25, 0.25, 0.15, 0.08, 0.04, 0.03, 0.02]`. - -| | Time to insert & build index | Time to import & build index | Latency(ms)(mean_time, max_time, p95_time) | -| ----------------- | ---------------------------- | ---------------------------- | ------------------------------------------ | -| **Elasticsearch** | 2289 s | N/A | 7.27, 326.31, 14.75 | -| **Infinity** | 2321 s | 944 s | 1.54, 812.55, 3.51 | - - -| Python clients | Infinity(qps, RES, vCPU) | Elasticsearch(qps, RES, vCPU) | -| -------------- | ------------------------ | ----------------------------- | -| 1 | 636, 9G, 0.9 | 213, 20G, 3 | -| 4 | 1938, 9G, 3.2 | 672, 21G, 8.5 | -| 8 | 3294, 9G, 5.7 | 1174, 21G, 10 | +> - 100000 `OR` queries generated based on the dataset. All terms are extracted from the dataset and very rare(occurrence < 100) terms are excluded. The number of terms of each query match the weight `[0.03, 0.15, 0.25, 0.25, 0.15, 0.08, 0.04, 0.03, 0.02]`. 
+| | Time to insert & build index | Time to import & build index | P95 Latency(ms)| QPS (8 python clients) | Memory | vCPU | +| ----------------- | ---------------------------- | ---------------------------- | ---------------| -----------------------| --------| ----- | +| **Elasticsearch** | 2289 s | N/A | 14.75 | 1174 | 21.0GB | 10.0 | +| **Infinity** | 2321 s | 944 s | 3.51 | 3294 | 9.0GB | 5.7 | --- diff --git a/src/executor/explain_physical_plan.cpp b/src/executor/explain_physical_plan.cpp index 9f32d5c9fb..22234407e6 100644 --- a/src/executor/explain_physical_plan.cpp +++ b/src/executor/explain_physical_plan.cpp @@ -89,6 +89,7 @@ import statement_common; import flush_statement; import common_query_filter; import table_entry; +import logger; namespace infinity { @@ -1481,23 +1482,33 @@ void ExplainPhysicalPlan::Explain(const PhysicalShow *show_node, SharedPtr>> &, i64) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } void ExplainPhysicalPlan::Explain(const PhysicalDummyScan *, SharedPtr>> &, i64) { - UnrecoverableError("Not implement: PhysicalDummyScan"); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } void ExplainPhysicalPlan::Explain(const PhysicalHashJoin *, SharedPtr>> &, i64) { - UnrecoverableError("Not implement: PhysicalHashJoin"); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } void ExplainPhysicalPlan::Explain(const PhysicalSortMergeJoin *, SharedPtr>> &, i64) { - UnrecoverableError("Not implement: PhysicalSortMergeJoin"); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } void ExplainPhysicalPlan::Explain(const PhysicalIndexJoin *, SharedPtr>> &, i64) { - UnrecoverableError("Not implement: PhysicalIndexJoin"); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } void ExplainPhysicalPlan::Explain(const PhysicalDelete *delete_node, SharedPtr>> &result, i64 intent_size) { @@ -1657,7 +1668,9 @@ void ExplainPhysicalPlan::Explain(const PhysicalExport *export_node, SharedPtr>> &result, i64 intent_size) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } void ExplainPhysicalPlan::Explain(const PhysicalCreateView *create_node, SharedPtr>> &result, i64 intent_size) { diff --git a/src/executor/expression/expression_evaluator.cpp b/src/executor/expression/expression_evaluator.cpp index cc921d5211..5673f30880 100644 --- a/src/executor/expression/expression_evaluator.cpp +++ b/src/executor/expression/expression_evaluator.cpp @@ -34,6 +34,7 @@ import third_party; import infinity_exception; import expression_type; import bound_cast_func; +import logger; namespace infinity { @@ -68,7 +69,9 @@ void ExpressionEvaluator::Execute(const SharedPtr &expr, SharedPtr &state, SharedPtr &output_column_vector) { if (in_aggregate_) { - RecoverableError(Status::RecursiveAggregate(expr->ToString())); + Status status = Status::RecursiveAggregate(expr->ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } in_aggregate_ = true; SharedPtr &child_state = state->Children()[0]; @@ -80,7 +83,9 @@ void ExpressionEvaluator::Execute(const SharedPtr &expr, 
this->Execute(child_expr, child_state, child_output_col); if (expr->aggregate_function_.return_type_ != *output_column_vector->data_type()) { - RecoverableError(Status::DataTypeMismatch(expr->aggregate_function_.return_type_.ToString(), output_column_vector->data_type()->ToString())); + Status status = Status::DataTypeMismatch(expr->aggregate_function_.return_type_.ToString(), output_column_vector->data_type()->ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } auto data_state = state->agg_state_; @@ -183,7 +188,9 @@ void ExpressionEvaluator::Execute(const SharedPtr &expr, } void ExpressionEvaluator::Execute(const SharedPtr &, SharedPtr &, SharedPtr &) { - RecoverableError(Status::NotSupport("IN execution isn't implemented yet.")); + Status status = Status::NotSupport("IN execution isn't implemented yet."); + LOG_ERROR(status.message()); + RecoverableError(status); } } // namespace infinity diff --git a/src/executor/expression/expression_state.cpp b/src/executor/expression/expression_state.cpp index ccef3e3d39..de74255cb0 100644 --- a/src/executor/expression/expression_state.cpp +++ b/src/executor/expression/expression_state.cpp @@ -38,6 +38,7 @@ import status; import default_values; import internal_types; import data_type; +import logger; namespace infinity { @@ -70,7 +71,9 @@ SharedPtr ExpressionState::CreateState(const SharedPtr ExpressionState::CreateState(const SharedPtr &agg_expr, char *agg_state, const AggregateFlag agg_flag) { if (agg_expr->arguments().size() != 1) { - RecoverableError(Status::FunctionArgsError(agg_expr->ToString())); + Status status = Status::FunctionArgsError(agg_expr->ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr result = MakeShared(); @@ -112,7 +115,9 @@ SharedPtr ExpressionState::CreateState(const SharedPtr ExpressionState::CreateState(const SharedPtr &cast_expr) { if (cast_expr->arguments().size() != 1) { - RecoverableError(Status::FunctionArgsError(cast_expr->ToString())); + Status status = Status::FunctionArgsError(cast_expr->ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr result = MakeShared(); diff --git a/src/executor/fragment_builder.cpp b/src/executor/fragment_builder.cpp index d55ecc9914..8400fac028 100644 --- a/src/executor/fragment_builder.cpp +++ b/src/executor/fragment_builder.cpp @@ -62,7 +62,9 @@ void FragmentBuilder::BuildExplain(PhysicalOperator *phys_op, PlanFragment *curr switch (explain_op->explain_type()) { case ExplainType::kAnalyze: { - RecoverableError(Status::NotSupport("Not implement: Query analyze")); + Status status = Status::NotSupport("Not implement: Query analyze"); + LOG_ERROR(status.message()); + RecoverableError(status); } case ExplainType::kAst: case ExplainType::kUnOpt: diff --git a/src/executor/operator/physical_aggregate.cpp b/src/executor/operator/physical_aggregate.cpp index 23f156ccc4..6d5d25bacc 100644 --- a/src/executor/operator/physical_aggregate.cpp +++ b/src/executor/operator/physical_aggregate.cpp @@ -216,7 +216,9 @@ void PhysicalAggregate::GroupByInputTable(const SharedPtr &input_tabl SharedPtr input_type = input_table->GetColumnTypeById(column_id); SharedPtr output_type = grouped_input_table->GetColumnTypeById(column_id); if (*input_type != *output_type) { - RecoverableError(Status::DataTypeMismatch(input_type->ToString(), output_type->ToString())); + Status status = Status::DataTypeMismatch(input_type->ToString(), output_type->ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } 
types.emplace_back(input_type); } @@ -357,7 +359,9 @@ void PhysicalAggregate::GenerateGroupByResult(const SharedPtr &input_ SharedPtr input_type = input_table->GetColumnTypeById(column_id); SharedPtr output_type = output_table->GetColumnTypeById(column_id); if (*input_type != *output_type) { - RecoverableError(Status::DataTypeMismatch(input_type->ToString(), output_type->ToString())); + Status status = Status::DataTypeMismatch(input_type->ToString(), output_type->ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } types.emplace_back(input_type); } diff --git a/src/executor/operator/physical_command.cpp b/src/executor/operator/physical_command.cpp index bf65bc02ee..978f1db230 100644 --- a/src/executor/operator/physical_command.cpp +++ b/src/executor/operator/physical_command.cpp @@ -35,6 +35,7 @@ import config; import status; import infinity_exception; import variables; +import logger; namespace infinity { @@ -56,16 +57,22 @@ bool PhysicalCommand::Execute(QueryContext *query_context, OperatorState *operat switch(session_var) { case SessionVariable::kEnableProfile: { if (set_command->value_type() != SetVarType::kBool) { - RecoverableError(Status::DataTypeMismatch("Boolean", set_command->value_type_str())); + Status status = Status::DataTypeMismatch("Boolean", set_command->value_type_str()); + LOG_ERROR(status.message()); + RecoverableError(status); } query_context->current_session()->SessionVariables()->enable_profile_ = set_command->value_bool(); return true; } case SessionVariable::kInvalid: { - RecoverableError(Status::InvalidCommand(fmt::format("Unknown session variable: {}", set_command->var_name()))); + Status status = Status::InvalidCommand(fmt::format("Unknown session variable: {}", set_command->var_name())); + LOG_ERROR(status.message()); + RecoverableError(status); } default: { - RecoverableError(Status::InvalidCommand(fmt::format("Session variable: {} is read-only", set_command->var_name()))); + Status status = Status::InvalidCommand(fmt::format("Session variable: {} is read-only", set_command->var_name())); + LOG_ERROR(status.message()); + RecoverableError(status); } } break; @@ -75,16 +82,22 @@ bool PhysicalCommand::Execute(QueryContext *query_context, OperatorState *operat switch(global_var) { case GlobalVariable::kProfileRecordCapacity: { if (set_command->value_type() != SetVarType::kInteger) { - RecoverableError(Status::DataTypeMismatch("Integer", set_command->value_type_str())); + Status status = Status::DataTypeMismatch("Integer", set_command->value_type_str()); + LOG_ERROR(status.message()); + RecoverableError(status); } query_context->storage()->catalog()->ResizeProfileHistory(set_command->value_int()); return true; } case GlobalVariable::kInvalid: { - RecoverableError(Status::InvalidCommand(fmt::format("unknown global variable {}", set_command->var_name()))); + Status status = Status::InvalidCommand(fmt::format("unknown global variable {}", set_command->var_name())); + LOG_ERROR(status.message()); + RecoverableError(status); } default: { - RecoverableError(Status::InvalidCommand(fmt::format("Global variable: {} is read-only", set_command->var_name()))); + Status status = Status::InvalidCommand(fmt::format("Global variable: {} is read-only", set_command->var_name())); + LOG_ERROR(status.message()); + RecoverableError(status); } } break; @@ -130,22 +143,30 @@ bool PhysicalCommand::Execute(QueryContext *query_context, OperatorState *operat return true; } - RecoverableError(Status::SetInvalidVarValue("log level", "trace, debug, info, warning, error, 
critical")); + Status status = Status::SetInvalidVarValue("log level", "trace, debug, info, warning, error, critical"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case GlobalOptionIndex::kInvalid: { - RecoverableError(Status::InvalidCommand(fmt::format("Unknown config: {}", set_command->var_name()))); + Status status = Status::InvalidCommand(fmt::format("Unknown config: {}", set_command->var_name())); + LOG_ERROR(status.message()); + RecoverableError(status); break; } default: { - RecoverableError(Status::InvalidCommand(fmt::format("Config {} is read-only", set_command->var_name()))); + Status status = Status::InvalidCommand(fmt::format("Config {} is read-only", set_command->var_name())); + LOG_ERROR(status.message()); + RecoverableError(status); break; } } break; } default: { - RecoverableError(Status::InvalidCommand("Invalid set command scope, neither session nor global")); + Status status = Status::InvalidCommand("Invalid set command scope, neither session nor global"); + LOG_ERROR(status.message()); + RecoverableError(status); } } @@ -156,7 +177,9 @@ bool PhysicalCommand::Execute(QueryContext *query_context, OperatorState *operat ExportCmd *export_command = (ExportCmd *)(command_info_.get()); auto profiler_record = query_context->current_session()->GetProfileRecord(export_command->file_no()); if (profiler_record == nullptr) { - RecoverableError(Status::DataNotExist(fmt::format("The record does not exist: {}", export_command->file_no()))); + Status status = Status::DataNotExist(fmt::format("The record does not exist: {}", export_command->file_no())); + LOG_ERROR(status.message()); + RecoverableError(status); } LocalFileSystem fs; FileWriter file_writer(fs, export_command->file_name(), 128); diff --git a/src/executor/operator/physical_create_index_finish.cpp b/src/executor/operator/physical_create_index_finish.cpp index 3b7b2db971..7762fcce7b 100644 --- a/src/executor/operator/physical_create_index_finish.cpp +++ b/src/executor/operator/physical_create_index_finish.cpp @@ -46,6 +46,7 @@ bool PhysicalCreateIndexFinish::Execute(QueryContext *query_context, OperatorSta auto *txn = query_context->GetTxn(); auto status = txn->CreateIndexFinish(*db_name_, *table_name_, index_base_); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); } operator_state->SetComplete(); diff --git a/src/executor/operator/physical_explain.cpp b/src/executor/operator/physical_explain.cpp index d195a648eb..9a36d52a2d 100644 --- a/src/executor/operator/physical_explain.cpp +++ b/src/executor/operator/physical_explain.cpp @@ -31,6 +31,7 @@ import value; import status; import infinity_exception; import logical_type; +import logger; namespace infinity { @@ -54,7 +55,9 @@ void PhysicalExplain::Init() { switch (explain_type_) { case ExplainType::kAnalyze: { output_names_->emplace_back("Query Analyze"); - RecoverableError(Status::NotSupport("Not implement: Query analyze")); + Status status = Status::NotSupport("Not implement: Query analyze"); + LOG_ERROR(status.message()); + RecoverableError(status); } case ExplainType::kAst: { output_names_->emplace_back("Abstract Syntax Tree"); @@ -102,8 +105,9 @@ bool PhysicalExplain::Execute(QueryContext *, OperatorState *operator_state) { switch (explain_type_) { case ExplainType::kAnalyze: { - title = "Query Analyze"; - RecoverableError(Status::NotSupport("Not implement: Query analyze")); + Status status = Status::NotSupport("Not implement: Query analyze"); + LOG_ERROR(status.message()); + RecoverableError(status); } case 
ExplainType::kAst: { title = "Abstract Syntax Tree"; diff --git a/src/executor/operator/physical_fusion.cpp b/src/executor/operator/physical_fusion.cpp index d71e2ae191..f8600d4a39 100644 --- a/src/executor/operator/physical_fusion.cpp +++ b/src/executor/operator/physical_fusion.cpp @@ -41,6 +41,7 @@ import third_party; import infinity_exception; import value; import internal_types; +import logger; namespace infinity { @@ -67,7 +68,9 @@ bool PhysicalFusion::Execute(QueryContext *query_context, OperatorState *operato return false; } if (fusion_expr_->method_.compare("rrf") != 0) { - RecoverableError(Status::NotSupport(fmt::format("Fusion method {} is not implemented.", fusion_expr_->method_))); + Status status = Status::NotSupport(fmt::format("Fusion method {} is not implemented.", fusion_expr_->method_)); + LOG_ERROR(status.message()); + RecoverableError(status); } SizeT rank_constant = 60; if (fusion_expr_->options_.get() != nullptr) { diff --git a/src/executor/operator/physical_import.cpp b/src/executor/operator/physical_import.cpp index 0211690816..10a7708f1b 100644 --- a/src/executor/operator/physical_import.cpp +++ b/src/executor/operator/physical_import.cpp @@ -103,15 +103,21 @@ bool PhysicalImport::Execute(QueryContext *query_context, OperatorState *operato void PhysicalImport::ImportFVECS(QueryContext *query_context, ImportOperatorState *import_op_state) { if (table_entry_->ColumnCount() != 1) { - RecoverableError(Status::ImportFileFormatError("FVECS file must have only one column.")); + Status status = Status::ImportFileFormatError("FVECS file must have only one column."); + LOG_ERROR(status.message()); + RecoverableError(status); } auto &column_type = table_entry_->GetColumnDefByID(0)->column_type_; if (column_type->type() != kEmbedding) { - RecoverableError(Status::ImportFileFormatError("FVECS file must have only one embedding column.")); + Status status = Status::ImportFileFormatError("FVECS file must have only one embedding column."); + LOG_ERROR(status.message()); + RecoverableError(status); } auto embedding_info = static_cast(column_type->type_info().get()); if (embedding_info->Type() != kElemFloat) { - RecoverableError(Status::ImportFileFormatError("FVECS file must have only one embedding column with float element.")); + Status status = Status::ImportFileFormatError("FVECS file must have only one embedding column with float element."); + LOG_ERROR(status.message()); + RecoverableError(status); } LocalFileSystem fs; @@ -130,11 +136,14 @@ void PhysicalImport::ImportFVECS(QueryContext *query_context, ImportOperatorStat } if (nbytes != sizeof(dimension)) { - RecoverableError(Status::ImportFileFormatError(fmt::format("Read dimension which length isn't {}.", nbytes))); + Status status = Status::ImportFileFormatError(fmt::format("Read dimension which length isn't {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } if ((int)embedding_info->Dimension() != dimension) { - RecoverableError(Status::ImportFileFormatError( - fmt::format("Dimension in file ({}) doesn't match with table definition ({}).", dimension, embedding_info->Dimension()))); + Status status = Status::ImportFileFormatError(fmt::format("Dimension in file ({}) doesn't match with table definition ({}).", dimension, embedding_info->Dimension())); + LOG_ERROR(status.message()); + RecoverableError(status); } SizeT file_size = fs.GetFileSize(*file_handler); SizeT row_size = dimension * sizeof(FloatT) + sizeof(dimension); @@ -155,8 +164,9 @@ void PhysicalImport::ImportFVECS(QueryContext *query_context, 
ImportOperatorStat int dim; nbytes = fs.Read(*file_handler, &dim, sizeof(dimension)); if (dim != dimension or nbytes != sizeof(dimension)) { - RecoverableError( - Status::ImportFileFormatError(fmt::format("Dimension in file ({}) doesn't match with table definition ({}).", dim, dimension))); + Status status = Status::ImportFileFormatError(fmt::format("Dimension in file ({}) doesn't match with table definition ({}).", dim, dimension)); + LOG_ERROR(status.message()); + RecoverableError(status); } ptr_t dst_ptr = buf_ptr + block_entry->row_count() * sizeof(FloatT) * dimension; fs.Read(*file_handler, dst_ptr, sizeof(FloatT) * dimension); @@ -464,7 +474,9 @@ void PhysicalImport::CSVRowHandler(void *context) { ZsvCell cell = parser_context->parser_.GetCell(i); LOG_ERROR(fmt::format("Column {}: {}", i, std::string_view((char *)cell.str, cell.len))); } - RecoverableError(Status::ColumnCountMismatch(*err_msg)); + Status status = Status::ColumnCountMismatch(*err_msg); + LOG_ERROR(status.message()); + RecoverableError(status); } // append data to segment entry @@ -482,7 +494,9 @@ void PhysicalImport::CSVRowHandler(void *context) { auto &column_vector = parser_context->column_vectors_[column_idx]; column_vector.AppendByConstantExpr(const_expr); } else { - RecoverableError(Status::ImportFileFormatError(fmt::format("Column {} is empty.", column_def->name_))); + Status status = Status::ImportFileFormatError(fmt::format("Column {} is empty.", column_def->name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } } } @@ -493,7 +507,9 @@ void PhysicalImport::CSVRowHandler(void *context) { auto const_expr = dynamic_cast(column_def->default_expr_.get()); column_vector.AppendByConstantExpr(const_expr); } else { - RecoverableError(Status::ImportFileFormatError(fmt::format("Column {} is empty.", column_def->name_))); + Status status = Status::ImportFileFormatError(fmt::format("Column {} is empty.", column_def->name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } } block_entry->IncreaseRowCount(1); @@ -530,8 +546,10 @@ void AppendJsonTensorToColumn(const nlohmann::json &line_json, EmbeddingInfo *embedding_info) { Vector &&embedding = line_json[column_name].get>(); if (embedding.size() % embedding_info->Dimension() != 0) { - RecoverableError(Status::ImportFileFormatError( - fmt::format("Tensor element count {} isn't multiple of dimension {}.", embedding.size(), embedding_info->Dimension()))); + Status status = Status::ImportFileFormatError( + fmt::format("Tensor element count {} isn't multiple of dimension {}.", embedding.size(), embedding_info->Dimension())); + LOG_ERROR(status.message()); + RecoverableError(status); } const auto input_bytes = embedding.size() * sizeof(T); const Value embedding_value = @@ -546,8 +564,10 @@ void AppendJsonTensorToColumn(const nlohmann::json &line_json, EmbeddingInfo *embedding_info) { Vector &&embedding = line_json[column_name].get>(); if (embedding.size() % embedding_info->Dimension() != 0) { - RecoverableError(Status::ImportFileFormatError( - fmt::format("Tensor element count {} isn't multiple of dimension {}.", embedding.size(), embedding_info->Dimension()))); + Status status = Status::ImportFileFormatError( + fmt::format("Tensor element count {} isn't multiple of dimension {}.", embedding.size(), embedding_info->Dimension())); + LOG_ERROR(status.message()); + RecoverableError(status); } const auto input_bytes = (embedding.size() + 7) / 8; auto input_data = MakeUnique(input_bytes); @@ -693,7 +713,9 @@ void PhysicalImport::JSONLRowHandler(const 
nlohmann::json &line_json, Vector(column_def->default_expr_.get()); column_vector.AppendByConstantExpr(const_expr); } else { - RecoverableError(Status::ImportFileFormatError(fmt::format("Column {} not found in JSON.", column_def->name_))); + Status status = Status::ImportFileFormatError(fmt::format("Column {} not found in JSON.", column_def->name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/executor/operator/physical_insert.cpp b/src/executor/operator/physical_insert.cpp index 8ea66f395c..283d4e6ddf 100644 --- a/src/executor/operator/physical_insert.cpp +++ b/src/executor/operator/physical_insert.cpp @@ -34,6 +34,7 @@ import base_expression; import default_values; import status; import infinity_exception; +import logger; import column_def; @@ -51,8 +52,9 @@ bool PhysicalInsert::Execute(QueryContext *query_context, OperatorState *operato } if (row_count > DEFAULT_BLOCK_CAPACITY) { // Fixme: insert batch can larger than 8192, but currently we limit it. - RecoverableError(Status::UnexpectedError( - fmt::format("Insert values row count {} is larger than default block capacity {}.", row_count, DEFAULT_BLOCK_CAPACITY))); + Status status = Status::UnexpectedError(fmt::format("Insert values row count {} is larger than default block capacity {}.", row_count, DEFAULT_BLOCK_CAPACITY)); + LOG_ERROR(status.message()); + RecoverableError(status); // UnrecoverableError( // fmt::format("Insert values row count {} is larger than default block capacity {}.", row_count, DEFAULT_BLOCK_CAPACITY)); } diff --git a/src/executor/operator/physical_knn_scan.cpp b/src/executor/operator/physical_knn_scan.cpp index 4ebe395241..5e58ba8c90 100644 --- a/src/executor/operator/physical_knn_scan.cpp +++ b/src/executor/operator/physical_knn_scan.cpp @@ -159,13 +159,17 @@ bool PhysicalKnnScan::Execute(QueryContext *query_context, OperatorState *operat break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented KNN distance"); + LOG_ERROR(status.message()); + RecoverableError(status); } } break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented embedding data type"); + LOG_ERROR(status.message()); + RecoverableError(status); } } return true; @@ -200,6 +204,7 @@ void PhysicalKnnScan::PlanWithIndex(QueryContext *query_context) { // TODO: retu auto [table_index_entry, status] = table_index_meta->GetEntryNolock(txn_id, begin_ts); if (!status.ok()) { // Table index entry isn't found + LOG_ERROR(status.message()); RecoverableError(status); } @@ -383,7 +388,9 @@ void PhysicalKnnScan::ExecuteInternal(QueryContext *query_context, KnnScanOperat break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented KNN distance"); + LOG_ERROR(status.message()); + RecoverableError(status); } } }; @@ -507,7 +514,9 @@ void PhysicalKnnScan::ExecuteInternal(QueryContext *query_context, KnnScanOperat break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented index type"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/executor/operator/physical_limit.cpp b/src/executor/operator/physical_limit.cpp index 1181e2f0e3..ca1be1a8f9 100644 --- a/src/executor/operator/physical_limit.cpp +++ b/src/executor/operator/physical_limit.cpp @@ -33,6 +33,7 @@ import status; import infinity_exception; import 
expression_type; import value_expression; +import logger; namespace infinity { @@ -84,7 +85,9 @@ SizeT AtomicCounter::Limit(SizeT row_count) { bool AtomicCounter::IsLimitOver() { if (limit_ < 0) { - RecoverableError(Status::InvalidParameterValue("Limit", std::to_string(limit_), "larger than 0")); + Status status = Status::InvalidParameterValue("Limit", std::to_string(limit_), "larger than 0"); + LOG_ERROR(status.message()); + RecoverableError(status); } return limit_ == 0; } @@ -129,7 +132,9 @@ SizeT UnSyncCounter::Limit(SizeT row_count) { bool UnSyncCounter::IsLimitOver() { if (limit_ < 0) { - RecoverableError(Status::InvalidParameterValue("Limit", std::to_string(limit_), "larger than 0")); + Status status = Status::InvalidParameterValue("Limit", std::to_string(limit_), "larger than 0"); + LOG_ERROR(status.message()); + RecoverableError(status); } return limit_ == 0; } diff --git a/src/executor/operator/physical_match.cpp b/src/executor/operator/physical_match.cpp index e069426816..e4d1ef6f38 100644 --- a/src/executor/operator/physical_match.cpp +++ b/src/executor/operator/physical_match.cpp @@ -492,7 +492,9 @@ void ASSERT_FLOAT_EQ(float bar, u32 i, float a, float b) { if (diff_percent > bar) { OStringStream oss; oss << "result mismatch at " << i << " : a: " << a << ", b: " << b << ", diff_percent: " << diff_percent << std::endl; - RecoverableError(Status::SyntaxError("Debug Info: " + std::move(oss).str())); + Status status = Status::SyntaxError("Debug Info: " + std::move(oss).str()); + LOG_ERROR(status.message()); + RecoverableError(status); } } @@ -539,13 +541,17 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator use_ordinary_iter = true; use_block_max_iter = true; } else { - RecoverableError(Status::SyntaxError("block_max option must be empty, true, false or compare")); + Status status = Status::SyntaxError("block_max option must be empty, true, false or compare"); + LOG_ERROR(status.message()); + RecoverableError(status); } // 1.3 build filter SearchDriver driver(column2analyzer, default_field); UniquePtr query_tree = driver.ParseSingleWithFields(match_expr_->fields_, match_expr_->matching_text_); if (!query_tree) { - RecoverableError(Status::ParseMatchExprFailed(match_expr_->fields_, match_expr_->matching_text_)); + Status status = Status::ParseMatchExprFailed(match_expr_->fields_, match_expr_->matching_text_); + LOG_ERROR(status.message()); + RecoverableError(status); } auto finish_parse_query_tree_time = std::chrono::high_resolution_clock::now(); @@ -599,7 +605,9 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator if (auto iter_n_option = search_ops.options_.find("topn"); iter_n_option != search_ops.options_.end()) { int top_n_option = std::stoi(iter_n_option->second); if (top_n_option <= 0) { - RecoverableError(Status::SyntaxError("topn must be a positive integer")); + Status status = Status::SyntaxError("topn must be a positive integer"); + LOG_ERROR(status.message()); + RecoverableError(status); } top_n = top_n_option; } else { @@ -725,7 +733,9 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator LOG_TRACE(std::move(compare_info).str()); if (blockmax_result_count != blockmax_result_count_2 or ordinary_result_count != blockmax_result_count or blockmax_loop_cnt != blockmax_loop_cnt_2) { - RecoverableError(Status::SyntaxError("Debug Info: result count mismatch!")); + Status status = Status::SyntaxError("Debug Info: result count mismatch!"); + LOG_ERROR(status.message()); + 
RecoverableError(status); } for (u32 i = 0; i < result_count; ++i) { ASSERT_FLOAT_EQ(1e-6, i, ordinary_score_result[i], blockmax_score_result[i]); diff --git a/src/executor/operator/physical_merge_knn.cpp b/src/executor/operator/physical_merge_knn.cpp index 3d3f56ac17..e57633f313 100644 --- a/src/executor/operator/physical_merge_knn.cpp +++ b/src/executor/operator/physical_merge_knn.cpp @@ -75,7 +75,9 @@ bool PhysicalMergeKnn::Execute(QueryContext *query_context, OperatorState *opera break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } return true; diff --git a/src/executor/operator/physical_show.cpp b/src/executor/operator/physical_show.cpp index 32394f8c91..b9b4ccbf35 100644 --- a/src/executor/operator/physical_show.cpp +++ b/src/executor/operator/physical_show.cpp @@ -62,6 +62,7 @@ import default_values; import catalog; import txn_manager; import wal_manager; +import logger; namespace infinity { @@ -354,7 +355,9 @@ void PhysicalShow::Init() { break; } default: { - RecoverableError(Status::NotSupport("Not implemented show type")); + Status status = Status::NotSupport("Not implemented show type"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } @@ -462,6 +465,7 @@ void PhysicalShow::ExecuteShowDatabase(QueryContext *query_context, ShowOperator auto [database_info, status] = txn->GetDatabaseInfo(db_name_); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -539,6 +543,7 @@ void PhysicalShow::ExecuteShowTable(QueryContext *query_context, ShowOperatorSta auto [table_info, status] = txn->GetTableInfo(db_name_, object_name_); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -661,6 +666,7 @@ void PhysicalShow::ExecuteShowIndex(QueryContext *query_context, ShowOperatorSta auto [table_index_info, status] = txn->GetTableIndexInfo(db_name_, object_name_, index_name_.value()); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -904,6 +910,7 @@ void PhysicalShow::ExecuteShowTables(QueryContext *query_context, ShowOperatorSt Status status = txn->GetTables(db_name_, table_collections_detail); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -1056,6 +1063,7 @@ void PhysicalShow::ExecuteShowViews(QueryContext *query_context, ShowOperatorSta Status status = txn->GetViews(db_name_, views_detail); if (!status.ok()) { show_operator_state->status_ = status.clone(); + LOG_ERROR(status.message()); RecoverableError(status); } @@ -1203,6 +1211,7 @@ void PhysicalShow::ExecuteShowColumns(QueryContext *query_context, ShowOperatorS auto [table_entry, status] = txn->GetTableByName(db_name_, object_name_); if (!status.ok()) { show_operator_state->status_ = status.clone(); + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -1305,6 +1314,7 @@ void PhysicalShow::ExecuteShowSegments(QueryContext *query_context, ShowOperator auto [table_entry, status] = txn->GetTableByName(db_name_, object_name_); if (!status.ok()) { show_operator_state->status_ = status.clone(); + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -1369,6 +1379,7 @@ void PhysicalShow::ExecuteShowSegmentDetail(QueryContext *query_context, ShowOpe auto [table_entry, status] = txn->GetTableByName(db_name_, object_name_); if (!status.ok()) { show_operator_state->status_ = status.clone(); + 
LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -1462,7 +1473,9 @@ void PhysicalShow::ExecuteShowSegmentDetail(QueryContext *query_context, ShowOpe } } else { - RecoverableError(Status::SegmentNotExist(*segment_id_)); + Status status = Status::SegmentNotExist(*segment_id_); + LOG_ERROR(status.message()); + RecoverableError(status); return; } @@ -1494,7 +1507,9 @@ void PhysicalShow::ExecuteShowBlocks(QueryContext *query_context, ShowOperatorSt auto segment_entry = table_entry->GetSegmentByID(*segment_id_, begin_ts); if (!segment_entry) { - RecoverableError(Status::SegmentNotExist(*segment_id_)); + Status status = Status::SegmentNotExist(*segment_id_); + LOG_ERROR(status.message()); + RecoverableError(status); return; } auto block_entry_iter = BlockEntryIter(segment_entry.get()); @@ -1560,13 +1575,17 @@ void PhysicalShow::ExecuteShowBlockDetail(QueryContext *query_context, ShowOpera auto segment_entry = table_entry->GetSegmentByID(*segment_id_, begin_ts); if (!segment_entry) { - RecoverableError(Status::SegmentNotExist(*segment_id_)); + Status status = Status::SegmentNotExist(*segment_id_); + LOG_ERROR(status.message()); + RecoverableError(status); return; } auto block_entry = segment_entry->GetBlockEntryByID(*block_id_); if (!block_entry) { - RecoverableError(Status::BlockNotExist(*block_id_)); + Status status = Status::BlockNotExist(*block_id_); + LOG_ERROR(status.message()); + RecoverableError(status); return; } @@ -1638,6 +1657,7 @@ void PhysicalShow::ExecuteShowBlockColumn(QueryContext *query_context, ShowOpera auto [table_entry, status] = txn->GetTableByName(db_name_, object_name_); if (!status.ok()) { show_operator_state->status_ = status.clone(); + LOG_ERROR(status.message()); RecoverableError(status); return; } @@ -1646,19 +1666,25 @@ void PhysicalShow::ExecuteShowBlockColumn(QueryContext *query_context, ShowOpera SizeT table_column_id = *column_id_; if (table_column_id >= column_count) { - RecoverableError(Status::ColumnNotExist(fmt::format("index {}", table_column_id))); + Status status = Status::ColumnNotExist(fmt::format("index {}", table_column_id)); + LOG_ERROR(status.message()); + RecoverableError(status); return; } auto segment_entry = table_entry->GetSegmentByID(*segment_id_, begin_ts); if (!segment_entry) { - RecoverableError(Status::SegmentNotExist(*segment_id_)); + Status status = Status::SegmentNotExist(*segment_id_); + LOG_ERROR(status.message()); + RecoverableError(status); return; } auto block_entry = segment_entry->GetBlockEntryByID(*block_id_); if (!block_entry) { - RecoverableError(Status::BlockNotExist(*block_id_)); + Status status = Status::BlockNotExist(*block_id_); + LOG_ERROR(status.message()); + RecoverableError(status); return; } @@ -2679,6 +2705,7 @@ void PhysicalShow::ExecuteShowSessionVariable(QueryContext *query_context, ShowO } default: { operator_state->status_ = Status::NoSysVar(object_name_); + LOG_ERROR(operator_state->status_.message()); RecoverableError(operator_state->status_); return; } @@ -2824,6 +2851,7 @@ void PhysicalShow::ExecuteShowSessionVariables(QueryContext *query_context, Show } default: { operator_state->status_ = Status::NoSysVar(var_name); + LOG_ERROR(operator_state->status_.message()); RecoverableError(operator_state->status_); return; } @@ -3124,6 +3152,7 @@ void PhysicalShow::ExecuteShowGlobalVariable(QueryContext *query_context, ShowOp } default: { operator_state->status_ = Status::NoSysVar(object_name_); + LOG_ERROR(operator_state->status_.message()); RecoverableError(operator_state->status_); return; } @@ 
-3470,6 +3499,7 @@ void PhysicalShow::ExecuteShowGlobalVariables(QueryContext *query_context, ShowO } default: { operator_state->status_ = Status::NoSysVar(var_name); + LOG_ERROR(operator_state->status_.message()); RecoverableError(operator_state->status_); return; } diff --git a/src/executor/operator/physical_sink.cpp b/src/executor/operator/physical_sink.cpp index ca10e4a4b4..43d71788f2 100644 --- a/src/executor/operator/physical_sink.cpp +++ b/src/executor/operator/physical_sink.cpp @@ -164,7 +164,9 @@ void PhysicalSink::FillSinkStateFromLastOperatorState(MaterializeSinkState *mate break; } default: { - RecoverableError(Status::NotSupport(fmt::format("{} isn't supported here.", PhysicalOperatorToString(task_op_state->operator_type_)))); + Status status = Status::NotSupport(fmt::format("{} isn't supported here.", PhysicalOperatorToString(task_op_state->operator_type_))); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/executor/operator/physical_source.cpp b/src/executor/operator/physical_source.cpp index 48fcbec9f9..623d9c8138 100644 --- a/src/executor/operator/physical_source.cpp +++ b/src/executor/operator/physical_source.cpp @@ -27,6 +27,7 @@ import data_block; import fragment_data; import status; import infinity_exception; +import logger; namespace infinity { @@ -57,7 +58,9 @@ bool PhysicalSource::Execute(QueryContext *, SourceState *source_state) { return queue_source_state->GetData(); } default: { - RecoverableError(Status::NotSupport("Not support source state type")); + Status status = Status::NotSupport("Not support source state type"); + LOG_ERROR(status.message()); + RecoverableError(status); } } return true; diff --git a/src/executor/physical_operator_type.cpp b/src/executor/physical_operator_type.cpp index f93a1fe4b0..d4b05ffb46 100644 --- a/src/executor/physical_operator_type.cpp +++ b/src/executor/physical_operator_type.cpp @@ -19,6 +19,7 @@ module physical_operator_type; import stl; import status; import infinity_exception; +import logger; namespace infinity { String PhysicalOperatorToString(PhysicalOperatorType type) { @@ -150,6 +151,8 @@ String PhysicalOperatorToString(PhysicalOperatorType type) { return "CreateIndexFinish"; } - RecoverableError(Status::NotSupport("Unknown physical operator type")); + Status status = Status::NotSupport("Unknown physical operator type"); + LOG_ERROR(status.message()); + RecoverableError(status); } } // namespace infinity diff --git a/src/executor/physical_planner.cpp b/src/executor/physical_planner.cpp index fb902e8ca4..e3d33fde4a 100644 --- a/src/executor/physical_planner.cpp +++ b/src/executor/physical_planner.cpp @@ -135,6 +135,7 @@ import command_statement; import explain_statement; import load_meta; import block_index; +import logger; namespace infinity { @@ -717,13 +718,19 @@ UniquePtr PhysicalPlanner::BuildTop(const SharedPtr= std::numeric_limits::max()) { - RecoverableError(Status::SyntaxError("Offset is too large")); + Status status = Status::SyntaxError("Offset is too large"); + LOG_ERROR(status.message()); + RecoverableError(status); } if (merge_limit >= std::numeric_limits::max()) { - RecoverableError(Status::SyntaxError("Limit is too large")); + Status status = Status::SyntaxError("Limit is too large"); + LOG_ERROR(status.message()); + RecoverableError(status); } if (input_physical_operator->TaskletCount() <= 1) { // only Top @@ -838,7 +845,9 @@ UniquePtr PhysicalPlanner::BuildIndexScan(const SharedPtr PhysicalPlanner::BuildViewScan(const SharedPtr &logical_operator) const { - 
RecoverableError(Status::NotSupport("BuildViewScan")); + Status status = Status::NotSupport("BuildViewScan"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } @@ -1042,7 +1051,9 @@ UniquePtr PhysicalPlanner::BuildExplain(const SharedPtr explain_node{nullptr}; switch (logical_explain->explain_type()) { case ExplainType::kAnalyze: { - RecoverableError(Status::NotSupport("Not implement: Explain analyze")); + Status status = Status::NotSupport("Not implement: Explain analyze"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case ExplainType::kAst: diff --git a/src/expression/cast_expression.cpp b/src/expression/cast_expression.cpp index cb1812e58c..1944daf0c8 100644 --- a/src/expression/cast_expression.cpp +++ b/src/expression/cast_expression.cpp @@ -24,6 +24,7 @@ import stl; import third_party; import cast_function; import status; +import logger; namespace infinity { @@ -36,7 +37,9 @@ SharedPtr CastExpression::AddCastToType(const SharedPtrType(), target_type); return MakeShared(cast, source_expr_ptr, target_type); } else { - RecoverableError(Status::NotSupportedTypeConversion(source_expr_ptr->Type().ToString(), target_type.ToString())); + Status status = Status::NotSupportedTypeConversion(source_expr_ptr->Type().ToString(), target_type.ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } return nullptr; } diff --git a/src/expression/match_tensor_expression.cpp b/src/expression/match_tensor_expression.cpp index b3cbdad6ae..a802c76d0d 100644 --- a/src/expression/match_tensor_expression.cpp +++ b/src/expression/match_tensor_expression.cpp @@ -15,6 +15,7 @@ module; module match_tensor_expression; + import stl; import expression_type; import internal_types; @@ -23,6 +24,7 @@ import base_expression; import column_expression; import infinity_exception; import status; +import logger; namespace infinity { @@ -45,8 +47,10 @@ DataType MatchTensorExpression::Type() const { return DataType(LogicalType::kFloat); } default: { - RecoverableError(Status::NotSupport(fmt::format("Unsupported query tensor data type: {}, now only support float input", - EmbeddingT::EmbeddingDataType2String(embedding_data_type_)))); + Status status = Status::NotSupport(fmt::format("Unsupported query tensor data type: {}, now only support float input", + EmbeddingT::EmbeddingDataType2String(embedding_data_type_))); + LOG_ERROR(status.message()); + RecoverableError(status); return DataType(LogicalType::kInvalid); } } diff --git a/src/function/aggregate/avg.cpp b/src/function/aggregate/avg.cpp index 7a98cc8163..3c97aaa450 100644 --- a/src/function/aggregate/avg.cpp +++ b/src/function/aggregate/avg.cpp @@ -27,22 +27,41 @@ import third_party; import logical_type; import internal_types; import data_type; +import logger; namespace infinity { template struct AvgState { public: - inline void Initialize() { RecoverableError(Status::NotSupport("Initialize average state.")); } + inline void Initialize() { + Status status = Status::NotSupport("Initialize average state."); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline void Update(const ValueType *__restrict, SizeT) { RecoverableError(Status::NotSupport("Update average state.")); } + inline void Update(const ValueType *__restrict, SizeT) { + Status status = Status::NotSupport("Update average state."); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline void ConstantUpdate(const ValueType *__restrict, SizeT, SizeT) { RecoverableError(Status::NotSupport("Constant update average 
state.")); } + inline void ConstantUpdate(const ValueType *__restrict, SizeT, SizeT) { + Status status = Status::NotSupport("Constant update average state."); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline ptr_t Finalize() { RecoverableError(Status::NotSupport("Finalize average state.")); } + inline ptr_t Finalize() { + Status status = Status::NotSupport("Finalize average state."); + LOG_ERROR(status.message()); + RecoverableError(status); + } inline static SizeT Size(const DataType &data_type) { - RecoverableError(Status::NotSupport(fmt::format("Average state type size: {}", data_type.ToString()))); + Status status = Status::NotSupport(fmt::format("Average state type size: {}", data_type.ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } }; diff --git a/src/function/aggregate/sum.cpp b/src/function/aggregate/sum.cpp index f45d14c81a..bd79c1ac4f 100644 --- a/src/function/aggregate/sum.cpp +++ b/src/function/aggregate/sum.cpp @@ -27,21 +27,42 @@ import third_party; import logical_type; import internal_types; import data_type; +import logger; namespace infinity { template struct SumState { public: - inline void Initialize() { RecoverableError(Status::NotSupport("Not implemented")); } + inline void Initialize() { + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline void Update(const ValueType *__restrict, SizeT) { RecoverableError(Status::NotSupport("Not implemented")); } + inline void Update(const ValueType *__restrict, SizeT) { + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline void ConstantUpdate(const ValueType *__restrict, SizeT, SizeT) { RecoverableError(Status::NotSupport("Not implemented")); } + inline void ConstantUpdate(const ValueType *__restrict, SizeT, SizeT) { + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline ptr_t Finalize() { RecoverableError(Status::NotSupport("Not implemented")); } + inline ptr_t Finalize() { + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); + } - inline static SizeT Size(const DataType &) { RecoverableError(Status::NotSupport("Not implemented")); } + inline static SizeT Size(const DataType &) { + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); + } }; template <> diff --git a/src/function/aggregate_function.cpp b/src/function/aggregate_function.cpp index 561e174cbf..20e35ca3b4 100644 --- a/src/function/aggregate_function.cpp +++ b/src/function/aggregate_function.cpp @@ -21,12 +21,15 @@ module aggregate_function; import base_expression; import infinity_exception; import status; +import logger; namespace infinity { void AggregateFunction::CastArgumentTypes(BaseExpression &) { // Check and add a cast function to cast the input arguments expression type to target type - RecoverableError(Status::NotSupport("Not implemented: need to cast the argument types")); + Status status = Status::NotSupport("Not implemented: need to cast the argument types"); + LOG_ERROR(status.message()); + RecoverableError(status); } std::string AggregateFunction::ToString() const { diff --git a/src/function/aggregate_function_set.cpp b/src/function/aggregate_function_set.cpp index d4ce10a20c..8c4acde312 100644 --- a/src/function/aggregate_function_set.cpp +++ 
b/src/function/aggregate_function_set.cpp @@ -61,7 +61,9 @@ AggregateFunction AggregateFunctionSet::GetMostMatchFunction(const SharedPtr 1) { @@ -74,7 +76,9 @@ AggregateFunction AggregateFunctionSet::GetMostMatchFunction(const SharedPtr &argument) { if (argument.get() == nullptr) { - RecoverableError(Status::AggregateFunctionWithEmptyArgs()); + Status status = Status::AggregateFunctionWithEmptyArgs(); + LOG_ERROR(status.message()); + RecoverableError(status); } i64 cost = CastTable::instance().GetCastCost(argument->Type().type(), func.argument_type_.type()); diff --git a/src/function/cast/embedding_cast.cppm b/src/function/cast/embedding_cast.cppm index d11e7fef2b..21087e8c60 100644 --- a/src/function/cast/embedding_cast.cppm +++ b/src/function/cast/embedding_cast.cppm @@ -52,12 +52,16 @@ export inline BoundCastFunc BindEmbeddingCast(const DataType &source, const Data } if (source.type() != LogicalType::kEmbedding || target.type() != LogicalType::kEmbedding) { - RecoverableError(Status::NotSupportedTypeConversion(source.ToString(), target.ToString())); + Status status = Status::NotSupportedTypeConversion(source.ToString(), target.ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } auto source_info = static_cast(source.type_info().get()); auto target_info = static_cast(target.type_info().get()); if (source_info->Dimension() != target_info->Dimension()) { - RecoverableError(Status::DataTypeMismatch(source.ToString(), target.ToString())); + Status status = Status::DataTypeMismatch(source.ToString(), target.ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } switch (source_info->Type()) { case EmbeddingDataType::kElemInt8: { @@ -333,13 +337,17 @@ inline bool EmbeddingTryCastToVarlen::Run(const EmbeddingT &source, const auto source_embedding_dim = embedding_info->Dimension(); const auto target_embedding_dim = target_embedding_info->Dimension(); if (source_embedding_dim % target_embedding_dim != 0) { - RecoverableError(Status::DataTypeMismatch(source_type.ToString(), target_type.ToString())); + Status status = Status::DataTypeMismatch(source_type.ToString(), target_type.ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } const auto target_tensor_num = source_embedding_dim / target_embedding_dim; // estimate the size of target tensor if (const auto target_tensor_bytes = target_tensor_num * target_embedding_info->Size(); target_tensor_bytes > DEFAULT_FIXLEN_TENSOR_CHUNK_SIZE) { // TODO: better error message: tensor size overflow - RecoverableError(Status::DataTypeMismatch(source_type.ToString(), target_type.ToString())); + Status status = Status::DataTypeMismatch(source_type.ToString(), target_type.ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } target.embedding_num_ = target_tensor_num; if (target_vector_ptr->buffer_->buffer_type_ != VectorBufferType::kTensorHeap) { diff --git a/src/function/cast/float_cast.cppm b/src/function/cast/float_cast.cppm index 5b5f3cca2d..a20c0c7cbc 100644 --- a/src/function/cast/float_cast.cppm +++ b/src/function/cast/float_cast.cppm @@ -16,7 +16,6 @@ module; export module float_cast; - import stl; import bound_cast_func; import vector_buffer; @@ -28,6 +27,7 @@ import column_vector; import internal_types; import data_type; import status; +import logger; namespace infinity { @@ -69,7 +69,9 @@ inline BoundCastFunc BindFloatCast(const DataType &source, const DataType &targe } case LogicalType::kBoolean: case LogicalType::kEmbedding: { - 
RecoverableError(Status::NotSupport(fmt::format("Attempt to cast from {} to {}", source.ToString(), target.ToString()))); + Status status = Status::NotSupport(fmt::format("Attempt to cast from {} to {}", source.ToString(), target.ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } default: { UnrecoverableError(fmt::format("Attempt to cast from {} to {}", source.ToString(), target.ToString())); diff --git a/src/function/cast/uuid_cast.cppm b/src/function/cast/uuid_cast.cppm index 27409463a1..54bd5f8919 100644 --- a/src/function/cast/uuid_cast.cppm +++ b/src/function/cast/uuid_cast.cppm @@ -27,6 +27,7 @@ import infinity_exception; import third_party; import internal_types; import status; +import logger; namespace infinity { @@ -55,7 +56,9 @@ struct UuidTryCastToVarlen { template <> inline bool UuidTryCastToVarlen::Run(const UuidT &, VarcharT &, ColumnVector*) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); // target.length_ = UuidT::LENGTH; // std::memcpy(target.prefix, source.body, VarcharT::PREFIX_LENGTH); // Assert(vector_ptr->buffer_->buffer_type_ == VectorBufferType::kHeap, diff --git a/src/function/scalar/add.cpp b/src/function/scalar/add.cpp index 243e3fc309..5cd18ac3b7 100644 --- a/src/function/scalar/add.cpp +++ b/src/function/scalar/add.cpp @@ -22,7 +22,7 @@ import status; import infinity_exception; import scalar_function; import scalar_function_set; - +import logger; import third_party; import logical_type; import internal_types; @@ -33,7 +33,9 @@ namespace infinity { struct AddFunction { template static inline bool Run(TA, TB, TC &) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; @@ -76,7 +78,9 @@ inline bool AddFunction::Run(BigIntT left, BigIntT right, BigIntT &result) { // HugeIntT + HugeIntT = HugeIntT, and check overflow template <> inline bool AddFunction::Run(HugeIntT, HugeIntT, HugeIntT &) { - RecoverableError(Status::NotSupport("Not implemented: HugeIntT + HugeIntT = HugeIntT")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return false; } @@ -103,7 +107,9 @@ inline bool AddFunction::Run(DoubleT left, DoubleT right, DoubleT &result) { // Decimal + Decimal = Decimal template <> inline bool AddFunction::Run(DecimalT, DecimalT, DecimalT &) { - RecoverableError(Status::NotSupport("Not implemented: Decimal + Decimal")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return false; } @@ -158,7 +164,9 @@ inline bool AddFunction::Run(IntervalT left, TimestampT right, TimestampT &resul // Mixed Type + i64 template <> inline bool AddFunction::Run(MixedT, BigIntT, MixedT &) { - RecoverableError(Status::NotSupport("Not implemented: MixedT + BigIntT")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return false; } @@ -171,7 +179,9 @@ inline bool AddFunction::Run(BigIntT left, MixedT right, MixedT &result) { // Mixed Type + f64 template <> inline bool AddFunction::Run(MixedT, DoubleT, MixedT &) { - RecoverableError(Status::NotSupport("Not implemented: MixedT + DoubleT")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return false; 
} @@ -184,7 +194,9 @@ inline bool AddFunction::Run(DoubleT left, MixedT right, MixedT &result) { // Mixed Type + Mixed Type template <> inline bool AddFunction::Run(MixedT, MixedT, MixedT &) { - RecoverableError(Status::NotSupport("Not implemented: MixedT + MixedT")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return false; } diff --git a/src/function/scalar/divide.cpp b/src/function/scalar/divide.cpp index 669057e9f5..94210e0b7f 100644 --- a/src/function/scalar/divide.cpp +++ b/src/function/scalar/divide.cpp @@ -27,6 +27,7 @@ import third_party; import status; import internal_types; import data_type; +import logger; namespace infinity { @@ -62,13 +63,17 @@ inline bool DivFunction::Run(DoubleT left, DoubleT right, DoubleT &result) { template <> inline bool DivFunction::Run(HugeIntT, HugeIntT, HugeIntT &) { - RecoverableError(Status::NotSupport("Not implement huge int divide operator.")); + Status status = Status::NotSupport("Not implement huge int divide operator."); + LOG_ERROR(status.message()); + RecoverableError(status); return false; } template <> inline bool DivFunction::Run(HugeIntT, HugeIntT, DoubleT &) { - RecoverableError(Status::NotSupport("Not implement huge int divide operator.")); + Status status = Status::NotSupport("Not implement huge int divide operator."); + LOG_ERROR(status.message()); + RecoverableError(status); return false; } diff --git a/src/function/scalar/equals.cpp b/src/function/scalar/equals.cpp index 0cce40b3e0..d230049dda 100644 --- a/src/function/scalar/equals.cpp +++ b/src/function/scalar/equals.cpp @@ -30,6 +30,7 @@ import third_party; import logical_type; import internal_types; import data_type; +import logger; namespace infinity { @@ -72,7 +73,9 @@ struct ColumnValueReaderTypeEqualsFunction { template <> inline void EqualsFunction::Run(MixedT, BigIntT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed == bigint")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> @@ -82,7 +85,9 @@ inline void EqualsFunction::Run(BigIntT left, MixedT right, bool &result) { template <> inline void EqualsFunction::Run(MixedT, DoubleT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed == double")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> @@ -92,7 +97,9 @@ inline void EqualsFunction::Run(DoubleT left, MixedT right, bool &result) { template <> inline void EqualsFunction::Run(MixedT, VarcharT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed == varchar")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> diff --git a/src/function/scalar/extract.cpp b/src/function/scalar/extract.cpp index 6d4e446a65..95445ec2c2 100644 --- a/src/function/scalar/extract.cpp +++ b/src/function/scalar/extract.cpp @@ -27,13 +27,16 @@ import third_party; import logical_type; import internal_types; import data_type; +import logger; namespace infinity { struct ExtractYearFunction { template static inline void Run(TA, TB &) { - RecoverableError(Status::NotSupport("ExtractYear function isn't implemented")); + Status status = Status::NotSupport("ExtractYear function isn't implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; @@ -55,7 +58,9 @@ inline void ExtractYearFunction::Run(TimestampT left, BigIntT 
&result) { struct ExtractMonthFunction { template static inline void Run(TA, TB &) { - RecoverableError(Status::NotSupport("ExtractMonth function isn't implemented")); + Status status = Status::NotSupport("ExtractMonth function isn't implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; @@ -77,7 +82,9 @@ inline void ExtractMonthFunction::Run(TimestampT left, BigIntT &result) { struct ExtractDayFunction { template static inline void Run(TA, TB &) { - RecoverableError(Status::NotSupport("ExtractDay function isn't implemented")); + Status status = Status::NotSupport("ExtractDay function isn't implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; @@ -99,7 +106,9 @@ inline void ExtractDayFunction::Run(TimestampT left, BigIntT &result) { struct ExtractHourFunction { template static inline void Run(TA, TB &) { - RecoverableError(Status::NotSupport("ExtractHour function isn't implemented")); + Status status = Status::NotSupport("ExtractHour function isn't implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; @@ -121,7 +130,9 @@ inline void ExtractHourFunction::Run(TimeT left, BigIntT &result) { struct ExtractMinuteFunction { template static inline void Run(TA, TB &) { - RecoverableError(Status::NotSupport("ExtractMinute function isn't implemented")); + Status status = Status::NotSupport("ExtractMinute function isn't implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; @@ -143,7 +154,9 @@ inline void ExtractMinuteFunction::Run(TimeT left, BigIntT &result) { struct ExtractSecondFunction { template static inline void Run(TA, TB &) { - RecoverableError(Status::NotSupport("ExtractSecond function isn't implemented")); + Status status = Status::NotSupport("ExtractSecond function isn't implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; diff --git a/src/function/scalar/greater.cpp b/src/function/scalar/greater.cpp index 60e0f85d6c..5c2b54d9d2 100644 --- a/src/function/scalar/greater.cpp +++ b/src/function/scalar/greater.cpp @@ -29,6 +29,7 @@ import third_party; import logical_type; import internal_types; import data_type; +import logger; namespace infinity { @@ -55,7 +56,9 @@ struct ColumnValueReaderTypeGreaterFunction { template <> inline void GreaterFunction::Run(MixedT, BigIntT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed > bigint")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> @@ -65,7 +68,9 @@ inline void GreaterFunction::Run(BigIntT left, MixedT right, bool &result) { template <> inline void GreaterFunction::Run(MixedT, DoubleT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed > double")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> @@ -75,7 +80,9 @@ inline void GreaterFunction::Run(DoubleT left, MixedT right, bool &result) { template <> inline void GreaterFunction::Run(MixedT, VarcharT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed > varchar")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> diff --git a/src/function/scalar/inequals.cpp b/src/function/scalar/inequals.cpp index d8689cc49a..f5bef05a34 100644 --- a/src/function/scalar/inequals.cpp +++ b/src/function/scalar/inequals.cpp @@ -30,6 +30,7 @@ import scalar_function_set; import third_party; 
import internal_types; import data_type; +import logger; namespace infinity { @@ -74,7 +75,9 @@ struct ColumnValueReaderTypeInEqualsFunction { template <> inline void InEqualsFunction::Run(MixedT, BigIntT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed <> bigint")); + Status status = Status::NotSupport("Not implement: mixed <> bigint"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> @@ -84,7 +87,9 @@ inline void InEqualsFunction::Run(BigIntT left, MixedT right, bool &result) { template <> inline void InEqualsFunction::Run(MixedT, DoubleT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed <> double")); + Status status = Status::NotSupport("Not implement: mixed <> double"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> @@ -94,7 +99,9 @@ inline void InEqualsFunction::Run(DoubleT left, MixedT right, bool &result) { template <> inline void InEqualsFunction::Run(MixedT, VarcharT, bool &) { - RecoverableError(Status::NotSupport("Not implement: mixed <> varchar")); + Status status = Status::NotSupport("Not implement: mixed <> varchar"); + LOG_ERROR(status.message()); + RecoverableError(status); } template <> diff --git a/src/function/scalar_function.cpp b/src/function/scalar_function.cpp index 54e164c965..cedacdc2d8 100644 --- a/src/function/scalar_function.cpp +++ b/src/function/scalar_function.cpp @@ -26,6 +26,7 @@ import data_block; import base_expression; import column_vector; import third_party; +import logger; namespace infinity { @@ -41,7 +42,9 @@ void ScalarFunction::CastArgumentTypes(Vector &input_arguments) } for (SizeT idx = 0; idx < arguments_count; ++idx) { if (parameter_types_[idx] != input_arguments[idx].Type()) { - RecoverableError(Status::NotSupport("Not implemented: need to cast the argument types")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/function/scalar_function_set.cpp b/src/function/scalar_function_set.cpp index bf14c6f208..6754d10a85 100644 --- a/src/function/scalar_function_set.cpp +++ b/src/function/scalar_function_set.cpp @@ -65,7 +65,9 @@ ScalarFunction ScalarFunctionSet::GetMostMatchFunction(const Vector 1) { diff --git a/src/function/table/knn_scan_data.cpp b/src/function/table/knn_scan_data.cpp index 2de0efea2b..653b7ee4c9 100644 --- a/src/function/table/knn_scan_data.cpp +++ b/src/function/table/knn_scan_data.cpp @@ -41,6 +41,7 @@ import expression_state; import internal_types; import data_type; import status; +import logger; namespace infinity { @@ -56,7 +57,8 @@ KnnDistance1::KnnDistance1(KnnDistanceType dist_type) { break; } default: { - RecoverableError(Status::NotSupport(fmt::format("KnnDistanceType: {} is not support.", (i32)dist_type))); + Status status = Status::NotSupport(fmt::format("KnnDistanceType: {} is not support.", (i32)dist_type)); + RecoverableError(status); } } } @@ -71,8 +73,10 @@ KnnScanFunctionData::KnnScanFunctionData(KnnScanSharedData *shared_data, u32 cur break; } default: { - RecoverableError(Status::NotSupport( - fmt::format("EmbeddingDataType: {} is not support.", EmbeddingType::EmbeddingDataType2String(knn_scan_shared_data_->elem_type_)))); + Status status = Status::NotSupport( + fmt::format("EmbeddingDataType: {} is not support.", EmbeddingType::EmbeddingDataType2String(knn_scan_shared_data_->elem_type_))); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/function/table/merge_knn_data.cpp 
b/src/function/table/merge_knn_data.cpp index 375057821f..901ec1298c 100644 --- a/src/function/table/merge_knn_data.cpp +++ b/src/function/table/merge_knn_data.cpp @@ -22,6 +22,7 @@ import infinity_exception; import merge_knn; import knn_result_handler; import status; +import logger; module merge_knn_data; @@ -42,7 +43,9 @@ MergeKnnFunctionData::MergeKnnFunctionData(i64 query_count, break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/main/query_context.cpp b/src/main/query_context.cpp index 24326530e5..4595e0751c 100644 --- a/src/main/query_context.cpp +++ b/src/main/query_context.cpp @@ -131,6 +131,7 @@ QueryResult QueryContext::QueryStatement(const BaseStatement *statement) { auto status = logical_planner_->Build(statement, bind_context); // FIXME if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); } @@ -214,6 +215,7 @@ bool QueryContext::ExecuteBGStatement(BaseStatement *statement, BGQueryState &st SharedPtr bind_context; auto status = logical_planner_->Build(statement, bind_context); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); } current_max_node_id_ = bind_context->GetNewLogicalNodeId(); diff --git a/src/planner/bind_context.cpp b/src/planner/bind_context.cpp index a9cffc3525..14dc80ff79 100644 --- a/src/planner/bind_context.cpp +++ b/src/planner/bind_context.cpp @@ -28,6 +28,7 @@ import column_identifer; import block_index; import column_expr; +import logger; namespace infinity { @@ -263,7 +264,9 @@ SharedPtr BindContext::ResolveColumnId(const ColumnIdentifier // TODO: What will happen, when different tables have the same column name? Vector &binding_names = binding_names_by_column_[column_name_ref]; if (binding_names.size() > 1) { - RecoverableError(Status::SyntaxError(fmt::format("Ambiguous column table_name: {}", column_identifier.ToString()))); + Status status = Status::SyntaxError(fmt::format("Ambiguous column table_name: {}", column_identifier.ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } String &binding_name = binding_names[0]; @@ -271,7 +274,9 @@ SharedPtr BindContext::ResolveColumnId(const ColumnIdentifier auto binding_iter = binding_by_name_.find(binding_name); if (binding_iter == binding_by_name_.end()) { // Found the binding, but the binding don't have the column, which should happen. - RecoverableError(Status::SyntaxError(fmt::format("{} doesn't exist.", column_identifier.ToString()))); + Status status = Status::SyntaxError(fmt::format("{} doesn't exist.", column_identifier.ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } const auto &binding = binding_iter->second; @@ -287,7 +292,9 @@ SharedPtr BindContext::ResolveColumnId(const ColumnIdentifier bound_column_expr->source_position_.binding_name_ = binding->table_name_; } else { // Found the binding, but the binding don't have the column, which should happen. - RecoverableError(Status::SyntaxError(fmt::format("{} doesn't exist.", column_identifier.ToString()))); + Status status = Status::SyntaxError(fmt::format("{} doesn't exist.", column_identifier.ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } else { // Table isn't found in current bind context, maybe its parent has it. 
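The hunks above all apply the same mechanical rewrite: instead of constructing the `Status` inline inside `RecoverableError(...)`, the patch names the status, logs its message with `LOG_ERROR`, and only then raises the recoverable error. A minimal, self-contained sketch of that shape is below; `Status`, `LOG_ERROR`, and `RecoverableError` here are hypothetical stand-ins that only mirror the project's API, not the real module imports.

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical stand-ins modelled on the shape of the project's error API.
struct Status {
    std::string msg;
    static Status NotSupport(std::string m) { return Status{std::move(m)}; }
    const std::string &message() const { return msg; }
};

void LOG_ERROR(const std::string &m) { std::cerr << "[ERROR] " << m << '\n'; }

[[noreturn]] void RecoverableError(const Status &s) { throw std::runtime_error(s.message()); }

// Before: RecoverableError(Status::NotSupport("Not implemented"));
// After:  name the status, log it, then raise it.
void NotImplementedPath() {
    Status status = Status::NotSupport("Not implemented");
    LOG_ERROR(status.message());
    RecoverableError(status);
}

int main() {
    try {
        NotImplementedPath();
    } catch (const std::runtime_error &e) {
        std::cout << "recovered: " << e.what() << '\n';
    }
}
```

The point of naming the status first is simply that the message reaches the log even when the caller catches and recovers from the error further up the stack.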
@@ -310,7 +317,9 @@ SharedPtr BindContext::ResolveColumnId(const ColumnIdentifier bound_column_expr->source_position_ = SourcePosition(binding_context_id_, ExprSourceType::kBinding); bound_column_expr->source_position_.binding_name_ = binding->table_name_; } else { - RecoverableError(Status::SyntaxError(fmt::format("{} doesn't exist.", column_identifier.ToString()))); + Status status = Status::SyntaxError(fmt::format("{} doesn't exist.", column_identifier.ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } else { // Table isn't found in current bind context, maybe its parent has it. diff --git a/src/planner/binder/aggregate_binder.cpp b/src/planner/binder/aggregate_binder.cpp index 2ac36e3478..8bf8f184d8 100644 --- a/src/planner/binder/aggregate_binder.cpp +++ b/src/planner/binder/aggregate_binder.cpp @@ -25,6 +25,7 @@ import status; import infinity_exception; import parsed_expr; import knn_expr; +import logger; namespace infinity { @@ -34,7 +35,9 @@ SharedPtr AggregateBinder::BuildExpression(const ParsedExpr &exp } SharedPtr AggregateBinder::BuildKnnExpr(const KnnExpr &, BindContext *, i64 , bool ) { - RecoverableError(Status::SyntaxError("KNN expression isn't supported in in aggregate function")); + Status status = Status::SyntaxError("KNN expression isn't supported in in aggregate function"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/binder/bind_alias_proxy.cpp b/src/planner/binder/bind_alias_proxy.cpp index 2fc9897bf5..b630ba94c1 100644 --- a/src/planner/binder/bind_alias_proxy.cpp +++ b/src/planner/binder/bind_alias_proxy.cpp @@ -25,6 +25,7 @@ import status; import infinity_exception; import third_party; import parsed_expr; +import logger; namespace infinity { @@ -40,7 +41,9 @@ BindAliasProxy::BindAlias(ExpressionBinder &expression_binder, const ParsedExpr const ParsedExpr *select_expr = bind_context_ptr->select_expression_[alias_pair->second]; if (binding_alias_) { - RecoverableError(Status::SyntaxError(fmt::format("Trying to bind an alias table_name: {} in another alias", expr_name))); + Status status = Status::SyntaxError(fmt::format("Trying to bind an alias table_name: {} in another alias", expr_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } binding_alias_ = true; diff --git a/src/planner/binder/group_binder.cpp b/src/planner/binder/group_binder.cpp index b7ebc79455..2509ef4b62 100644 --- a/src/planner/binder/group_binder.cpp +++ b/src/planner/binder/group_binder.cpp @@ -74,7 +74,9 @@ SharedPtr GroupBinder::BuildExpression(const ParsedExpr &expr, B String expr_name = expr.GetName(); if (bind_context_ptr->group_index_by_name_.contains(expr_name)) { - RecoverableError(Status::SyntaxError(fmt::format("Duplicated group by expression: {}", expr_name))); + Status status = Status::SyntaxError(fmt::format("Duplicated group by expression: {}", expr_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } // Add the group by expression into bind context @@ -121,7 +123,9 @@ SharedPtr GroupBinder::BindConstantExpression(const ConstantExpr Vector &expr_array = bind_context_ptr->select_expression_; if (select_idx > (i64)expr_array.size() or select_idx < 1) { - RecoverableError(Status::SyntaxError(fmt::format("GROUP BY clause out of range - should be from 1 to {}", expr_array.size()))); + Status status = Status::SyntaxError(fmt::format("GROUP BY clause out of range - should be from 1 to {}", expr_array.size())); + LOG_ERROR(status.message()); + RecoverableError(status); } 
select_idx -= 1; @@ -147,24 +151,32 @@ SharedPtr GroupBinder::BuildColExpr(const ColumnExpr &expr, Bind SharedPtr GroupBinder::BuildFuncExpr(const FunctionExpr &expr, BindContext *bind_context_ptr, i64 depth, bool root) { SharedPtr function_set_ptr = FunctionSet::GetFunctionSet(query_context_->storage()->catalog(), expr); if (function_set_ptr->type_ != FunctionType::kScalar) { - RecoverableError(Status::SyntaxError("Only scalar function is supported in group by list.")); + Status status = Status::SyntaxError("Only scalar function is supported in group by list."); + LOG_ERROR(status.message()); + RecoverableError(status); } return ExpressionBinder::BuildFuncExpr(expr, bind_context_ptr, depth, root); } void GroupBinder::CheckFuncType(FunctionType func_type) const { if (func_type != FunctionType::kScalar) { - RecoverableError(Status::SyntaxError("Only scalar function is supported in group by list.")); + Status status = Status::SyntaxError("Only scalar function is supported in group by list."); + LOG_ERROR(status.message()); + RecoverableError(status); } } SharedPtr GroupBinder::BuildSubquery(const SubqueryExpr &, BindContext *, SubqueryType, i64, bool) { - RecoverableError(Status::SyntaxError("Subquery isn't supported in group by list.")); + Status status = Status::SyntaxError("Subquery isn't supported in group by list."); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } SharedPtr GroupBinder::BuildKnnExpr(const KnnExpr &, BindContext *, i64, bool) { - RecoverableError(Status::SyntaxError("KNN expression isn't supported in group by list")); + Status status = Status::SyntaxError("KNN expression isn't supported in group by list"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/binder/having_binder.cpp b/src/planner/binder/having_binder.cpp index a67cb828b9..12f8819561 100644 --- a/src/planner/binder/having_binder.cpp +++ b/src/planner/binder/having_binder.cpp @@ -27,6 +27,7 @@ import infinity_exception; import third_party; import function_set; import bind_alias_proxy; +import logger; namespace infinity { @@ -69,7 +70,9 @@ SharedPtr HavingBinder::BuildExpression(const ParsedExpr &expr, return result; } else { // in an aggregate function, which means aggregate function nested, which is error. 
- RecoverableError(Status::SyntaxError("Aggregate function is called in another aggregate function.")); + Status status = Status::SyntaxError("Aggregate function is called in another aggregate function."); + LOG_ERROR(status.message()); + RecoverableError(status); } } @@ -103,7 +106,9 @@ SharedPtr HavingBinder::BuildFuncExpr(const FunctionExpr &expr, SharedPtr function_set_ptr = FunctionSet::GetFunctionSet(query_context_->storage()->catalog(), expr); if (function_set_ptr->type_ == FunctionType::kAggregate) { if (this->binding_agg_func_) { - RecoverableError(Status::SyntaxError("Aggregate function is called in another aggregate function.")); + Status status = Status::SyntaxError("Aggregate function is called in another aggregate function."); + LOG_ERROR(status.message()); + RecoverableError(status); } else { this->binding_agg_func_ = true; } @@ -134,7 +139,9 @@ SharedPtr HavingBinder::BuildFuncExpr(const FunctionExpr &expr, } SharedPtr HavingBinder::BuildKnnExpr(const KnnExpr &, BindContext *, i64, bool) { - RecoverableError(Status::SyntaxError("KNN expression isn't supported in having clause")); + Status status = Status::SyntaxError("KNN expression isn't supported in having clause"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/binder/insert_binder.cpp b/src/planner/binder/insert_binder.cpp index 802dd84d70..9aa354eae7 100644 --- a/src/planner/binder/insert_binder.cpp +++ b/src/planner/binder/insert_binder.cpp @@ -27,6 +27,7 @@ import infinity_exception; import third_party; import function_set; import bind_alias_proxy; +import logger; namespace infinity { @@ -36,7 +37,9 @@ SharedPtr InsertBinder::BuildExpression(const ParsedExpr &expr, } SharedPtr InsertBinder::BuildKnnExpr(const KnnExpr &, BindContext *, i64 , bool ) { - RecoverableError(Status::SyntaxError("KNN expression isn't supported in insert clause")); + Status status = Status::SyntaxError("KNN expression isn't supported in insert clause"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/binder/join_binder.cpp b/src/planner/binder/join_binder.cpp index 2c208cbd18..ef3ffff8f7 100644 --- a/src/planner/binder/join_binder.cpp +++ b/src/planner/binder/join_binder.cpp @@ -24,6 +24,7 @@ import status; import infinity_exception; import parsed_expr; import knn_expr; +import logger; namespace infinity { @@ -31,7 +32,9 @@ SharedPtr JoinBinder::BuildExpression(const ParsedExpr &expr, Bi SharedPtr result; switch (expr.type_) { case ParsedExprType::kSubquery: { - RecoverableError(Status::SyntaxError("Subquery isn't allowed in JOIN condition.")); + Status status = Status::SyntaxError("Subquery isn't allowed in JOIN condition."); + LOG_ERROR(status.message()); + RecoverableError(status); } default: { result = ExpressionBinder::BuildExpression(expr, bind_context_ptr, depth, root); @@ -41,7 +44,10 @@ SharedPtr JoinBinder::BuildExpression(const ParsedExpr &expr, Bi } SharedPtr JoinBinder::BuildKnnExpr(const KnnExpr &, BindContext *, i64 , bool ) { - RecoverableError(Status::SyntaxError("KNN expression isn't supported in join clause.")); + + Status status = Status::SyntaxError("KNN expression isn't supported in join clause."); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/binder/limit_binder.cpp b/src/planner/binder/limit_binder.cpp index 219135b892..22b5ee211c 100644 --- a/src/planner/binder/limit_binder.cpp +++ b/src/planner/binder/limit_binder.cpp @@ -24,16 +24,21 @@ import 
function_set; import function; import status; import infinity_exception; +import logger; namespace infinity { SharedPtr LimitBinder::BuildExpression(const ParsedExpr &expr, BindContext *bind_context_ptr, i64 depth, bool root) { switch (expr.type_) { case ParsedExprType::kParameter: { - RecoverableError(Status::SyntaxError("Parameter expression isn't allowed in limit expression.")); + Status status = Status::SyntaxError("Parameter expression isn't allowed in limit expression."); + LOG_ERROR(status.message()); + RecoverableError(status); } case ParsedExprType::kSubquery: { - RecoverableError(Status::SyntaxError("Subquery expression isn't allowed in limit expression.")); + Status status = Status::SyntaxError("Subquery expression isn't allowed in limit expression."); + LOG_ERROR(status.message()); + RecoverableError(status); } default: { return ExpressionBinder::BuildExpression(expr, bind_context_ptr, depth, root); @@ -44,20 +49,26 @@ SharedPtr LimitBinder::BuildExpression(const ParsedExpr &expr, B SharedPtr LimitBinder::BuildFuncExpr(const FunctionExpr &expr, BindContext *bind_context_ptr, i64 depth, bool root) { SharedPtr function_set_ptr = FunctionSet::GetFunctionSet(query_context_->storage()->catalog(), expr); if (function_set_ptr->type_ != FunctionType::kScalar) { - RecoverableError(Status::SyntaxError("Only scalar function is supported in limit clause.")); + Status status = Status::SyntaxError("Only scalar function is supported in limit clause."); + LOG_ERROR(status.message()); + RecoverableError(status); } return ExpressionBinder::BuildFuncExpr(expr, bind_context_ptr, depth, root); } SharedPtr LimitBinder::BuildColExpr(const ColumnExpr &expr, BindContext *bind_context_ptr, i64 depth, bool root) { if (expr.star_) { - RecoverableError(Status::SyntaxError("Star expression isn't allowed in limit clause.")); + Status status = Status::SyntaxError("Star expression isn't allowed in limit clause."); + LOG_ERROR(status.message()); + RecoverableError(status); } return ExpressionBinder::BuildColExpr(expr, bind_context_ptr, depth, root); } SharedPtr LimitBinder::BuildKnnExpr(const KnnExpr &, BindContext *, i64, bool) { - RecoverableError(Status::SyntaxError("KNN expression isn't supported in limit clause")); + Status status = Status::SyntaxError("KNN expression isn't supported in limit clause"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/binder/order_binder.cpp b/src/planner/binder/order_binder.cpp index f03df83a33..d2f502cdae 100644 --- a/src/planner/binder/order_binder.cpp +++ b/src/planner/binder/order_binder.cpp @@ -28,6 +28,7 @@ import parsed_expr; import constant_expr; import knn_expr; import function_expr; +import logger; namespace infinity { @@ -35,7 +36,9 @@ void OrderBinder::PushExtraExprToSelectList(ParsedExpr *expr, const SharedPtrtype_ == ParsedExprType::kConstant) { ConstantExpr *order_by_index = (ConstantExpr *)expr; if (order_by_index->literal_type_ != LiteralType::kInteger) { - RecoverableError(Status::SyntaxError("Order by non-integer constant value.")); + Status status = Status::SyntaxError("Order by non-integer constant value."); + LOG_ERROR(status.message()); + RecoverableError(status); } // Order by 1, means order by 1st select list item. 
return; @@ -78,14 +81,18 @@ SharedPtr OrderBinder::BuildExpression(const ParsedExpr &expr, B if (const_expr.literal_type_ == LiteralType::kInteger) { column_id = const_expr.integer_value_; if (column_id <= 0 or column_id > (i64)bind_context_ptr->project_exprs_.size()) { - RecoverableError(Status::SyntaxError("Order by are going to use nonexistent column from select list.")); + Status status = Status::SyntaxError("Order by are going to use nonexistent column from select list."); + LOG_ERROR(status.message()); + RecoverableError(status); } --column_id; //TODO: If we do not have a projection before sort, expression will need to be evaluated twice // now return shared_ptr of the chosen project_expr return bind_context_ptr->project_exprs_[column_id]; } else { - RecoverableError(Status::SyntaxError("Order by non-integer constant value.")); + Status status = Status::SyntaxError("Order by non-integer constant value."); + LOG_ERROR(status.message()); + RecoverableError(status); } } else { String expr_name = expr.GetName(); diff --git a/src/planner/binder/where_binder.cpp b/src/planner/binder/where_binder.cpp index 37121d6c66..5adba7426e 100644 --- a/src/planner/binder/where_binder.cpp +++ b/src/planner/binder/where_binder.cpp @@ -28,6 +28,7 @@ import bind_alias_proxy; import parsed_expr; import column_expr; import status; +import logger; namespace infinity { @@ -55,7 +56,9 @@ SharedPtr WhereBinder::BuildColExpr(const ColumnExpr &expr, Bind } if (result.get() == nullptr) { - RecoverableError(Status::ColumnNotExist(expr.GetName())); + Status status = Status::ColumnNotExist(expr.GetName()); + LOG_ERROR(status.message()); + RecoverableError(status); } return result; } diff --git a/src/planner/bound_delete_statement.cpp b/src/planner/bound_delete_statement.cpp index c1afec9d11..d654123f06 100644 --- a/src/planner/bound_delete_statement.cpp +++ b/src/planner/bound_delete_statement.cpp @@ -40,6 +40,7 @@ import subquery_unnest; import conjunction_expression; import table_reference; +import logger; namespace infinity { @@ -112,7 +113,9 @@ void BoundDeleteStatement::BuildSubquery(SharedPtr &root, if (condition->type() == ExpressionType::kSubQuery) { if (building_subquery_) { // nested subquery - RecoverableError(Status::SyntaxError("Nested subquery detected")); + Status status = Status::SyntaxError("Nested subquery detected"); + LOG_ERROR(status.message()); + RecoverableError(status); } condition = UnnestSubquery(root, condition, query_context, bind_context); } diff --git a/src/planner/bound_update_statement.cpp b/src/planner/bound_update_statement.cpp index 5890975414..e91a130163 100644 --- a/src/planner/bound_update_statement.cpp +++ b/src/planner/bound_update_statement.cpp @@ -37,7 +37,7 @@ import logical_table_scan; import logical_filter; import logical_update; import subquery_unnest; - +import logger; import conjunction_expression; import table_reference; @@ -47,7 +47,9 @@ SharedPtr BoundUpdateStatement::BuildPlan(QueryContext *query_conte const SharedPtr &bind_context = this->bind_context_; SharedPtr current_node; if (where_conditions_.empty()) { - RecoverableError(Status::SyntaxError("where_conditions_ shall not be empty")); + Status status = Status::SyntaxError("where_conditions_ shall not be empty"); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr from = BuildFrom(table_ref_ptr_, query_context, bind_context); if (!where_conditions_.empty()) { @@ -115,7 +117,9 @@ void BoundUpdateStatement::BuildSubquery(SharedPtr &root, if (condition->type() == ExpressionType::kSubQuery) { if 
(building_subquery_) { // nested subquery - RecoverableError(Status::SyntaxError("Nested subquery detected")); + Status status = Status::SyntaxError("Nested subquery detected"); + LOG_ERROR(status.message()); + RecoverableError(status); } condition = UnnestSubquery(root, condition, query_context, bind_context); } diff --git a/src/planner/column_identifier.cpp b/src/planner/column_identifier.cpp index 8eaf09461e..349af9e019 100644 --- a/src/planner/column_identifier.cpp +++ b/src/planner/column_identifier.cpp @@ -14,6 +14,8 @@ module; +module column_identifer; + import stl; import column_expr; @@ -21,14 +23,15 @@ import infinity_exception; import status; import third_party; import query_context; - -module column_identifer; +import logger; namespace infinity { ColumnIdentifier ColumnIdentifier::MakeColumnIdentifier(QueryContext *, const ColumnExpr &expr) { if (expr.star_ && expr.names_.empty()) { - RecoverableError(Status::SyntaxError("Star expression should be unfolded before.")); + Status status = Status::SyntaxError("Star expression should be unfolded before."); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr db_name_ptr = nullptr; @@ -38,7 +41,9 @@ ColumnIdentifier ColumnIdentifier::MakeColumnIdentifier(QueryContext *, const Co i64 name_count = expr.names_.size(); if (name_count > 4 || name_count <= 0) { - RecoverableError(Status::SyntaxError("Star expression should be unfolded before.")); + Status status = Status::SyntaxError("Star expression should be unfolded before."); + LOG_ERROR(status.message()); + RecoverableError(status); } --name_count; column_name_ptr = MakeShared(expr.names_[name_count]); diff --git a/src/planner/explain_ast.cpp b/src/planner/explain_ast.cpp index cbe70074be..5e27853110 100644 --- a/src/planner/explain_ast.cpp +++ b/src/planner/explain_ast.cpp @@ -47,6 +47,7 @@ import join_reference; import subquery_reference; import table_reference; import statement_common; +import logger; namespace infinity { @@ -389,7 +390,9 @@ void ExplainAST::BuildBaseTableRef(const BaseTableReference *base_table_ref, Sha if (cross_product_ref->alias_ != nullptr) { from_str += " AS " + String(cross_product_ref->alias_->alias_); if (cross_product_ref->alias_->column_alias_array_ != nullptr) { - RecoverableError(Status::SyntaxError("Table reference has columns alias")); + Status status = Status::SyntaxError("Table reference has columns alias"); + LOG_ERROR(status.message()); + RecoverableError(status); } } else { from_str += ": "; @@ -409,7 +412,9 @@ void ExplainAST::BuildBaseTableRef(const BaseTableReference *base_table_ref, Sha if (join_reference->alias_ != nullptr) { from_str += " AS " + String(join_reference->alias_->alias_); if (join_reference->alias_->column_alias_array_ != nullptr) { - RecoverableError(Status::SyntaxError("Table reference has columns alias")); + Status status = Status::SyntaxError("Table reference has columns alias"); + LOG_ERROR(status.message()); + RecoverableError(status); } } result->emplace_back(MakeShared(from_str)); @@ -429,7 +434,9 @@ void ExplainAST::BuildBaseTableRef(const BaseTableReference *base_table_ref, Sha if (table_reference->alias_ != nullptr) { from_str += " AS " + String(table_reference->alias_->alias_); if (table_reference->alias_->column_alias_array_ != nullptr) { - RecoverableError(Status::SyntaxError("Table reference has columns alias")); + Status status = Status::SyntaxError("Table reference has columns alias"); + LOG_ERROR(status.message()); + RecoverableError(status); } } result->emplace_back(MakeShared(from_str)); 
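A second variant recurs in this patch (for example in query_context.cpp and query_binder.cpp): the `Status` is not constructed on the spot but returned by an earlier call, and the patch adds a `LOG_ERROR` before re-raising it. A hedged sketch of that flow, where `Build()` is an invented placeholder for calls like `logical_planner_->Build()`:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical stand-ins; Build() is a placeholder for a call that already
// returns a Status in the real code.
struct Status {
    bool ok_ = true;
    std::string msg;
    static Status OK() { return Status{}; }
    static Status SyntaxError(std::string m) { return Status{false, std::move(m)}; }
    bool ok() const { return ok_; }
    const std::string &message() const { return msg; }
};

void LOG_ERROR(const std::string &m) { std::cerr << "[ERROR] " << m << '\n'; }
[[noreturn]] void RecoverableError(const Status &s) { throw std::runtime_error(s.message()); }

Status Build(bool valid) { return valid ? Status::OK() : Status::SyntaxError("plan build failed"); }

void QueryStatementLike(bool valid) {
    Status status = Build(valid);
    if (!status.ok()) {
        LOG_ERROR(status.message());   // the one-line addition this patch makes
        RecoverableError(status);
    }
}

int main() {
    QueryStatementLike(true);          // ok path: nothing logged
    try {
        QueryStatementLike(false);     // error path: logged, then raised
    } catch (const std::runtime_error &e) {
        std::cout << "recovered: " << e.what() << '\n';
    }
}
```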
@@ -441,7 +448,9 @@ void ExplainAST::BuildBaseTableRef(const BaseTableReference *base_table_ref, Sha if (subquery_reference->alias_ != nullptr) { from_str += " AS " + String(subquery_reference->alias_->alias_); if (subquery_reference->alias_->column_alias_array_ != nullptr) { - RecoverableError(Status::SyntaxError("Table reference has columns alias")); + Status status = Status::SyntaxError("Table reference has columns alias"); + LOG_ERROR(status.message()); + RecoverableError(status); } } else { from_str += ": "; diff --git a/src/planner/expression_binder.cpp b/src/planner/expression_binder.cpp index 37338347d7..ccfa005fe2 100644 --- a/src/planner/expression_binder.cpp +++ b/src/planner/expression_binder.cpp @@ -85,7 +85,9 @@ SharedPtr ExpressionBinder::Bind(const ParsedExpr &expr, BindCon SharedPtr result = BuildExpression(expr, bind_context_ptr, depth, root); if (result.get() == nullptr) { if (result.get() == nullptr) { - RecoverableError(Status::SyntaxError(fmt::format("Fail to bind the expression: {}", expr.GetName()))); + Status status = Status::SyntaxError(fmt::format("Fail to bind the expression: {}", expr.GetName())); + LOG_ERROR(status.message()); + RecoverableError(status); } // Maybe the correlated expression, trying to bind it in the parent context. // result = Bind(expr, bind_context_ptr->parent_, depth + 1, root); @@ -451,18 +453,24 @@ SharedPtr ExpressionBinder::BuildKnnExpr(const KnnExpr &parsed_k } if (parsed_knn_expr.topn_ <= 0) { String topn = std::to_string(parsed_knn_expr.topn_); - RecoverableError(Status::InvalidParameterValue("topn", topn, "topn should be greater than 0")); + Status status = Status::InvalidParameterValue("topn", topn, "topn should be greater than 0"); + LOG_ERROR(status.message()); + RecoverableError(status); } auto expr_ptr = BuildColExpr((ColumnExpr &)*parsed_knn_expr.column_expr_, bind_context_ptr, depth, false); TypeInfo *type_info = expr_ptr->Type().type_info().get(); if (type_info == nullptr or type_info->type() != TypeInfoType::kEmbedding) { - RecoverableError(Status::SyntaxError("Expect the column search is an embedding column")); + Status status = Status::SyntaxError("Expect the column search is an embedding column"); + LOG_ERROR(status.message()); + RecoverableError(status); } else { EmbeddingInfo *embedding_info = (EmbeddingInfo *)type_info; if ((i64)embedding_info->Dimension() != parsed_knn_expr.dimension_) { - RecoverableError(Status::SyntaxError(fmt::format("Query embedding with dimension: {} which doesn't not matched with {}", - parsed_knn_expr.dimension_, - embedding_info->Dimension()))); + Status status = Status::SyntaxError(fmt::format("Query embedding with dimension: {} which doesn't not matched with {}", + parsed_knn_expr.dimension_, + embedding_info->Dimension())); + LOG_ERROR(status.message()); + RecoverableError(status); } } @@ -490,32 +498,41 @@ SharedPtr ExpressionBinder::BuildMatchTensorExpr(const MatchTens } // TODO: now only support MaxSim search method if (expr.search_method_ != MatchTensorMethod::kMaxSim) { - RecoverableError(Status::NotSupport(fmt::format("Unsupported search method: {}, now only support MaxSim search method", - MatchTensorExpr::MethodToString(expr.search_method_)))); + Status status = Status::NotSupport(fmt::format("Unsupported search method: {}, now only support MaxSim search method", + MatchTensorExpr::MethodToString(expr.search_method_))); + LOG_ERROR(status.message()); + RecoverableError(status); } auto expr_ptr = BuildColExpr((ColumnExpr &)*expr.column_expr_, bind_context_ptr, depth, false); auto 
column_data_type = expr_ptr->Type(); TypeInfo *type_info = column_data_type.type_info().get(); u32 tensor_column_basic_embedding_dim = 0; if (column_data_type.type() != LogicalType::kTensor or type_info == nullptr or type_info->type() != TypeInfoType::kEmbedding) { - RecoverableError(Status::SyntaxError("Expect the column search is an tensor column")); + Status status = Status::SyntaxError("Expect the column search is an tensor column"); + LOG_ERROR(status.message()); + RecoverableError(status); } else { EmbeddingInfo *embedding_info = (EmbeddingInfo *)type_info; tensor_column_basic_embedding_dim = embedding_info->Dimension(); if (expr.dimension_ == 0 or expr.dimension_ % tensor_column_basic_embedding_dim != 0) { - RecoverableError(Status::SyntaxError(fmt::format("Query embedding with dimension: {} which doesn't match with tensor basic dimension {}", - expr.dimension_, - embedding_info->Dimension()))); + Status status = Status::SyntaxError(fmt::format("Query embedding with dimension: {} which doesn't match with tensor basic dimension {}", + expr.dimension_, embedding_info->Dimension())); + LOG_ERROR(status.message()); + RecoverableError(status); } // TODO: now only support float query tensor // TODO: now only support search on tensor column with float data type if (expr.embedding_data_type_ != EmbeddingDataType::kElemFloat) { - RecoverableError(Status::NotSupport(fmt::format("Unsupported query tensor data type: {}, now only support float input", - EmbeddingT::EmbeddingDataType2String(expr.embedding_data_type_)))); + Status status = Status::NotSupport(fmt::format("Unsupported query tensor data type: {}, now only support float input", + EmbeddingT::EmbeddingDataType2String(expr.embedding_data_type_))); + LOG_ERROR(status.message()); + RecoverableError(status); } if (embedding_info->Type() != EmbeddingDataType::kElemFloat) { - RecoverableError(Status::NotSupport( - fmt::format("Unsupported tensor column type: {}, now only support search on float tensor column", embedding_info->ToString()))); + Status status = Status::NotSupport( + fmt::format("Unsupported tensor column type: {}, now only support search on float tensor column", embedding_info->ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } arguments.emplace_back(std::move(expr_ptr)); @@ -598,13 +615,17 @@ Optional> ExpressionBinder::TryBuildSpecialFuncExpr(co switch (special_function_ptr->special_type()) { case SpecialType::kDistance: { if (!bind_context_ptr->allow_distance) { - RecoverableError(Status::SyntaxError("DISTANCE() needs to be allowed only when there is only MATCH VECTOR")); + Status status = Status::SyntaxError("DISTANCE() needs to be allowed only when there is only MATCH VECTOR"); + LOG_ERROR(status.message()); + RecoverableError(status); } break; } case SpecialType::kScore: { if (!bind_context_ptr->allow_score) { - RecoverableError(Status::SyntaxError("SCORE() requires Fusion or MATCH TEXT or MATCH TENSOR")); + Status status = Status::SyntaxError("SCORE() requires Fusion or MATCH TEXT or MATCH TENSOR"); + LOG_ERROR(status.message()); + RecoverableError(status); } break; } diff --git a/src/planner/logical_planner.cpp b/src/planner/logical_planner.cpp index 14efdee449..3fe70e3870 100644 --- a/src/planner/logical_planner.cpp +++ b/src/planner/logical_planner.cpp @@ -87,6 +87,7 @@ import drop_schema_info; import drop_table_info; import drop_view_info; import column_def; +import logger; namespace { @@ -221,7 +222,9 @@ Status LogicalPlanner::BuildInsertValue(const InsertStatement *statement, Shared } if 
(table_entry->EntryType() == TableEntryType::kCollectionEntry) { - RecoverableError(Status::NotSupport("Currently, collection isn't supported.")); + Status status = Status::NotSupport("Currently, collection isn't supported."); + LOG_ERROR(status.message()); + RecoverableError(status); } // Create value list @@ -250,10 +253,12 @@ Status LogicalPlanner::BuildInsertValue(const InsertStatement *statement, Shared if (statement->columns_ != nullptr) { SizeT column_count = statement->columns_->size(); if (column_count != value_list.size()) { - RecoverableError(Status::SyntaxError(fmt::format("INSERT: Target column count ({}) and " - "input value count mismatch ({})", - column_count, - value_list.size()))); + Status status = Status::SyntaxError(fmt::format("INSERT: Target column count ({}) and " + "input value count mismatch ({})", + column_count, + value_list.size())); + LOG_ERROR(status.message()); + RecoverableError(status); } SizeT table_column_count = table_entry->ColumnCount(); @@ -296,10 +301,12 @@ Status LogicalPlanner::BuildInsertValue(const InsertStatement *statement, Shared bind_context_ptr->expression_binder_->BuildExpression(*column_def->default_expr_.get(), bind_context_ptr.get(), 0, true); value_list.emplace_back(value_expr); } else { - RecoverableError(Status::SyntaxError(fmt::format("INSERT: Table column count ({}) and " - "input value count mismatch ({})", - table_column_count, - column_count))); + Status status = Status::SyntaxError(fmt::format("INSERT: Table column count ({}) and " + "input value count mismatch ({})", + table_column_count, + column_count)); + LOG_ERROR(status.message()); + RecoverableError(status); } const SharedPtr &table_column_type = column_def->column_type_; @@ -332,10 +339,12 @@ Status LogicalPlanner::BuildInsertValue(const InsertStatement *statement, Shared } if (value_list.size() != table_column_count) { - RecoverableError(Status::SyntaxError(fmt::format("INSERT: Table column count ({}) and " - "input value count mismatch ({})", - table_column_count, - value_list.size()))); + Status status = Status::SyntaxError(fmt::format("INSERT: Table column count ({}) and " + "input value count mismatch ({})", + table_column_count, + value_list.size())); + LOG_ERROR(status.message()); + RecoverableError(status); } // Create value list with table column size and null value @@ -373,7 +382,9 @@ Status LogicalPlanner::BuildInsertValue(const InsertStatement *statement, Shared } Status LogicalPlanner::BuildInsertSelect(const InsertStatement *, SharedPtr &) { - RecoverableError(Status::NotSupport("Not supported")); + Status status = Status::NotSupport("Not supported"); + LOG_ERROR(status.message()); + RecoverableError(status); return Status::OK(); } @@ -629,7 +640,9 @@ Status LogicalPlanner::BuildCreateIndex(const CreateStatement *statement, Shared auto schema_name = MakeShared(create_index_info->schema_name_); auto table_name = MakeShared(create_index_info->table_name_); if (table_name->empty()) { - RecoverableError(Status::InvalidIndexName("No index name.")); + Status status = Status::InvalidIndexName("No index name."); + LOG_ERROR(status.message()); + RecoverableError(status); } // if (create_index_info->column_names_->size() != 1) { // UnrecoverableError("Creating index only support single column now."); @@ -640,8 +653,10 @@ Status LogicalPlanner::BuildCreateIndex(const CreateStatement *statement, Shared auto base_table_ref = query_binder_ptr->GetTableRef(*schema_name, *table_name); if (create_index_info->index_info_list_->size() != 1) { - 
RecoverableError(Status::InvalidIndexDefinition( - fmt::format("Index {} consists of {} IndexInfo however 1 is expected", *index_name, create_index_info->index_info_list_->size()))); + Status status = Status::InvalidIndexDefinition( + fmt::format("Index {} consists of {} IndexInfo however 1 is expected", *index_name, create_index_info->index_info_list_->size())); + LOG_ERROR(status.message()); + RecoverableError(status); } IndexInfo *index_info = create_index_info->index_info_list_->at(0); SharedPtr base_index_ptr{nullptr}; @@ -776,7 +791,9 @@ Status LogicalPlanner::BuildDropSchema(const DropStatement *statement, SharedPtr } if (IsEqual(drop_schema_info->schema_name_, query_context_ptr_->schema_name())) { - RecoverableError(Status::DroppingUsingDb(drop_schema_info->schema_name_)); + Status status = Status::DroppingUsingDb(drop_schema_info->schema_name_); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr schema_name_ptr = MakeShared(drop_schema_info->schema_name_); @@ -825,12 +842,16 @@ Status LogicalPlanner::BuildDropView(const DropStatement *statement, SharedPtr &) { - RecoverableError(Status::NotSupport("Prepare statement isn't supported.")); + Status status = Status::NotSupport("Prepare statement isn't supported."); + LOG_ERROR(status.message()); + RecoverableError(status); return Status::OK(); } Status LogicalPlanner::BuildExecute(const ExecuteStatement *, SharedPtr &) { - RecoverableError(Status::NotSupport("Execute statement isn't supported.")); + Status status = Status::NotSupport("Execute statement isn't supported."); + LOG_ERROR(status.message()); + RecoverableError(status); return Status::OK(); } @@ -859,7 +880,9 @@ Status LogicalPlanner::BuildExport(const CopyStatement *statement, SharedPtrfile_path_)) { - RecoverableError(Status::FileNotFound(statement->file_path_)); + Status status = Status::FileNotFound(statement->file_path_); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr logical_export = MakeShared(bind_context_ptr->GetNewLogicalNodeId(), @@ -887,7 +910,9 @@ Status LogicalPlanner::BuildImport(const CopyStatement *statement, SharedPtrfile_path_)) { - RecoverableError(Status::FileNotFound(statement->file_path_)); + Status status = Status::FileNotFound(statement->file_path_); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr logical_import = MakeShared(bind_context_ptr->GetNewLogicalNodeId(), @@ -902,7 +927,9 @@ Status LogicalPlanner::BuildImport(const CopyStatement *statement, SharedPtr &) { - RecoverableError(Status::NotSupport("Alter statement isn't supported.")); + Status status = Status::NotSupport("Alter statement isn't supported."); + LOG_ERROR(status.message()); + RecoverableError(status); return Status::OK(); } diff --git a/src/planner/node/logical_match_tensor_scan.cpp b/src/planner/node/logical_match_tensor_scan.cpp index e4e23ab285..f6540795bf 100644 --- a/src/planner/node/logical_match_tensor_scan.cpp +++ b/src/planner/node/logical_match_tensor_scan.cpp @@ -31,6 +31,7 @@ import explain_logical_plan; import search_options; import infinity_exception; import status; +import logger; namespace infinity { @@ -86,7 +87,9 @@ void LogicalMatchTensorScan::InitExtraOptions() { if (const auto it = options.options_.find("topn"); it != options.options_.end()) { const int top_n_option = std::stoi(it->second); if (top_n_option <= 0) { - RecoverableError(Status::SyntaxError("topn must be a positive integer")); + Status status = Status::SyntaxError("topn must be a positive integer"); + LOG_ERROR(status.message()); + 
RecoverableError(status); } topn_ = top_n_option; } else { diff --git a/src/planner/query_binder.cpp b/src/planner/query_binder.cpp index 8f9c634372..7d64d9c733 100644 --- a/src/planner/query_binder.cpp +++ b/src/planner/query_binder.cpp @@ -75,6 +75,7 @@ import base_entry; import view_entry; import table_entry; import txn; +import logger; namespace infinity { @@ -103,11 +104,15 @@ UniquePtr QueryBinder::BindSelect(const SelectStatement &s WithExpr *with_expr = (*statement.with_exprs_)[i]; String name = with_expr->alias_; if (bind_context_ptr_->CTE_map_.contains(name)) { - RecoverableError(Status::SyntaxError("WITH query table_name: " + name + " occurs more than once.")); + Status status = Status::SyntaxError(fmt::format("WITH query table_name: {} occurs more than once.", name)); + LOG_ERROR(status.message()); + RecoverableError(status); } if (with_expr->select_->type_ != StatementType::kSelect) { - RecoverableError(Status::SyntaxError("Non-select statement in WITH clause.")); + Status status = Status::SyntaxError("Non-select statement in WITH clause."); + LOG_ERROR(status.message()); + RecoverableError(status); } masked_name_set.insert(name); @@ -142,8 +147,10 @@ UniquePtr QueryBinder::BindSelect(const SelectStatement &s if (!select_expr->alias_.empty()) { if (bind_context_ptr_->select_alias2index_.contains(select_expr->alias_)) { i64 bound_column_index = bind_context_ptr_->select_alias2index_[select_expr->alias_]; - RecoverableError(Status::SyntaxError(bind_context_ptr_->select_expression_[bound_column_index]->ToString() + " and " + - select_expr->ToString() + " have same alias: " + select_expr->alias_)); + Status status = Status::SyntaxError(bind_context_ptr_->select_expression_[bound_column_index]->ToString() + " and " + + select_expr->ToString() + " have same alias: " + select_expr->alias_); + LOG_ERROR(status.message()); + RecoverableError(status); } else { // Store the alias to column index mapping, the mapping will be used in // - where clause binding @@ -154,7 +161,9 @@ UniquePtr QueryBinder::BindSelect(const SelectStatement &s } else { // KNN expression without alias, isn't allowed if (select_expr->type_ == ParsedExprType::kKnn) { - RecoverableError(Status::SyntaxError("KNN expression in select list must have an alias.")); + Status status = Status::SyntaxError("KNN expression in select list must have an alias."); + LOG_ERROR(status.message()); + RecoverableError(status); } String select_expr_name = select_expr->ToString(); @@ -189,7 +198,9 @@ UniquePtr QueryBinder::BindSelect(const SelectStatement &s auto where_binder = MakeShared(query_context_ptr_, bind_alias_proxy); SharedPtr where_expr = where_binder->Bind(*statement.where_expr_, this->bind_context_ptr_.get(), 0, true); if (where_expr->Type().type() != LogicalType::kBoolean) { - RecoverableError(Status::InvalidFilterExpression(where_expr->Type().ToString())); + Status status = Status::InvalidFilterExpression(where_expr->Type().ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } bound_select_statement->where_conditions_ = SplitExpressionByDelimiter(where_expr, ConjunctionType::kAnd); } @@ -321,7 +332,9 @@ SharedPtr QueryBinder::BuildTable(QueryContext *query_context, const T return view_ref; } - RecoverableError(Status::SyntaxError("Table or View: " + from_table->table_name_ + " is not found in catalog.")); + Status status = Status::SyntaxError("Table or View: " + from_table->table_name_ + " is not found in catalog."); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } @@ -370,7 
+383,9 @@ SharedPtr QueryBinder::BuildCTE(QueryContext *, const String &name) { // Table is from CTE if (this->bind_context_ptr_->IsCTEBound(cte)) { // The CTE is bound before. - RecoverableError(Status::SyntaxError("CTE can only be bound only once")); + Status status = Status::SyntaxError("CTE can only be bound only once"); + LOG_ERROR(status.message()); + RecoverableError(status); } // Build CTE(subquery) @@ -406,11 +421,14 @@ SharedPtr QueryBinder::BuildBaseTable(QueryContext *query_context, auto [table_entry, status] = query_context->GetTxn()->GetTableByName(schema_name, from_table->table_name_); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); } if (table_entry->EntryType() == TableEntryType::kCollectionEntry) { - RecoverableError(Status::SyntaxError("Currently, collection isn't supported.")); + Status status = Status::SyntaxError("Currently, collection isn't supported."); + LOG_ERROR(status.message()); + RecoverableError(status); } String alias = from_table->GetTableName(); @@ -446,6 +464,7 @@ SharedPtr QueryBinder::BuildView(QueryContext *query_context, const Ta BaseEntry *base_view_entry{nullptr}; Status status = query_context->GetTxn()->GetViewByName(from_table->db_name_, from_table->table_name_, base_view_entry); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); } @@ -453,7 +472,9 @@ SharedPtr QueryBinder::BuildView(QueryContext *query_context, const Ta // Build view scan operator if (this->bind_context_ptr_->IsViewBound(from_table->table_name_)) { - RecoverableError(Status::SyntaxError("View: " + from_table->table_name_ + " is bound before!")); + Status status = Status::SyntaxError(fmt::format("View: {} is bound before!", from_table->table_name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } this->bind_context_ptr_->BoundView(from_table->table_name_); @@ -535,7 +556,9 @@ SharedPtr QueryBinder::BuildCrossProduct(QueryContext *query_context, right_bind_context = bind_contexts[bind_context_idx]; if (bind_context_idx != 0) { - RecoverableError(Status::SyntaxError("Mismatched bind context count.")); + Status status = Status::SyntaxError("Mismatched bind context count."); + LOG_ERROR(status.message()); + RecoverableError(status); } right_query_binder = MakeUnique(query_context, right_bind_context); right_table_ref = right_query_binder->BuildFromClause(query_context, tables[table_count - 1]); @@ -621,13 +644,17 @@ SharedPtr QueryBinder::BuildJoin(QueryContext *query_context, const Jo for (auto &column_name : using_column_names) { // Create left bound column expression if (!result->left_bind_context_->binding_names_by_column_.contains(column_name)) { - RecoverableError(Status::SyntaxError("Column: " + column_name + " doesn't exist in left table")); + Status status = Status::SyntaxError("Column: " + column_name + " doesn't exist in left table"); + LOG_ERROR(status.message()); + RecoverableError(status); } auto &left_column_binding_names = result->left_bind_context_->binding_names_by_column_[column_name]; if (left_column_binding_names.size() != 1) { - RecoverableError(Status::SyntaxError("Ambiguous column table_name: " + column_name + " in left table")); + Status status = Status::SyntaxError(fmt::format("Ambiguous column table_name: {} in left table", column_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } auto &left_binding_name = left_column_binding_names[0]; @@ -643,19 +670,25 @@ SharedPtr QueryBinder::BuildJoin(QueryContext *query_context, const Jo 0); if 
(!result->right_bind_context_->binding_names_by_column_.contains(column_name)) { - RecoverableError(Status::SyntaxError("Column: " + column_name + " doesn't exist in right table")); + Status status = Status::SyntaxError(fmt::format("Column: {} doesn't exist in right table", column_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } auto &right_column_binding_names = result->right_bind_context_->binding_names_by_column_[column_name]; if (right_column_binding_names.size() != 1) { - RecoverableError(Status::SyntaxError("Ambiguous column table_name: " + column_name + " in right table")); + Status status = Status::SyntaxError(fmt::format("Ambiguous column table_name: {} in right table", column_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } auto &right_binding_name = right_column_binding_names[0]; auto &right_binding_ptr = result->right_bind_context_->binding_by_name_[right_binding_name]; if (right_binding_ptr.get() == nullptr) { - RecoverableError(Status::SyntaxError("Column: " + column_name + " doesn't exist in right table")); + Status status = Status::SyntaxError(fmt::format("Column: {} doesn't exist in right table", column_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } auto right_column_index = right_binding_ptr->name2index_[column_name]; auto right_column_type = right_binding_ptr->column_types_->at(right_column_index); @@ -696,14 +729,18 @@ void QueryBinder::UnfoldStarExpression(QueryContext *, const Vectornames_.empty()) { // select * from t1; if (this->bind_context_ptr_->table_names_.empty()) { - RecoverableError(Status::SyntaxError("No table was bound.")); + Status status = Status::SyntaxError("No table was bound."); + LOG_ERROR(status.message()); + RecoverableError(status); } // select * from t1, t2; means select t1.*, t2.* from t1, t2; for (const auto &table_name : this->bind_context_ptr_->table_names_) { SharedPtr binding = this->bind_context_ptr_->binding_by_name_[table_name]; if (binding.get() == nullptr) { - RecoverableError(Status::SyntaxError("Table: " + table_name + " wasn't bound before.")); + Status status = Status::SyntaxError(fmt::format("Table: {} wasn't bound before.", table_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } GenerateColumns(binding, table_name, output_select_list); } @@ -711,7 +748,9 @@ void QueryBinder::UnfoldStarExpression(QueryContext *, const Vectornames_[0]; SharedPtr binding = this->bind_context_ptr_->binding_by_name_[table_name]; if (binding.get() == nullptr) { - RecoverableError(Status::SyntaxError("Table: " + table_name + " wasn't bound before.")); + Status status = Status::SyntaxError(fmt::format("Table: {} wasn't bound before.", table_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } GenerateColumns(binding, table_name, output_select_list); } @@ -766,7 +805,9 @@ void QueryBinder::GenerateColumns(const SharedPtr &binding, const Strin break; } case BindingType::kView: { - RecoverableError(Status::SyntaxError("Not implemented")); + Status status = Status::SyntaxError("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } } @@ -863,8 +904,9 @@ void QueryBinder::BuildSelectList(QueryContext *, UniquePtrhaving_expressions_.empty() || !bound_select_statement->group_by_expressions_.empty() || !bind_context_ptr_->aggregate_exprs_.empty()) { if (!project_binder->BoundColumn().empty()) { - RecoverableError(Status::SyntaxError("Column: " + project_binder->BoundColumn() + - " must appear in the GROUP BY clause or be used in an 
aggregate function")); + Status status = Status::SyntaxError(fmt::format("Column: {} must appear in the GROUP BY clause or be used in an aggregate function", project_binder->BoundColumn())); + LOG_ERROR(status.message()); + RecoverableError(status); } } } @@ -917,14 +959,18 @@ void QueryBinder::CheckKnnAndOrderBy(KnnDistanceType distance_type, OrderType or case KnnDistanceType::kL2: case KnnDistanceType::kHamming: { if (order_type != OrderType::kAsc) { - RecoverableError(Status::SyntaxError("L2 and Hamming distance need ascending order")); + Status status = Status::SyntaxError("L2 and Hamming distance need ascending order"); + LOG_ERROR(status.message()); + RecoverableError(status); } break; } case KnnDistanceType::kInnerProduct: case KnnDistanceType::kCosine: { if (order_type != OrderType::kDesc) { - RecoverableError(Status::SyntaxError("Inner product and cosine distance need descending order")); + Status status = Status::SyntaxError("Inner product and cosine distance need descending order"); + LOG_ERROR(status.message()); + RecoverableError(status); } break; } @@ -948,7 +994,9 @@ UniquePtr QueryBinder::BindDelete(const DeleteStatement &s bound_delete_statement->table_ref_ptr_ = base_table_ref; if (base_table_ref.get() == nullptr) { - RecoverableError(Status::SyntaxError(fmt::format("Cannot bind {}.{} to a table", statement.schema_name_, statement.table_name_))); + Status status = Status::SyntaxError(fmt::format("Cannot bind {}.{} to a table", statement.schema_name_, statement.table_name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr bind_alias_proxy = MakeShared(); @@ -956,7 +1004,9 @@ UniquePtr QueryBinder::BindDelete(const DeleteStatement &s if (statement.where_expr_ != nullptr) { SharedPtr where_expr = where_binder->Bind(*statement.where_expr_, this->bind_context_ptr_.get(), 0, true); if(where_expr->Type().type() != LogicalType::kBoolean) { - RecoverableError(Status::InvalidFilterExpression(where_expr->Type().ToString())); + Status status = Status::InvalidFilterExpression(where_expr->Type().ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } bound_delete_statement->where_conditions_ = SplitExpressionByDelimiter(where_expr, ConjunctionType::kAnd); } @@ -970,7 +1020,9 @@ UniquePtr QueryBinder::BindUpdate(const UpdateStatement &s bound_update_statement->table_ref_ptr_ = base_table_ref; if (base_table_ref.get() == nullptr) { - RecoverableError(Status::SyntaxError(fmt::format("Cannot bind {}.{} to a table", statement.schema_name_, statement.table_name_))); + Status status = Status::SyntaxError(fmt::format("Cannot bind {}.{} to a table", statement.schema_name_, statement.table_name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr bind_alias_proxy = MakeShared(); @@ -978,12 +1030,16 @@ UniquePtr QueryBinder::BindUpdate(const UpdateStatement &s if (statement.where_expr_ != nullptr) { SharedPtr where_expr = where_binder->Bind(*statement.where_expr_, this->bind_context_ptr_.get(), 0, true); if(where_expr->Type().type() != LogicalType::kBoolean) { - RecoverableError(Status::InvalidFilterExpression(where_expr->Type().ToString())); + Status status = Status::InvalidFilterExpression(where_expr->Type().ToString()); + LOG_ERROR(status.message()); + RecoverableError(status); } bound_update_statement->where_conditions_ = SplitExpressionByDelimiter(where_expr, ConjunctionType::kAnd); } if (statement.update_expr_array_ == nullptr) { - RecoverableError(Status::SyntaxError(fmt::format("Update expr array is empty"))); + Status status 
= Status::SyntaxError(fmt::format("Update expr array is empty")); + LOG_ERROR(status.message()); + RecoverableError(status); } const Vector &column_names = *base_table_ref->column_names_; @@ -995,8 +1051,10 @@ UniquePtr QueryBinder::BindUpdate(const UpdateStatement &s ParsedExpr *expr = upd_expr->value; auto it = std::find(column_names.begin(), column_names.end(), column_name); if (it == column_names.end()) { - RecoverableError(Status::SyntaxError( - fmt::format("Column {} doesn't exist in table {}.{}", column_name, statement.schema_name_, statement.table_name_))); + Status status = Status::SyntaxError( + fmt::format("Column {} doesn't exist in table {}.{}", column_name, statement.schema_name_, statement.table_name_)); + LOG_ERROR(status.message()); + RecoverableError(status); } SizeT column_id = std::distance(column_names.begin(), it); SharedPtr update_expr = project_binder->Bind(*expr, this->bind_context_ptr_.get(), 0, true); diff --git a/src/planner/subquery/correlated_expressions_detector.cpp b/src/planner/subquery/correlated_expressions_detector.cpp index 037c9175be..51852d998e 100644 --- a/src/planner/subquery/correlated_expressions_detector.cpp +++ b/src/planner/subquery/correlated_expressions_detector.cpp @@ -23,6 +23,7 @@ import column_expression; import subquery_expression; import status; import infinity_exception; +import logger; namespace infinity { @@ -35,7 +36,9 @@ SharedPtr CorrelatedExpressionsDetector::VisitReplace(const Shar } if (expression->depth() > 1) { - RecoverableError(Status::SyntaxError("Column expression with depth > 1 is detected")); + Status status = Status::SyntaxError("Column expression with depth > 1 is detected"); + LOG_ERROR(status.message()); + RecoverableError(status); } is_correlated_ = true; @@ -48,7 +51,9 @@ SharedPtr CorrelatedExpressionsDetector::VisitReplace(const Shar return nullptr; } - RecoverableError(Status::SyntaxError("Not support nested correlated subquery in the subquery plan")); + Status status = Status::SyntaxError("Not support nested correlated subquery in the subquery plan"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/subquery/dependent_join_flattener.cpp b/src/planner/subquery/dependent_join_flattener.cpp index c50125d030..3bd437150f 100644 --- a/src/planner/subquery/dependent_join_flattener.cpp +++ b/src/planner/subquery/dependent_join_flattener.cpp @@ -45,6 +45,7 @@ import rewrite_correlated_expression; import internal_types; import join_reference; import data_type; +import logger; namespace infinity { @@ -78,7 +79,9 @@ SharedPtr DependentJoinFlattener::PushDependentJoin(const SharedPtr SharedPtr DependentJoinFlattener::PushDependentJoinInternal(const SharedPtr &subquery_plan) { // 1. Validates if the logical node was checked in operator2correlated_expression_map_ before. if (!operator2correlated_expression_map_.contains(subquery_plan->node_id())) { - RecoverableError(Status::SyntaxError(fmt::format("Logical node {} wasn't detected before.", subquery_plan->node_id()))); + Status status = Status::SyntaxError(fmt::format("Logical node {} wasn't detected before.", subquery_plan->node_id())); + LOG_ERROR(status.message()); + RecoverableError(status); } // 2. if no correlated expression in this operator. 
which means all correlated expressions are unnested @@ -121,11 +124,17 @@ SharedPtr DependentJoinFlattener::PushDependentJoinInternal(const S case LogicalNodeType::kExcept: case LogicalNodeType::kUnion: case LogicalNodeType::kIntersect: { - RecoverableError(Status::SyntaxError("Can't push down through set operation node.")); + Status status = Status::SyntaxError("Can't push down through set operation node"); + LOG_ERROR(status.message()); + RecoverableError(status); + break; } case LogicalNodeType::kJoin: { - RecoverableError(Status::SyntaxError("Can't push down through join node.")); + Status status = Status::SyntaxError("Can't push down through join node"); + LOG_ERROR(status.message()); + RecoverableError(status); + break; } case LogicalNodeType::kCrossProduct: { @@ -200,7 +209,9 @@ SharedPtr DependentJoinFlattener::PushDependentJoinInternal(const S return logical_join; } case LogicalNodeType::kLimit: { - RecoverableError(Status::SyntaxError("Can't push down through limit node")); + Status status = Status::SyntaxError("Can't push down through limit node"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case LogicalNodeType::kFilter: { @@ -246,11 +257,15 @@ SharedPtr DependentJoinFlattener::PushDependentJoinInternal(const S return subquery_plan; } case LogicalNodeType::kSort: { - RecoverableError(Status::SyntaxError("Can't push down through order by node")); + Status status = Status::SyntaxError("Can't push down through order by node"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case LogicalNodeType::kTableScan: { - RecoverableError(Status::SyntaxError("Can't push down through table scan node")); + Status status = Status::SyntaxError("Can't push down through table scan node"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case LogicalNodeType::kDelete: @@ -273,7 +288,9 @@ SharedPtr DependentJoinFlattener::PushDependentJoinInternal(const S case LogicalNodeType::kShow: case LogicalNodeType::kExplain: case LogicalNodeType::kPrepare: { - RecoverableError(Status::SyntaxError(fmt::format("Logical node {} should be involved in subquery.", subquery_plan->name()))); + Status status = Status::SyntaxError(fmt::format("Logical node {} should be involved in subquery.", subquery_plan->name())); + LOG_ERROR(status.message()); + RecoverableError(status); } case LogicalNodeType::kInvalid: { UnrecoverableError("Invalid logical operator node"); @@ -305,7 +322,9 @@ SharedPtr DependentJoinFlattener::BuildNoCorrelatedInternal(const S column_ids.emplace_back(correlated_columns[0]->binding().column_idx); for (SizeT idx = 1; idx < column_count; ++idx) { if (correlated_columns[idx]->binding().table_idx != table_index) { - RecoverableError(Status::SyntaxError(fmt::format("Correlated column are from different table."))); + Status status = Status::SyntaxError(fmt::format("Correlated column are from different table.")); + LOG_ERROR(status.message()); + RecoverableError(status); } column_names->emplace_back(correlated_columns[idx]->column_name()); column_types->emplace_back(MakeShared(correlated_columns[idx]->Type())); @@ -314,7 +333,9 @@ SharedPtr DependentJoinFlattener::BuildNoCorrelatedInternal(const S const Binding *table_binding_ptr = bind_context_ptr_->GetBindingFromCurrentOrParentByName(correlated_columns[0]->table_name()); if (table_binding_ptr == nullptr) { - RecoverableError(Status::SyntaxError(fmt::format("Can't find table: {} in binding context.", correlated_columns[0]->table_name()))); + Status status = 
Status::SyntaxError(fmt::format("Can't find table: {} in binding context.", correlated_columns[0]->table_name())); + LOG_ERROR(status.message()); + RecoverableError(status); } // Catalog *catalog = query_context_->storage()->catalog(); diff --git a/src/planner/subquery/rewrite_correlated_expressions.cpp b/src/planner/subquery/rewrite_correlated_expressions.cpp index eac7d602dc..796b44ef76 100644 --- a/src/planner/subquery/rewrite_correlated_expressions.cpp +++ b/src/planner/subquery/rewrite_correlated_expressions.cpp @@ -23,6 +23,7 @@ import column_expression; import status; import infinity_exception; import subquery_expression; +import logger; namespace infinity { @@ -34,7 +35,9 @@ SharedPtr RewriteCorrelatedExpressions::VisitReplace(const Share } if (expression->depth() > 1) { - RecoverableError(Status::SyntaxError("Correlated depth > 1 is not supported now.")); + Status status = Status::SyntaxError("Correlated depth > 1 is not supported now."); + LOG_ERROR(status.message()); + RecoverableError(status); } auto entry = bind_context_ptr_->correlated_column_map_.find(expression->binding()); @@ -55,7 +58,9 @@ SharedPtr RewriteCorrelatedExpressions::VisitReplace(const Share return nullptr; } - RecoverableError(Status::SyntaxError("Not support rewrite nested correlated subquery in the subquery plan")); + Status status = Status::SyntaxError("Not support rewrite nested correlated subquery in the subquery plan"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } diff --git a/src/planner/subquery/subquery_unnest.cpp b/src/planner/subquery/subquery_unnest.cpp index c5cf3796fd..2d17e35be2 100644 --- a/src/planner/subquery/subquery_unnest.cpp +++ b/src/planner/subquery/subquery_unnest.cpp @@ -56,6 +56,7 @@ import subquery_expr; import infinity_exception; import join_reference; import data_type; +import logger; namespace infinity { @@ -151,11 +152,15 @@ SharedPtr SubqueryUnnest::UnnestUncorrelated(SubqueryExpression // |-> Aggregate( count(*) as count_start) // |-> Limit (1) // |-> right plan tree - RecoverableError(Status::SyntaxError("Plan EXISTS uncorrelated subquery")); + Status status = Status::SyntaxError("Plan EXISTS uncorrelated subquery"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case SubqueryType::kNotExists: { - RecoverableError(Status::SyntaxError("Plan not EXISTS uncorrelated subquery")); + Status status = Status::SyntaxError("Plan not EXISTS uncorrelated subquery"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case SubqueryType::kNotIn: @@ -205,9 +210,12 @@ SharedPtr SubqueryUnnest::UnnestUncorrelated(SubqueryExpression return result; } - case SubqueryType::kAny: - RecoverableError(Status::SyntaxError("Plan ANY uncorrelated subquery")); + case SubqueryType::kAny: { + Status status = Status::SyntaxError("Plan ANY uncorrelated subquery"); + LOG_ERROR(status.message()); + RecoverableError(status); break; + } default: { UnrecoverableError("Unknown subquery type."); } @@ -224,7 +232,9 @@ SharedPtr SubqueryUnnest::UnnestCorrelated(SubqueryExpression *e auto &correlated_columns = bind_context->correlated_column_exprs_; if (correlated_columns.empty()) { - RecoverableError(Status::SyntaxError("No correlated column")); + Status status = Status::SyntaxError("No correlated column"); + LOG_ERROR(status.message()); + RecoverableError(status); } // Valid the correlated columns are from one table. 
@@ -232,7 +242,9 @@ SharedPtr SubqueryUnnest::UnnestCorrelated(SubqueryExpression *e SizeT table_index = correlated_columns[0]->binding().table_idx; for (SizeT idx = 1; idx < column_count; ++idx) { if (table_index != correlated_columns[idx]->binding().table_idx) { - RecoverableError(Status::SyntaxError("Correlated columns can be only from one table, now.")); + Status status = Status::SyntaxError("Correlated columns can be only from one table, now."); + LOG_ERROR(status.message()); + RecoverableError(status); } } @@ -390,7 +402,9 @@ SharedPtr SubqueryUnnest::UnnestCorrelated(SubqueryExpression *e return result; } case SubqueryType::kAny: { - RecoverableError(Status::SyntaxError("Unnest correlated any subquery.")); + Status status = Status::SyntaxError("Unnest correlated any subquery."); + LOG_ERROR(status.message()); + RecoverableError(status); } } UnrecoverableError("Unreachable"); @@ -410,8 +424,9 @@ void SubqueryUnnest::GenerateJoinConditions(QueryContext *query_context, auto &left_column_expr = correlated_columns[idx]; SizeT correlated_column_index = correlated_base_index + idx; if (correlated_column_index >= subplan_column_bindings.size()) { - RecoverableError( - Status::SyntaxError(fmt::format("Column index is out of range.{}/{}", correlated_column_index, subplan_column_bindings.size()))); + Status status = Status::SyntaxError(fmt::format("Column index is out of range.{}/{}", correlated_column_index, subplan_column_bindings.size())); + LOG_ERROR(status.message()); + RecoverableError(status); } // Generate new correlated column expression diff --git a/src/storage/buffer/file_worker/data_file_worker.cpp b/src/storage/buffer/file_worker/data_file_worker.cpp index 09505461d2..f3c2d4fe75 100644 --- a/src/storage/buffer/file_worker/data_file_worker.cpp +++ b/src/storage/buffer/file_worker/data_file_worker.cpp @@ -21,6 +21,7 @@ import infinity_exception; import local_file_system; import third_party; import status; +import logger; namespace infinity { @@ -64,23 +65,31 @@ void DataFileWorker::WriteToFileImpl(bool to_spill, bool &prepare_success) { u64 magic_number = 0x00dd3344; u64 nbytes = fs.Write(*file_handler_, &magic_number, sizeof(magic_number)); if (nbytes != sizeof(magic_number)) { - RecoverableError(Status::DataIOError(fmt::format("Write magic number which length is {}.", nbytes))); + Status status = Status::DataIOError(fmt::format("Write magic number which length is {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } nbytes = fs.Write(*file_handler_, const_cast(&buffer_size_), sizeof(buffer_size_)); if (nbytes != sizeof(buffer_size_)) { - RecoverableError(Status::DataIOError(fmt::format("Write buffer length field which length is {}.", nbytes))); + Status status = Status::DataIOError(fmt::format("Write buffer length field which length is {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } nbytes = fs.Write(*file_handler_, data_, buffer_size_); if (nbytes != buffer_size_) { - RecoverableError(Status::DataIOError(fmt::format("Expect to write buffer with size: {}, but {} bytes is written", buffer_size_, nbytes))); + Status status = Status::DataIOError(fmt::format("Expect to write buffer with size: {}, but {} bytes is written", buffer_size_, nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } u64 checksum{}; nbytes = fs.Write(*file_handler_, &checksum, sizeof(checksum)); if (nbytes != sizeof(checksum)) { - RecoverableError(Status::DataIOError(fmt::format("Write buffer length field which length is {}.", nbytes))); + 
Status status = Status::DataIOError(fmt::format("Write buffer length field which length is {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } prepare_success = true; // Not run defer_fn } @@ -90,40 +99,54 @@ void DataFileWorker::ReadFromFileImpl() { SizeT file_size = fs.GetFileSize(*file_handler_); if (file_size < sizeof(u64) * 3) { - RecoverableError(Status::DataIOError(fmt::format("Incorrect file length {}.", file_size))); + Status status = Status::DataIOError(fmt::format("Incorrect file length {}.", file_size)); + LOG_ERROR(status.message()); + RecoverableError(status); } // file header: magic number, buffer_size u64 magic_number{0}; u64 nbytes = fs.Read(*file_handler_, &magic_number, sizeof(magic_number)); if (nbytes != sizeof(magic_number)) { - RecoverableError(Status::DataIOError(fmt::format("Read magic number which length isn't {}.", nbytes))); + Status status = Status::DataIOError(fmt::format("Read magic number which length isn't {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } if (magic_number != 0x00dd3344) { - RecoverableError(Status::DataIOError(fmt::format("Incorrect file header magic number: {}.", magic_number))); + Status status = Status::DataIOError(fmt::format("Read magic number which length isn't {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } u64 buffer_size_{}; nbytes = fs.Read(*file_handler_, &buffer_size_, sizeof(buffer_size_)); if (nbytes != sizeof(buffer_size_)) { - RecoverableError(Status::DataIOError(fmt::format("Unmatched buffer length: {} / {}", nbytes, buffer_size_))); + Status status = Status::DataIOError(fmt::format("Unmatched buffer length: {} / {}", nbytes, buffer_size_)); + LOG_ERROR(status.message()); + RecoverableError(status); } if (file_size != buffer_size_ + 3 * sizeof(u64)) { - RecoverableError(Status::DataIOError(fmt::format("File size: {} isn't matched with {}.", file_size, buffer_size_ + 3 * sizeof(u64)))); + Status status = Status::DataIOError(fmt::format("File size: {} isn't matched with {}.", file_size, buffer_size_ + 3 * sizeof(u64))); + LOG_ERROR(status.message()); + RecoverableError(status); } // file body data_ = static_cast(new char[buffer_size_]{}); nbytes = fs.Read(*file_handler_, data_, buffer_size_); if (nbytes != buffer_size_) { - RecoverableError(Status::DataIOError(fmt::format("Expect to read buffer with size: {}, but {} bytes is read", buffer_size_, nbytes))); + Status status = Status::DataIOError(fmt::format("Expect to read buffer with size: {}, but {} bytes is read", buffer_size_, nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } // file footer: checksum u64 checksum{0}; nbytes = fs.Read(*file_handler_, &checksum, sizeof(checksum)); if (nbytes != sizeof(checksum)) { - RecoverableError(Status::DataIOError(fmt::format("Incorrect file checksum length: {}.", nbytes))); + Status status = Status::DataIOError(fmt::format("Incorrect file checksum length: {}.", nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/buffer/file_worker/file_worker.cpp b/src/storage/buffer/file_worker/file_worker.cpp index 81c2660083..22681ad38c 100644 --- a/src/storage/buffer/file_worker/file_worker.cpp +++ b/src/storage/buffer/file_worker/file_worker.cpp @@ -85,7 +85,9 @@ void FileWorker::MoveFile() { String dest_dir = ChooseFileDir(false); String dest_path = fmt::format("{}/{}", dest_dir, *file_name_); if (!fs.Exists(src_path)) { - RecoverableError(Status::FileNotFound(src_path)); + Status status = 
Status::FileNotFound(src_path); + LOG_ERROR(status.message()); + RecoverableError(status); } if (!fs.Exists(dest_dir)) { fs.CreateDirectory(dest_dir); diff --git a/src/storage/buffer/file_worker/raw_file_worker.cpp b/src/storage/buffer/file_worker/raw_file_worker.cpp index 733ecf3405..71d50d53fa 100644 --- a/src/storage/buffer/file_worker/raw_file_worker.cpp +++ b/src/storage/buffer/file_worker/raw_file_worker.cpp @@ -22,6 +22,7 @@ import infinity_exception; import local_file_system; import third_party; import status; +import logger; namespace infinity { @@ -58,7 +59,9 @@ void RawFileWorker::WriteToFileImpl(bool to_spill, bool &prepare_success) { LocalFileSystem fs; i64 nbytes = fs.Write(*file_handler_, data_, buffer_size_); if (nbytes != (i64)buffer_size_) { - RecoverableError(Status::DataIOError(fmt::format("Expect to write buffer with size: {}, but {} bytes is written", buffer_size_, nbytes))); + Status status = Status::DataIOError(fmt::format("Expect to write buffer with size: {}, but {} bytes is written", buffer_size_, nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } prepare_success = true; // Not run defer_fn } @@ -69,7 +72,9 @@ void RawFileWorker::ReadFromFileImpl() { data_ = static_cast(new char[buffer_size_]); i64 nbytes = fs.Read(*file_handler_, data_, buffer_size_); if (nbytes != (i64)buffer_size_) { - RecoverableError(Status::DataIOError(fmt::format("Expect to read buffer with size: {}, but {} bytes is read", buffer_size_, nbytes))); + Status status = Status::DataIOError(fmt::format("Expect to read buffer with size: {}, but {} bytes is read", buffer_size_, nbytes)); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/column_vector/column_vector.cpp b/src/storage/column_vector/column_vector.cpp index 7bdbda33e1..f72810a7b6 100644 --- a/src/storage/column_vector/column_vector.cpp +++ b/src/storage/column_vector/column_vector.cpp @@ -275,10 +275,14 @@ void ColumnVector::Initialize(const ColumnVector &other, const Selection &input_ break; } case kNull: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kMissing: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kInvalid: { UnrecoverableError("Invalid data type"); @@ -419,13 +423,19 @@ void ColumnVector::Initialize(ColumnVectorType vector_type, const ColumnVector & CopyFrom(other.buffer_.get(), this->buffer_.get(), start_idx, 0, end_idx - start_idx); break; #endif - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kNull: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kMissing: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kInvalid: { UnrecoverableError("Invalid data type"); @@ -580,13 +590,19 @@ void ColumnVector::CopyRow(const ColumnVector &other, SizeT dst_idx, SizeT src_i break; } case kNull: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + 
LOG_ERROR(status.message()); + RecoverableError(status); } case kMissing: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kInvalid: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } @@ -618,7 +634,9 @@ String ColumnVector::ToString(SizeT row_index) const { return std::to_string(((BigIntT *)data_ptr_)[row_index]); } case kHugeInt: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kFloat: { return std::to_string(((FloatT *)data_ptr_)[row_index]); @@ -627,7 +645,9 @@ String ColumnVector::ToString(SizeT row_index) const { return std::to_string(((DoubleT *)data_ptr_)[row_index]); } case kDecimal: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kVarchar: { VarcharT &varchar_ref = ((VarcharT *)data_ptr_)[row_index]; @@ -657,44 +677,64 @@ String ColumnVector::ToString(SizeT row_index) const { return ((TimestampT *)data_ptr_)[row_index].ToString(); } case kInterval: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kArray: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kTuple: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kPoint: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kLine: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kLineSeg: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } case kBox: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } // case kPath: { // } // case kPolygon: { // } case kCircle: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } // case kBitmap: { // } case kUuid: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } // case kBlob: { // } case kEmbedding: { // RecoverableError(Status::NotSupport("Not implemented")); if (data_type_->type_info()->type() != TypeInfoType::kEmbedding) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } EmbeddingInfo 
*embedding_info = static_cast(data_type_->type_info().get()); EmbeddingT embedding_element(nullptr, false); @@ -705,7 +745,9 @@ String ColumnVector::ToString(SizeT row_index) const { } case kTensor: { if (data_type_->type_info()->type() != TypeInfoType::kEmbedding) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } const EmbeddingInfo *embedding_info = static_cast(data_type_->type_info().get()); const auto &[embedding_num, chunk_id, chunk_offset] = reinterpret_cast(data_ptr_)[row_index]; @@ -716,7 +758,9 @@ String ColumnVector::ToString(SizeT row_index) const { return (((RowID *)data_ptr_)[row_index]).ToString(); } case kMixed: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } default: { UnrecoverableError("Attempt to access an unaccepted type"); @@ -1186,7 +1230,9 @@ namespace { Vector SplitArrayElement(std::string_view data, char delimiter) { SizeT data_size = data.size(); if (data_size < 2 || data[0] != '[' || data[data_size - 1] != ']') { - RecoverableError(Status::ImportFileFormatError("Embedding data must be surrounded by [ and ]")); + Status status = Status::ImportFileFormatError("Embedding data must be surrounded by [ and ]"); + LOG_ERROR(status.message()); + RecoverableError(status); } Vector ret; SizeT i = 1, j = 1; @@ -1258,7 +1304,9 @@ void ColumnVector::AppendByStringView(std::string_view sv, char delimiter) { auto embedding_info = static_cast(data_type_->type_info().get()); Vector ele_str_views = SplitArrayElement(sv, delimiter); if (embedding_info->Dimension() < ele_str_views.size()) { - RecoverableError(Status::ImportFileFormatError("Embedding data size exceeds dimension.")); + Status status = Status::ImportFileFormatError("Embedding data size exceeds dimension."); + LOG_ERROR(status.message()); + RecoverableError(status); } SizeT dst_off = index * data_type_->Size(); switch (embedding_info->Type()) { @@ -1301,7 +1349,9 @@ void ColumnVector::AppendByStringView(std::string_view sv, char delimiter) { Vector ele_str_views = SplitArrayElement(sv, delimiter); const auto unit_embedding_dim = embedding_info->Dimension(); if (ele_str_views.size() % unit_embedding_dim != 0) { - RecoverableError(Status::ImportFileFormatError("Embedding data size is not multiple of tensor unit dimension.")); + Status status = Status::ImportFileFormatError("Embedding data size is not multiple of tensor unit dimension."); + LOG_ERROR(status.message()); + RecoverableError(status); } SizeT dst_off = index; switch (embedding_info->Type()) { @@ -1354,7 +1404,9 @@ void ColumnVector::AppendByStringView(std::string_view sv, char delimiter) { break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/storage/column_vector/column_vector.cppm b/src/storage/column_vector/column_vector.cppm index 205c4925f0..dd3ff46c95 100644 --- a/src/storage/column_vector/column_vector.cppm +++ b/src/storage/column_vector/column_vector.cppm @@ -35,6 +35,7 @@ import internal_types; import data_type; import embedding_info; import constant_expr; +import logger; namespace infinity { @@ -264,7 +265,9 @@ private: const auto total_elememt_count = ele_str_views.size(); const auto input_bytes = total_elememt_count * sizeof(T); if 
(input_bytes > DEFAULT_FIXLEN_TENSOR_CHUNK_SIZE) { - RecoverableError(Status::SyntaxError("Tensor size exceeds the limit.")); + Status status = Status::SyntaxError("Tensor size exceeds the limit."); + LOG_ERROR(status.message()); + RecoverableError(status); } embedding_num = total_elememt_count / unit_embedding_dim; auto tmp_data = MakeUniqueForOverwrite(total_elememt_count); @@ -283,7 +286,9 @@ private: embedding_num = total_elememt_count / unit_embedding_dim; const auto bit_bytes = (total_elememt_count + 7) / 8; if (bit_bytes > DEFAULT_FIXLEN_TENSOR_CHUNK_SIZE) { - RecoverableError(Status::SyntaxError("Tensor size exceeds the limit.")); + Status status = Status::SyntaxError("Tensor size exceeds the limit."); + LOG_ERROR(status.message()); + RecoverableError(status); } auto tmp_data = MakeUnique(bit_bytes); for (SizeT i = 0; auto &ele_str_view : ele_str_views) { diff --git a/src/storage/column_vector/operator/binary_operator.cppm b/src/storage/column_vector/operator/binary_operator.cppm index 2e96b767de..05bf21f3fe 100644 --- a/src/storage/column_vector/operator/binary_operator.cppm +++ b/src/storage/column_vector/operator/binary_operator.cppm @@ -29,6 +29,7 @@ import bitmask_buffer; import third_party; import internal_types; import status; +import logger; namespace infinity { @@ -888,7 +889,9 @@ private: SizeT, void *, bool) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template @@ -1046,7 +1049,9 @@ private: SizeT, void *, bool) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template @@ -1056,7 +1061,9 @@ private: SizeT, void *, bool) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template @@ -1066,7 +1073,9 @@ private: SizeT, void *, bool) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } template @@ -1076,7 +1085,9 @@ private: SizeT, void *, bool) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } }; diff --git a/src/storage/column_vector/value.cpp b/src/storage/column_vector/value.cpp index 97912590f1..e673f600e3 100644 --- a/src/storage/column_vector/value.cpp +++ b/src/storage/column_vector/value.cpp @@ -225,11 +225,15 @@ Value Value::MakeTensor(const_ptr_t ptr, SizeT bytes, SharedPtr type_i } const EmbeddingInfo *embedding_info = static_cast(type_info_ptr.get()); if (const SizeT len = embedding_info->Size(); bytes % len != 0) { - RecoverableError(Status::SyntaxError(fmt::format("Value::MakeTensor(bytes={}) is not a multiple of embedding size={}", bytes, len))); + Status status = Status::SyntaxError(fmt::format("Value::MakeTensor(bytes={}) is not a multiple of embedding size={}", bytes, len)); + LOG_ERROR(status.message()); + RecoverableError(status); } if (bytes > DEFAULT_FIXLEN_TENSOR_CHUNK_SIZE) { - RecoverableError(Status::SyntaxError( - fmt::format("Value::MakeTensor(bytes={}) is larger than the maximum tensor size={}", bytes, DEFAULT_FIXLEN_TENSOR_CHUNK_SIZE))); + Status status = Status::SyntaxError( + 
fmt::format("Value::MakeTensor(bytes={}) is larger than the maximum tensor size={}", bytes, DEFAULT_FIXLEN_TENSOR_CHUNK_SIZE)); + LOG_ERROR(status.message()); + RecoverableError(status); } SharedPtr embedding_value_info = MakeShared(); embedding_value_info->data_.resize(bytes); diff --git a/src/storage/definition/index_base.cpp b/src/storage/definition/index_base.cpp index f15ec47eb4..5b004c880f 100644 --- a/src/storage/definition/index_base.cpp +++ b/src/storage/definition/index_base.cpp @@ -31,6 +31,7 @@ import status; import infinity_exception; import create_index_info; import index_defines; +import logger; namespace infinity { @@ -135,7 +136,9 @@ SharedPtr IndexBase::ReadAdv(char *&ptr, int32_t maxbytes) { UnrecoverableError("Error index method while reading"); } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } if (ptr_end < ptr) { @@ -206,7 +209,9 @@ SharedPtr IndexBase::Deserialize(const nlohmann::json &index_def_json UnrecoverableError("Error index method while deserializing"); } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } return res; diff --git a/src/storage/definition/index_full_text.cpp b/src/storage/definition/index_full_text.cpp index c22266359d..4242f067f2 100644 --- a/src/storage/definition/index_full_text.cpp +++ b/src/storage/definition/index_full_text.cpp @@ -32,6 +32,7 @@ import logical_type; import index_defines; import analyzer_pool; import analyzer; +import logger; namespace infinity { @@ -110,7 +111,9 @@ nlohmann::json IndexFullText::Serialize() const { } SharedPtr IndexFullText::Deserialize(const nlohmann::json &) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } @@ -119,10 +122,14 @@ void IndexFullText::ValidateColumnDataType(const SharedPtr &base_t auto &column_types_vector = *(base_table_ref->column_types_); SizeT column_id = std::find(column_names_vector.begin(), column_names_vector.end(), column_name) - column_names_vector.begin(); if (column_id == column_names_vector.size()) { - RecoverableError(Status::ColumnNotExist(column_name)); + Status status = Status::ColumnNotExist(column_name); + LOG_ERROR(status.message()); + RecoverableError(status); } else if (auto &data_type = column_types_vector[column_id]; data_type->type() != LogicalType::kVarchar) { - RecoverableError(Status::InvalidIndexDefinition( - fmt::format("Attempt to create full-text index on column: {}, data type: {}.", column_name, data_type->ToString()))); + Status status = Status::InvalidIndexDefinition( + fmt::format("Attempt to create full-text index on column: {}, data type: {}.", column_name, data_type->ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/definition/index_hnsw.cpp b/src/storage/definition/index_hnsw.cpp index fb3630fae1..cc5b9b724d 100644 --- a/src/storage/definition/index_hnsw.cpp +++ b/src/storage/definition/index_hnsw.cpp @@ -30,6 +30,7 @@ import default_values; import index_base; import logical_type; import statement_common; +import logger; namespace infinity { @@ -73,16 +74,22 @@ IndexHnsw::Make(SharedPtr index_name, const String &file_name, Vectorparam_name_ == "encode") { encode_type = 
StringToHnswEncodeType(para->param_value_); } else { - RecoverableError(Status::InvalidIndexParam(para->param_name_)); + Status status = Status::InvalidIndexParam(para->param_name_); + LOG_ERROR(status.message()); + RecoverableError(status); } } if (metric_type == MetricType::kInvalid) { - RecoverableError(Status::InvalidIndexParam("Metric type")); + Status status = Status::InvalidIndexParam("Metric type"); + LOG_ERROR(status.message()); + RecoverableError(status); } if (encode_type == HnswEncodeType::kInvalid) { - RecoverableError(Status::InvalidIndexParam("Encode type")); + Status status = Status::InvalidIndexParam("Encode type"); + LOG_ERROR(status.message()); + RecoverableError(status); } return MakeShared(index_name, file_name, std::move(column_names), metric_type, encode_type, M, ef_construction, ef); @@ -145,10 +152,14 @@ void IndexHnsw::ValidateColumnDataType(const SharedPtr &base_table auto &column_types_vector = *(base_table_ref->column_types_); SizeT column_id = std::find(column_names_vector.begin(), column_names_vector.end(), column_name) - column_names_vector.begin(); if (column_id == column_names_vector.size()) { - RecoverableError(Status::ColumnNotExist(column_name)); + Status status = Status::ColumnNotExist(column_name); + LOG_ERROR(status.message()); + RecoverableError(status); } else if (auto &data_type = column_types_vector[column_id]; data_type->type() != LogicalType::kEmbedding) { - RecoverableError(Status::InvalidIndexDefinition( - fmt::format("Attempt to create HNSW index on column: {}, data type: {}.", column_name, data_type->ToString()))); + Status status = Status::InvalidIndexDefinition( + fmt::format("Attempt to create HNSW index on column: {}, data type: {}.", column_name, data_type->ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/definition/index_ivfflat.cpp b/src/storage/definition/index_ivfflat.cpp index efa7f53873..40b223ba61 100644 --- a/src/storage/definition/index_ivfflat.cpp +++ b/src/storage/definition/index_ivfflat.cpp @@ -29,6 +29,7 @@ import serialize; import index_base; import logical_type; import statement_common; +import logger; namespace infinity { @@ -46,7 +47,9 @@ SharedPtr IndexIVFFlat::Make(SharedPtr index_name, } } if (metric_type == MetricType::kInvalid) { - RecoverableError(Status::LackIndexParam()); + Status status = Status::LackIndexParam(); + LOG_ERROR(status.message()); + RecoverableError(status); } return MakeShared(index_name, file_name, std::move(column_names), centroids_count, metric_type); } @@ -74,7 +77,9 @@ void IndexIVFFlat::WriteAdv(char *&ptr) const { } SharedPtr IndexIVFFlat::ReadAdv(char *&, int32_t) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } @@ -98,7 +103,9 @@ nlohmann::json IndexIVFFlat::Serialize() const { } SharedPtr IndexIVFFlat::Deserialize(const nlohmann::json &) { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } @@ -107,10 +114,14 @@ void IndexIVFFlat::ValidateColumnDataType(const SharedPtr &base_ta auto &column_types_vector = *(base_table_ref->column_types_); SizeT column_id = std::find(column_names_vector.begin(), column_names_vector.end(), column_name) - column_names_vector.begin(); if (column_id == column_names_vector.size()) { - 
RecoverableError(Status::ColumnNotExist(column_name)); + Status status = Status::ColumnNotExist(column_name); + LOG_ERROR(status.message()); + RecoverableError(status); } else if (auto &data_type = column_types_vector[column_id]; data_type->type() != LogicalType::kEmbedding) { - RecoverableError(Status::InvalidIndexDefinition( - fmt::format("Attempt to create IVFFLAT index on column: {}, data type: {}.", column_name, data_type->ToString()))); + Status status = Status::InvalidIndexDefinition( + fmt::format("Attempt to create IVFFLAT index on column: {}, data type: {}.", column_name, data_type->ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/definition/index_secondary.cpp b/src/storage/definition/index_secondary.cpp index 9df8d9d3a1..f69d556e24 100644 --- a/src/storage/definition/index_secondary.cpp +++ b/src/storage/definition/index_secondary.cpp @@ -25,6 +25,7 @@ import status; import base_table_ref; import infinity_exception; import third_party; +import logger; namespace infinity { @@ -33,10 +34,14 @@ void IndexSecondary::ValidateColumnDataType(const SharedPtr &base_ auto &column_types_vector = *(base_table_ref->column_types_); SizeT column_id = std::find(column_names_vector.begin(), column_names_vector.end(), column_name) - column_names_vector.begin(); if (column_id == column_names_vector.size()) { - RecoverableError(Status::ColumnNotExist(column_name)); + Status status = Status::ColumnNotExist(column_name); + LOG_ERROR(status.message()); + RecoverableError(status); } else if (auto &data_type = column_types_vector[column_id]; !(data_type->CanBuildSecondaryIndex())) { - RecoverableError(Status::InvalidIndexDefinition( - fmt::format("Attempt to create index on column: {}, data type: {}.", column_name, data_type->ToString()))); + Status status = Status::InvalidIndexDefinition( + fmt::format("Attempt to create index on column: {}, data type: {}.", column_name, data_type->ToString())); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/invertedindex/column_inverter.cpp b/src/storage/invertedindex/column_inverter.cpp index f618aaa02d..ee77fad8e8 100644 --- a/src/storage/invertedindex/column_inverter.cpp +++ b/src/storage/invertedindex/column_inverter.cpp @@ -50,9 +50,9 @@ ColumnInverter::ColumnInverter(PostingWriterProvider posting_writer_provider, Ve void ColumnInverter::InitAnalyzer(const String &analyzer_name) { auto [analyzer, status] = AnalyzerPool::instance().GetAnalyzer(analyzer_name); if(!status.ok()) { - String error_message = fmt::format("Invalid analyzer: {}", analyzer_name); - LOG_ERROR(error_message); - RecoverableError(Status::UnexpectedError(error_message)); + Status status = Status::UnexpectedError(fmt::format("Invalid analyzer: {}", analyzer_name)); + LOG_ERROR(status.message()); + RecoverableError(status); } analyzer_ = std::move(analyzer); } diff --git a/src/storage/invertedindex/disk_segment_reader.cpp b/src/storage/invertedindex/disk_segment_reader.cpp index eeb453fc25..d99a615e1a 100644 --- a/src/storage/invertedindex/disk_segment_reader.cpp +++ b/src/storage/invertedindex/disk_segment_reader.cpp @@ -33,6 +33,7 @@ import third_party; import byte_slice_reader; import infinity_exception; import status; +import logger; namespace infinity { @@ -48,7 +49,9 @@ DiskIndexSegmentReader::DiskIndexSegmentReader(const String &index_dir, const St int rc = fs_.MmapFile(posting_file_, data_ptr_, data_len_); assert(rc == 0); if (rc != 0) { - RecoverableError(Status::MmapFileError(posting_file_)); + 
Status status = Status::MmapFileError(posting_file_); + LOG_ERROR(status.message()); + RecoverableError(status); } } @@ -56,7 +59,9 @@ DiskIndexSegmentReader::~DiskIndexSegmentReader() { int rc = fs_.MunmapFile(posting_file_); assert(rc == 0); if (rc != 0) { - RecoverableError(Status::MunmapFileError(posting_file_)); + Status status = Status::MunmapFileError(posting_file_); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/invertedindex/format/doc_list_encoder.cpp b/src/storage/invertedindex/format/doc_list_encoder.cpp index bf1293f7f6..4d795f52b0 100644 --- a/src/storage/invertedindex/format/doc_list_encoder.cpp +++ b/src/storage/invertedindex/format/doc_list_encoder.cpp @@ -8,11 +8,10 @@ import file_writer; import file_reader; import posting_byte_slice; import skiplist_writer; +import skiplist_reader; import doc_list_format_option; import inmem_doc_list_decoder; -import skiplist_reader; import index_defines; -import skiplist_reader; import vbyte_compressor; import logger; diff --git a/src/storage/invertedindex/format/inmem_doc_list_decoder.cpp b/src/storage/invertedindex/format/inmem_doc_list_decoder.cpp index d7a723d1ca..df342b8443 100644 --- a/src/storage/invertedindex/format/inmem_doc_list_decoder.cpp +++ b/src/storage/invertedindex/format/inmem_doc_list_decoder.cpp @@ -7,7 +7,6 @@ import memory_pool; import posting_byte_slice; import posting_byte_slice_reader; import index_decoder; -import skiplist_reader; import index_defines; namespace infinity { diff --git a/src/storage/invertedindex/format/skiplist_reader.cpp b/src/storage/invertedindex/format/skiplist_reader.cpp index dcbfb1ecd8..eb58277673 100644 --- a/src/storage/invertedindex/format/skiplist_reader.cpp +++ b/src/storage/invertedindex/format/skiplist_reader.cpp @@ -1,6 +1,5 @@ module; -#include module skiplist_reader; import stl; @@ -27,7 +26,7 @@ bool SkipListReader::SkipTo(u32 query_doc_id, u32 &doc_id, u32 &prev_doc_id, u32 const u32 local_prev_ttf = current_ttf; if (current_cursor >= num_in_buffer) { auto [status, ret] = LoadBuffer(); - assert(status == 0); +// assert(status == 0); if (!ret) { // current segment is exhausted // skip current block diff --git a/src/storage/invertedindex/search/query_node.cpp b/src/storage/invertedindex/search/query_node.cpp index 152c0d94a8..c3d45278e6 100644 --- a/src/storage/invertedindex/search/query_node.cpp +++ b/src/storage/invertedindex/search/query_node.cpp @@ -45,7 +45,9 @@ std::unique_ptr QueryNode::GetOptimizedQueryTree(std::unique_ptr optimized_root; if (!root) { - RecoverableError(Status::SyntaxError("Invalid query statement: Empty query tree")); + Status status = Status::SyntaxError("Invalid query statement: Empty query tree"); + LOG_ERROR(status.message()); + RecoverableError(status); } // push down the weight to the leaf term node root->PushDownWeight(); @@ -62,14 +64,18 @@ std::unique_ptr QueryNode::GetOptimizedQueryTree(std::unique_ptr(root.get())->GetNewOptimizedQueryTree(); if (optimized_root->GetType() == QueryNodeType::NOT) { - RecoverableError(Status::SyntaxError("Invalid query statement: NotQueryNode should not be on the top level")); + Status status = Status::SyntaxError("Invalid query statement: NotQueryNode should not be on the top level"); + LOG_ERROR(status.message()); + RecoverableError(status); } break; } @@ -153,7 +159,9 @@ std::unique_ptr NotQueryNode::InnerGetNewOptimizedQueryTree() { for (auto &child : children_) { switch (child->GetType()) { case QueryNodeType::NOT: { - RecoverableError(Status::SyntaxError("Invalid 
query statement: NotQueryNode should not have not child")); + Status status = Status::SyntaxError("Invalid query statement: NotQueryNode should not have not child"); + LOG_ERROR(status.message()); + RecoverableError(status); break; } case QueryNodeType::TERM: @@ -345,7 +353,9 @@ std::unique_ptr OrQueryNode::InnerGetNewOptimizedQueryTree() { or_node->children_ = std::move(or_list); return or_node; } else { - RecoverableError(Status::SyntaxError("Invalid query statement: OrQueryNode should not have both not child and non-not child")); + Status status = Status::SyntaxError("Invalid query statement: OrQueryNode should not have both not child and non-not child"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } } diff --git a/src/storage/invertedindex/search/search_driver.cpp b/src/storage/invertedindex/search/search_driver.cpp index 987d6aeb98..d1c7527a72 100644 --- a/src/storage/invertedindex/search/search_driver.cpp +++ b/src/storage/invertedindex/search/search_driver.cpp @@ -135,7 +135,9 @@ std::unique_ptr SearchDriver::ParseSingle(const std::string &query, c std::unique_ptr SearchDriver::AnalyzeAndBuildQueryNode(const std::string &field, std::string &&text) const { if (text.empty()) { - RecoverableError(Status::SyntaxError("Empty query text")); + Status status = Status::SyntaxError("Empty query text"); + LOG_ERROR(status.message()); + RecoverableError(status); return nullptr; } Term input_term; @@ -150,6 +152,7 @@ std::unique_ptr SearchDriver::AnalyzeAndBuildQueryNode(const std::str } auto [analyzer, status] = AnalyzerPool::instance().GetAnalyzer(analyzer_name); if (!status.ok()) { + LOG_ERROR(status.message()); RecoverableError(status); } if (analyzer_name == AnalyzerPool::STANDARD) { diff --git a/src/storage/io/file_reader.cpp b/src/storage/io/file_reader.cpp index d7d949488d..2c1eafd100 100644 --- a/src/storage/io/file_reader.cpp +++ b/src/storage/io/file_reader.cpp @@ -22,6 +22,7 @@ import file_system_type; import status; import infinity_exception; import third_party; +import logger; namespace infinity { @@ -40,7 +41,9 @@ void FileReader::Read(char_t *buffer, SizeT read_size) { if (buffer_size_ == 0) { already_read_size_ = fs_.Read(*file_handler_, buffer, read_size); if (already_read_size_ != read_size) { - RecoverableError(Status::DataIOError(fmt::format("No enough data from file: {}", file_handler_->path_.string()))); + Status status = Status::DataIOError(fmt::format("No enough data reading from {}", file_handler_->path_.string())); + LOG_ERROR(status.message()); + RecoverableError(status); } } else { if (buffer_offset_ >= buffer_length_) @@ -56,7 +59,9 @@ void FileReader::Read(char_t *buffer, SizeT read_size) { already_read_size_ = fs_.Read(*file_handler_, buffer + start, read_size - start); if (already_read_size_ == 0) { - RecoverableError(Status::DataIOError(fmt::format("No enough data from file: {}", file_handler_->path_.string()))); + Status status = Status::DataIOError(fmt::format("No enough data reading from {}", file_handler_->path_.string())); + LOG_ERROR(status.message()); + RecoverableError(status); } buffer_start_ += buffer_offset_ + read_size; @@ -68,7 +73,9 @@ void FileReader::Read(char_t *buffer, SizeT read_size) { void FileReader::ReadAt(i64 file_offset, char_t *buffer, SizeT read_size) { already_read_size_ = fs_.ReadAt(*file_handler_, file_offset, buffer, read_size); if (already_read_size_ != read_size) { - RecoverableError(Status::DataIOError(fmt::format("No enough data from file: {}", file_handler_->path_.string()))); + Status status = 
Status::DataIOError(fmt::format("No enough data from file: {}", file_handler_->path_.string())); + LOG_ERROR(status.message()); + RecoverableError(status); } } diff --git a/src/storage/io/file_reader.cppm b/src/storage/io/file_reader.cppm index 058ef9f337..81dd3bf08d 100644 --- a/src/storage/io/file_reader.cppm +++ b/src/storage/io/file_reader.cppm @@ -17,6 +17,7 @@ module; #include #include +export module file_reader; import stl; import file_system; import file_system_type; @@ -24,8 +25,6 @@ import status; import infinity_exception; import local_file_system; -export module file_reader; - namespace infinity { export class FileReader { diff --git a/src/storage/knn_index/ann_ivf/annivfflat_index_data.cppm b/src/storage/knn_index/ann_ivf/annivfflat_index_data.cppm index 010edd3802..ff5790831e 100644 --- a/src/storage/knn_index/ann_ivf/annivfflat_index_data.cppm +++ b/src/storage/knn_index/ann_ivf/annivfflat_index_data.cppm @@ -62,9 +62,13 @@ struct AnnIVFFlatIndexData { } if (metric_ != MetricType::kMetricL2 && metric_ != MetricType::kMetricInnerProduct) { if (metric_ != MetricType::kInvalid) { - RecoverableError(Status::NotSupport("Metric type not implemented")); + Status status = Status::NotSupport("Metric type not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } else { - RecoverableError(Status::NotSupport("Metric type not supported")); + Status status = Status::NotSupport("Metric type not supported"); + LOG_ERROR(status.message()); + RecoverableError(status); } return; } @@ -101,9 +105,13 @@ struct AnnIVFFlatIndexData { } if (metric_ != MetricType::kMetricL2 && metric_ != MetricType::kMetricInnerProduct) { if (metric_ != MetricType::kInvalid) { - RecoverableError(Status::NotSupport("Metric type not implemented")); + Status status = Status::NotSupport("Metric type not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } else { - RecoverableError(Status::NotSupport("Metric type not supported")); + Status status = Status::NotSupport("Metric type not supported"); + LOG_ERROR(status.message()); + RecoverableError(status); } return; } diff --git a/src/storage/meta/catalog.cpp b/src/storage/meta/catalog.cpp index 1a036a061a..ffcba1763c 100644 --- a/src/storage/meta/catalog.cpp +++ b/src/storage/meta/catalog.cpp @@ -413,7 +413,9 @@ SharedPtr Catalog::GetFunctionSetByName(Catalog *catalog, String fu StringToLower(function_name); if (!catalog->function_sets_.contains(function_name)) { - RecoverableError(Status::FunctionNotFound(function_name)); + Status status = Status::FunctionNotFound(function_name); + LOG_ERROR(status.message()); + RecoverableError(status); } return catalog->function_sets_[function_name]; } @@ -521,7 +523,9 @@ UniquePtr Catalog::LoadFromFileDelta(const DeltaCatalogFileIn } i32 n_bytes = catalog_delta_entry->GetSizeInBytes(); if (file_size != n_bytes) { - RecoverableError(Status::CatalogCorrupted(catalog_path)); + Status status = Status::CatalogCorrupted(catalog_path); + LOG_ERROR(status.message()); + RecoverableError(status); } return catalog_delta_entry; } @@ -871,7 +875,9 @@ UniquePtr Catalog::LoadFromFile(const FullCatalogFileInfo &full_ckp_inf String json_str(file_size, 0); SizeT n_bytes = catalog_file_handler->Read(json_str.data(), file_size); if (file_size != n_bytes) { - RecoverableError(Status::CatalogCorrupted(catalog_path)); + Status status = Status::CatalogCorrupted(catalog_path); + LOG_ERROR(status.message()); + RecoverableError(status); } nlohmann::json catalog_json = nlohmann::json::parse(json_str); @@ -913,8 +919,9 
@@ void Catalog::SaveFullCatalog(TxnTimeStamp max_commit_ts, String &full_catalog_p SizeT n_bytes = catalog_file_handler->Write(catalog_str.data(), catalog_str.size()); if (n_bytes != catalog_str.size()) { - LOG_ERROR(fmt::format("Saving catalog file failed: {}", catalog_tmp_path)); - RecoverableError(Status::CatalogCorrupted(catalog_tmp_path)); + Status status = Status::DataCorrupted(catalog_tmp_path); + LOG_ERROR(status.message()); + RecoverableError(status); } catalog_file_handler->Sync(); catalog_file_handler->Close(); diff --git a/src/storage/meta/entry/segment_index_entry.cpp b/src/storage/meta/entry/segment_index_entry.cpp index 21f7fc6cd5..c357b3a314 100644 --- a/src/storage/meta/entry/segment_index_entry.cpp +++ b/src/storage/meta/entry/segment_index_entry.cpp @@ -298,7 +298,9 @@ void SegmentIndexEntry::MemIndexInsert(SharedPtr block_entry, break; } default: { - RecoverableError(Status::NotSupport("Not support data type for index hnsw.")); + Status status = Status::NotSupport("Not support data type for index hnsw."); + LOG_ERROR(status.message()); + RecoverableError(status); } } memory_hnsw_indexer_->SetRowCount(row_cnt); @@ -505,7 +507,9 @@ void SegmentIndexEntry::PopulateEntirely(const SegmentEntry *segment_entry, Txn break; } default: { - RecoverableError(Status::NotSupport("Not support data type for index hnsw.")); + Status status = Status::NotSupport("Not support data type for index hnsw."); + LOG_ERROR(status.message()); + RecoverableError(status); } } break; @@ -575,7 +579,9 @@ Status SegmentIndexEntry::CreateIndexPrepare(const SegmentEntry *segment_entry, break; } default: { - RecoverableError(Status::NotSupport("Not support data type for index ivf.")); + Status status = Status::NotSupport("Not support data type for index ivf."); + LOG_ERROR(status.message()); + RecoverableError(status); } } break; @@ -635,7 +641,9 @@ Status SegmentIndexEntry::CreateIndexDo(atomic_u64 &create_index_idx) { break; } default: { - RecoverableError(Status::NotSupport("Not implemented")); + Status status = Status::NotSupport("Not implemented"); + LOG_ERROR(status.message()); + RecoverableError(status); } } } diff --git a/src/storage/meta/entry/table_entry.cpp b/src/storage/meta/entry/table_entry.cpp index 42f2084162..aa3f77253e 100644 --- a/src/storage/meta/entry/table_entry.cpp +++ b/src/storage/meta/entry/table_entry.cpp @@ -1099,7 +1099,9 @@ UniquePtr TableEntry::Deserialize(const nlohmann::json &table_entry_ u64 TableEntry::GetColumnIdByName(const String &column_name) const { auto it = column_name2column_id_.find(column_name); if (it == column_name2column_id_.end()) { - RecoverableError(Status::ColumnNotExist(column_name)); + Status status = Status::ColumnNotExist(column_name); + LOG_ERROR(status.message()); + RecoverableError(status); } return it->second; } diff --git a/src/storage/meta/entry/table_index_entry.cpp b/src/storage/meta/entry/table_index_entry.cpp index b795c1f46b..aab96ac221 100644 --- a/src/storage/meta/entry/table_index_entry.cpp +++ b/src/storage/meta/entry/table_index_entry.cpp @@ -94,7 +94,9 @@ SharedPtr TableIndexEntry::NewTableIndexEntry(const SharedPtrcolumn_names_.size() != 1) { - RecoverableError(Status::SyntaxError("Currently, composite index doesn't supported.")); + Status status = Status::SyntaxError("Currently, composite index doesn't supported."); + LOG_ERROR(status.message()); + RecoverableError(status); } return table_index_entry; } @@ -318,7 +320,9 @@ TableIndexEntry::CreateIndexPrepare(BaseTableRef *table_ref, Txn *txn, bool prep Status 
diff --git a/src/storage/meta/catalog.cpp b/src/storage/meta/catalog.cpp
index 1a036a061a..ffcba1763c 100644
--- a/src/storage/meta/catalog.cpp
+++ b/src/storage/meta/catalog.cpp
@@ -413,7 +413,9 @@ SharedPtr<FunctionSet> Catalog::GetFunctionSetByName(Catalog *catalog, String fu
     StringToLower(function_name);
     if (!catalog->function_sets_.contains(function_name)) {
-        RecoverableError(Status::FunctionNotFound(function_name));
+        Status status = Status::FunctionNotFound(function_name);
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     return catalog->function_sets_[function_name];
 }
@@ -521,7 +523,9 @@ UniquePtr<CatalogDeltaEntry> Catalog::LoadFromFileDelta(const DeltaCatalogFileIn
     }
     i32 n_bytes = catalog_delta_entry->GetSizeInBytes();
     if (file_size != n_bytes) {
-        RecoverableError(Status::CatalogCorrupted(catalog_path));
+        Status status = Status::CatalogCorrupted(catalog_path);
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     return catalog_delta_entry;
 }
@@ -871,7 +875,9 @@ UniquePtr<Catalog> Catalog::LoadFromFile(const FullCatalogFileInfo &full_ckp_inf
     String json_str(file_size, 0);
     SizeT n_bytes = catalog_file_handler->Read(json_str.data(), file_size);
     if (file_size != n_bytes) {
-        RecoverableError(Status::CatalogCorrupted(catalog_path));
+        Status status = Status::CatalogCorrupted(catalog_path);
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
 
     nlohmann::json catalog_json = nlohmann::json::parse(json_str);
@@ -913,8 +919,9 @@ void Catalog::SaveFullCatalog(TxnTimeStamp max_commit_ts, String &full_catalog_p
     SizeT n_bytes = catalog_file_handler->Write(catalog_str.data(), catalog_str.size());
     if (n_bytes != catalog_str.size()) {
-        LOG_ERROR(fmt::format("Saving catalog file failed: {}", catalog_tmp_path));
-        RecoverableError(Status::CatalogCorrupted(catalog_tmp_path));
+        Status status = Status::DataCorrupted(catalog_tmp_path);
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     catalog_file_handler->Sync();
     catalog_file_handler->Close();
diff --git a/src/storage/meta/entry/segment_index_entry.cpp b/src/storage/meta/entry/segment_index_entry.cpp
index 21f7fc6cd5..c357b3a314 100644
--- a/src/storage/meta/entry/segment_index_entry.cpp
+++ b/src/storage/meta/entry/segment_index_entry.cpp
@@ -298,7 +298,9 @@ void SegmentIndexEntry::MemIndexInsert(SharedPtr<BlockEntry> block_entry,
                 break;
             }
             default: {
-                RecoverableError(Status::NotSupport("Not support data type for index hnsw."));
+                Status status = Status::NotSupport("Not support data type for index hnsw.");
+                LOG_ERROR(status.message());
+                RecoverableError(status);
             }
         }
         memory_hnsw_indexer_->SetRowCount(row_cnt);
@@ -505,7 +507,9 @@ void SegmentIndexEntry::PopulateEntirely(const SegmentEntry *segment_entry, Txn
                     break;
                 }
                 default: {
-                    RecoverableError(Status::NotSupport("Not support data type for index hnsw."));
+                    Status status = Status::NotSupport("Not support data type for index hnsw.");
+                    LOG_ERROR(status.message());
+                    RecoverableError(status);
                 }
             }
             break;
@@ -575,7 +579,9 @@ Status SegmentIndexEntry::CreateIndexPrepare(const SegmentEntry *segment_entry,
                     break;
                 }
                 default: {
-                    RecoverableError(Status::NotSupport("Not support data type for index ivf."));
+                    Status status = Status::NotSupport("Not support data type for index ivf.");
+                    LOG_ERROR(status.message());
+                    RecoverableError(status);
                 }
             }
             break;
@@ -635,7 +641,9 @@ Status SegmentIndexEntry::CreateIndexDo(atomic_u64 &create_index_idx) {
                 break;
             }
             default: {
-                RecoverableError(Status::NotSupport("Not implemented"));
+                Status status = Status::NotSupport("Not implemented");
+                LOG_ERROR(status.message());
+                RecoverableError(status);
             }
         }
     }
diff --git a/src/storage/meta/entry/table_entry.cpp b/src/storage/meta/entry/table_entry.cpp
index 42f2084162..aa3f77253e 100644
--- a/src/storage/meta/entry/table_entry.cpp
+++ b/src/storage/meta/entry/table_entry.cpp
@@ -1099,7 +1099,9 @@ UniquePtr<TableEntry> TableEntry::Deserialize(const nlohmann::json &table_entry_
 u64 TableEntry::GetColumnIdByName(const String &column_name) const {
     auto it = column_name2column_id_.find(column_name);
     if (it == column_name2column_id_.end()) {
-        RecoverableError(Status::ColumnNotExist(column_name));
+        Status status = Status::ColumnNotExist(column_name);
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     return it->second;
 }
diff --git a/src/storage/meta/entry/table_index_entry.cpp b/src/storage/meta/entry/table_index_entry.cpp
index b795c1f46b..aab96ac221 100644
--- a/src/storage/meta/entry/table_index_entry.cpp
+++ b/src/storage/meta/entry/table_index_entry.cpp
@@ -94,7 +94,9 @@ SharedPtr<TableIndexEntry> TableIndexEntry::NewTableIndexEntry(const SharedPtr
     if (index_base->column_names_.size() != 1) {
-        RecoverableError(Status::SyntaxError("Currently, composite index doesn't supported."));
+        Status status = Status::SyntaxError("Currently, composite index doesn't supported.");
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     return table_index_entry;
 }
@@ -318,7 +320,9 @@ TableIndexEntry::CreateIndexPrepare(BaseTableRef *table_ref, Txn *txn, bool prep
 Status
 TableIndexEntry::CreateIndexDo(BaseTableRef *table_ref, HashMap &create_index_idxes, Txn *txn) {
     if (this->index_base_->column_names_.size() != 1) {
         // TODO
-        RecoverableError(Status::NotSupport("Not implemented"));
+        Status status = Status::NotSupport("Not implemented");
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     auto &index_index = table_ref->index_index_;
     auto iter = index_index->index_snapshots_.find(*index_base_->index_name_);
diff --git a/src/storage/meta/table_index_meta.cpp b/src/storage/meta/table_index_meta.cpp
index d06684edf9..a33e36bf44 100644
--- a/src/storage/meta/table_index_meta.cpp
+++ b/src/storage/meta/table_index_meta.cpp
@@ -153,7 +153,9 @@ TableIndexMeta::GetTableIndexInfo(std::shared_lock &&r_lock,
 }
 
 SharedPtr<String> TableIndexMeta::ToString() {
-    RecoverableError(Status::NotSupport("Not implemented"));
+    Status status = Status::NotSupport("Not implemented");
+    LOG_ERROR(status.message());
+    RecoverableError(status);
     return nullptr;
 }
diff --git a/src/storage/txn/txn_store.cpp b/src/storage/txn/txn_store.cpp
index 9da1339a6f..c06c325f93 100644
--- a/src/storage/txn/txn_store.cpp
+++ b/src/storage/txn/txn_store.cpp
@@ -310,7 +310,9 @@ void TxnTableStore::PrepareCommit1() {
     }
     if (!delete_state_.rows_.empty()) {
         if (!table_entry_->CheckDeleteVisible(delete_state_, txn_)) {
-            RecoverableError(Status::TxnConflict(txn_->TxnID(), "Txn conflict reason."));
+            Status status = Status::TxnConflict(txn_->TxnID(), "Txn conflict reason.");
+            LOG_ERROR(status.message());
+            RecoverableError(status);
         }
     }
 }
diff --git a/src/storage/wal/wal_manager.cpp b/src/storage/wal/wal_manager.cpp
index 46f0c0091e..41680f70f5 100644
--- a/src/storage/wal/wal_manager.cpp
+++ b/src/storage/wal/wal_manager.cpp
@@ -521,36 +521,48 @@ void WalManager::ReplayWalEntry(const WalEntry &entry) {
     for (const auto &cmd : entry.cmds_) {
         LOG_TRACE(fmt::format("Replay wal cmd: {}, commit ts: {}", WalCmd::WalCommandTypeToString(cmd->GetType()).c_str(), entry.commit_ts_));
         switch (cmd->GetType()) {
-            case WalCommandType::CREATE_DATABASE:
+            case WalCommandType::CREATE_DATABASE: {
                 WalCmdCreateDatabaseReplay(*dynamic_cast<WalCmdCreateDatabase *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::DROP_DATABASE:
+            }
+            case WalCommandType::DROP_DATABASE: {
                 WalCmdDropDatabaseReplay(*dynamic_cast<WalCmdDropDatabase *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::CREATE_TABLE:
+            }
+            case WalCommandType::CREATE_TABLE: {
                 WalCmdCreateTableReplay(*dynamic_cast<WalCmdCreateTable *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::DROP_TABLE:
+            }
+            case WalCommandType::DROP_TABLE: {
                 WalCmdDropTableReplay(*dynamic_cast<WalCmdDropTable *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::ALTER_INFO:
-                RecoverableError(Status::NotSupport("WalCmdAlterInfo Replay Not implemented"));
+            }
+            case WalCommandType::ALTER_INFO: {
+                Status status = Status::NotSupport("WalCmdAlterInfo Replay Not implemented");
+                LOG_ERROR(status.message());
+                RecoverableError(status);
                 break;
-            case WalCommandType::CREATE_INDEX:
+            }
+            case WalCommandType::CREATE_INDEX: {
                 WalCmdCreateIndexReplay(*dynamic_cast<WalCmdCreateIndex *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::DROP_INDEX:
+            }
+            case WalCommandType::DROP_INDEX: {
                 WalCmdDropIndexReplay(*dynamic_cast<WalCmdDropIndex *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::IMPORT:
+            }
+            case WalCommandType::IMPORT: {
                 WalCmdImportReplay(*dynamic_cast<WalCmdImport *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::APPEND:
+            }
+            case WalCommandType::APPEND: {
                 WalCmdAppendReplay(*dynamic_cast<WalCmdAppend *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
-            case WalCommandType::DELETE:
+            }
+            case WalCommandType::DELETE: {
                 WalCmdDeleteReplay(*dynamic_cast<WalCmdDelete *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
+            }
             // case WalCommandType::SET_SEGMENT_STATUS_SEALED:
             //     WalCmdSetSegmentStatusSealedReplay(*dynamic_cast<WalCmdSetSegmentStatusSealed *>(cmd.get()), entry.txn_id_,
            //     entry.commit_ts_); break;
@@ -559,11 +571,13 @@ void WalManager::ReplayWalEntry(const WalEntry &entry) {
            //     entry.txn_id_,
            //     entry.commit_ts_);
            //     break;
-            case WalCommandType::CHECKPOINT:
+            case WalCommandType::CHECKPOINT: {
                 break;
-            case WalCommandType::COMPACT:
+            }
+            case WalCommandType::COMPACT: {
                 WalCmdCompactReplay(*static_cast<WalCmdCompact *>(cmd.get()), entry.txn_id_, entry.commit_ts_);
                 break;
+            }
             default: {
                 UnrecoverableError("WalManager::ReplayWalEntry unknown wal command type");
             }
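The wal_manager.cpp hunk also wraps each `case` body in its own braces. This becomes necessary once the `ALTER_INFO` branch declares a local `Status`: C++ forbids jumping over the initialization of a local variable to a later `case` label, so each branch needs its own scope. A self-contained illustration of that rule with placeholder types (not the WAL types themselves):

```cpp
#include <string>

enum class Cmd { kAlter, kOther };          // placeholder command enum

void Replay(Cmd cmd) {
    switch (cmd) {
        case Cmd::kAlter: {                 // braces open a scope for the local below
            std::string status = "not implemented";
            // log + raise would go here
            break;
        }                                   // 'status' is destroyed before the next label
        case Cmd::kOther: {
            break;
        }
    }
}
```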
diff --git a/src/unit_test/function/cast/embedding_cast.cpp b/src/unit_test/function/cast/embedding_cast.cpp
index d44018a4e1..647560f3e7 100644
--- a/src/unit_test/function/cast/embedding_cast.cpp
+++ b/src/unit_test/function/cast/embedding_cast.cpp
@@ -41,7 +41,28 @@ import embedding_info;
 import knn_expr;
 import data_type;
 
-class EmbeddingCastTest : public BaseTest {};
+class EmbeddingCastTest : public BaseTest {
+    void SetUp() override {
+        BaseTest::SetUp();
+        RemoveDbDirs();
+#ifdef INFINITY_DEBUG
+        infinity::GlobalResourceUsage::Init();
+#endif
+        std::shared_ptr<std::string> config_path = nullptr;
+        infinity::InfinityContext::instance().Init(config_path);
+    }
+
+    void TearDown() override {
+        infinity::InfinityContext::instance().UnInit();
+#ifdef INFINITY_DEBUG
+        EXPECT_EQ(infinity::GlobalResourceUsage::GetObjectCount(), 0);
+        EXPECT_EQ(infinity::GlobalResourceUsage::GetRawMemoryCount(), 0);
+        infinity::GlobalResourceUsage::UnInit();
+#endif
+        RemoveDbDirs();
+        BaseTest::TearDown();
+    }
+};
 
 TEST_F(EmbeddingCastTest, embedding_cast1) {
     using namespace infinity;
diff --git a/src/unit_test/parser/search_driver.cpp b/src/unit_test/parser/search_driver.cpp
index 777a8d6183..96e8e1b656 100644
--- a/src/unit_test/parser/search_driver.cpp
+++ b/src/unit_test/parser/search_driver.cpp
@@ -20,10 +20,33 @@ import query_node;
 import term;
 import analyzer;
 import infinity_exception;
+import global_resource_usage;
+import infinity_context;
 
 using namespace infinity;
 
-class SearchDriverTest : public BaseTest {};
+class SearchDriverTest : public BaseTest {
+    void SetUp() override {
+        BaseTest::SetUp();
+        RemoveDbDirs();
+#ifdef INFINITY_DEBUG
+        infinity::GlobalResourceUsage::Init();
+#endif
+        std::shared_ptr<std::string> config_path = nullptr;
+        infinity::InfinityContext::instance().Init(config_path);
+    }
+
+    void TearDown() override {
+        infinity::InfinityContext::instance().UnInit();
+#ifdef INFINITY_DEBUG
+        EXPECT_EQ(infinity::GlobalResourceUsage::GetObjectCount(), 0);
+        EXPECT_EQ(infinity::GlobalResourceUsage::GetRawMemoryCount(), 0);
+        infinity::GlobalResourceUsage::UnInit();
+#endif
+        RemoveDbDirs();
+        BaseTest::TearDown();
+    }
+};
 
 int ParseStream(const SearchDriver &driver, std::istream &ist) {
     // read and parse line by line, ignoring empty lines and comments
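Both unit-test files replace their empty fixtures with one that boots an `InfinityContext` and, in debug builds, checks the `GlobalResourceUsage` counters, presumably so that the logger used by the new `LOG_ERROR` calls is available while the tests run. The essential shape of that fixture, trimmed to the calls visible in the hunks (the class name here is illustrative, not from the patch):

```cpp
class ContextBackedTest : public BaseTest {                    // illustrative fixture name
protected:
    void SetUp() override {
        BaseTest::SetUp();
        RemoveDbDirs();                                        // start from a clean data directory
        std::shared_ptr<std::string> config_path = nullptr;    // nullptr -> default configuration
        infinity::InfinityContext::instance().Init(config_path);
    }

    void TearDown() override {
        infinity::InfinityContext::instance().UnInit();        // mirror SetUp in reverse order
        RemoveDbDirs();
        BaseTest::TearDown();
    }
};
```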
diff --git a/src/unit_test/storage/invertedindex/search/query_match.cpp b/src/unit_test/storage/invertedindex/search/query_match.cpp
index 6618158b18..ebbbb18e3b 100644
--- a/src/unit_test/storage/invertedindex/search/query_match.cpp
+++ b/src/unit_test/storage/invertedindex/search/query_match.cpp
@@ -37,6 +37,7 @@ import search_options;
 import phrase_doc_iterator;
 import global_resource_usage;
 import term_doc_iterator;
+import logger;
 
 using namespace infinity;
 
@@ -307,7 +308,9 @@ void QueryMatchTest::QueryMatch(const String& db_name,
     UniquePtr<QueryNode> query_tree = driver.ParseSingleWithFields(match_expr->fields_, match_expr->matching_text_);
     if (!query_tree) {
-        RecoverableError(Status::ParseMatchExprFailed(match_expr->fields_, match_expr->matching_text_));
+        Status status = Status::ParseMatchExprFailed(match_expr->fields_, match_expr->matching_text_);
+        LOG_ERROR(status.message());
+        RecoverableError(status);
     }
     FullTextQueryContext full_text_query_context;
     full_text_query_context.query_tree_ = std::move(query_tree);
diff --git a/test/sql/dml/compact/test_compact_many_index.slt b/test/sql/dml/compact/test_compact_many_index.slt
index 184ed4524b..d0d07923d5 100644
--- a/test/sql/dml/compact/test_compact_many_index.slt
+++ b/test/sql/dml/compact/test_compact_many_index.slt
@@ -79,3 +79,9 @@ SELECT c1 FROM tbl2 SEARCH MATCH VECTOR (c3, [0.3, 0.3, 0.2, 0.2], 'float', 'l2'
 4
 4
 3
+
+statement ok
+DROP TABLE tbl2;
+
+statement ok
+DROP TABLE tbl1;
\ No newline at end of file
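The query_match.cpp hunk gives the test helper the same treatment when a full-text parse fails. A condensed sketch of that caller-side check; `ParseOrRaise` is a hypothetical helper, while `SearchDriver`, `Status::ParseMatchExprFailed`, `LOG_ERROR`, and `RecoverableError` are the project calls visible in the hunk:

```cpp
// Hypothetical helper wrapping the parse-failure pattern from the hunk above.
UniquePtr<QueryNode> ParseOrRaise(const SearchDriver &driver, const String &fields, const String &text) {
    UniquePtr<QueryNode> query_tree = driver.ParseSingleWithFields(fields, text);
    if (!query_tree) {
        Status status = Status::ParseMatchExprFailed(fields, text);
        LOG_ERROR(status.message());   // make the offending query visible in the log
        RecoverableError(status);      // fail the statement, not the server
    }
    return query_tree;
}
```

The trailing `.slt` hunk simply drops `tbl2` and `tbl1` at the end of the test so repeated runs start from a clean catalog.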