From a93e87c7e261b034ba04add5737c88bbe2fb41aa Mon Sep 17 00:00:00 2001 From: Jin Hai Date: Sat, 18 May 2024 23:08:55 +0800 Subject: [PATCH] Change log level (#1222) ### What problem does this PR solve? Demote routine progress log messages — background tasks (checkpoint, cleanup, optimize, compaction), data import (CSV/JSON/JSONL block and segment saves), catalog/WAL maintenance, and full-text search timing — from LOG_TRACE/LOG_INFO to LOG_DEBUG so that per-operation progress logging uses one consistent level, and fix minor typos in log text ("pivit" -> "pivot", "tablename" -> "table name"). ### Type of change - [x] Refactoring Signed-off-by: Jin Hai --- src/common/utility/random.cpp | 2 +- src/executor/operator/physical_compact.cpp | 4 ++-- .../operator/physical_compact_finish.cpp | 2 +- src/executor/operator/physical_import.cpp | 12 +++++----- src/executor/operator/physical_match.cpp | 22 +++++++++---------- src/storage/background_process.cpp | 16 +++++++------- src/storage/bg_task/periodic_trigger.cpp | 4 ++-- src/storage/compaction_process.cpp | 6 +++-- .../blockmax_maxscore_iterator.cpp | 4 ++-- .../invertedindex/search/query_builder.cpp | 4 ++-- .../invertedindex/search/query_node.cpp | 2 +- .../invertedindex/search/search_driver.cpp | 2 +- src/storage/meta/catalog.cpp | 6 ++--- src/storage/meta/cleanup_scanner.cpp | 2 +- src/storage/meta/entry/chunk_index_entry.cpp | 4 ++-- src/storage/meta/entry/table_entry.cpp | 4 ++-- src/storage/meta/meta_map.cppm | 2 +- src/storage/txn/txn_store.cpp | 2 +- src/storage/wal/log_file.cpp | 4 ++-- src/storage/wal/wal_manager.cpp | 4 ++-- 20 files changed, 55 insertions(+), 53 deletions(-) diff --git a/src/common/utility/random.cpp b/src/common/utility/random.cpp index 7e70749b63..cc88b9aff1 100644 --- a/src/common/utility/random.cpp +++ b/src/common/utility/random.cpp @@ -53,7 +53,7 @@ SharedPtr DetermineRandomString(const String &parent_dir, const String & result = fmt::format("{}/{}_{}", parent_dir, RandomString(DEFAULT_RANDOM_NAME_LEN), name); ++cnt; } while (!fs.CreateDirectoryNoExp(result)); - LOG_TRACE(fmt::format("Created directory {} in {} times", result, cnt)); + LOG_DEBUG(fmt::format("Created directory {} in {} times", result, cnt)); return MakeShared(std::move(result)); } 
diff --git a/src/executor/operator/physical_compact.cpp b/src/executor/operator/physical_compact.cpp index fdb2eec7e4..d8e03e57e1 100644 --- a/src/executor/operator/physical_compact.cpp +++ b/src/executor/operator/physical_compact.cpp @@ -81,7 +81,7 @@ class GreedyCompactableSegmentsGenerator { void PhysicalCompact::Init() { if (compact_type_ == CompactStatementType::kManual) { TableEntry *table_entry = base_table_ref_->table_entry_ptr_; - LOG_INFO(fmt::format("Manual compact {} start", *table_entry->GetTableName())); + LOG_DEBUG(fmt::format("Manual compact {} start", *table_entry->GetTableName())); if (!table_entry->CompactPrepare()) { LOG_WARN(fmt::format("Table {} is not compactable.", *table_entry->GetTableName())); return; @@ -96,7 +96,7 @@ void PhysicalCompact::Init() { } } else { TableEntry *table_entry = base_table_ref_->table_entry_ptr_; - LOG_INFO(fmt::format("Auto compact {} start", *table_entry->GetTableName())); + LOG_DEBUG(fmt::format("Auto compact {} start", *table_entry->GetTableName())); Vector compactible_segments; const auto &block_index = *base_table_ref_->block_index_; for (const auto &[segment_id, segment_snapshot] : block_index.segment_block_index_) { diff --git a/src/executor/operator/physical_compact_finish.cpp b/src/executor/operator/physical_compact_finish.cpp index 278a0521f0..ae5d173f9e 100644 --- a/src/executor/operator/physical_compact_finish.cpp +++ b/src/executor/operator/physical_compact_finish.cpp @@ -67,7 +67,7 @@ void PhysicalCompactFinish::SaveSegmentData(QueryContext *query_context, const C ss << "to new segment " << new_segment->segment_id(); segment_data.emplace_back(compact_segment_data.new_segment_, compact_segment_data.old_segments_); } - LOG_INFO(ss.str()); + LOG_DEBUG(ss.str()); txn->Compact(table_entry, std::move(segment_data), compact_type_); String db_name = *table_entry->GetDBName(); diff --git a/src/executor/operator/physical_import.cpp b/src/executor/operator/physical_import.cpp index 10a7708f1b..a13df05873 100644 
--- a/src/executor/operator/physical_import.cpp +++ b/src/executor/operator/physical_import.cpp @@ -329,10 +329,10 @@ void PhysicalImport::ImportJSONL(QueryContext *query_context, ImportOperatorStat block_entry->IncreaseRowCount(1); if (block_entry->GetAvailableCapacity() <= 0) { - LOG_TRACE(fmt::format("Block {} saved", block_entry->block_id())); + LOG_DEBUG(fmt::format("Block {} saved", block_entry->block_id())); segment_entry->AppendBlockEntry(std::move(block_entry)); if (segment_entry->Room() <= 0) { - LOG_TRACE(fmt::format("Segment {} saved", segment_entry->segment_id())); + LOG_DEBUG(fmt::format("Segment {} saved", segment_entry->segment_id())); SaveSegmentData(table_entry_, txn, segment_entry); u64 segment_id = Catalog::GetNextSegmentID(table_entry_); segment_entry = SegmentEntry::NewSegmentEntry(table_entry_, segment_id, txn); @@ -393,10 +393,10 @@ void PhysicalImport::ImportJSON(QueryContext *query_context, ImportOperatorState for (const auto &json_entry : json_arr) { if (block_entry->GetAvailableCapacity() <= 0) { - LOG_TRACE(fmt::format("Block {} saved", block_entry->block_id())); + LOG_DEBUG(fmt::format("Block {} saved", block_entry->block_id())); segment_entry->AppendBlockEntry(std::move(block_entry)); if (segment_entry->Room() <= 0) { - LOG_TRACE(fmt::format("Segment {} saved", segment_entry->segment_id())); + LOG_DEBUG(fmt::format("Segment {} saved", segment_entry->segment_id())); SaveSegmentData(table_entry_, txn, segment_entry); u64 segment_id = Catalog::GetNextSegmentID(table_entry_); segment_entry = SegmentEntry::NewSegmentEntry(table_entry_, segment_id, txn); @@ -516,11 +516,11 @@ void PhysicalImport::CSVRowHandler(void *context) { ++parser_context->row_count_; if (block_entry->GetAvailableCapacity() <= 0) { - LOG_TRACE(fmt::format("Block {} saved", block_entry->block_id())); + LOG_DEBUG(fmt::format("Block {} saved", block_entry->block_id())); segment_entry->AppendBlockEntry(std::move(block_entry)); // we have already used all space of the 
segment if (segment_entry->Room() <= 0) { - LOG_TRACE(fmt::format("Segment {} saved", segment_entry->segment_id())); + LOG_DEBUG(fmt::format("Segment {} saved", segment_entry->segment_id())); SaveSegmentData(table_entry, txn, segment_entry); u64 segment_id = Catalog::GetNextSegmentID(parser_context->table_entry_); segment_entry = SegmentEntry::NewSegmentEntry(table_entry, segment_id, txn); diff --git a/src/executor/operator/physical_match.cpp b/src/executor/operator/physical_match.cpp index e4d1ef6f38..9c8ec305c8 100644 --- a/src/executor/operator/physical_match.cpp +++ b/src/executor/operator/physical_match.cpp @@ -525,7 +525,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator QueryBuilder query_builder(txn, base_table_ref_); auto finish_init_query_builder_time = std::chrono::high_resolution_clock::now(); TimeDurationType query_builder_init_duration = finish_init_query_builder_time - execute_start_time; - LOG_TRACE(fmt::format("PhysicalMatch Part 0.1: Init QueryBuilder time: {} ms", query_builder_init_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch 0: Init QueryBuilder time: {} ms", query_builder_init_duration.count())); const Map &column2analyzer = query_builder.GetColumn2Analyzer(); // 1.2 parse options into map, populate default_field SearchOptions search_ops(match_expr_->options_text_); @@ -556,7 +556,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator auto finish_parse_query_tree_time = std::chrono::high_resolution_clock::now(); TimeDurationType parse_query_tree_duration = finish_parse_query_tree_time - finish_init_query_builder_time; - LOG_TRACE(fmt::format("PhysicalMatch Part 0.2: Parse QueryNode tree time: {} ms", parse_query_tree_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch 1: Parse QueryNode tree time: {} ms", parse_query_tree_duration.count())); // 2 build query iterator // result @@ -615,7 +615,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext 
*query_context, Operator } auto finish_query_builder_time = std::chrono::high_resolution_clock::now(); TimeDurationType query_builder_duration = finish_query_builder_time - finish_parse_query_tree_time; - LOG_TRACE(fmt::format("PhysicalMatch Part 1: Build Query iterator time: {} ms", query_builder_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch Part 2: Build Query iterator time: {} ms", query_builder_duration.count())); if (use_block_max_iter) { blockmax_score_result = MakeUniqueForOverwrite(top_n); blockmax_row_id_result = MakeUniqueForOverwrite(top_n); @@ -701,7 +701,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator } auto finish_query_time = std::chrono::high_resolution_clock::now(); TimeDurationType query_duration = finish_query_time - finish_query_builder_time; - LOG_TRACE(fmt::format("PhysicalMatch Part 2: Full text search time: {} ms", query_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch Part 3: Full text search time: {} ms", query_duration.count())); #ifdef INFINITY_DEBUG { OStringStream stat_info; @@ -719,7 +719,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator stat_info << "blockmax_duration_3: " << blockmax_duration_3 << std::endl; stat_info << "blockmax_loop_cnt_2: " << blockmax_loop_cnt_2 << std::endl; } - LOG_TRACE(std::move(stat_info).str()); + LOG_DEBUG(std::move(stat_info).str()); } if (use_ordinary_iter and use_block_max_iter) { OStringStream compare_info; @@ -730,7 +730,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator compare_info << "duration ratio 2/1: " << blockmax_duration_2.count() / blockmax_duration.count() << std::endl; compare_info << "duration ratio 3/2: " << blockmax_duration_3.count() / blockmax_duration_2.count() << std::endl; compare_info << "loop count ratio: " << (static_cast(blockmax_loop_cnt) / ordinary_loop_cnt) << std::endl; - LOG_TRACE(std::move(compare_info).str()); + 
LOG_DEBUG(std::move(compare_info).str()); if (blockmax_result_count != blockmax_result_count_2 or ordinary_result_count != blockmax_result_count or blockmax_loop_cnt != blockmax_loop_cnt_2) { Status status = Status::SyntaxError("Debug Info: result count mismatch!"); @@ -743,10 +743,10 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator } } #endif - LOG_TRACE(fmt::format("Full text search result count: {}", result_count)); + LOG_DEBUG(fmt::format("Full text search result count: {}", result_count)); auto begin_output_time = std::chrono::high_resolution_clock::now(); TimeDurationType output_info_duration = begin_output_time - finish_query_time; - LOG_TRACE(fmt::format("PhysicalMatch Part 3: Output stat info time: {} ms", output_info_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch Part 4: Output stat info time: {} ms", output_info_duration.count())); // 4 populate result DataBlock // 4.1 prepare first output_data_block auto &output_data_blocks = operator_state->data_block_array_; @@ -796,7 +796,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator operator_state->SetComplete(); auto finish_output_time = std::chrono::high_resolution_clock::now(); TimeDurationType output_duration = finish_output_time - begin_output_time; - LOG_TRACE(fmt::format("PhysicalMatch Part 4: Output data time: {} ms", output_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch Part 5: Output data time: {} ms", output_duration.count())); return true; } @@ -821,7 +821,7 @@ bool PhysicalMatch::Execute(QueryContext *query_context, OperatorState *operator bool try_result = common_query_filter_->TryFinishBuild(txn); auto finish_filter_time = std::chrono::high_resolution_clock::now(); std::chrono::duration filter_duration = finish_filter_time - start_time; - LOG_TRACE(fmt::format("PhysicalMatch Prepare: Filter time: {} ms", filter_duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch Prepare: Filter time: {} ms", 
filter_duration.count())); if (!try_result) { // not ready, abort and wait for next time return true; @@ -830,7 +830,7 @@ bool PhysicalMatch::Execute(QueryContext *query_context, OperatorState *operator bool return_value = ExecuteInnerHomebrewed(query_context, operator_state); auto end_time = std::chrono::high_resolution_clock::now(); std::chrono::duration duration = end_time - start_time; - LOG_TRACE(fmt::format("PhysicalMatch Execute time: {} ms", duration.count())); + LOG_DEBUG(fmt::format("PhysicalMatch Execute time: {} ms", duration.count())); return return_value; } diff --git a/src/storage/background_process.cpp b/src/storage/background_process.cpp index db6997b8f2..afcf84c542 100644 --- a/src/storage/background_process.cpp +++ b/src/storage/background_process.cpp @@ -62,11 +62,11 @@ void BGTaskProcessor::Process() { break; } case BGTaskType::kForceCheckpoint: { - LOG_TRACE("Force checkpoint in background"); + LOG_DEBUG("Force checkpoint in background"); ForceCheckpointTask *force_ckp_task = static_cast(bg_task.get()); auto [max_commit_ts, wal_size] = catalog_->GetCheckpointState(); wal_manager_->Checkpoint(force_ckp_task, max_commit_ts, wal_size); - LOG_TRACE("Force checkpoint in background done"); + LOG_DEBUG("Force checkpoint in background done"); break; } case BGTaskType::kAddDeltaEntry: { @@ -75,26 +75,26 @@ void BGTaskProcessor::Process() { break; } case BGTaskType::kCheckpoint: { - LOG_TRACE("Checkpoint in background"); + LOG_DEBUG("Checkpoint in background"); auto *task = static_cast(bg_task.get()); bool is_full_checkpoint = task->is_full_checkpoint_; auto [max_commit_ts, wal_size] = catalog_->GetCheckpointState(); wal_manager_->Checkpoint(is_full_checkpoint, max_commit_ts, wal_size); - LOG_TRACE("Checkpoint in background done"); + LOG_DEBUG("Checkpoint in background done"); break; } case BGTaskType::kCleanup: { - LOG_TRACE("Cleanup in background"); + LOG_DEBUG("Cleanup in background"); auto task = static_cast(bg_task.get()); task->Execute(); - 
LOG_TRACE("Cleanup in background done"); + LOG_DEBUG("Cleanup in background done"); break; } case BGTaskType::kUpdateSegmentBloomFilterData: { - LOG_TRACE("Update segment bloom filter"); + LOG_DEBUG("Update segment bloom filter"); auto *task = static_cast(bg_task.get()); task->Execute(); - LOG_TRACE("Update segment bloom filter done"); + LOG_DEBUG("Update segment bloom filter done"); break; } default: { diff --git a/src/storage/bg_task/periodic_trigger.cpp b/src/storage/bg_task/periodic_trigger.cpp index 34a09a4cf7..b6f7c2fad2 100644 --- a/src/storage/bg_task/periodic_trigger.cpp +++ b/src/storage/bg_task/periodic_trigger.cpp @@ -38,7 +38,7 @@ void CleanupPeriodicTrigger::Trigger() { return; } last_visible_ts_ = visible_ts; - LOG_TRACE(fmt::format("Cleanup visible timestamp: {}", visible_ts)); + LOG_DEBUG(fmt::format("Cleanup visible timestamp: {}", visible_ts)); auto buffer_mgr = txn_mgr_->GetBufferMgr(); auto cleanup_task = MakeShared(catalog_, visible_ts, buffer_mgr); @@ -47,7 +47,7 @@ void CleanupPeriodicTrigger::Trigger() { void CheckpointPeriodicTrigger::Trigger() { auto checkpoint_task = MakeShared(is_full_checkpoint_); - LOG_INFO(fmt::format("Trigger {} periodic checkpoint.", is_full_checkpoint_ ? "FULL" : "DELTA")); + LOG_DEBUG(fmt::format("Trigger {} periodic checkpoint.", is_full_checkpoint_ ? "FULL" : "DELTA")); if (!wal_mgr_->TrySubmitCheckpointTask(std::move(checkpoint_task))) { LOG_TRACE(fmt::format("Skip {} checkpoint(time) because there is already a checkpoint task running.", is_full_checkpoint_ ? 
"FULL" : "DELTA")); } diff --git a/src/storage/compaction_process.cpp b/src/storage/compaction_process.cpp index 7b90e77a12..4f324dcb9a 100644 --- a/src/storage/compaction_process.cpp +++ b/src/storage/compaction_process.cpp @@ -189,13 +189,15 @@ void CompactionProcessor::Process() { break; } case BGTaskType::kNotifyCompact: { + LOG_DEBUG("Do compact start."); DoCompact(); + LOG_DEBUG("Do compact end."); break; } case BGTaskType::kNotifyOptimize: { - LOG_TRACE("Optimize start."); + LOG_DEBUG("Optimize start."); ScanAndOptimize(); - LOG_TRACE("Optimize done."); + LOG_DEBUG("Optimize done."); break; } default: { diff --git a/src/storage/invertedindex/search/early_terminate_iterator/blockmax_maxscore_iterator.cpp b/src/storage/invertedindex/search/early_terminate_iterator/blockmax_maxscore_iterator.cpp index d9d316f04b..4d36f39486 100644 --- a/src/storage/invertedindex/search/early_terminate_iterator/blockmax_maxscore_iterator.cpp +++ b/src/storage/invertedindex/search/early_terminate_iterator/blockmax_maxscore_iterator.cpp @@ -34,13 +34,13 @@ BlockMaxMaxscoreIterator::~BlockMaxMaxscoreIterator() { << " not_use_prev_candidate_cnt: " << not_use_prev_candidate_cnt_ << "\n"; oss << " pivot_history:\n"; for (const auto &p : pivot_history_) { - oss << " pivit value: " << p.first << " at doc_id: " << p.second << '\n'; + oss << " pivot value: " << p.first << " at doc_id: " << p.second << '\n'; } oss << " must_have_history:\n"; for (const auto &p : must_have_history_) { oss << " must_have value: " << p.first << " at doc_id: " << p.second << '\n'; } - LOG_TRACE(std::move(oss).str()); + LOG_DEBUG(std::move(oss).str()); } BlockMaxMaxscoreIterator::BlockMaxMaxscoreIterator(Vector> iterators) : sorted_iterators_(std::move(iterators)) { diff --git a/src/storage/invertedindex/search/query_builder.cpp b/src/storage/invertedindex/search/query_builder.cpp index d7567f7609..39c188512d 100644 --- a/src/storage/invertedindex/search/query_builder.cpp +++ 
b/src/storage/invertedindex/search/query_builder.cpp @@ -65,7 +65,7 @@ UniquePtr QueryBuilder::CreateSearch(FullTextQueryContext &context) } else { oss << "Empty tree!\n"; } - LOG_TRACE(std::move(oss).str()); + LOG_DEBUG(std::move(oss).str()); } #endif return result; @@ -87,7 +87,7 @@ UniquePtr QueryBuilder::CreateEarlyTerminateSearch(FullT } else { oss << "Empty tree!\n"; } - LOG_TRACE(std::move(oss).str()); + LOG_DEBUG(std::move(oss).str()); } #endif return result; diff --git a/src/storage/invertedindex/search/query_node.cpp b/src/storage/invertedindex/search/query_node.cpp index c3d45278e6..93fb020705 100644 --- a/src/storage/invertedindex/search/query_node.cpp +++ b/src/storage/invertedindex/search/query_node.cpp @@ -100,7 +100,7 @@ std::unique_ptr QueryNode::GetOptimizedQueryTree(std::unique_ptr SearchDriver::ParseSingleWithFields(const std::string } else { oss << "Empty query tree!\n"; } - LOG_TRACE(std::move(oss).str()); + LOG_DEBUG(std::move(oss).str()); } #endif return parsed_query_tree; diff --git a/src/storage/meta/catalog.cpp b/src/storage/meta/catalog.cpp index ffcba1763c..5f5fdd08c1 100644 --- a/src/storage/meta/catalog.cpp +++ b/src/storage/meta/catalog.cpp @@ -931,7 +931,7 @@ void Catalog::SaveFullCatalog(TxnTimeStamp max_commit_ts, String &full_catalog_p global_catalog_delta_entry_->InitFullCheckpointTs(max_commit_ts); - LOG_INFO(fmt::format("Saved catalog to: {}", full_catalog_path)); + LOG_DEBUG(fmt::format("Saved catalog to: {}", full_catalog_path)); } // called by bg_task @@ -945,7 +945,7 @@ bool Catalog::SaveDeltaCatalog(TxnTimeStamp max_commit_ts, String &delta_catalog LOG_TRACE("Save delta catalog ops is empty. 
Skip flush."); return true; } - LOG_TRACE(fmt::format("Save delta catalog commit ts:{}, checkpoint max commit ts:{}.", flush_delta_entry->commit_ts(), max_commit_ts)); + LOG_DEBUG(fmt::format("Save delta catalog commit ts:{}, checkpoint max commit ts:{}.", flush_delta_entry->commit_ts(), max_commit_ts)); for (auto &op : flush_delta_entry->operations()) { switch (op->GetType()) { @@ -992,7 +992,7 @@ bool Catalog::SaveDeltaCatalog(TxnTimeStamp max_commit_ts, String &delta_catalog // } // LOG_INFO(ss.str()); // } - LOG_TRACE(fmt::format("Save delta catalog to: {}, size: {}.", delta_catalog_path, act_size)); + LOG_DEBUG(fmt::format("Save delta catalog to: {}, size: {}.", delta_catalog_path, act_size)); return false; } diff --git a/src/storage/meta/cleanup_scanner.cpp b/src/storage/meta/cleanup_scanner.cpp index f93c79cd24..edac444673 100644 --- a/src/storage/meta/cleanup_scanner.cpp +++ b/src/storage/meta/cleanup_scanner.cpp @@ -35,7 +35,7 @@ CleanupScanner::CleanupScanner(Catalog *catalog, TxnTimeStamp visible_ts, Buffer void CleanupScanner::AddEntry(SharedPtr entry) { entries_.emplace_back(std::move(entry)); } void CleanupScanner::Scan() { - LOG_TRACE(fmt::format("CleanupScanner: Start scanning, ts: {}", visible_ts_)); + LOG_DEBUG(fmt::format("CleanupScanner: Start scanning, ts: {}", visible_ts_)); catalog_->PickCleanup(this); } diff --git a/src/storage/meta/entry/chunk_index_entry.cpp b/src/storage/meta/entry/chunk_index_entry.cpp index 5eb9fe891c..996f909181 100644 --- a/src/storage/meta/entry/chunk_index_entry.cpp +++ b/src/storage/meta/entry/chunk_index_entry.cpp @@ -224,9 +224,9 @@ void ChunkIndexEntry::Cleanup() { LocalFileSystem fs; fs.DeleteFile(posting_file); fs.DeleteFile(dict_file); - LOG_TRACE(fmt::format("cleaned chunk index entry {}", index_prefix)); + LOG_DEBUG(fmt::format("cleaned chunk index entry {}", index_prefix)); } else { - LOG_TRACE(fmt::format("cleaned chunk index entry {}/{}", *index_dir, chunk_id_)); + LOG_DEBUG(fmt::format("cleaned chunk 
index entry {}/{}", *index_dir, chunk_id_)); } } diff --git a/src/storage/meta/entry/table_entry.cpp b/src/storage/meta/entry/table_entry.cpp index aa3f77253e..7b4d823018 100644 --- a/src/storage/meta/entry/table_entry.cpp +++ b/src/storage/meta/entry/table_entry.cpp @@ -507,12 +507,12 @@ Status TableEntry::CommitCompact(TransactionID txn_id, TxnTimeStamp commit_ts, T switch (compact_store.type_) { case CompactStatementType::kAuto: { compaction_alg_->CommitCompact(txn_id); - LOG_TRACE(fmt::format("Compact commit picked, tablename: {}", *this->GetTableName())); + LOG_DEBUG(fmt::format("Compact commit picked, table name: {}", *this->GetTableName())); break; } case CompactStatementType::kManual: { // reinitialize compaction_alg_ with new segments and enable it - LOG_TRACE(fmt::format("Compact commit whole, tablename: {}", *this->GetTableName())); + LOG_DEBUG(fmt::format("Compact commit whole, table name: {}", *this->GetTableName())); compaction_alg_->Enable({}); break; } diff --git a/src/storage/meta/meta_map.cppm b/src/storage/meta/meta_map.cppm index 30b4de4b98..85f0370615 100644 --- a/src/storage/meta/meta_map.cppm +++ b/src/storage/meta/meta_map.cppm @@ -143,7 +143,7 @@ void MetaMap::PickCleanup(CleanupScanner *scanner) { std::unique_lock w_lock(rw_locker_); for (auto iter = meta_map_.begin(); iter != meta_map_.end();) { if (iter->second->Empty()) { - LOG_TRACE(fmt::format("PickCleanup: all_delete: {}", iter->first)); + LOG_DEBUG(fmt::format("PickCleanup: all_delete: {}", iter->first)); iter = meta_map_.erase(iter); } else { ++iter; diff --git a/src/storage/txn/txn_store.cpp b/src/storage/txn/txn_store.cpp index c06c325f93..17fc2d8f09 100644 --- a/src/storage/txn/txn_store.cpp +++ b/src/storage/txn/txn_store.cpp @@ -328,7 +328,7 @@ void TxnTableStore::PrepareCommit(TransactionID txn_id, TxnTimeStamp commit_ts, // Attention: "compact" needs to be ahead of "delete" if (compact_state_.type_ != CompactStatementType::kInvalid) { - LOG_TRACE(fmt::format("Commit compact, 
table dir: {}, commit ts: {}", *table_entry_->TableEntryDir(), commit_ts)); + LOG_DEBUG(fmt::format("Commit compact, table dir: {}, commit ts: {}", *table_entry_->TableEntryDir(), commit_ts)); Catalog::CommitCompact(table_entry_, txn_id, commit_ts, compact_state_); } diff --git a/src/storage/wal/log_file.cpp b/src/storage/wal/log_file.cpp index fa266867e4..16fad184f6 100644 --- a/src/storage/wal/log_file.cpp +++ b/src/storage/wal/log_file.cpp @@ -71,7 +71,7 @@ void CatalogFile::RecycleCatalogFile(TxnTimeStamp max_commit_ts, const String &c if (full_info.max_commit_ts_ < max_commit_ts) { LocalFileSystem fs; fs.DeleteFile(full_info.path_); - LOG_INFO(fmt::format("WalManager::Checkpoint delete catalog file: {}", full_info.path_)); + LOG_DEBUG(fmt::format("WalManager::Checkpoint delete catalog file: {}", full_info.path_)); } else if (full_info.max_commit_ts_ == max_commit_ts) { found = true; } @@ -83,7 +83,7 @@ void CatalogFile::RecycleCatalogFile(TxnTimeStamp max_commit_ts, const String &c if (delta_info.max_commit_ts_ <= max_commit_ts) { LocalFileSystem fs; fs.DeleteFile(delta_info.path_); - LOG_INFO(fmt::format("WalManager::Checkpoint delete catalog file: {}", delta_info.path_)); + LOG_DEBUG(fmt::format("WalManager::Checkpoint delete catalog file: {}", delta_info.path_)); } } } diff --git a/src/storage/wal/wal_manager.cpp b/src/storage/wal/wal_manager.cpp index 41680f70f5..8f7e6ab64a 100644 --- a/src/storage/wal/wal_manager.cpp +++ b/src/storage/wal/wal_manager.cpp @@ -305,7 +305,7 @@ void WalManager::CheckpointInner(bool is_full_checkpoint, Txn *txn, TxnTimeStamp } } try { - LOG_TRACE(fmt::format("{} Checkpoint Txn txn_id: {}, begin_ts: {}, max_commit_ts {}", + LOG_DEBUG(fmt::format("{} Checkpoint Txn txn_id: {}, begin_ts: {}, max_commit_ts {}", is_full_checkpoint ? 
"FULL" : "DELTA", txn->TxnID(), txn->BeginTS(), @@ -316,7 +316,7 @@ void WalManager::CheckpointInner(bool is_full_checkpoint, Txn *txn, TxnTimeStamp } SetLastCkpWalSize(wal_size); - LOG_TRACE(fmt::format("{} Checkpoint is done for commit_ts <= {}", is_full_checkpoint ? "FULL" : "DELTA", max_commit_ts)); + LOG_DEBUG(fmt::format("{} Checkpoint is done for commit_ts <= {}", is_full_checkpoint ? "FULL" : "DELTA", max_commit_ts)); } catch (RecoverableException &e) { LOG_ERROR(fmt::format("WalManager::Checkpoint failed: {}", e.what())); } catch (UnrecoverableException &e) {