Skip to content

Commit

Permalink
Change log level (#1222)
Browse files Browse the repository at this point in the history
### What problem does this PR solve?

Normalizes log levels across the storage and executor subsystems: routine
progress messages (block/segment saves, compaction start, checkpoint and
cleanup background tasks, full-text search timing, catalog saves) are moved
from `LOG_TRACE`/`LOG_INFO` to `LOG_DEBUG` so that the DEBUG level captures
operational detail consistently. Also fixes a "pivit" → "pivot" typo in the
BlockMaxMaxscore iterator's debug output.

### Type of change

- [x] Refactoring

Signed-off-by: Jin Hai <[email protected]>
  • Loading branch information
JinHai-CN authored May 18, 2024
1 parent 735252b commit a93e87c
Show file tree
Hide file tree
Showing 20 changed files with 55 additions and 53 deletions.
2 changes: 1 addition & 1 deletion src/common/utility/random.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ SharedPtr<String> DetermineRandomString(const String &parent_dir, const String &
result = fmt::format("{}/{}_{}", parent_dir, RandomString(DEFAULT_RANDOM_NAME_LEN), name);
++cnt;
} while (!fs.CreateDirectoryNoExp(result));
LOG_TRACE(fmt::format("Created directory {} in {} times", result, cnt));
LOG_DEBUG(fmt::format("Created directory {} in {} times", result, cnt));
return MakeShared<String>(std::move(result));
}

Expand Down
4 changes: 2 additions & 2 deletions src/executor/operator/physical_compact.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ class GreedyCompactableSegmentsGenerator {
void PhysicalCompact::Init() {
if (compact_type_ == CompactStatementType::kManual) {
TableEntry *table_entry = base_table_ref_->table_entry_ptr_;
LOG_INFO(fmt::format("Manual compact {} start", *table_entry->GetTableName()));
LOG_DEBUG(fmt::format("Manual compact {} start", *table_entry->GetTableName()));
if (!table_entry->CompactPrepare()) {
LOG_WARN(fmt::format("Table {} is not compactable.", *table_entry->GetTableName()));
return;
Expand All @@ -96,7 +96,7 @@ void PhysicalCompact::Init() {
}
} else {
TableEntry *table_entry = base_table_ref_->table_entry_ptr_;
LOG_INFO(fmt::format("Auto compact {} start", *table_entry->GetTableName()));
LOG_DEBUG(fmt::format("Auto compact {} start", *table_entry->GetTableName()));
Vector<SegmentEntry *> compactible_segments;
const auto &block_index = *base_table_ref_->block_index_;
for (const auto &[segment_id, segment_snapshot] : block_index.segment_block_index_) {
Expand Down
2 changes: 1 addition & 1 deletion src/executor/operator/physical_compact_finish.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ void PhysicalCompactFinish::SaveSegmentData(QueryContext *query_context, const C
ss << "to new segment " << new_segment->segment_id();
segment_data.emplace_back(compact_segment_data.new_segment_, compact_segment_data.old_segments_);
}
LOG_INFO(ss.str());
LOG_DEBUG(ss.str());

txn->Compact(table_entry, std::move(segment_data), compact_type_);
String db_name = *table_entry->GetDBName();
Expand Down
12 changes: 6 additions & 6 deletions src/executor/operator/physical_import.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -329,10 +329,10 @@ void PhysicalImport::ImportJSONL(QueryContext *query_context, ImportOperatorStat
block_entry->IncreaseRowCount(1);

if (block_entry->GetAvailableCapacity() <= 0) {
LOG_TRACE(fmt::format("Block {} saved", block_entry->block_id()));
LOG_DEBUG(fmt::format("Block {} saved", block_entry->block_id()));
segment_entry->AppendBlockEntry(std::move(block_entry));
if (segment_entry->Room() <= 0) {
LOG_TRACE(fmt::format("Segment {} saved", segment_entry->segment_id()));
LOG_DEBUG(fmt::format("Segment {} saved", segment_entry->segment_id()));
SaveSegmentData(table_entry_, txn, segment_entry);
u64 segment_id = Catalog::GetNextSegmentID(table_entry_);
segment_entry = SegmentEntry::NewSegmentEntry(table_entry_, segment_id, txn);
Expand Down Expand Up @@ -393,10 +393,10 @@ void PhysicalImport::ImportJSON(QueryContext *query_context, ImportOperatorState

for (const auto &json_entry : json_arr) {
if (block_entry->GetAvailableCapacity() <= 0) {
LOG_TRACE(fmt::format("Block {} saved", block_entry->block_id()));
LOG_DEBUG(fmt::format("Block {} saved", block_entry->block_id()));
segment_entry->AppendBlockEntry(std::move(block_entry));
if (segment_entry->Room() <= 0) {
LOG_TRACE(fmt::format("Segment {} saved", segment_entry->segment_id()));
LOG_DEBUG(fmt::format("Segment {} saved", segment_entry->segment_id()));
SaveSegmentData(table_entry_, txn, segment_entry);
u64 segment_id = Catalog::GetNextSegmentID(table_entry_);
segment_entry = SegmentEntry::NewSegmentEntry(table_entry_, segment_id, txn);
Expand Down Expand Up @@ -516,11 +516,11 @@ void PhysicalImport::CSVRowHandler(void *context) {
++parser_context->row_count_;

if (block_entry->GetAvailableCapacity() <= 0) {
LOG_TRACE(fmt::format("Block {} saved", block_entry->block_id()));
LOG_DEBUG(fmt::format("Block {} saved", block_entry->block_id()));
segment_entry->AppendBlockEntry(std::move(block_entry));
// we have already used all space of the segment
if (segment_entry->Room() <= 0) {
LOG_TRACE(fmt::format("Segment {} saved", segment_entry->segment_id()));
LOG_DEBUG(fmt::format("Segment {} saved", segment_entry->segment_id()));
SaveSegmentData(table_entry, txn, segment_entry);
u64 segment_id = Catalog::GetNextSegmentID(parser_context->table_entry_);
segment_entry = SegmentEntry::NewSegmentEntry(table_entry, segment_id, txn);
Expand Down
22 changes: 11 additions & 11 deletions src/executor/operator/physical_match.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -525,7 +525,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
QueryBuilder query_builder(txn, base_table_ref_);
auto finish_init_query_builder_time = std::chrono::high_resolution_clock::now();
TimeDurationType query_builder_init_duration = finish_init_query_builder_time - execute_start_time;
LOG_TRACE(fmt::format("PhysicalMatch Part 0.1: Init QueryBuilder time: {} ms", query_builder_init_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch 0: Init QueryBuilder time: {} ms", query_builder_init_duration.count()));
const Map<String, String> &column2analyzer = query_builder.GetColumn2Analyzer();
// 1.2 parse options into map, populate default_field
SearchOptions search_ops(match_expr_->options_text_);
Expand Down Expand Up @@ -556,7 +556,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator

auto finish_parse_query_tree_time = std::chrono::high_resolution_clock::now();
TimeDurationType parse_query_tree_duration = finish_parse_query_tree_time - finish_init_query_builder_time;
LOG_TRACE(fmt::format("PhysicalMatch Part 0.2: Parse QueryNode tree time: {} ms", parse_query_tree_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch 1: Parse QueryNode tree time: {} ms", parse_query_tree_duration.count()));

// 2 build query iterator
// result
Expand Down Expand Up @@ -615,7 +615,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
}
auto finish_query_builder_time = std::chrono::high_resolution_clock::now();
TimeDurationType query_builder_duration = finish_query_builder_time - finish_parse_query_tree_time;
LOG_TRACE(fmt::format("PhysicalMatch Part 1: Build Query iterator time: {} ms", query_builder_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch Part 2: Build Query iterator time: {} ms", query_builder_duration.count()));
if (use_block_max_iter) {
blockmax_score_result = MakeUniqueForOverwrite<float[]>(top_n);
blockmax_row_id_result = MakeUniqueForOverwrite<RowID[]>(top_n);
Expand Down Expand Up @@ -701,7 +701,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
}
auto finish_query_time = std::chrono::high_resolution_clock::now();
TimeDurationType query_duration = finish_query_time - finish_query_builder_time;
LOG_TRACE(fmt::format("PhysicalMatch Part 2: Full text search time: {} ms", query_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch Part 3: Full text search time: {} ms", query_duration.count()));
#ifdef INFINITY_DEBUG
{
OStringStream stat_info;
Expand All @@ -719,7 +719,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
stat_info << "blockmax_duration_3: " << blockmax_duration_3 << std::endl;
stat_info << "blockmax_loop_cnt_2: " << blockmax_loop_cnt_2 << std::endl;
}
LOG_TRACE(std::move(stat_info).str());
LOG_DEBUG(std::move(stat_info).str());
}
if (use_ordinary_iter and use_block_max_iter) {
OStringStream compare_info;
Expand All @@ -730,7 +730,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
compare_info << "duration ratio 2/1: " << blockmax_duration_2.count() / blockmax_duration.count() << std::endl;
compare_info << "duration ratio 3/2: " << blockmax_duration_3.count() / blockmax_duration_2.count() << std::endl;
compare_info << "loop count ratio: " << (static_cast<float>(blockmax_loop_cnt) / ordinary_loop_cnt) << std::endl;
LOG_TRACE(std::move(compare_info).str());
LOG_DEBUG(std::move(compare_info).str());
if (blockmax_result_count != blockmax_result_count_2 or ordinary_result_count != blockmax_result_count or
blockmax_loop_cnt != blockmax_loop_cnt_2) {
Status status = Status::SyntaxError("Debug Info: result count mismatch!");
Expand All @@ -743,10 +743,10 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
}
}
#endif
LOG_TRACE(fmt::format("Full text search result count: {}", result_count));
LOG_DEBUG(fmt::format("Full text search result count: {}", result_count));
auto begin_output_time = std::chrono::high_resolution_clock::now();
TimeDurationType output_info_duration = begin_output_time - finish_query_time;
LOG_TRACE(fmt::format("PhysicalMatch Part 3: Output stat info time: {} ms", output_info_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch Part 4: Output stat info time: {} ms", output_info_duration.count()));
// 4 populate result DataBlock
// 4.1 prepare first output_data_block
auto &output_data_blocks = operator_state->data_block_array_;
Expand Down Expand Up @@ -796,7 +796,7 @@ bool PhysicalMatch::ExecuteInnerHomebrewed(QueryContext *query_context, Operator
operator_state->SetComplete();
auto finish_output_time = std::chrono::high_resolution_clock::now();
TimeDurationType output_duration = finish_output_time - begin_output_time;
LOG_TRACE(fmt::format("PhysicalMatch Part 4: Output data time: {} ms", output_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch Part 5: Output data time: {} ms", output_duration.count()));
return true;
}

Expand All @@ -821,7 +821,7 @@ bool PhysicalMatch::Execute(QueryContext *query_context, OperatorState *operator
bool try_result = common_query_filter_->TryFinishBuild(txn);
auto finish_filter_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> filter_duration = finish_filter_time - start_time;
LOG_TRACE(fmt::format("PhysicalMatch Prepare: Filter time: {} ms", filter_duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch Prepare: Filter time: {} ms", filter_duration.count()));
if (!try_result) {
// not ready, abort and wait for next time
return true;
Expand All @@ -830,7 +830,7 @@ bool PhysicalMatch::Execute(QueryContext *query_context, OperatorState *operator
bool return_value = ExecuteInnerHomebrewed(query_context, operator_state);
auto end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration = end_time - start_time;
LOG_TRACE(fmt::format("PhysicalMatch Execute time: {} ms", duration.count()));
LOG_DEBUG(fmt::format("PhysicalMatch Execute time: {} ms", duration.count()));
return return_value;
}

Expand Down
16 changes: 8 additions & 8 deletions src/storage/background_process.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,11 +62,11 @@ void BGTaskProcessor::Process() {
break;
}
case BGTaskType::kForceCheckpoint: {
LOG_TRACE("Force checkpoint in background");
LOG_DEBUG("Force checkpoint in background");
ForceCheckpointTask *force_ckp_task = static_cast<ForceCheckpointTask *>(bg_task.get());
auto [max_commit_ts, wal_size] = catalog_->GetCheckpointState();
wal_manager_->Checkpoint(force_ckp_task, max_commit_ts, wal_size);
LOG_TRACE("Force checkpoint in background done");
LOG_DEBUG("Force checkpoint in background done");
break;
}
case BGTaskType::kAddDeltaEntry: {
Expand All @@ -75,26 +75,26 @@ void BGTaskProcessor::Process() {
break;
}
case BGTaskType::kCheckpoint: {
LOG_TRACE("Checkpoint in background");
LOG_DEBUG("Checkpoint in background");
auto *task = static_cast<CheckpointTask *>(bg_task.get());
bool is_full_checkpoint = task->is_full_checkpoint_;
auto [max_commit_ts, wal_size] = catalog_->GetCheckpointState();
wal_manager_->Checkpoint(is_full_checkpoint, max_commit_ts, wal_size);
LOG_TRACE("Checkpoint in background done");
LOG_DEBUG("Checkpoint in background done");
break;
}
case BGTaskType::kCleanup: {
LOG_TRACE("Cleanup in background");
LOG_DEBUG("Cleanup in background");
auto task = static_cast<CleanupTask *>(bg_task.get());
task->Execute();
LOG_TRACE("Cleanup in background done");
LOG_DEBUG("Cleanup in background done");
break;
}
case BGTaskType::kUpdateSegmentBloomFilterData: {
LOG_TRACE("Update segment bloom filter");
LOG_DEBUG("Update segment bloom filter");
auto *task = static_cast<UpdateSegmentBloomFilterTask *>(bg_task.get());
task->Execute();
LOG_TRACE("Update segment bloom filter done");
LOG_DEBUG("Update segment bloom filter done");
break;
}
default: {
Expand Down
4 changes: 2 additions & 2 deletions src/storage/bg_task/periodic_trigger.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ void CleanupPeriodicTrigger::Trigger() {
return;
}
last_visible_ts_ = visible_ts;
LOG_TRACE(fmt::format("Cleanup visible timestamp: {}", visible_ts));
LOG_DEBUG(fmt::format("Cleanup visible timestamp: {}", visible_ts));

auto buffer_mgr = txn_mgr_->GetBufferMgr();
auto cleanup_task = MakeShared<CleanupTask>(catalog_, visible_ts, buffer_mgr);
Expand All @@ -47,7 +47,7 @@ void CleanupPeriodicTrigger::Trigger() {

void CheckpointPeriodicTrigger::Trigger() {
auto checkpoint_task = MakeShared<CheckpointTask>(is_full_checkpoint_);
LOG_INFO(fmt::format("Trigger {} periodic checkpoint.", is_full_checkpoint_ ? "FULL" : "DELTA"));
LOG_DEBUG(fmt::format("Trigger {} periodic checkpoint.", is_full_checkpoint_ ? "FULL" : "DELTA"));
if (!wal_mgr_->TrySubmitCheckpointTask(std::move(checkpoint_task))) {
LOG_TRACE(fmt::format("Skip {} checkpoint(time) because there is already a checkpoint task running.", is_full_checkpoint_ ? "FULL" : "DELTA"));
}
Expand Down
6 changes: 4 additions & 2 deletions src/storage/compaction_process.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -189,13 +189,15 @@ void CompactionProcessor::Process() {
break;
}
case BGTaskType::kNotifyCompact: {
LOG_DEBUG("Do compact start.");
DoCompact();
LOG_DEBUG("Do compact end.");
break;
}
case BGTaskType::kNotifyOptimize: {
LOG_TRACE("Optimize start.");
LOG_DEBUG("Optimize start.");
ScanAndOptimize();
LOG_TRACE("Optimize done.");
LOG_DEBUG("Optimize done.");
break;
}
default: {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,13 +34,13 @@ BlockMaxMaxscoreIterator::~BlockMaxMaxscoreIterator() {
<< " not_use_prev_candidate_cnt: " << not_use_prev_candidate_cnt_ << "\n";
oss << " pivot_history:\n";
for (const auto &p : pivot_history_) {
oss << " pivit value: " << p.first << " at doc_id: " << p.second << '\n';
oss << " pivot value: " << p.first << " at doc_id: " << p.second << '\n';
}
oss << " must_have_history:\n";
for (const auto &p : must_have_history_) {
oss << " must_have value: " << p.first << " at doc_id: " << p.second << '\n';
}
LOG_TRACE(std::move(oss).str());
LOG_DEBUG(std::move(oss).str());
}

BlockMaxMaxscoreIterator::BlockMaxMaxscoreIterator(Vector<UniquePtr<EarlyTerminateIterator>> iterators) : sorted_iterators_(std::move(iterators)) {
Expand Down
4 changes: 2 additions & 2 deletions src/storage/invertedindex/search/query_builder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ UniquePtr<DocIterator> QueryBuilder::CreateSearch(FullTextQueryContext &context)
} else {
oss << "Empty tree!\n";
}
LOG_TRACE(std::move(oss).str());
LOG_DEBUG(std::move(oss).str());
}
#endif
return result;
Expand All @@ -87,7 +87,7 @@ UniquePtr<EarlyTerminateIterator> QueryBuilder::CreateEarlyTerminateSearch(FullT
} else {
oss << "Empty tree!\n";
}
LOG_TRACE(std::move(oss).str());
LOG_DEBUG(std::move(oss).str());
}
#endif
return result;
Expand Down
2 changes: 1 addition & 1 deletion src/storage/invertedindex/search/query_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ std::unique_ptr<QueryNode> QueryNode::GetOptimizedQueryTree(std::unique_ptr<Quer
} else {
oss << "Empty query tree!\n";
}
LOG_TRACE(std::move(oss).str());
LOG_DEBUG(std::move(oss).str());
}
#endif
return optimized_root;
Expand Down
2 changes: 1 addition & 1 deletion src/storage/invertedindex/search/search_driver.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ std::unique_ptr<QueryNode> SearchDriver::ParseSingleWithFields(const std::string
} else {
oss << "Empty query tree!\n";
}
LOG_TRACE(std::move(oss).str());
LOG_DEBUG(std::move(oss).str());
}
#endif
return parsed_query_tree;
Expand Down
6 changes: 3 additions & 3 deletions src/storage/meta/catalog.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -931,7 +931,7 @@ void Catalog::SaveFullCatalog(TxnTimeStamp max_commit_ts, String &full_catalog_p

global_catalog_delta_entry_->InitFullCheckpointTs(max_commit_ts);

LOG_INFO(fmt::format("Saved catalog to: {}", full_catalog_path));
LOG_DEBUG(fmt::format("Saved catalog to: {}", full_catalog_path));
}

// called by bg_task
Expand All @@ -945,7 +945,7 @@ bool Catalog::SaveDeltaCatalog(TxnTimeStamp max_commit_ts, String &delta_catalog
LOG_TRACE("Save delta catalog ops is empty. Skip flush.");
return true;
}
LOG_TRACE(fmt::format("Save delta catalog commit ts:{}, checkpoint max commit ts:{}.", flush_delta_entry->commit_ts(), max_commit_ts));
LOG_DEBUG(fmt::format("Save delta catalog commit ts:{}, checkpoint max commit ts:{}.", flush_delta_entry->commit_ts(), max_commit_ts));

for (auto &op : flush_delta_entry->operations()) {
switch (op->GetType()) {
Expand Down Expand Up @@ -992,7 +992,7 @@ bool Catalog::SaveDeltaCatalog(TxnTimeStamp max_commit_ts, String &delta_catalog
// }
// LOG_INFO(ss.str());
// }
LOG_TRACE(fmt::format("Save delta catalog to: {}, size: {}.", delta_catalog_path, act_size));
LOG_DEBUG(fmt::format("Save delta catalog to: {}, size: {}.", delta_catalog_path, act_size));

return false;
}
Expand Down
2 changes: 1 addition & 1 deletion src/storage/meta/cleanup_scanner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ CleanupScanner::CleanupScanner(Catalog *catalog, TxnTimeStamp visible_ts, Buffer
void CleanupScanner::AddEntry(SharedPtr<EntryInterface> entry) { entries_.emplace_back(std::move(entry)); }

void CleanupScanner::Scan() {
LOG_TRACE(fmt::format("CleanupScanner: Start scanning, ts: {}", visible_ts_));
LOG_DEBUG(fmt::format("CleanupScanner: Start scanning, ts: {}", visible_ts_));
catalog_->PickCleanup(this);
}

Expand Down
4 changes: 2 additions & 2 deletions src/storage/meta/entry/chunk_index_entry.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -224,9 +224,9 @@ void ChunkIndexEntry::Cleanup() {
LocalFileSystem fs;
fs.DeleteFile(posting_file);
fs.DeleteFile(dict_file);
LOG_TRACE(fmt::format("cleaned chunk index entry {}", index_prefix));
LOG_DEBUG(fmt::format("cleaned chunk index entry {}", index_prefix));
} else {
LOG_TRACE(fmt::format("cleaned chunk index entry {}/{}", *index_dir, chunk_id_));
LOG_DEBUG(fmt::format("cleaned chunk index entry {}/{}", *index_dir, chunk_id_));
}
}

Expand Down
Loading

0 comments on commit a93e87c

Please sign in to comment.