Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add num-running-compaction-iterators statistic #13299

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions db/db_impl/db_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -2430,6 +2430,8 @@ class DBImpl : public DB {
const std::vector<CompactionInputFiles>& inputs,
bool* sfm_bookkeeping, LogBuffer* log_buffer);

size_t GetNumberCompactionInputIterators(Compaction* c);

// Request compaction tasks token from compaction thread limiter.
// It always succeeds if force = true or limiter is disable.
bool RequestCompactionToken(ColumnFamilyData* cfd, bool force,
Expand Down
18 changes: 16 additions & 2 deletions db/db_impl/db_impl_compaction_flush.cc
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,16 @@ bool DBImpl::EnoughRoomForCompaction(
return enough_room;
}

// Returns how many input iterators a compaction job will need to read all
// of its inputs. Each L0 input file requires its own iterator (L0 files may
// overlap), while every non-L0 input level is read through a single iterator.
size_t DBImpl::GetNumberCompactionInputIterators(Compaction* c) {
  assert(c);
  const size_t num_input_levels = c->num_input_levels();
  if (c->start_level() != 0) {
    // No L0 inputs: one iterator per input level.
    return num_input_levels;
  }
  // One iterator per L0 file, plus one per remaining (non-L0) input level.
  return c->num_input_files(0) + (num_input_levels - 1);
}

bool DBImpl::RequestCompactionToken(ColumnFamilyData* cfd, bool force,
std::unique_ptr<TaskLimiterToken>* token,
LogBuffer* log_buffer) {
Expand Down Expand Up @@ -3720,13 +3730,17 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
num_files += each_level.files.size();
}
RecordInHistogram(stats_, NUM_FILES_IN_SINGLE_COMPACTION, num_files);
size_t num_compaction_input_iterators =
GetNumberCompactionInputIterators(c.get());
RecordInHistogram(stats_, NUM_COMPACTION_INPUT_ITERATORS,
num_compaction_input_iterators);

// There are three things that can change compaction score:
// 1) When flush or compaction finish. This case is covered by
// InstallSuperVersionAndScheduleWork
// 2) When MutableCFOptions changes. This case is also covered by
// InstallSuperVersionAndScheduleWork, because this is when the new
// options take effect.
// InstallSuperVersionAndScheduleWork, because this is when the
// new options take effect.
// 3) When we Pick a new compaction, we "remove" those files being
// compacted from the calculation, which then influences compaction
// score. Here we check if we need the new compaction even without the
Expand Down
4 changes: 4 additions & 0 deletions include/rocksdb/statistics.h
Original file line number Diff line number Diff line change
Expand Up @@ -667,6 +667,10 @@ enum Histograms : uint32_t {
// system's prefetch) from the end of SST table during block based table open
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES,

// Number of iterators needed to process compaction inputs
// Equal to number of L0 input files + number of non-L0 input levels
NUM_COMPACTION_INPUT_ITERATORS,

HISTOGRAM_ENUM_MAX
};

Expand Down
10 changes: 7 additions & 3 deletions java/rocksjni/portal.h
Original file line number Diff line number Diff line change
Expand Up @@ -5889,9 +5889,11 @@ class HistogramTypeJni {
return 0x3C;
case ROCKSDB_NAMESPACE::Histograms::TABLE_OPEN_PREFETCH_TAIL_READ_BYTES:
return 0x3D;
case ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX:
// 0x3D for backwards compatibility on current minor version.
case ROCKSDB_NAMESPACE::Histograms::NUM_COMPACTION_INPUT_ITERATORS:
return 0x3E;
case ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX:
// 0x3F for backwards compatibility on current minor version.
return 0x3F;
default:
// undefined/default
return 0x0;
Expand Down Expand Up @@ -6034,7 +6036,9 @@ class HistogramTypeJni {
return ROCKSDB_NAMESPACE::Histograms::
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES;
case 0x3E:
// 0x3E for backwards compatibility on current minor version.
return ROCKSDB_NAMESPACE::Histograms::NUM_COMPACTION_INPUT_ITERATORS;
case 0x40:
// NOTE(review): Java HistogramType.HISTOGRAM_ENUM_MAX is 0x3F and
// toJavaHistogramsType returns 0x3F for it, but this reverse mapping
// matches 0x40 — the round trip looks broken; confirm the intended value.
return ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX;

default:
Expand Down
10 changes: 8 additions & 2 deletions java/src/main/java/org/rocksdb/HistogramType.java
Original file line number Diff line number Diff line change
Expand Up @@ -210,8 +210,14 @@ public enum HistogramType {
*/
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x3D),

// 0x3E for backwards compatibility on current minor version.
HISTOGRAM_ENUM_MAX((byte) 0x3E);
/**
* Number of iterators needed to process compaction inputs
* Equal to number of L0 input files + number of non-L0 input levels
*/
NUM_COMPACTION_INPUT_ITERATORS((byte) 0x3E),

// 0x3F for backwards compatibility on current minor version.
HISTOGRAM_ENUM_MAX((byte) 0x3F);

private final byte value;

Expand Down
1 change: 1 addition & 0 deletions monitoring/statistics.cc
Original file line number Diff line number Diff line change
Expand Up @@ -340,6 +340,7 @@ const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
{ASYNC_PREFETCH_ABORT_MICROS, "rocksdb.async.prefetch.abort.micros"},
{TABLE_OPEN_PREFETCH_TAIL_READ_BYTES,
"rocksdb.table.open.prefetch.tail.read.bytes"},
{NUM_COMPACTION_INPUT_ITERATORS, "rocksdb.num.compaction.iterators"},
};

std::shared_ptr<Statistics> CreateDBStatistics() {
Expand Down
Loading