Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CASSANDRA-20132][5.0] Add new table metric PurgeableTombstoneScannedHistogram and a tracing event for scanned purgeable tombstones #3730

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions src/java/org/apache/cassandra/config/Config.java
Original file line number Diff line number Diff line change
Expand Up @@ -526,6 +526,8 @@ public static class SSTableConfig
public volatile int tombstone_warn_threshold = 1000;
public volatile int tombstone_failure_threshold = 100000;

public TombstonesMetricGranularity purgeable_tobmstones_metric_granularity = TombstonesMetricGranularity.row;

public final ReplicaFilteringProtectionOptions replica_filtering_protection = new ReplicaFilteringProtectionOptions();

@Replaces(oldName = "index_summary_capacity_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
Expand Down Expand Up @@ -1312,6 +1314,25 @@ public enum BatchlogEndpointStrategy
}
}

/**
 * Granularity at which the purgeable-tombstones-scanned metric is collected
 * (see {@code purgeable_tobmstones_metric_granularity}; note the key's spelling
 * is kept as-is for config compatibility).
 */
public enum TombstonesMetricGranularity
{
    /**
     * Do not collect the metric at all.
     */
    disabled,
    /**
     * Track only partition/range/row level tombstones;
     * a good compromise between overhead and usability.
     */
    row,
    /**
     * Track partition/range/row/cell level tombstones.
     * The most granular option, but it has some performance overhead
     * due to iteration over every cell of every scanned row.
     */
    cell
}

private static final Set<String> SENSITIVE_KEYS = new HashSet<String>() {{
add("client_encryption_options");
add("server_encryption_options");
Expand Down
11 changes: 11 additions & 0 deletions src/java/org/apache/cassandra/config/DatabaseDescriptor.java
Original file line number Diff line number Diff line change
Expand Up @@ -5194,4 +5194,15 @@ public static void setRejectOutOfTokenRangeRequests(boolean enabled)
{
conf.reject_out_of_token_range_requests = enabled;
}

/**
 * Returns the configured granularity for the purgeable-tombstones-scanned metric.
 * NOTE(review): "Tobmstones" is a typo, preserved here because it mirrors the
 * public config key {@code purgeable_tobmstones_metric_granularity}.
 */
public static Config.TombstonesMetricGranularity getPurgeableTobmstonesMetricGranularity()
{
    return conf.purgeable_tobmstones_metric_granularity;
}

/**
 * Overrides the granularity of the purgeable-tombstones-scanned metric.
 * Test hook only; production code should rely on the YAML-configured value.
 */
@VisibleForTesting
public static void setPurgeableTobmstonesMetricGranularity(Config.TombstonesMetricGranularity granularity)
{
    conf.purgeable_tobmstones_metric_granularity = granularity;
}
}
130 changes: 130 additions & 0 deletions src/java/org/apache/cassandra/db/ReadCommand.java
Original file line number Diff line number Diff line change
Expand Up @@ -455,6 +455,8 @@ public UnfilteredPartitionIterator executeLocally(ReadExecutionController execut
iterator = withQuerySizeTracking(iterator);
iterator = maybeSlowDownForTesting(iterator);
iterator = withQueryCancellation(iterator);
if (DatabaseDescriptor.getPurgeableTobmstonesMetricGranularity() != Config.TombstonesMetricGranularity.disabled)
iterator = withPurgeableTombstonesMetricRecording(iterator, cfs);
iterator = RTBoundValidator.validate(withoutPurgeableTombstones(iterator, cfs, executionController), Stage.PURGED, false);
iterator = withMetricsRecording(iterator, cfs.metric, startTimeNanos);

Expand Down Expand Up @@ -867,6 +869,134 @@ protected LongPredicate getPurgeEvaluator()
return Transformation.apply(iterator, new WithoutPurgeableTombstones());
}


/**
* Wraps the provided iterator so that metrics on count of purgeable tombstones are tracked and traced.
* It tracks only tombstones with localDeletionTime < now - gc_grace_period.
* Other (non-purgeable) tombstones will be tracked by regular Cassandra logic later.
*/
private UnfilteredPartitionIterator withPurgeableTombstonesMetricRecording(UnfilteredPartitionIterator iter,
ColumnFamilyStore cfs)
{
class PurgeableTombstonesMetricRecording extends Transformation<UnfilteredRowIterator>
{
private int purgeableTombstones = 0;

@Override
public UnfilteredRowIterator applyToPartition(UnfilteredRowIterator iter)
{
if (!iter.partitionLevelDeletion().isLive())
purgeableTombstones++;
return Transformation.apply(iter, this);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the iter.partitionLevelDeletions gc-able is kinda checked with the cell.isLive below, but there may be partition level delete with no actual data behind it in other sstable that wouldn't be counted. Worth a test at least

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

public class TestTest extends CQLTester
{
    @BeforeClass
    public static void setUpClass()
    {
        CQLTester.setUpClass();
    }
    @Test
    public void test() throws Throwable
    {
        String table = createTable("CREATE TABLE %s (a int, b text, PRIMARY KEY (a)) WITH gc_grace_seconds = 0");
        execute("DELETE FROM %s WHERE a = 1");
        Util.flushTable(KEYSPACE, table);
        Thread.sleep(1000);
        Assert.assertEquals(0, ColumnFamilyStore.getIfExists(KEYSPACE, table).metric.purgeableTombstoneScannedHistogram.getCount());
        execute("SELECT * FROM %s");
        Assert.assertEquals(1, ColumnFamilyStore.getIfExists(KEYSPACE, table).metric.purgeableTombstoneScannedHistogram.getCount());
        Assert.assertEquals(1, ColumnFamilyStore.getIfExists(KEYSPACE, table).metric.purgeableTombstoneScannedHistogram.getSnapshot().getMax());
    }
}

fails for example when considerZeroes is true

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

with

            public UnfilteredRowIterator applyToPartition(UnfilteredRowIterator iter)
            {
                if (!iter.partitionLevelDeletion().isLive())
                    purgeableTombstones++;

above passes

Copy link
Contributor Author

@netudima netudima Jan 5, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hm, interesting. Thank you for noticing it. I have created PurgeableTombstonesMetricRecording iterator based on MetricRecording iterator logic. It looks like the existing tombstoneScannedHistogram has the same behaviour. Should I change considerZeroes = true for tombstoneScannedHistogram and add the counter increment on a partition delete to MetricRecording as well?

Copy link
Contributor

@clohfink clohfink Jan 12, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we should update existing one and put a note in NEWS.txt, but might be worth a discuss on dev list. It can be a bit confusing when there is, or isnt data shadowed

Copy link
Contributor Author

@netudima netudima Jan 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have looked again through the added code and now I am a bit concerned about the iteration over cells to check for cell tombstones - it may create a performance overhead + I am not sure if information about droppable cell tombstones is really valuable: all the times when I saw issues with droppable tombstones - it was about row tombstones. I am thinking now about an option to count only partition level and row level tombstones in this added logic and skip cell tombstones part..
I am going to measure the overhead to see how big the impact can be..

Copy link
Contributor

@clohfink clohfink Jan 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

good to measure but the cells are already materialized so the overhead id imagine to be pretty minor, could maybe piggy bank on the withMetricsRecording's iterations (like countTombstone method)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes, I do not expect a huge impact but I want to check if it is visible. We may read many rows if we select a big enough partition and each row has some number of cells, so MxN cells to iterate and check. As I mentioned, I suppose frequently per-cell stats are not really needed to pay an overhead for it, IMHO.. A configuration option can be a compromise here, like: droppable_tobmstones_metric_granularity: disabled|row|cell

In https://issues.apache.org/jira/browse/CASSANDRA-20165 I have spent some time already to try to reduce the number of such non-optimal places already, so I would not like to introduce one more by myself :-D

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have added results to CASSANDRA-20132

}

@Override
public Row applyToStatic(Row row)
{
return applyToRow(row);
}

@Override
public Row applyToRow(Row row)
{
final long nowInSec = nowInSec();
boolean hasTombstones = false;

if (DatabaseDescriptor.getPurgeableTobmstonesMetricGranularity() == Config.TombstonesMetricGranularity.cell)
for (Cell<?> cell : row.cells())
{
if (!cell.isLive(nowInSec) && isPurgeable(cell.localDeletionTime(), nowInSec))
{
purgeableTombstones++;
hasTombstones = true; // allows to avoid counting an extra tombstone if the whole row expired
}
}

// we replicate the logic is used for non-purged tombstones metric here
if (!row.primaryKeyLivenessInfo().isLive(nowInSec)
&& row.hasDeletion(nowInSec)
&& isPurgeable(row.deletion().time(), nowInSec)
&& !hasTombstones)
{
// We're counting primary key deletions only here.
purgeableTombstones++;
}

return row;
}

@Override
public RangeTombstoneMarker applyToMarker(RangeTombstoneMarker marker)
{
final long nowInSec = nowInSec();

// for boundary markers - increment metric only if both - close and open - markers are purgeable
if (marker.isBoundary())
{
countIfBothPurgeable(marker.closeDeletionTime(false),
marker.openDeletionTime(false),
nowInSec);
}
// for bound markers - just increment if it is purgeable
else
{
countIfPurgeable(((RangeTombstoneBoundMarker) marker).deletionTime(), nowInSec);
}

return marker;
}

@Override
public void onClose()
{
cfs.metric.purgeableTombstoneScannedHistogram.update(purgeableTombstones);
if (purgeableTombstones > 0)
Tracing.trace("Read {} purgeable tombstone cells", purgeableTombstones);
}

/**
* Increments if both - close and open - deletion times less than (now - gc_grace_period)
*/
private void countIfBothPurgeable(DeletionTime closeDeletionTime,
DeletionTime openDeletionTime,
long nowInSec)
{
if (isPurgeable(closeDeletionTime, nowInSec) && isPurgeable(openDeletionTime, nowInSec))
purgeableTombstones++;
}

/**
* Increments if deletion time less than (now - gc_grace_period)
*/
private void countIfPurgeable(DeletionTime deletionTime,
long nowInSec)
{
if (isPurgeable(deletionTime, nowInSec))
purgeableTombstones++;
}

/**
* Checks that deletion time < now - gc_grace_period
*/
private boolean isPurgeable(DeletionTime deletionTime,
long nowInSec)
{
return isPurgeable(deletionTime.localDeletionTime(), nowInSec);
}

/**
* Checks that deletion time < now - gc_grace_period
*/
private boolean isPurgeable(long localDeletionTime,
long nowInSec)
{
return localDeletionTime < cfs.gcBefore(nowInSec);
}
}

return Transformation.apply(iter, new PurgeableTombstonesMetricRecording());
}

/**
* Return the queried token(s) for logging
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ public static Collection<VirtualTable> getAll(String name)
new LatencyTableMetric(name, "local_write_latency", t -> t.writeLatency.latency),
new LatencyTableMetric(name, "coordinator_write_latency", t -> t.coordinatorWriteLatency),
new HistogramTableMetric(name, "tombstones_per_read", t -> t.tombstoneScannedHistogram.cf),
new HistogramTableMetric(name, "purgeable_tombstones_per_read", t -> t.purgeableTombstoneScannedHistogram.cf),
new HistogramTableMetric(name, "rows_per_read", t -> t.liveScannedHistogram.cf),
new StorageTableMetric(name, "disk_usage", (TableMetrics t) -> t.totalDiskSpaceUsed),
new StorageTableMetric(name, "max_partition_size", (TableMetrics t) -> t.maxPartitionSize),
Expand Down
3 changes: 3 additions & 0 deletions src/java/org/apache/cassandra/metrics/KeyspaceMetrics.java
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,8 @@ public class KeyspaceMetrics
public final Histogram sstablesPerRangeReadHistogram;
/** Tombstones scanned in queries on this Keyspace */
public final Histogram tombstoneScannedHistogram;
/** Purgeable tombstones scanned in queries on this Keyspace */
public final Histogram purgeableTombstoneScannedHistogram;
/** Live cells scanned in queries on this Keyspace */
public final Histogram liveScannedHistogram;
/** Column update time delta on this Keyspace */
Expand Down Expand Up @@ -238,6 +240,7 @@ public KeyspaceMetrics(final Keyspace ks)
sstablesPerReadHistogram = createKeyspaceHistogram("SSTablesPerReadHistogram", true);
sstablesPerRangeReadHistogram = createKeyspaceHistogram("SSTablesPerRangeReadHistogram", true);
tombstoneScannedHistogram = createKeyspaceHistogram("TombstoneScannedHistogram", false);
purgeableTombstoneScannedHistogram = createKeyspaceHistogram("PurgeableTombstoneScannedHistogram", false);
liveScannedHistogram = createKeyspaceHistogram("LiveScannedHistogram", false);
colUpdateTimeDeltaHistogram = createKeyspaceHistogram("ColUpdateTimeDeltaHistogram", false);
viewLockAcquireTime = createKeyspaceTimer("ViewLockAcquireTime");
Expand Down
3 changes: 3 additions & 0 deletions src/java/org/apache/cassandra/metrics/TableMetrics.java
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,8 @@ public class TableMetrics
public final Gauge<Long> compressionMetadataOffHeapMemoryUsed;
/** Tombstones scanned in queries on this CF */
public final TableHistogram tombstoneScannedHistogram;
/** Purgeable tombstones scanned in queries on this CF */
public final TableHistogram purgeableTombstoneScannedHistogram;
/** Live rows scanned in queries on this CF */
public final TableHistogram liveScannedHistogram;
/** Column update time delta on this CF */
Expand Down Expand Up @@ -771,6 +773,7 @@ public Long getValue()
additionalWriteLatencyNanos = createTableGauge("AdditionalWriteLatencyNanos", () -> MICROSECONDS.toNanos(cfs.additionalWriteLatencyMicros));

tombstoneScannedHistogram = createTableHistogram("TombstoneScannedHistogram", cfs.keyspace.metric.tombstoneScannedHistogram, false);
purgeableTombstoneScannedHistogram = createTableHistogram("PurgeableTombstoneScannedHistogram", cfs.keyspace.metric.purgeableTombstoneScannedHistogram, true);
liveScannedHistogram = createTableHistogram("LiveScannedHistogram", cfs.keyspace.metric.liveScannedHistogram, false);
colUpdateTimeDeltaHistogram = createTableHistogram("ColUpdateTimeDeltaHistogram", cfs.keyspace.metric.colUpdateTimeDeltaHistogram, false);
coordinatorReadLatency = createTableTimer("CoordinatorReadLatency");
Expand Down
Loading