Skip to content

Commit

Permalink
Add cache metrics and test
Browse files Browse the repository at this point in the history
  • Loading branch information
SirTyson committed Jan 29, 2025
1 parent 6b718ef commit d8fafbb
Show file tree
Hide file tree
Showing 6 changed files with 91 additions and 2 deletions.
2 changes: 2 additions & 0 deletions docs/metrics.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ bucketlistDB-live.bulk.inflationWinners | timer | time to load inflation
bucketlistDB-live.bulk.poolshareTrustlines | timer | time to load poolshare trustlines by accountID and assetID
bucketlistDB-live.bulk.prefetch | timer | time to prefetch
bucketlistDB-<X>.point.<Y> | timer | time to load single entry of type <Y> on BucketList <X> (live/hotArchive)
bucketlistDB-cache.hit | meter | number of hits on the live BucketList disk-index random-eviction cache
bucketlistDB-cache.miss | meter | number of misses on the live BucketList disk-index random-eviction cache
crypto.verify.hit | meter | number of signature cache hits
crypto.verify.miss | meter | number of signature cache misses
crypto.verify.total | meter | sum of both hits and misses
Expand Down
16 changes: 16 additions & 0 deletions src/bucket/BucketManager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,10 @@ BucketManager::BucketManager(Application& app)
app.getMetrics().NewCounter({"bucketlist", "size", "bytes"}))
, mArchiveBucketListSizeCounter(
app.getMetrics().NewCounter({"bucketlist-archive", "size", "bytes"}))
, mCacheHitMeter(app.getMetrics().NewMeter({"bucketlistDB", "cache", "hit"},
"bucketlistDB"))
, mCacheMissMeter(app.getMetrics().NewMeter(
{"bucketlistDB", "cache", "miss"}, "bucketlistDB"))
, mBucketListEvictionCounters(app)
, mEvictionStatistics(std::make_shared<EvictionStatistics>())
, mConfig(app.getConfig())
Expand Down Expand Up @@ -351,6 +355,18 @@ BucketManager::readMergeCounters()
return mMergeCounters;
}

// Meter counting hits on the live BucketList disk-index entry cache
// (marked by LiveBucketIndex when a cached entry is found).
medida::Meter&
BucketManager::getCacheHitMeter() const
{
    return mCacheHitMeter;
}

// Meter counting misses on the live BucketList disk-index entry cache.
// NOTE(review): misses are marked on cache insertion, not on lookup, so
// bloom-filter false positives for non-existent keys are not counted —
// see LiveBucketIndex::maybeAddToCache.
medida::Meter&
BucketManager::getCacheMissMeter() const
{
    return mCacheMissMeter;
}

void
BucketManager::incrMergeCounters(MergeCounters const& delta)
{
Expand Down
4 changes: 4 additions & 0 deletions src/bucket/BucketManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,8 @@ class BucketManager : NonMovableOrCopyable
medida::Counter& mSharedBucketsSize;
medida::Counter& mLiveBucketListSizeCounter;
medida::Counter& mArchiveBucketListSizeCounter;
medida::Meter& mCacheHitMeter;
medida::Meter& mCacheMissMeter;
EvictionCounters mBucketListEvictionCounters;
MergeCounters mMergeCounters;
std::shared_ptr<EvictionStatistics> mEvictionStatistics{};
Expand Down Expand Up @@ -197,6 +199,8 @@ class BucketManager : NonMovableOrCopyable

template <class BucketT> medida::Meter& getBloomMissMeter() const;
template <class BucketT> medida::Meter& getBloomLookupMeter() const;
medida::Meter& getCacheHitMeter() const;
medida::Meter& getCacheMissMeter() const;

// Reading and writing the merge counters is done in bulk, and takes a lock
// briefly; this can be done from any thread.
Expand Down
16 changes: 16 additions & 0 deletions src/bucket/LiveBucketIndex.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include "util/Logging.h"
#include "xdr/Stellar-ledger-entries.h"
#include <ios>
#include <medida/meter.h>
#include <shared_mutex>
#include <vector>

Expand Down Expand Up @@ -39,6 +40,8 @@ LiveBucketIndex::getPageSize(Config const& cfg, size_t bucketSize)
LiveBucketIndex::LiveBucketIndex(BucketManager& bm,
std::filesystem::path const& filename,
Hash const& hash, asio::io_context& ctx)
: mCacheHitMeter(bm.getCacheHitMeter())
, mCacheMissMeter(bm.getCacheMissMeter())
{
ZoneScoped;
releaseAssert(!filename.empty());
Expand Down Expand Up @@ -80,6 +83,8 @@ LiveBucketIndex::LiveBucketIndex(BucketManager const& bm, Archive& ar,
std::streamoff pageSize)

: mDiskIndex(std::make_unique<DiskIndex<LiveBucket>>(ar, bm, pageSize))
, mCacheHitMeter(bm.getCacheHitMeter())
, mCacheMissMeter(bm.getCacheMissMeter())
{
// Only disk indexes are serialized
releaseAssertOrThrow(pageSize != 0);
Expand Down Expand Up @@ -129,8 +134,15 @@ LiveBucketIndex::getCachedEntry(LedgerKey const& k) const
auto cachePtr = mCache->maybeGet(k);
if (cachePtr)
{
mCacheHitMeter.Mark();
return *cachePtr;
}

// In the case of a bloom filter false positive, we might have a cache
// "miss" because we're searching for something that doesn't exist. We
// don't cache non-existent entries, so we don't meter misses here.
// Instead, we track misses when we insert a new entry, since we always
// insert a new entry into the cache after a miss.
}

return nullptr;
Expand Down Expand Up @@ -253,6 +265,10 @@ LiveBucketIndex::maybeAddToCache(std::shared_ptr<BucketEntry const> entry) const
releaseAssertOrThrow(entry);
auto k = getBucketLedgerKey(*entry);

// If we are adding an entry to the cache, we must have missed it
// earlier.
mCacheMissMeter.Mark();

std::unique_lock<std::shared_mutex> lock(mCacheMutex);
mCache->put(k, entry);
}
Expand Down
5 changes: 5 additions & 0 deletions src/bucket/LiveBucketIndex.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,9 @@ class LiveBucketIndex : public NonMovableOrCopyable
// mutex.
mutable std::shared_mutex mCacheMutex;

medida::Meter& mCacheHitMeter;
medida::Meter& mCacheMissMeter;

static inline DiskIndex<LiveBucket>::IterT
getDiskIter(IterT const& iter)
{
Expand Down Expand Up @@ -128,6 +131,8 @@ class LiveBucketIndex : public NonMovableOrCopyable
void markBloomMiss() const;
#ifdef BUILD_TESTS
bool operator==(LiveBucketIndex const& in) const;

void clearCache() const;
#endif
};
}
50 changes: 48 additions & 2 deletions src/bucket/test/BucketIndexTests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -249,17 +249,32 @@ class BucketIndexTest
}

virtual void
run()
run(bool testCache = false)
{
auto searchableBL = getBM()
.getBucketSnapshotManager()
.copySearchableLiveBucketListSnapshot();

auto& hitMeter = getBM().getCacheHitMeter();
auto& missMeter = getBM().getCacheMissMeter();
auto startingHitCount = hitMeter.count();
auto startingMissCount = missMeter.count();

// Test bulk load lookup
auto loadResult =
searchableBL->loadKeysWithLimits(mKeysToSearch, nullptr);
validateResults(mTestEntries, loadResult);

if (testCache)
{
// We should have no cache hits
REQUIRE(hitMeter.count() == startingHitCount);
REQUIRE(missMeter.count() ==
startingMissCount + mKeysToSearch.size());
}

auto missAfterFirstLoad = missMeter.count();

// Test individual entry lookup
loadResult.clear();
for (auto const& key : mKeysToSearch)
Expand All @@ -272,6 +287,23 @@ class BucketIndexTest
}

validateResults(mTestEntries, loadResult);

if (testCache)
{
// We should have no new cache hits and no new cache misses
REQUIRE(missMeter.count() == missAfterFirstLoad);
REQUIRE(hitMeter.count() ==
startingHitCount + mKeysToSearch.size());

// Run bulk lookup again
auto loadResult2 =
searchableBL->loadKeysWithLimits(mKeysToSearch, nullptr);
validateResults(mTestEntries, loadResult2);

REQUIRE(missMeter.count() == missAfterFirstLoad);
REQUIRE(hitMeter.count() ==
startingHitCount + (mKeysToSearch.size() * 2));
}
}

// Do many lookups with subsets of sampled entries
Expand Down Expand Up @@ -493,7 +525,7 @@ class BucketIndexPoolShareTest : public BucketIndexTest
}

virtual void
run() override
run(bool testCache = false) override
{
auto searchableBL = getBM()
.getBucketSnapshotManager()
Expand Down Expand Up @@ -544,6 +576,20 @@ TEST_CASE("key-value lookup", "[bucket][bucketindex]")
testAllIndexTypes(f);
}

TEST_CASE("bl cache", "[bucket][bucketindex]")
{
    // Exercise the live BucketList random-eviction entry cache: force a
    // disk-backed index on every level and size the cache to hold all
    // entries, then have the harness verify hit/miss meter counts.
    Config cfg(getTestConfig());

    // 99% rather than 100% on purpose: a 100% setting switches the
    // implementation over to the in-memory index, which bypasses the
    // cache entirely.
    cfg.BUCKETLIST_DB_CACHED_PERCENT = 99;
    cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0;

    auto harness = BucketIndexTest(cfg);
    harness.buildGeneralTest();
    harness.run(/*testCache=*/true);
}

TEST_CASE("do not load outdated values", "[bucket][bucketindex]")
{
auto f = [&](Config& cfg) {
Expand Down

0 comments on commit d8fafbb

Please sign in to comment.