HashRedisStore: defer coverage loading to a background thread
Opt-in via configuration; this should speed up report loading significantly
in projects with thousands of files
Drowze committed Jan 23, 2024
1 parent d506002 commit 3f79c33
Showing 3 changed files with 154 additions and 6 deletions.
5 changes: 5 additions & 0 deletions README.md
@@ -264,6 +264,11 @@ Coverband on very high volume sites with many server processes reporting can have

See more discussion [here](https://github.com/danmayer/coverband/issues/384).

Please note that with the Redis Hash Store, every time you load the full report, Coverband executes two `HGETALL` queries against your Redis server for every file in the project (once for runtime coverage and once for eager loading coverage); a project with 3,000 files, for example, triggers 6,000 `HGETALL` calls per report load. This shouldn't have a big impact on small to medium projects, but it can become a real burden once your project reaches a few thousand files.
To reduce the extra Redis load when fetching the coverage report, you can enable `get_coverage_cache`, as shown in the sketch below. Note that with the cache enabled you are always served a previously cached version of the report while the cache is re-populated with a newer one in the background.

- Use Hash Redis Store with _get coverage cache_: `config.store = Coverband::Adapters::HashRedisStore.new(redis, get_coverage_cache: true)`
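
For reference, a minimal `config/coverband.rb` using the cached Hash Redis Store might look like the sketch below (the Redis URL and the `coverband_data` namespace are illustrative placeholders, not required values):

```ruby
# Illustrative sketch: Hash Redis Store with the get-coverage cache enabled.
Coverband.configure do |config|
  redis = Redis.new(url: ENV.fetch("COVERBAND_REDIS_URL", "redis://localhost:6379/0"))
  config.store = Coverband::Adapters::HashRedisStore.new(
    redis,
    redis_namespace: "coverband_data", # optional prefix for all Coverband keys
    get_coverage_cache: true           # serve cached reports, refresh in the background
  )
end
```

With the cache enabled, the first report load still pays the full `HGETALL` cost to prime the cache; subsequent loads return immediately from the cache while a background thread refreshes it.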

### Clear Coverage

Now that Coverband uses MD5 hashes, there should be no reason to manually clear coverage unless you are testing, changing versions, or debugging Coverband itself.
107 changes: 101 additions & 6 deletions lib/coverband/adapters/hash_redis_store.rb
@@ -5,6 +5,83 @@
module Coverband
module Adapters
class HashRedisStore < Base
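      # No-op cache used when get_coverage_cache is disabled: fetch yields
      # immediately with a zero sleep time and nothing is ever cached.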
class GetCoverageNullCacheStore
def self.clear!(*_local_types); end

def self.fetch(_local_type)
yield(0)
end
end

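      # Redis-backed cache for the full coverage report: serves the last
      # cached report and re-populates it from a background thread under a lock.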
class GetCoverageRedisCacheStore
LOCK_LIMIT = 60 * 30 # 30 minutes

def initialize(redis, key_prefix)
@redis = redis
@key_prefix = [key_prefix, "get-coverage"].join(".")
end

def fetch(local_type)
cached_result = get(local_type)

# if no cache available, block the call and populate the cache
# if cache is available, return it and start re-populating it (with a lock)
if cached_result.nil?
            value = yield(0)
            set(local_type, JSON.generate(value))
            value
else
if lock!(local_type)
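              # we hold the NX lock, so only this thread refreshes the cache;
              # readers keep being served the stale report until it finishes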
Thread.new do
begin
result = yield(deferred_time)
set(local_type, JSON.generate(result))
ensure
unlock!(local_type)
end
end
end
JSON.parse(cached_result)
end
end

def clear!(local_types = Coverband::TYPES)
Array(local_types).each do |local_type|
del(local_type)
unlock!(local_type)
end
end

private

        # sleep between batches so the refresh does not starve other Redis commands,
        # with a small random offset so the runtime and eager types can be processed "at the same time"
def deferred_time
3 + rand(0.0..1.0)
end

def del(local_type)
@redis.del("#{@key_prefix}.cache.#{local_type}")
end

def get(local_type)
@redis.get("#{@key_prefix}.cache.#{local_type}")
end

def set(local_type, value)
@redis.set("#{@key_prefix}.cache.#{local_type}", value)
end

        # lock for at most 30 minutes (LOCK_LIMIT); NX takes the lock only if no one else holds it
def lock!(local_type)
@redis.set("#{@key_prefix}.lock.#{local_type}", "1", nx: true, ex: LOCK_LIMIT)
end

def unlock!(local_type)
@redis.del("#{@key_prefix}.lock.#{local_type}")
end
end

FILE_KEY = "file"
FILE_LENGTH_KEY = "file_length"
META_DATA_KEYS = [DATA_KEY, FIRST_UPDATED_KEY, LAST_UPDATED_KEY, FILE_HASH].freeze
@@ -17,7 +94,7 @@ class HashRedisStore < Base

JSON_PAYLOAD_EXPIRATION = 5 * 60

      attr_reader :redis_namespace, :get_coverage_cache

def initialize(redis, opts = {})
super()
@@ -29,6 +106,13 @@ def initialize(redis, opts = {})

@ttl = opts[:ttl]
@relative_file_converter = opts[:relative_file_converter] || Utils::RelativeFileConverter

@get_coverage_cache = if opts[:get_coverage_cache]
key_prefix = [REDIS_STORAGE_FORMAT_VERSION, @redis_namespace].compact.join(".")
GetCoverageRedisCacheStore.new(redis, key_prefix)
else
GetCoverageNullCacheStore
end
end

def supported?
@@ -45,6 +129,7 @@ def clear!
file_keys = files_set
@redis.del(*file_keys) if file_keys.any?
@redis.del(files_key)
@get_coverage_cache.clear!(type)
end
self.type = old_type
end
@@ -54,6 +139,7 @@ def clear_file!(file)
relative_path_file = @relative_file_converter.convert(file)
Coverband::TYPES.each do |type|
@redis.del(key(relative_path_file, type, file_hash: file_hash))
@get_coverage_cache.clear!(type)
end
@redis.srem(files_key, relative_path_file)
end
@@ -87,12 +173,21 @@ def save_report(report)
end

      def coverage(local_type = nil)
        cached_results = @get_coverage_cache.fetch(local_type || type) do |sleep_time|
          files_set = files_set(local_type)

          # query in batches with a sleep in between to avoid overloading Redis
          files_set.each_slice(250).flat_map do |key_batch|
            sleep sleep_time
            @redis.pipelined do |pipeline|
              key_batch.each do |key|
                pipeline.hgetall(key)
              end
            end
          end
        end

        cached_results.each_with_object({}) do |data_from_redis, hash|
          add_coverage_for_file(data_from_redis, hash)
        end
      end
48 changes: 48 additions & 0 deletions test/coverband/adapters/hash_redis_store_test.rb
@@ -192,4 +192,52 @@ def test_clear_file
@store.clear_file!("app_path/dog.rb")
assert_nil @store.get_coverage_report[:merged]["./dog.rb"]
end

def test_get_coverage_cache
@store = Coverband::Adapters::HashRedisStore.new(
@redis,
redis_namespace: "coverband_test",
relative_file_converter: MockRelativeFileConverter,
get_coverage_cache: true
)
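    # stub the deferred sleep so the background refresh runs immediately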
@store.get_coverage_cache.stubs(:deferred_time).returns(0)
@store.get_coverage_cache.clear!
mock_file_hash
yesterday = DateTime.now.prev_day.to_time
mock_time(yesterday)
@store.save_report(
"app_path/dog.rb" => [0, 1, 2]
)
assert_equal(
{
"first_updated_at" => yesterday.to_i,
"last_updated_at" => yesterday.to_i,
"file_hash" => "abcd",
"data" => [0, 1, 2]
},
@store.coverage["./dog.rb"]
)
@store.save_report(
"app_path/dog.rb" => [0, 1, 2]
)
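    # this read is served from the stale cache; the merged counts ([0, 2, 4]) are not visible yet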
assert_equal(
{
"first_updated_at" => yesterday.to_i,
"last_updated_at" => yesterday.to_i,
"file_hash" => "abcd",
"data" => [0, 1, 2]
},
@store.coverage["./dog.rb"]
)
    sleep 0.1 # wait for the caching thread to finish
assert_equal(
{
"first_updated_at" => yesterday.to_i,
"last_updated_at" => yesterday.to_i,
"file_hash" => "abcd",
"data" => [0, 2, 4]
},
@store.coverage["./dog.rb"]
)
end
end
