Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

rptest: cache metrics test with custom mountpoint #24140

Open
wants to merge 1 commit into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions tests/docker/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,20 @@ services:
volumes:
- '${BUILD_ROOT}:${BUILD_ROOT}'
- '${BUILD_ROOT}/redpanda_installs:/opt/redpanda_installs'
- 'cloud_storage_cache_100M_test:/var/lib/cloud_storage_cache_100M_test'
networks:
- redpanda-test
dns:
- 192.168.215.126

volumes:
  # A volume used in metrics test for cache disk. Note that this will be shared
  # between all instances.
  #
  # Backed by a 100 MiB tmpfs so the cache mount has a known, small total size
  # that the test can assert against; each test node writes into its own
  # subdirectory of this shared volume.
  cloud_storage_cache_100M_test:
    # For details, see:
    # https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options
    driver: local
    driver_opts:
      o: "size=100m"
      device: tmpfs
      type: tmpfs
46 changes: 46 additions & 0 deletions tests/rptest/tests/node_metrics_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,3 +112,49 @@ def test_node_storage_metrics(self):
self.node_metrics.cache_disk_total_bytes())
assert_lists_equal(self.node_metrics.disk_free_bytes(),
self.node_metrics.cache_disk_free_bytes())


class NodeMetricsCloudStorageCacheTest(RedpandaTest):
    """Verify disk metrics when the cloud storage cache directory lives on a
    dedicated mount point (a 100 MiB tmpfs volume, see
    tests/docker/docker-compose.yml) distinct from the data disk."""

    def __init__(self, test_ctx):
        # Set storage_min_free_bytes to 0 to avoid alerting on low disk space
        # with the custom cache directory mount.
        super().__init__(test_context=test_ctx,
                         extra_rp_conf={"storage_min_free_bytes": 0})
        self.node_metrics = NodeMetrics(self.redpanda)

    def setUp(self):
        # Point each node's cache at its own subdirectory of the shared tmpfs
        # volume; the per-node override is applied at startup rather than via
        # extra_rp_conf so each node gets a distinct path.
        overrides = {
            n: {
                "cloud_storage_cache_directory":
                f"/var/lib/cloud_storage_cache_100M_test/{n.name}/"
            }
            for n in self.redpanda.nodes
        }
        self.redpanda.start(node_config_overrides=overrides)

    @cluster(num_nodes=3)
    def test_node_cache_storage_metrics(self):
        # Skip the test if dedicated nodes are used as we don't have custom
        # mount points for cache directories there ("CDT" case).
        if self.redpanda.dedicated_nodes:
            return

        # disk metrics are updated via health monitor's periodic tick().
        self.node_metrics.wait_until_ready()

        assert self.node_metrics.disk_total_bytes(
        ) != self.node_metrics.cache_disk_total_bytes(
        ), "Total bytes should not be the same as we are using different mount points"

        assert self.node_metrics.disk_free_bytes(
        ) != self.node_metrics.cache_disk_free_bytes(
        ), "Free bytes should not be the same as we are using different mount points"

        # Every node's cache mount must report exactly the tmpfs volume size.
        assert all(
            v == 100 * 1024 * 1024
            for v in self.node_metrics.cache_disk_total_bytes()
        ), f"Expected 100M total bytes, got {self.node_metrics.cache_disk_total_bytes()}"

        # Compare per-node (element-wise): a bare `list <= list` would be a
        # lexicographic comparison and could silently pass on a later node's
        # violation.
        assert all(
            free <= total
            for free, total in zip(self.node_metrics.cache_disk_free_bytes(),
                                   self.node_metrics.cache_disk_total_bytes())
        ), f"""Free bytes should be less than or equal to total bytes got
        {self.node_metrics.cache_disk_free_bytes()} > {self.node_metrics.cache_disk_total_bytes()}"""