From 33a3a27032ccaaf1c7295d2b3271c62bec9b63bd Mon Sep 17 00:00:00 2001 From: filipecosta90 Date: Tue, 14 Nov 2023 00:18:14 +0000 Subject: [PATCH] Added tests to cover --rate-limiting option. Ensured the help message explains the per connection rate-limit --- memtier_benchmark.cpp | 5 +++-- tests/include.py | 14 ++++++++----- tests/tests_oss_simple_flow.py | 38 ++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/memtier_benchmark.cpp b/memtier_benchmark.cpp index 22c20ef2..26e585cf 100755 --- a/memtier_benchmark.cpp +++ b/memtier_benchmark.cpp @@ -962,7 +962,7 @@ void usage() { " --key=FILE Use specified private key for TLS\n" " --cacert=FILE Use specified CA certs bundle for TLS\n" " --tls-skip-verify Skip verification of server certificate\n" - " --tls-protocols Specify the tls protocol version to use, comma delemited. Use a combination of 'TLSv1', 'TLSv1.1', 'TLSv1.2' and 'TLSv1.3'" + " --tls-protocols Specify the tls protocol version to use, comma delimited. 
Use a combination of 'TLSv1', 'TLSv1.1', 'TLSv1.2' and 'TLSv1.3'.\n" " --sni=STRING Add an SNI header\n" #endif " -x, --run-count=NUMBER Number of full-test iterations to perform\n" @@ -981,7 +981,8 @@ void usage() { "Test Options:\n" " -n, --requests=NUMBER Number of total requests per client (default: 10000)\n" " use 'allkeys' to run on the entire key-range\n" - " --rate-limiting=NUMBER Number of requests per second\n" + " --rate-limiting=NUMBER The max number of requests to make per second from an individual connection (default is unlimited rate).\n" + " If you use --rate-limiting and a very large rate is entered which cannot be met, memtier will do as many requests as possible per second.\n" " -c, --clients=NUMBER Number of clients per thread (default: 50)\n" " -t, --threads=NUMBER Number of threads (default: 4)\n" " --test-time=SECS Number of seconds to run the test\n" diff --git a/tests/include.py b/tests/include.py index 9cfb4c55..dd268883 100644 --- a/tests/include.py +++ b/tests/include.py @@ -16,7 +16,7 @@ def ensure_tls_protocols(master_nodes_connections): def assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, - overall_request_count): + overall_request_count, overall_request_delta=None): failed_asserts = env.getNumberOfFailedAssertion() try: # assert correct exit code @@ -25,8 +25,11 @@ def assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_re env.assertTrue(os.path.isfile('{0}/mb.stdout'.format(config.results_dir))) env.assertTrue(os.path.isfile('{0}/mb.stderr'.format(config.results_dir))) env.assertTrue(os.path.isfile('{0}/mb.json'.format(config.results_dir))) - # assert we have the expected request count - env.assertEqual(overall_expected_request_count, overall_request_count) + if overall_request_delta is None: + # assert we have the expected request count + env.assertEqual(overall_expected_request_count, overall_request_count) + else: + env.assertAlmostEqual(overall_expected_request_count, 
overall_request_count, overall_request_delta) finally: if env.getNumberOfFailedAssertion() > failed_asserts: debugPrintMemtierOnError(config, env) @@ -108,13 +111,14 @@ def addTLSArgs(benchmark_specs, env): -def get_default_memtier_config(threads=10, clients=5, requests=1000): +def get_default_memtier_config(threads=10, clients=5, requests=1000, test_time=None): config = { "memtier_benchmark": { "binary": MEMTIER_BINARY, "threads": threads, "clients": clients, - "requests": requests + "requests": requests, + "test_time": test_time }, } return config diff --git a/tests/tests_oss_simple_flow.py b/tests/tests_oss_simple_flow.py index 30541869..20e9023f 100644 --- a/tests/tests_oss_simple_flow.py +++ b/tests/tests_oss_simple_flow.py @@ -378,3 +378,41 @@ def test_default_arbitrary_command_hset_multi_data_placeholders(env): overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats) assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count) + +def test_default_set_get_rate_limited(env): + for client_count in [1, 2, 4]: + for thread_count in [1, 2]: + rps_per_client = 100 + test_time_secs = 5 + overall_expected_request_count = test_time_secs * rps_per_client * client_count * thread_count + # we give a 1 sec margin + request_delta = rps_per_client * client_count * thread_count * 1 + # we will specify rate limit and the test time, which should help us get an approximate request count + benchmark_specs = {"name": env.testName, "args": ['--rate-limiting={}'.format(rps_per_client)]} + addTLSArgs(benchmark_specs, env) + config = get_default_memtier_config(thread_count, client_count, None, test_time_secs) + master_nodes_list = env.getMasterNodesList() + + master_nodes_connections = env.getOSSMasterNodesConnectionList() + + # reset the commandstats + for master_connection in master_nodes_connections: + master_connection.execute_command("CONFIG", "RESETSTAT") + + 
add_required_env_arguments(benchmark_specs, config, env, master_nodes_list) + + # Create a temporary directory + test_dir = tempfile.mkdtemp() + + config = RunConfig(test_dir, env.testName, config, {}) + ensure_clean_benchmark_folder(config.results_dir) + + benchmark = Benchmark.from_json(config, benchmark_specs) + + # benchmark.run() returns True if the return code of memtier_benchmark was 0 + memtier_ok = benchmark.run() + + master_nodes_connections = env.getOSSMasterNodesConnectionList() + merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}} + overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats) + assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count, request_delta)