From 2794ce6b95348f1689b759bfda4055eb03f819b8 Mon Sep 17 00:00:00 2001 From: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com> Date: Fri, 8 Sep 2023 15:05:55 +0200 Subject: [PATCH] =?UTF-8?q?backport=2023.04=20Add=20test=20for=20RRD=20set?= =?UTF-8?q?tings=20with=20high=20check=20in=E2=80=A6=20(#864)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * backport 23.04 MON-19875 Add test for RRD settings with high check interval at creation time. (#848) * fix(tests):services-and-bulk --------- Co-authored-by: NITCHEU B REFS:MON-21309 --- .../com/centreon/broker/unified_sql/stream.hh | 1 + broker/unified_sql/src/stream.cc | 9 + broker/unified_sql/src/stream_sql.cc | 148 ++++--------- broker/unified_sql/src/stream_storage.cc | 144 ++++-------- .../services-and-bulk-stmt.robot | 180 +++++++++------ tests/broker-engine/services-increased.robot | 208 +++++++++++++----- tests/resources/Broker.py | 108 ++++++++- tests/resources/Common.py | 25 +++ tests/resources/Engine.py | 26 +++ 9 files changed, 518 insertions(+), 331 deletions(-) diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index aafd90f0c3a..b73dbae9620 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -311,6 +311,7 @@ class stream : public io::stream { std::unique_ptr _sscr_resources_update; std::unique_ptr _sscr_resources_bind; + static const std::string _index_data_insert_request; database::mysql_stmt _index_data_insert; database::mysql_stmt _index_data_update; database::mysql_stmt _index_data_query; diff --git a/broker/unified_sql/src/stream.cc b/broker/unified_sql/src/stream.cc index df1b2cfd93a..a90f54292c6 100644 --- a/broker/unified_sql/src/stream.cc +++ b/broker/unified_sql/src/stream.cc @@ -41,6 +41,15 @@ using namespace com::centreon::broker; using namespace com::centreon::broker::database; using namespace com::centreon::broker::unified_sql; +const std::string stream::_index_data_insert_request( + "INSERT INTO index_data " + "(host_id,host_name,service_id,service_description," + "check_interval, must_be_rebuild," + "special) VALUES (?,?,?,?,?,?,?) 
ON DUPLICATE KEY UPDATE " + "host_name=VALUES(host_name), " + "service_description=VALUES(service_description), " + "check_interval=VALUES(check_interval), special=VALUES(special)"); + const std::array stream::metric_type_name{ "GAUGE", "COUNTER", "DERIVE", "ABSOLUTE", "AUTOMATIC"}; diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index 656d574fbb1..f4e06eb7c46 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -644,7 +644,10 @@ void stream::_process_pb_comment(const std::shared_ptr& d) { query_preparator::event_pb_unique unique{ {10, "host_id", io::protobuf_base::invalid_on_zero, 0}, {14, "service_id", io::protobuf_base::invalid_on_zero, 0}, - {6, "entry_time", io::protobuf_base::invalid_on_zero | io::protobuf_base::invalid_on_minus_one, 0}, + {6, "entry_time", + io::protobuf_base::invalid_on_zero | + io::protobuf_base::invalid_on_minus_one, + 0}, {13, "instance_id", io::protobuf_base::invalid_on_zero, 0}, {11, "internal_id", io::protobuf_base::invalid_on_zero, 0}}; query_preparator qp(neb::pb_comment::static_type(), unique); @@ -653,8 +656,14 @@ void stream::_process_pb_comment(const std::shared_ptr& d) { {{2, "author", 0, get_comments_col_size(comments_author)}, {3, "type", 0, 0}, {4, "data", 0, get_comments_col_size(comments_data)}, - {5, "deletion_time", io::protobuf_base::invalid_on_zero | io::protobuf_base::invalid_on_minus_one, 0}, - {6, "entry_time", io::protobuf_base::invalid_on_zero | io::protobuf_base::invalid_on_minus_one, 0}, + {5, "deletion_time", + io::protobuf_base::invalid_on_zero | + io::protobuf_base::invalid_on_minus_one, + 0}, + {6, "entry_time", + io::protobuf_base::invalid_on_zero | + io::protobuf_base::invalid_on_minus_one, + 0}, {7, "entry_type", 0, 0}, {8, "expire_time", io::protobuf_base::invalid_on_zero, 0}, {9, "expires", 0, 0}, @@ -2887,6 +2896,9 @@ void stream::_process_pb_service(const std::shared_ptr& d) { s.host_id(), s.service_id(), s.state(), s.state_type()); SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: pb service output: <<{}>>", s.output()); + SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: pb service detail: <<{}>>", + io::data::dump_detail{*d}); + // Processed object. if (!_host_instance_known(s.host_id())) { SPDLOG_LOGGER_WARN( @@ -3441,11 +3453,7 @@ void stream::_check_and_update_index_cache(const Service& ss) { ss.host_id(), ss.service_id()); if (!_index_data_insert.prepared()) - _index_data_insert = _mysql.prepare_query( - "INSERT INTO index_data " - "(host_id,host_name,service_id,service_description,must_be_" - "rebuild," - "special) VALUES (?,?,?,?,?,?)"); + _index_data_insert = _mysql.prepare_query(_index_data_insert_request); uint64_t index_id = 0; @@ -3453,104 +3461,44 @@ void stream::_check_and_update_index_cache(const Service& ss) { _index_data_insert.bind_value_as_str(1, hv); _index_data_insert.bind_value_as_i32(2, ss.service_id()); _index_data_insert.bind_value_as_str(3, sv); - _index_data_insert.bind_value_as_str(4, "0"); - _index_data_insert.bind_value_as_str(5, special ? "1" : "0"); + _index_data_insert.bind_value_as_u32(4, ss.check_interval()); + _index_data_insert.bind_value_as_str(5, "0"); + _index_data_insert.bind_value_as_str(6, special ? 
"1" : "0"); std::promise p; std::future future = p.get_future(); _mysql.run_statement_and_get_int( _index_data_insert, std::move(p), database::mysql_task::LAST_INSERT_ID, conn); - try { - index_id = future.get(); - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "sql: new index {} added for service ({}, {}), special {}", index_id, - ss.host_id(), ss.service_id(), special ? "1" : "0"); - index_info info{ - .index_id = index_id, - .host_name = ss.host_name(), - .service_description = ss.description(), - .rrd_retention = _rrd_len, - .interval = ss.check_interval(), - .special = special, - .locked = false, - }; - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), "sql: loaded index {} of ({}, {}) with rrd_len={}", - index_id, ss.host_id(), ss.service_id(), info.rrd_retention); - _index_cache[{ss.host_id(), ss.service_id()}] = std::move(info); - // Create the metric mapping. - auto im{std::make_shared()}; - auto& im_obj = im->mut_obj(); - im_obj.set_index_id(info.index_id); - im_obj.set_host_id(ss.host_id()); - im_obj.set_service_id(ss.service_id()); - multiplexing::publisher pblshr; - pblshr.write(im); - } catch (const std::exception& e) { - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "sql: cannot insert new index for service ({}, {}): {}", ss.host_id(), - ss.service_id(), e.what()); - if (!_index_data_query.prepared()) - _index_data_query = _mysql.prepare_query( - "SELECT " - "id,host_name,service_description,rrd_retention,check_" - "interval," - "special,locked from index_data WHERE host_id=? AND " - "service_id=?"); - - _index_data_query.bind_value_as_i32(0, ss.host_id()); - _index_data_query.bind_value_as_i32(1, ss.service_id()); - std::promise pq; - std::future future_pq = pq.get_future(); - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "Attempt to get the index from the database for service ({}, {})", - ss.host_id(), ss.service_id()); - - _mysql.run_statement_and_get_result( - _index_data_query, std::move(pq), conn, - get_index_data_col_size(index_data_service_description)); - - try { - database::mysql_result res(future_pq.get()); - if (_mysql.fetch_row(res)) { - index_id = res.value_as_u64(0); - index_info info{ - .index_id = index_id, - .host_name = res.value_as_str(1), - .service_description = res.value_as_str(2), - .rrd_retention = - res.value_as_u32(3) ? res.value_as_u32(3) : _rrd_len, - .interval = res.value_as_u32(4) ? res.value_as_u32(4) : 5, - .special = res.value_as_str(5) == "1", - .locked = res.value_as_str(6) == "1", - }; - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "sql: loaded index {} of ({}, {}) with rrd_len={}, special={}, " - "locked={}", - index_id, ss.host_id(), ss.service_id(), info.rrd_retention, - info.special, info.locked); - _index_cache[{ss.host_id(), ss.service_id()}] = std::move(info); - // Create the metric mapping. - auto im{std::make_shared()}; - auto& im_obj = im->mut_obj(); - im_obj.set_index_id(info.index_id); - im_obj.set_host_id(ss.host_id()); - im_obj.set_service_id(ss.service_id()); - multiplexing::publisher pblshr; - pblshr.write(im); - } - } catch (const std::exception& e) { - } - if (index_id == 0) - throw exceptions::msg_fmt( - "Could not fetch index id of service ({}, {}): {}", ss.host_id(), - ss.service_id(), e.what()); - } + index_id = future.get(); + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "sql: new index {} added for service ({}, {}), special {}", index_id, + ss.host_id(), ss.service_id(), special ? 
"1" : "0"); + index_info info{ + .index_id = index_id, + .host_name = ss.host_name(), + .service_description = ss.description(), + .rrd_retention = _rrd_len, + .interval = ss.check_interval(), + .special = special, + .locked = false, + }; + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "sql: loaded index {} of ({}, {}) with rrd_len={} and interval={}", + index_id, ss.host_id(), ss.service_id(), info.rrd_retention, + info.interval); + _index_cache[{ss.host_id(), ss.service_id()}] = std::move(info); + + // Create the metric mapping. + auto im{std::make_shared()}; + auto& im_obj = im->mut_obj(); + im_obj.set_index_id(info.index_id); + im_obj.set_host_id(ss.host_id()); + im_obj.set_service_id(ss.service_id()); + multiplexing::publisher pblshr; + pblshr.write(im); } else { uint64_t index_id = it_index_cache->second.index_id; diff --git a/broker/unified_sql/src/stream_storage.cc b/broker/unified_sql/src/stream_storage.cc index ba56bee52e7..a9ae9b89229 100644 --- a/broker/unified_sql/src/stream_storage.cc +++ b/broker/unified_sql/src/stream_storage.cc @@ -80,7 +80,8 @@ void stream::_unified_sql_process_pb_service_status( uint64_t host_id = ss.host_id(), service_id = ss.service_id(); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql::_unified_sql service_status processing: host_id:{}, " "service_id:{}", host_id, service_id); @@ -103,14 +104,17 @@ void stream::_unified_sql_process_pb_service_status( rrd_len = it_index_cache->second.rrd_retention; index_locked = it_index_cache->second.locked; uint32_t interval = it_index_cache->second.interval * _interval_length; - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: host_id:{}, service_id:{} - index already in cache " - "- index_id {}, rrd_len {}", - host_id, service_id, index_id, rrd_len); + "- index_id {}, rrd_len {}, serv_interval {}, interval {}", + host_id, service_id, index_id, rrd_len, it_index_cache->second.interval, + interval); if (index_id) { /* Generate status event */ - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: host_id:{}, service_id:{} - generating status event " "with index_id {}, rrd_len: {}", host_id, service_id, index_id, rrd_len); @@ -151,7 +155,8 @@ void stream::_unified_sql_process_pb_service_status( bool need_metric_mapping = true; if (it_index_cache == _metric_cache.end()) { rlck.unlock(); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: no metrics corresponding to index {} and " "perfdata '{}' found in cache", index_id, pd.name()); @@ -233,7 +238,8 @@ void stream::_unified_sql_process_pb_service_status( pd.value_type(static_cast( it_index_cache->second.type)); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: metric {} concerning index {}, perfdata " "'{}' found in cache", it_index_cache->second.metric_id, index_id, pd.name()); @@ -273,8 +279,9 @@ void stream::_unified_sql_process_pb_service_status( _metrics[it_index_cache->second.metric_id] = it_index_cache->second; } - log_v2::perfdata()->debug("new metric with metric_id={}", - it_index_cache->second.metric_id); + SPDLOG_LOGGER_DEBUG(log_v2::perfdata(), + "new metric with metric_id={}", + it_index_cache->second.metric_id); } } if (need_metric_mapping) { @@ -337,14 +344,16 @@ void stream::_unified_sql_process_pb_service_status( m.set_name(pd.name()); m.set_host_id(ss.host_id()); m.set_service_id(ss.service_id()); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), 
"unified sql: generating perfdata event for metric {} " "(name '{}', time {}, value {}, rrd_len {}, data_type {})", m.metric_id(), pd.name(), m.time(), m.value(), rrd_len, m.value_type()); to_publish.emplace_back(std::move(perf)); } else { - log_v2::perfdata()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::perfdata(), "unified sql: index {} is locked, so metric {} event not sent " "to rrd", index_id, metric_id); @@ -368,7 +377,8 @@ void stream::_unified_sql_process_service_status( neb::service_status const& ss{*static_cast(d.get())}; uint64_t host_id = ss.host_id, service_id = ss.service_id; - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql::_unified_sql_process_service_status(): host_id:{}, " "service_id:{}", host_id, service_id); @@ -408,7 +418,8 @@ void stream::_unified_sql_process_service_status( _index_cache[{host_id, service_id}] = std::move(info); rrd_len = _rrd_len; - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "add metric in cache: (host: {}, service: {}, index: {}, returned " "rrd_len {}", ss.host_name, ss.service_description, index_id, rrd_len); @@ -423,16 +434,14 @@ void stream::_unified_sql_process_service_status( /* Index does not exist */ if (it_index_cache == _index_cache.end()) { _finish_action(-1, actions::index_data); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql::_unified_sql_process_service_status(): host_id:{}, " "service_id:{} - index not found in cache", host_id, service_id); if (!_index_data_insert.prepared()) - _index_data_insert = _mysql.prepare_query( - "INSERT INTO index_data " - "(host_id,host_name,service_id,service_description,must_be_rebuild," - "special) VALUES (?,?,?,?,?,?)"); + _index_data_insert = _mysql.prepare_query(_index_data_insert_request); fmt::string_view hv(misc::string::truncate( ss.host_name, get_index_data_col_size(index_data_host_name))); @@ -443,88 +452,24 @@ void stream::_unified_sql_process_service_status( _index_data_insert.bind_value_as_str(1, hv); _index_data_insert.bind_value_as_i32(2, service_id); _index_data_insert.bind_value_as_str(3, sv); - _index_data_insert.bind_value_as_str(4, "0"); - _index_data_insert.bind_value_as_str(5, special ? "1" : "0"); + _index_data_insert.bind_value_as_u32( + 4, static_cast(ss.check_interval)); + _index_data_insert.bind_value_as_str(5, "0"); + _index_data_insert.bind_value_as_str(6, special ? "1" : "0"); std::promise promise; std::future future = promise.get_future(); _mysql.run_statement_and_get_int( _index_data_insert, std::move(promise), database::mysql_task::LAST_INSERT_ID, conn); - try { - index_id = future.get(); - add_metric_in_cache(index_id, host_id, service_id, ss, index_locked, - special, rrd_len); - } catch (std::exception const& e) { - try { - if (!_index_data_query.prepared()) - _index_data_query = _mysql.prepare_query( - "SELECT id from index_data WHERE host_id=? 
AND service_id=?"); - - _index_data_query.bind_value_as_i32(0, host_id); - _index_data_query.bind_value_as_i32(1, service_id); - { - std::promise promise; - std::future future = promise.get_future(); - log_v2::sql()->debug( - "Query for index_data for host_id={} and service_id={}", host_id, - service_id); - _mysql.run_statement_and_get_result(_index_data_query, - std::move(promise), conn, 50); - - database::mysql_result res(future.get()); - if (_mysql.fetch_row(res)) - index_id = res.value_as_u64(0); - else - index_id = 0; - } - - if (index_id == 0) - throw msg_fmt( - "unified_sql: could not fetch index_id of newly inserted index " - "({}, " - "{})", - host_id, service_id); - - if (!_index_data_update.prepared()) - _index_data_update = _mysql.prepare_query( - "UPDATE index_data " - "SET host_name=?, service_description=?, must_be_rebuild=?, " - "special=? " - "WHERE id=?"); - - log_v2::sql()->debug( - "Updating index_data for host_id={} and service_id={}", host_id, - service_id); - _index_data_update.bind_value_as_str(0, hv); - _index_data_update.bind_value_as_str(1, sv); - _index_data_update.bind_value_as_str(2, "0"); - _index_data_update.bind_value_as_str(3, special ? "1" : "0"); - _index_data_update.bind_value_as_u64(4, index_id); - { - std::promise promise; - std::future future = promise.get_future(); - _mysql.run_statement_and_get_result(_index_data_update, - std::move(promise), conn, 50); - future.get(); - } - - add_metric_in_cache(index_id, host_id, service_id, ss, index_locked, - special, rrd_len); - log_v2::sql()->debug( - "Index {} stored in cache for host_id={} and service_id={}", - index_id, host_id, service_id); - } catch (std::exception const& e) { - throw msg_fmt( - "unified_sql: insertion of index ( {}, {}" - ") failed: {}", - host_id, service_id, e.what()); - } - } + index_id = future.get(); + add_metric_in_cache(index_id, host_id, service_id, ss, index_locked, + special, rrd_len); } else { index_id = it_index_cache->second.index_id; rrd_len = it_index_cache->second.rrd_retention; index_locked = it_index_cache->second.locked; - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: host_id:{}, service_id:{} - index already in cache " "- index_id {}, rrd_len {}", host_id, service_id, index_id, rrd_len); @@ -532,7 +477,8 @@ void stream::_unified_sql_process_service_status( if (index_id) { /* Generate status event */ - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: host_id:{}, service_id:{} - generating status event " "with index_id {}, rrd_len: {}", host_id, service_id, index_id, rrd_len); @@ -570,7 +516,8 @@ void stream::_unified_sql_process_service_status( bool need_metric_mapping = true; if (it_index_cache == _metric_cache.end()) { rlck.unlock(); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: no metrics corresponding to index {} and " "perfdata '{}' found in cache", index_id, pd.name()); @@ -652,7 +599,8 @@ void stream::_unified_sql_process_service_status( pd.value_type(static_cast( it_index_cache->second.type)); - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: metric {} concerning index {}, perfdata " "'{}' found in cache", it_index_cache->second.metric_id, index_id, pd.name()); @@ -692,8 +640,9 @@ void stream::_unified_sql_process_service_status( _metrics[it_index_cache->second.metric_id] = it_index_cache->second; } - log_v2::perfdata()->debug("new metric with metric_id={}", - it_index_cache->second.metric_id); + 
SPDLOG_LOGGER_DEBUG(log_v2::perfdata(), + "new metric with metric_id={}", + it_index_cache->second.metric_id); } } if (need_metric_mapping) @@ -743,7 +692,8 @@ void stream::_unified_sql_process_service_status( static_cast(ss.check_interval * _interval_length), false, metric_id, rrd_len, pd.value(), static_cast(pd.value_type()))}; - log_v2::perfdata()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::perfdata(), "unified sql: generating perfdata event for metric {} " "(name '{}', time {}, value {}, rrd_len {}, data_type {})", perf->metric_id, perf->name, perf->time, perf->value, rrd_len, diff --git a/tests/broker-engine/services-and-bulk-stmt.robot b/tests/broker-engine/services-and-bulk-stmt.robot index febc6301806..fbb661c73af 100644 --- a/tests/broker-engine/services-and-bulk-stmt.robot +++ b/tests/broker-engine/services-and-bulk-stmt.robot @@ -1,11 +1,7 @@ *** Settings *** -Resource ../resources/resources.robot -Suite Setup Clean Before Suite -Suite Teardown Clean After Suite -Test Setup Stop Processes -Test Teardown Save logs If Failed Documentation Centreon Broker and Engine progressively add services +Resource ../resources/resources.robot Library Process Library OperatingSystem Library DateTime @@ -14,11 +10,15 @@ Library DatabaseLibrary Library ../resources/Engine.py Library ../resources/Broker.py Library ../resources/Common.py +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Stop Processes +Test Teardown Test Clean *** Test Cases *** EBBPS1 [Documentation] 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table resources. - [Tags] Broker Engine services unified_sql + [Tags] broker engine services unified_sql Config Engine ${1} ${1} ${1000} # We want all the services to be passive to avoid parasite checks during our test. Set Services passive ${0} service_.* @@ -28,66 +28,82 @@ EBBPS1 Broker Config Add Item module0 bbdo_version 3.0.1 Broker Config Add Item central bbdo_version 3.0.1 Broker Config Add Item rrd bbdo_version 3.0.1 - Broker Config Log central core error + Broker Config Log central core info Broker Config Log central tcp error Broker Config Log central sql trace + Broker Config Log central perfdata trace Config Broker Sql Output central unified_sql Clear Retention - ${start}= Get Current Date + ${start} Get Current Date + ${start_broker} Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1000; - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 30 - Should Be True ${result} msg=An Initial service state on host_1:service_1000 should be raised before we can start external commands. + ${content} Create List INITIAL SERVICE STATE: host_1;service_1000; + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 30 + Should Be True + ... ${result} + ... An Initial service state on host_1:service_1000 should be raised before we can start external commands. FOR ${i} IN RANGE ${1000} Process Service Check result host_1 service_${i+1} 1 warning${i} END - ${content}= Create List connected to 'MariaDB' Server Unified sql stream supports column-wise binding in prepared statements - ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 - Should Be True ${result} msg=Prepared statements should be supported with this version of MariaDB. + ${content} Create List + ... connected to 'MariaDB' Server + ... 
it supports column-wise binding in prepared statements + ${result} Find In Log with timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} Prepared statements should be supported with this version of MariaDB. Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} - ${date}= Get Current Date result_format=epoch + ${date} Get Current Date result_format=epoch Log To Console date=${date} FOR ${index} IN RANGE 60 - ${output}= Query SELECT count(*) FROM resources WHERE name like 'service\_%' and parent_name='host_1' and status <> 1 + ${output} Query + ... SELECT count(*) FROM resources WHERE name like 'service\_%' and parent_name='host_1' and status <> 1 Log To Console ${output} Sleep 1s - EXIT FOR LOOP IF "${output}" == "((0,),)" + IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) FOR ${i} IN RANGE ${1000} Process Service Check result host_1 service_${i+1} 2 warning${i} IF ${i} % 200 == 0 + ${first_service_status_content} Create List unified_sql service_status processing + ${result} Find In Log with timeout + ... ${centralLog} + ... ${start_broker} + ... ${first_service_status_content} + ... 30 + Should Be True ${result} No service_status processing found. Log to Console Stopping Broker Kindly Stop Broker Log to Console Waiting for 5s Sleep 5s Log to Console Restarting Broker + ${start_broker} Get Current Date Start Broker END END - ${content}= Create List connected to 'MariaDB' Server Unified sql stream supports column-wise binding in prepared statements - ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 - Should Be True ${result} msg=Prepared statements should be supported with this version of MariaDB. + ${content} Create List + ... connected to 'MariaDB' Server + ... it supports column-wise binding in prepared statements + ${result} Find In Log with timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} Prepared statements should be supported with this version of MariaDB. Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} - ${date}= Get Current Date result_format=epoch + ${date} Get Current Date result_format=epoch Log To Console date=${date} FOR ${index} IN RANGE 120 - ${output}= Query SELECT count(*) FROM resources WHERE name like 'service\_%' and parent_name='host_1' and status <> 2 + ${output} Query + ... SELECT count(*) FROM resources WHERE name like 'service\_%' and parent_name='host_1' and status <> 2 Log To Console ${output} Sleep 1s - EXIT FOR LOOP IF "${output}" == "((0,),)" + IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) - Stop Engine - Kindly Stop Broker EBBPS2 [Documentation] 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services. - [Tags] Broker Engine services unified_sql + [Tags] broker engine services unified_sql Config Engine ${1} ${1} ${1000} # We want all the services to be passive to avoid parasite checks during our test. 
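(Aside on the pattern used by EBBPS1 above and repeated in EBBPS2 below: the test converges by polling the resources table until no service on host_1 is left in an unexpected status. A rough standalone equivalent of that loop in Python/pymysql follows — the helper name and the connection parameters are illustrative placeholders, not part of the suite.)

import time
import pymysql

def all_passive_services_in_state(expected_status: int, timeout: int = 60) -> bool:
    """Poll resources until every service_* on host_1 reports expected_status."""
    # %% sends a literal % through pymysql's format-style parameter binding.
    query = ("SELECT count(*) FROM resources "
             "WHERE name LIKE 'service\\_%%' "
             "AND parent_name='host_1' AND status <> %s")
    limit = time.time() + timeout
    while time.time() < limit:
        # Placeholder credentials; the suite uses ${DBName}/${DBUser}/${DBPass}.
        conn = pymysql.connect(host="localhost", user="centreon",
                               password="centreon",
                               database="centreon_storage")
        try:
            with conn.cursor() as cursor:
                cursor.execute(query, (expected_status,))
                (remaining,) = cursor.fetchone()
                if remaining == 0:
                    return True
        finally:
            conn.close()
        time.sleep(1)
    return False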
Set Services passive ${0} service_.* @@ -97,71 +113,87 @@ EBBPS2 Broker Config Add Item module0 bbdo_version 3.0.1 Broker Config Add Item central bbdo_version 3.0.1 Broker Config Add Item rrd bbdo_version 3.0.1 - Broker Config Log central core error + Broker Config Log central core info Broker Config Log central tcp error Broker Config Log central sql trace + Broker Config Log central perfdata trace Config Broker Sql Output central unified_sql Clear Retention - ${start}= Get Current Date + ${start} Get Current Date + ${start_broker} Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1000; - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 30 - Should Be True ${result} msg=An Initial service state on host_1:service_1000 should be raised before we can start external commands. + ${content} Create List INITIAL SERVICE STATE: host_1;service_1000; + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 30 + Should Be True + ... ${result} + ... An Initial service state on host_1:service_1000 should be raised before we can start external commands. FOR ${i} IN RANGE ${1000} Process Service Check result host_1 service_${i+1} 1 warning${i} END - ${content}= Create List connected to 'MariaDB' Server Unified sql stream supports column-wise binding in prepared statements - ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 - Should Be True ${result} msg=Prepared statements should be supported with this version of MariaDB. + ${content} Create List + ... connected to 'MariaDB' Server + ... it supports column-wise binding in prepared statements + ${result} Find In Log with timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} Prepared statements should be supported with this version of MariaDB. Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} - ${date}= Get Current Date result_format=epoch + ${date} Get Current Date result_format=epoch Log To Console date=${date} FOR ${index} IN RANGE 120 - ${output}= Query SELECT count(*) FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description LIKE 'service\_%' AND s.state <> 1 + ${output} Query + ... SELECT count(*) FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description LIKE 'service\_%' AND s.state <> 1 Log To Console ${output} Sleep 1s - EXIT FOR LOOP IF "${output}" == "((0,),)" + IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) FOR ${i} IN RANGE ${1000} - Process Service Check result host_1 service_${i+1} 2 warning${i} + Process Service Check result host_1 service_${i+1} 2 critical${i} IF ${i} % 200 == 0 - Log to Console Stopping Broker + ${first_service_status_content} Create List unified_sql service_status processing + ${result} Find In Log with timeout + ... ${centralLog} + ... ${start_broker} + ... ${first_service_status_content} + ... 30 + Should Be True ${result} No service_status processing found. Kindly Stop Broker Log to Console Waiting for 5s Sleep 5s Log to Console Restarting Broker + ${start_broker} Get Current Date Start Broker END END - ${content}= Create List connected to 'MariaDB' Server Unified sql stream supports column-wise binding in prepared statements - ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 - Should Be True ${result} msg=Prepared statements should be supported with this version of MariaDB. + ${content} Create List + ... 
connected to 'MariaDB' Server + ... it supports column-wise binding in prepared statements + ${result} Find In Log with timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} Prepared statements should be supported with this version of MariaDB. Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} - ${date}= Get Current Date result_format=epoch + ${date} Get Current Date result_format=epoch Log To Console date=${date} FOR ${index} IN RANGE 60 - ${output}= Query SELECT count(*) FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description LIKE 'service\_%' AND s.state <> 2 + ${output} Query + ... SELECT count(*) FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description LIKE 'service\_%' AND s.state <> 2 Log To Console ${output} Sleep 1s - EXIT FOR LOOP IF "${output}" == "((0,),)" + IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) - Stop Engine - Kindly Stop Broker EBMSSM [Documentation] 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. GetSqlManagerStats is called to measure writes into data_bin. - [Tags] Broker Engine services unified_sql benchmark + [Tags] broker engine services unified_sql benchmark Clear Metrics Config Engine ${1} ${1} ${1000} # We want all the services to be passive to avoid parasite checks during our test. Set Services passive ${0} service_.* Config Broker central + Config Broker rrd Config Broker module ${1} Broker Config Add Item module0 bbdo_version 3.0.1 Broker Config Add Item central bbdo_version 3.0.1 @@ -171,45 +203,44 @@ EBMSSM Config Broker Sql Output central unified_sql Config Broker Remove Rrd Output central Clear Retention - ${start}= Get Current Date - Start Broker ${True} + ${start} Get Current Date + Start Broker Start Engine Broker Set Sql Manager Stats 51001 5 5 # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. + ${content} Create List check_for_external_commands() + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. - ${start}= Get Round Current Date + ${start} Get Round Current Date # Let's wait for one "INSERT INTO data_bin" to appear in stats. FOR ${i} IN RANGE ${1000} Process Service Check result with metrics host_1 service_${i+1} 1 warning${i} 100 END - ${duration}= Broker Get Sql Manager Stats 51001 INSERT INTO data_bin 300 + ${duration} Broker Get Sql Manager Stats 51001 INSERT INTO data_bin 300 Should Be True ${duration} > 0 # Let's wait for all force checks to be in the storage database. Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${i} IN RANGE ${500} - ${output}= Query + ${output} Query ... SELECT COUNT(s.last_check) FROM metrics m LEFT JOIN index_data i ON m.index_id = i.id LEFT JOIN services s ON s.host_id = i.host_id AND s.service_id = i.service_id WHERE metric_name LIKE "metric_%" AND s.last_check >= ${start} IF ${output[0][0]} >= 100000 BREAK Sleep 1s END Should Be True ${output[0][0]} >= 100000 - Stop Engine - Kindly Stop Broker True EBPS2 - [Documentation] 1000 services are configured with 20 metrics each. 
The rrd output is removed from + [Documentation] 1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker. [Tags] broker engine services unified_sql benchmark Clear Metrics Config Engine ${1} ${1} ${1000} # We want all the services to be passive to avoid parasite checks during our test. Set Services passive ${0} service_.* Config Broker central + Config Broker rrd Config Broker module ${1} Broker Config Add Item module0 bbdo_version 3.0.1 Broker Config Add Item central bbdo_version 3.0.1 @@ -222,25 +253,30 @@ EBPS2 Config Broker Remove Rrd Output central Clear Retention - ${start}= Get Current Date - Start Broker ${True} + ${start} Get Current Date + Start Broker Start Engine # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - # We send 3000 service status and during the 1500th, we kill the database. This crashed cbd before - # the new patch. - FOR ${i} IN RANGE ${3000} - IF ${i} == 1500 Kill Mysql - Process Service Check result with metrics host_1 service_${${i}%1000+1} 1 warning${i} 20 - END + ${content} Create List check_for_external_commands() + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + # Let's wait for one "INSERT INTO data_bin" to appear in stats. FOR ${i} IN RANGE ${1000} Process Service Check result with metrics host_1 service_${i+1} 1 warning${i} 20 END + ${start} Get Current Date + ${content} create list Check if some statements are ready, sscr_bind connections + ${result} Find In Log with Timeout ${centralLog} ${start} ${content} 60 + Should Be True ${result} A message telling that statements are available should be displayed + Stop mysql Stop Engine Start mysql - Kindly Stop Broker ${True} + +*** Keywords *** +Test Clean + Stop Engine + Kindly Stop Broker + Save logs If Failed diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot index b2eb00072d1..1c74f874318 100644 --- a/tests/broker-engine/services-increased.robot +++ b/tests/broker-engine/services-increased.robot @@ -1,55 +1,161 @@ *** Settings *** -Resource ../resources/resources.robot -Suite Setup Clean Before Suite -Suite Teardown Clean After Suite -Test Setup Stop Processes -Test Teardown Save logs If Failed - -Documentation Centreon Broker and Engine progressively add services -Library Process -Library OperatingSystem -Library DateTime -Library Collections -Library ../resources/Engine.py -Library ../resources/Broker.py -Library ../resources/Common.py +Documentation Centreon Broker and Engine progressively add services + +Resource ../resources/resources.robot +Library Process +Library OperatingSystem +Library DateTime +Library Collections +Library DatabaseLibrary +Library ../resources/Engine.py +Library ../resources/Broker.py +Library ../resources/Common.py + +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Stop Processes +Test Teardown Save logs If Failed + *** Test Cases *** EBNSVC1 - [Documentation] New services with several pollers - [Tags] Broker Engine services protobuf - Config Engine ${3} ${50} ${20} - 
Config Broker rrd - Config Broker central - Config Broker module ${3} - Broker Config Add Item module0 bbdo_version 3.0.1 - Broker Config Add Item module1 bbdo_version 3.0.1 - Broker Config Add Item module2 bbdo_version 3.0.1 - Broker Config Add Item central bbdo_version 3.0.1 - Broker Config Add Item rrd bbdo_version 3.0.1 - Broker Config Log central sql debug - Config Broker Sql Output central unified_sql - Clear Retention - ${start}= Get Current Date - Start Broker - Start Engine - FOR ${i} IN RANGE ${3} - Sleep 10s - ${srv_by_host}= Evaluate 20 + 4 * $i - log to console ${srv_by_host} services by host with 50 hosts among 3 pollers. - Config Engine ${3} ${50} ${srv_by_host} - Reload Engine - Reload Broker - ${nb_srv}= Evaluate 17 * (20 + 4 * $i) - ${nb_res}= Evaluate $nb_srv + 17 - ${result}= Check Number Of Resources Monitored by Poller is ${1} ${nb_res} 30 - Should Be True ${result} msg=Poller 1 should monitor ${nb_srv} services and 17 hosts. - ${result}= Check Number Of Resources Monitored by Poller is ${2} ${nb_res} 30 - Should Be True ${result} msg=Poller 2 should monitor ${nb_srv} services and 17 hosts. - ${nb_srv}= Evaluate 16 * (20 + 4 * $i) - ${nb_res}= Evaluate $nb_srv + 16 - ${result}= Check Number Of Resources Monitored by Poller is ${3} ${nb_res} 30 - Should Be True ${result} msg=Poller 3 should monitor ${nb_srv} services and 16 hosts. - END - Stop Engine - Kindly Stop Broker + [Documentation] New services with several pollers + [Tags] broker engine services protobuf + Config Engine ${3} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${3} + Broker Config Add Item module0 bbdo_version 3.0.1 + Broker Config Add Item module1 bbdo_version 3.0.1 + Broker Config Add Item module2 bbdo_version 3.0.1 + Broker Config Add Item central bbdo_version 3.0.1 + Broker Config Add Item rrd bbdo_version 3.0.1 + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + Clear Retention + ${start} Get Current Date + Start Broker + Start Engine + FOR ${i} IN RANGE ${3} + Sleep 10s + ${srv_by_host} Evaluate 20 + 4 * $i + log to console ${srv_by_host} services by host with 50 hosts among 3 pollers. + Config Engine ${3} ${50} ${srv_by_host} + Reload Engine + Reload Broker + ${nb_srv} Evaluate 17 * (20 + 4 * $i) + ${nb_res} Evaluate $nb_srv + 17 + ${result} Check Number Of Resources Monitored By Poller Is ${1} ${nb_res} 30 + Should Be True ${result} Poller 1 should monitor ${nb_srv} services and 17 hosts. + ${result} Check Number Of Resources Monitored By Poller Is ${2} ${nb_res} 30 + Should Be True ${result} Poller 2 should monitor ${nb_srv} services and 17 hosts. + ${nb_srv} Evaluate 16 * (20 + 4 * $i) + ${nb_res} Evaluate $nb_srv + 16 + ${result} Check Number Of Resources Monitored By Poller Is ${3} ${nb_res} 30 + Should Be True ${result} Poller 3 should monitor ${nb_srv} services and 16 hosts. + END + Stop Engine + Kindly Stop Broker + +Service_increased_huge_check_interval + [Documentation] New services with high check interval at creation time. 
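(The RRD values asserted in this test scale with the service's check_interval: assuming the default interval_length of 60 seconds, the asserted numbers are consistent with minimal_heartbeat = check_interval * 60 * 10 and rra[0].pdp_per_row = check_interval * 60, i.e. 3000/300 for the initial 5-minute interval and 54000/5400 once the interval is raised to 90. A tiny sketch of that inferred relationship, stated as an assumption rather than a documented formula:)

# Inferred from the values asserted in this test; interval_length = 60s and
# the x10 heartbeat factor are assumptions, not documented constants.
INTERVAL_LENGTH = 60

def expected_rrd_settings(check_interval: int) -> dict:
    step = check_interval * INTERVAL_LENGTH         # seconds between data points
    return {
        "ds[value].minimal_heartbeat": step * 10,   # 5 -> 3000, 90 -> 54000
        "rra[0].pdp_per_row": step,                 # 5 -> 300,  90 -> 5400
    }

assert expected_rrd_settings(5) == {"ds[value].minimal_heartbeat": 3000,
                                    "rra[0].pdp_per_row": 300}
assert expected_rrd_settings(90) == {"ds[value].minimal_heartbeat": 54000,
                                     "rra[0].pdp_per_row": 5400}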
+ [Tags] broker engine services protobuf + Config Engine ${1} ${10} ${10} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Source Log central 1 + Broker Config Add Item module0 bbdo_version 3.0.1 + Broker Config Add Item central bbdo_version 3.0.1 + Broker Config Add Item rrd bbdo_version 3.0.1 + Broker Config Log rrd rrd trace + Broker Config Log central sql debug + Broker Config Log rrd core error + Config Broker Sql Output central unified_sql 10 + Broker Config Flush Log central 0 + Broker Config Flush Log rrd 0 + Clear Retention + Clear Db services + Clear Db index_data + Clear Db metrics + + Delete All rrd metrics + + ${start} Get Current Date + Start Broker + Start Engine + # Start Checkers + ${result} Check host status host_1 4 1 False + Should be true ${result} host_1 should be pending + + ${content} Create List INITIAL HOST STATE: host_1; + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} "host_1 init not found in log" + # End Checkers + + Process Service Check result with metrics host_1 service_1 1 warning0 1 + + ${content} Create List new pb data for metric + ${result} Find In Log with Timeout ${rrdLog} ${start} ${content} 60 + + ${index} Get Indexes To Rebuild 2 + ${metrics} Get Metrics Matching Indexes ${index} + Log To Console Metrics: ${metrics} + + FOR ${m} IN @{metrics} + ${result} Check RRD Info ${m} ds[value].minimal_heartbeat 3000 + Should Be True + ... ${result} + ... ds[value].minimal_heartbeat must be equal to 3000 + ${result} Check RRD Info ${m} rra[0].pdp_per_row 300 + Should Be True + ... ${result} + ... rra[0].pdp_per_row must be equal to 300 + END + + ${new_service_id} Create Service 0 1 1 + + Log To Console new service: ${new_service_id} + + # do the same insert as php + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + Execute Sql String + ... INSERT INTO index_data (host_id, service_id, host_name, service_description) VALUES (1, ${new_service_id}, 'host1', 'service_${new_service_id}') + + Engine Config Replace Value In Services 0 service_${new_service_id} check_interval 90 + + ${start} Get Current Date + + Reload Engine + + ${content} Create List INITIAL SERVICE STATE: host_1;service_${new_service_id}; + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} "service_"${new_service_id}" init not found in log" + + ${start} Get Current Date + + Sleep 5 + + Process Service Check result with metrics host_1 service_${new_service_id} 1 warning0 1 + + ${metrics} Get Metrics For Service ${new_service_id} + + Should Not Be Equal ${metrics} None no metric found for service ${new_service_id} + + FOR ${m} IN @{metrics} + ${result} Wait Until File Modified ${VarRoot}/lib/centreon/metrics/${m}.rrd ${start} + Should Be True + ... ${result} + ... ${VarRoot}/lib/centreon/metrics/${m}.rrd should have been modified since ${start} + + ${result} Check RRD Info ${m} ds[value].minimal_heartbeat 54000 + Should Be True + ... ${result} + ... ds[value].minimal_heartbeat must be equal to 54000 for metric ${m} + ${result} Check RRD Info ${m} rra[0].pdp_per_row 5400 + Should Be True + ... ${result} + ... 
rra[0].pdp_per_row must be equal to 5400 for metric ${m} + END + + [Teardown] Run Keywords Stop Engine AND Kindly Stop Broker diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index c7b635ac898..44e6fbb1cf3 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -649,7 +649,7 @@ def config_broker_bbdo_output(name, stream, port, proto, host=None): f.close() -def config_broker_sql_output(name, output): +def config_broker_sql_output(name, output, queries_per_transaction: int = 20000): if name == 'central': filename = "central-broker.json" elif name.startswith('module'): @@ -665,6 +665,7 @@ def config_broker_sql_output(name, output): for i, v in enumerate(output_dict): if v["type"] == "sql" or v["type"] == "storage" or v["type"] == "unified_sql": output_dict.pop(i) + str_queries_per_transaction = str(queries_per_transaction) if output == 'unified_sql': output_dict.append({ "name": "central-broker-unified-sql", @@ -676,7 +677,7 @@ def config_broker_sql_output(name, output): "db_name": DB_NAME_STORAGE, "interval": "60", "length": "15552000", - "queries_per_transaction": "20000", + "queries_per_transaction": str_queries_per_transaction, "connections_count": "4", "read_timeout": "60", "buffering_timeout": "0", @@ -1098,6 +1099,70 @@ def get_indexes_to_delete(count: int): return retval +def delete_all_rrd_metrics(): + """! remove all rrd metrics files + """ + with os.scandir(VAR_ROOT + "/lib/centreon/metrics/") as it: + for entry in it: + if entry.is_file(): + os.remove(entry.path) + + +def check_rrd_info(metric_id: int, key: str, value, timeout: int = 60): + """! execute rrdtool info and check one value of the returned informations + @param metric_id + @param key key to search in the rrdtool info result + @param value value to search in the rrdtool info result fot key + @param timeout timeout for metric file creation + @return True if key = value found + """ + + limit = time.time() + timeout + while time.time() < limit: + res = getoutput( + f"rrdtool info {VAR_ROOT}/lib/centreon/metrics/{metric_id}.rrd") + escaped_key = key.replace("[", "\\[").replace("]", "\\]") + line_search = re.compile( + f"{escaped_key}\s*=\s*{value}") + for line in res.splitlines(): + if (line_search.match(line)): + return True + time.sleep(5) + return False + + +def get_metrics_for_service(service_id: int, metric_name: str = "%", timeout: int = 60): + """! scan data base every 5s to extract metric ids for a service + + @param service_id id of the service + @param timeout timeout in second + @return array of metric ids + """ + limit = time.time() + timeout + + select_request = f"SELECT metric_id FROM metrics JOIN index_data ON index_id=id WHERE service_id={service_id} and metric_name like '{metric_name}'" + while time.time() < limit: + # Connect to the database + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + with connection: + with connection.cursor() as cursor: + cursor.execute(select_request) + result = cursor.fetchall() + metric_array = [r['metric_id'] for r in result] + if len(metric_array) > 0: + logger.console( + f"metrics {metric_array} found for service {service_id}") + return metric_array + time.sleep(10) + logger.console(f"no metric found for service_id={service_id}") + return None + + ## # @brief Gets count metrics that does not exist. 
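# Illustrative usage sketch (not part of Broker.py): how the two helpers
# added above are meant to be combined by the RRD test -- look up the metric
# ids attached to a service, then verify one rrdtool-info key per RRD file.
# The helper name and the example heartbeat value are hypothetical.
def check_service_rrd_heartbeat(service_id: int, heartbeat: int) -> bool:
    metrics = get_metrics_for_service(service_id)
    if not metrics:
        return False
    return all(check_rrd_info(m, "ds[value].minimal_heartbeat", heartbeat)
               for m in metrics)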
# @@ -1167,13 +1232,12 @@ def get_metrics_to_delete(count: int): inter = list(set(ids) & set(ids_db)) return inter[:count] + ## # @brief creat metrics from available ones. # # @param count:int The number of metrics to create. # - - def create_metrics(count: int): files = [os.path.basename(x) for x in glob.glob( VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] @@ -1609,14 +1673,36 @@ def compare_rrd_average_value(metric, value: float): return True -## -# @brief Call the GetSqlManagerStats function by gRPC and checks there are -# count active connections. -# -# @param count The expected number of active connections. -# -# @return A boolean. +def compare_rrd_average_value_with_grpc(metric, key, value: float): + """! Compare the average value for an RRD metric. + @param metric The metric id + @param key The key to search in the rrd info + @param float The value to compare with. + @return True if value pointed by key is equal to value param. + """ + res = getoutput( + f"rrdtool info {VAR_ROOT}/lib/centreon/metrics/{metric}.rrd" + ) + lst = res.split('\n') + if len(lst) >= 2: + for l in lst: + if key in l: + last_update = int(l.split('=')[1]) + logger.console(f"{key}: {last_update}") + return last_update == value*60 + else: + logger.console( + f"It was impossible to get the average value from the file {VAR_ROOT}/lib/centreon/metrics/{metric}.rrd") + return False + + def check_sql_connections_count_with_grpc(port, count, timeout=TIMEOUT): + """!Call the GetSqlManagerStats function by gRPC and checks there are count active connections. + @param port grpc port + @param count number of expected connections + @param timeout timeout in seconds + @return True is nb connections is equal to count + """ limit = time.time() + timeout while time.time() < limit: time.sleep(1) diff --git a/tests/resources/Common.py b/tests/resources/Common.py index ee60a84f1fb..52901fa94f4 100644 --- a/tests/resources/Common.py +++ b/tests/resources/Common.py @@ -1249,3 +1249,28 @@ def get_version(): if m3: patch = m3.group(1) return f"{maj}.{mini}.{patch}" + + +def wait_until_file_modified(path: str, date: str, timeout: int = TIMEOUT): + """! wait until file is modified + @param path path of the file + @param date minimal of modified time + @param path timeout timeout in seconds + @return True if file has been modified since date + """ + try: + my_date = parser.parse(date).timestamp() + except: + my_date = datetime.fromtimestamp(date).timestamp() + limit = time.time() + timeout + while time.time() < limit: + try: + stat_result = os.stat(path) + if stat_result.st_mtime > my_date: + return True + time.sleep(5) + except: + time.sleep(5) + + logger.console(f"{path} not modified since {date}") + return False diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index 9f369206aed..ee913684544 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -632,6 +632,30 @@ def engine_config_set_value_in_services(idx: int, desc: str, key: str, value: st f.close() +def engine_config_replace_value_in_services(idx: int, desc: str, key: str, value: str): + """! Function to update a value in the services.cfg for the config idx. + @param idx index of the configuration (from 0) + @param desc service description of the service to modify. + @param key the key to change the value. + @param value the new value to set to the key variable. 
+ """ + + filename = f"{ETC_ROOT}/centreon-engine/config{idx}/services.cfg" + with open(filename, "r") as f: + lines = f.readlines() + r = re.compile(r"^\s*service_description\s+" + desc + "\s*$") + rkey = re.compile(r"^\s*" + key + "\s+[\w\.]+\s*$") + for i in range(len(lines)): + if r.match(lines[i]): + while i < len(lines) and lines[i] != "}": + if rkey.match(lines[i]): + lines[i] = f" {key} {value}\n" + break + i += 1 + + with open(filename, "w") as f: + f.writelines(lines) + ## # @brief Function to change a value in the hosts.cfg for the config idx. # @@ -640,6 +664,8 @@ def engine_config_set_value_in_services(idx: int, desc: str, key: str, value: st # @param key the key to change the value. # @param value the new value to set to the key variable. # + + def engine_config_set_value_in_hosts(idx: int, desc: str, key: str, value: str): filename = ETC_ROOT + "/centreon-engine/config{}/hosts.cfg".format(idx) f = open(filename, "r")