diff --git a/Cargo.lock b/Cargo.lock index 14bcb99..6554d96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2584,6 +2584,25 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.66", +] + [[package]] name = "subtle" version = "2.5.0" @@ -3105,6 +3124,8 @@ dependencies = [ "rustls-pki-types", "serde", "serde_json", + "strum", + "strum_macros", "tokio", "tokio-pg-mapper", "tokio-postgres", diff --git a/Cargo.toml b/Cargo.toml index ee3b95c..f8f1008 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,8 @@ jsonschema = { version = "0.18.0" } url = { version = "2.5.2" } nix = { version = "0.29.0", features = ["net"] } murmur2 = { version = "0.1.0" } +strum = { version = "0.26.3" } +strum_macros = { version = "0.26.3" } [build-dependencies] tonic-build = "0.11.0" diff --git a/Dockerfile b/Dockerfile index 731a8f4..5edfd3b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -57,7 +57,7 @@ RUN adduser cgw_runner && addgroup cgw_users_group RUN usermod -a -G cgw_users_group cgw_runner # CGW create log file under /var directory -# It is required to change direcory owner +# It is required to change directory owner RUN chown cgw_runner:cgw_users_group "/var" # Switch to non-root user diff --git a/Dockerfile.debug b/Dockerfile.debug index 3c8a277..1f2d011 100644 --- a/Dockerfile.debug +++ b/Dockerfile.debug @@ -45,7 +45,7 @@ RUN apt-get update -q -y && \ #RUN usermod -a -G cgw_users_group cgw_runner # CGW create log file under /var directory -# It is required to change direcory owner +# It is required to change directory owner #RUN chown cgw_runner:cgw_users_group "/var" # Switch to non-root user diff --git a/helm/README.md b/helm/README.md index 863b68e..f44b47e 100644 --- a/helm/README.md +++ b/helm/README.md @@ -1,6 +1,6 @@ # cgw -This Helm chart helps to deploy OpenLAN CGW (further on refered as __Gateway__) to the Kubernetes clusters. It is mainly used in [assembly chart](https://github.com/Telecominfraproject/wlan-cloud-ucentral-deploy/tree/main/cgwchart) as Gateway requires other services as dependencies that are considered in that Helm chart. This chart is purposed to define deployment logic close to the application code itself and define default values that could be overriden during deployment. +This Helm chart helps to deploy OpenLAN CGW (further on referred to as __Gateway__) to the Kubernetes clusters. It is mainly used in [assembly chart](https://github.com/Telecominfraproject/wlan-cloud-ucentral-deploy/tree/main/cgwchart) as Gateway requires other services as dependencies that are considered in that Helm chart. This chart is purposed to define deployment logic close to the application code itself and define default values that could be overridden during deployment. 
## TL;DR; @@ -71,7 +71,7 @@ The following table lists the configurable parameters of the chart and their def | public\_env\_variables | hash | Defines list of environment variables to be passed to the Gateway via ConfigMaps | | | secret\_env\_variables | hash | Defines list of secret environment variables to be passed to the Gateway via secrets | | | existingCertsSecret | string | Existing Kubernetes secret containing all required certificates and private keys for microservice operation. If set, certificates from `certs` key are ignored | `""` | -| certs | hash | Defines files (keys and certificates) that should be passed to the Gateway (PEM format is adviced to be used) (see `volumes.cgw` on where it is mounted). If `existingCertsSecret` is set, certificates passed this way will not be used. | | +| certs | hash | Defines files (keys and certificates) that should be passed to the Gateway (PEM format is advised to be used) (see `volumes.cgw` on where it is mounted). If `existingCertsSecret` is set, certificates passed this way will not be used. | | | certsCAs | hash | Defines files with CAs that should be passed to the Gateway (see `volumes.cgw` on where it is mounted) | | diff --git a/run_cgw.sh b/run_cgw.sh index d3782a1..2508b38 100755 --- a/run_cgw.sh +++ b/run_cgw.sh @@ -43,8 +43,8 @@ DEFAULT_REDIS_TLS="no" DEFAULT_METRICS_PORT=8080 -CONTAINTER_CERTS_VOLUME="/etc/cgw/certs" -CONTAINTER_NB_INFRA_CERTS_VOLUME="/etc/cgw/nb_infra/certs" +CONTAINER_CERTS_VOLUME="/etc/cgw/certs" +CONTAINER_NB_INFRA_CERTS_VOLUME="/etc/cgw/nb_infra/certs" DEFAULT_NB_INFRA_TLS="no" DEFAULT_ALLOW_CERT_MISMATCH="yes" @@ -160,8 +160,8 @@ docker run \ -p $CGW_GRPC_PUBLIC_PORT:$CGW_GRPC_PUBLIC_PORT \ -p $CGW_METRICS_PORT:$CGW_METRICS_PORT \ --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \ - -v $CGW_CERTS_PATH:$CONTAINTER_CERTS_VOLUME \ - -v $CGW_NB_INFRA_CERTS_PATH:$CONTAINTER_NB_INFRA_CERTS_VOLUME \ + -v $CGW_CERTS_PATH:$CONTAINER_CERTS_VOLUME \ + -v $CGW_NB_INFRA_CERTS_PATH:$CONTAINER_NB_INFRA_CERTS_VOLUME \ -e CGW_LOG_LEVEL \ -e CGW_ID \ -e CGW_GROUPS_CAPACITY \ diff --git a/src/cgw_connection_processor.rs b/src/cgw_connection_processor.rs index 0be8aa9..83b7eae 100644 --- a/src/cgw_connection_processor.rs +++ b/src/cgw_connection_processor.rs @@ -2,7 +2,11 @@ use crate::{ cgw_connection_server::{CGWConnectionServer, CGWConnectionServerReqMsg}, cgw_device::{CGWDeviceCapabilities, CGWDeviceType}, cgw_errors::{Error, Result}, - cgw_nb_api_listener::cgw_construct_infra_request_result_msg, + cgw_nb_api_listener::{ + cgw_construct_infra_realtime_event_message, cgw_construct_infra_request_result_msg, + cgw_construct_infra_state_event_message, cgw_construct_unassigned_infra_join_msg, + CGWKafkaProducerTopic, + }, cgw_ucentral_messages_queue_manager::{ CGWUCentralMessagesQueueItem, CGWUCentralMessagesQueueState, CGW_MESSAGES_QUEUE, MESSAGE_TIMEOUT_DURATION, @@ -163,13 +167,10 @@ impl CGWConnectionProcessor { debug!("Parse Connect Event"); let evt = match cgw_ucentral_parse_connect_event(message.clone()) { - Ok(e) => { - debug!("Some: {:?}", e); - e - } - Err(_e) => { + Ok(event) => event, + Err(e) => { error!( - "Failed to receive connect message from: {}! Closing connection!", + "Failed to parse connect message from: {}! 
Error: {e}", self.addr ); return Err(Error::ConnectionProcessor( @@ -206,7 +207,6 @@ impl CGWConnectionProcessor { match evt.evt_type { CGWUCentralEventType::Connect(c) => { caps.firmware = c.firmware; - caps.uuid = c.uuid; caps.compatible = c.capabilities.compatible; caps.model = c.capabilities.model; caps.platform = c.capabilities.platform; @@ -330,76 +330,158 @@ impl CGWConnectionProcessor { timestamp.timestamp(), ) { kafka_msg.clone_from(&payload); - if let CGWUCentralEventType::State(_) = evt.evt_type { - if let Some(decompressed) = evt.decompressed.clone() { - kafka_msg = decompressed; - } - if self.feature_topomap_enabled { - let topomap = CGWUCentralTopologyMap::get_ref(); - - // TODO: remove this Arc clone: - // Dirty hack for now: pass Arc ref of srv to topomap; - // Future rework and refactoring would require to separate - // NB api from being an internal obj of conn_server to be a - // standalone (singleton?) object. - topomap.enqueue_event( - evt, - self.device_type, - self.serial, - self.group_id, - self.cgw_server.clone(), - ); + let event_type_str: String = evt.evt_type.to_string(); + match evt.evt_type { + CGWUCentralEventType::State(_) => { + if let Some(decompressed) = evt.decompressed.clone() { + kafka_msg = decompressed; + } + if self.feature_topomap_enabled { + let topomap = CGWUCentralTopologyMap::get_ref(); + + // TODO: remove this Arc clone: + // Dirty hack for now: pass Arc ref of srv to topomap; + // Future rework and refactoring would require to separate + // NB api from being an internal obj of conn_server to be a + // standalone (singleton?) object. + topomap.enqueue_event( + evt, + self.device_type, + self.serial, + self.group_id, + self.cgw_server.clone(), + ); + } + if let Ok(resp) = cgw_construct_infra_state_event_message( + event_type_str, + kafka_msg, + self.cgw_server.get_local_id(), + ) { + self.cgw_server + .enqueue_mbox_message_from_device_to_nb_api_c( + self.group_id, + resp, + CGWKafkaProducerTopic::State, + )?; + } else { + error!("Failed to construct infra_state_event message!"); + } } - } else if let CGWUCentralEventType::Reply(content) = evt.evt_type { - if *fsm_state != CGWUCentralMessageProcessorState::ResultPending { - error!( - "Unexpected FSM state: {}! Expected: ResultPending", - *fsm_state - ); + CGWUCentralEventType::Healthcheck => { + if let Ok(resp) = cgw_construct_infra_state_event_message( + event_type_str, + kafka_msg, + self.cgw_server.get_local_id(), + ) { + self.cgw_server + .enqueue_mbox_message_from_device_to_nb_api_c( + self.group_id, + resp, + CGWKafkaProducerTopic::State, + )?; + } else { + error!("Failed to construct infra_state_event message!"); + } } + CGWUCentralEventType::Reply(content) => { + if *fsm_state != CGWUCentralMessageProcessorState::ResultPending { + error!( + "Unexpected FSM state: {}! 
Expected: ResultPending", + *fsm_state + ); + } + + if content.id != pending_req_id { + error!( + "Pending request ID {} is not equal received reply ID {}!", + pending_req_id, content.id + ); + } - if content.id != pending_req_id { - error!( - "Pending request ID {} is not equal received reply ID {}!", - pending_req_id, content.id - ); + *fsm_state = CGWUCentralMessageProcessorState::Idle; + debug!("Got reply event for pending request id: {pending_req_id}"); + if let Ok(resp) = cgw_construct_infra_request_result_msg( + self.cgw_server.get_local_id(), + pending_req_uuid, + pending_req_id, + true, + None, + ) { + self.cgw_server.enqueue_mbox_message_from_cgw_to_nb_api( + self.group_id, + resp, + CGWKafkaProducerTopic::CnCRes, + ); + } else { + error!("Failed to construct infra_request_result message!"); + } } + CGWUCentralEventType::RealtimeEvent(_) => { + if self.feature_topomap_enabled { + let topomap = CGWUCentralTopologyMap::get_ref(); + // TODO: remove this Arc clone: + // Dirty hack for now: pass Arc ref of srv to topomap; + // Future rework and refactoring would require to separate + // NB api from being an internal obj of conn_server to be a + // standalone (singleton?) object. + topomap.enqueue_event( + evt, + self.device_type, + self.serial, + self.group_id, + self.cgw_server.clone(), + ); + } - *fsm_state = CGWUCentralMessageProcessorState::Idle; - debug!("Got reply event for pending request id: {pending_req_id}"); - if let Ok(resp) = cgw_construct_infra_request_result_msg( - self.cgw_server.get_local_id(), - pending_req_uuid, - pending_req_id, - true, - None, - ) { - self.cgw_server - .enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp); - } else { - error!("Failed to construct rebalance_group message!"); + if let Ok(resp) = cgw_construct_infra_realtime_event_message( + event_type_str, + kafka_msg, + self.cgw_server.get_local_id(), + ) { + self.cgw_server + .enqueue_mbox_message_from_device_to_nb_api_c( + self.group_id, + resp, + CGWKafkaProducerTopic::InfraRealtime, + )?; + } else { + error!("Failed to construct infra_realtime_event message!"); + } + } + CGWUCentralEventType::Connect(_) => { + error!("Expected to receive Connect event as one of the first message from infra during connection procedure!"); + } + CGWUCentralEventType::Log + | CGWUCentralEventType::Event + | CGWUCentralEventType::Alarm + | CGWUCentralEventType::WifiScan + | CGWUCentralEventType::CrashLog + | CGWUCentralEventType::RebootLog + | CGWUCentralEventType::Ping + | CGWUCentralEventType::VenueBroadcast + | CGWUCentralEventType::CfgPending + | CGWUCentralEventType::DeviceUpdate + | CGWUCentralEventType::Recovery => { + if let Ok(resp) = cgw_construct_infra_realtime_event_message( + event_type_str, + kafka_msg, + self.cgw_server.get_local_id(), + ) { + self.cgw_server.enqueue_mbox_message_from_cgw_to_nb_api( + self.group_id, + resp, + CGWKafkaProducerTopic::InfraRealtime, + ) + } else { + error!("Failed to construct infra_realtime_event message!"); + } } - } else if let CGWUCentralEventType::RealtimeEvent(_) = evt.evt_type { - if self.feature_topomap_enabled { - let topomap = CGWUCentralTopologyMap::get_ref(); - // TODO: remove this Arc clone: - // Dirty hack for now: pass Arc ref of srv to topomap; - // Future rework and refactoring would require to separate - // NB api from being an internal obj of conn_server to be a - // standalone (singleton?) object. 
- topomap.enqueue_event( - evt, - self.device_type, - self.serial, - self.group_id, - self.cgw_server.clone(), - ); + CGWUCentralEventType::Unknown => { + error!("Received unknown event type! Message payload: {kafka_msg}"); } } } - self.cgw_server - .enqueue_mbox_message_from_device_to_nb_api_c(self.group_id, kafka_msg)?; return Ok(CGWConnectionState::IsActive); } Ping(_t) => { @@ -443,6 +525,24 @@ impl CGWConnectionProcessor { "Received GroupID change message: mac {} - old gid {} : new gid {}", self.serial, self.group_id, new_group_id ); + + if new_group_id != self.group_id { + if let Ok(unassigned_join) = cgw_construct_unassigned_infra_join_msg( + self.serial, + self.addr, + self.cgw_server.get_local_id(), + String::default(), + ) { + self.cgw_server.enqueue_mbox_message_from_cgw_to_nb_api( + new_group_id, + unassigned_join, + CGWKafkaProducerTopic::Connection, + ); + } else { + error!("Failed to construct unassigned_infra_join message!"); + } + } + self.group_id = new_group_id; } _ => { @@ -631,7 +731,7 @@ impl CGWConnectionProcessor { ) { // Currently Device Queue Manager does not store infras GID self.cgw_server - .enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp); + .enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp, CGWKafkaProducerTopic::CnCRes); } else { error!("Failed to construct message!"); } @@ -651,10 +751,13 @@ impl CGWConnectionProcessor { pending_req_uuid, pending_req_id, false, - Some(format!("Request timed out")), + Some("Request timed out".to_string()), ) { - self.cgw_server - .enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp); + self.cgw_server.enqueue_mbox_message_from_cgw_to_nb_api( + self.group_id, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct rebalance_group message!"); } diff --git a/src/cgw_connection_server.rs b/src/cgw_connection_server.rs index 15e493e..83a7a6e 100644 --- a/src/cgw_connection_server.rs +++ b/src/cgw_connection_server.rs @@ -9,7 +9,7 @@ use crate::cgw_nb_api_listener::{ cgw_construct_infra_group_infras_del_response, cgw_construct_infra_join_msg, cgw_construct_infra_leave_msg, cgw_construct_infra_request_result_msg, cgw_construct_rebalance_group_response, cgw_construct_unassigned_infra_join_msg, - cgw_construct_unassigned_infra_leave_msg, + cgw_construct_unassigned_infra_leave_msg, CGWKafkaProducerTopic, }; use crate::cgw_runtime::{cgw_get_runtime, CGWRuntimeType}; use crate::cgw_tls::cgw_tls_get_cn_from_stream; @@ -348,11 +348,21 @@ impl CGWConnectionServer { // but use spawn_blocking where needed in contexts that rely on the // underlying async calls. let app_args_clone = app_args.validation_schema.clone(); - let get_config_validator_fut = tokio::task::spawn_blocking(move || { - CGWUCentralConfigValidators::new(app_args_clone).unwrap() - }); + let get_config_validator_fut = + tokio::task::spawn_blocking(move || CGWUCentralConfigValidators::new(app_args_clone)); let config_validator = match get_config_validator_fut.await { - Ok(res) => res, + Ok(res) => match res { + Ok(validator) => validator, + Err(e) => { + error!( + "Can't create CGW Connection server: Config validator create failed: {e}" + ); + + return Err(Error::ConnectionServer(format!( + "Can't create CGW Connection server: Config validator create failed: {e}", + ))); + } + }, Err(e) => { error!("Failed to retrieve json config validators! 
Error: {e}"); return Err(Error::ConnectionServer(format!( @@ -444,23 +454,29 @@ impl CGWConnectionServer { &self, group_id: i32, req: String, + topic: CGWKafkaProducerTopic, ) -> Result<()> { let nb_api_client_clone = self.nb_api_client.clone(); tokio::spawn(async move { let key = group_id.to_string(); nb_api_client_clone - .enqueue_mbox_message_from_cgw_server(key, req) + .enqueue_mbox_message_from_cgw_server(key, req, topic) .await; }); Ok(()) } - pub fn enqueue_mbox_message_from_cgw_to_nb_api(&self, gid: i32, req: String) { + pub fn enqueue_mbox_message_from_cgw_to_nb_api( + &self, + gid: i32, + req: String, + topic: CGWKafkaProducerTopic, + ) { let nb_api_client_clone = self.nb_api_client.clone(); self.mbox_nb_api_tx_runtime_handle.spawn(async move { nb_api_client_clone - .enqueue_mbox_message_from_cgw_server(gid.to_string(), req) + .enqueue_mbox_message_from_cgw_server(gid.to_string(), req, topic) .await; }); } @@ -718,6 +734,24 @@ impl CGWConnectionServer { } last_update_timestamp = current_timestamp; + + let mut infras_list: Vec = Vec::new(); + let connmap_r_lock = self.connmap.map.read().await; + + for (infra_mac, _) in connmap_r_lock.iter() { + if !self + .devices_cache + .read() + .await + .check_device_exists(infra_mac) + { + infras_list.push(*infra_mac); + } + } + + if !infras_list.is_empty() { + self.clone().notify_devices_on_gid_change(infras_list, 0); + } } } @@ -791,7 +825,11 @@ impl CGWConnectionServer { Some(format!("Failed to parse NB API message with key {key}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(-1, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + -1, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct device_enqueue message!"); } @@ -843,7 +881,11 @@ impl CGWConnectionServer { true, None, ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct infra_group_create message!"); } @@ -858,7 +900,11 @@ impl CGWConnectionServer { false, Some(format!("Failed to create new group! Error: {e}")), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct infra_group_create message!"); } @@ -902,7 +948,11 @@ impl CGWConnectionServer { true, None, ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct infra_group_create message!"); } @@ -921,7 +971,11 @@ impl CGWConnectionServer { "Failed to create new group to shard id {shard_id}! Error {e}" )), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct infra_group_create message!"); } @@ -943,16 +997,6 @@ impl CGWConnectionServer { .await { Ok(()) => { - // We successfully updated both SQL and REDIS - // cache. In order to keep it in sync with local - // one, we have to make sure we latest - // update timestamp locally, to prevent CGW - // from trying to update it in next iteration - // of the main loop, while this very own - // local shard _is_ responsible for timestamp - // update. 
- last_update_timestamp = self.get_redis_last_update_timestamp().await; - // We try to help free topomap memory usage // by notifying it whenever GID get's destroyed. // However, for allocation we let topomap @@ -986,7 +1030,11 @@ impl CGWConnectionServer { true, None, ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct infra_group_delete message!"); } @@ -1003,7 +1051,11 @@ impl CGWConnectionServer { false, Some(format!("Failed to delete group! Error: {e}")), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct infra_group_delete message!"); } @@ -1034,9 +1086,7 @@ impl CGWConnectionServer { // forwarded to. // In order to get it, match to parsed msg, and // get only gid field. - let gid: i32 = match parsed_msg { - CGWNBApiParsedMsg { gid, .. } => gid, - }; + let CGWNBApiParsedMsg { gid, .. } = parsed_msg; match self .cgw_remote_discovery @@ -1125,7 +1175,7 @@ impl CGWConnectionServer { Some(format!("Failed to relay MSG stream to remote CGW{cgw_id}")), local_shard_partition_key_clone, ) { - self_clone.enqueue_mbox_message_from_cgw_to_nb_api(-1, resp); + self_clone.enqueue_mbox_message_from_cgw_to_nb_api(-1, resp, CGWKafkaProducerTopic::CnCRes); } else { error!("Failed to construct device_enqueue message!"); } @@ -1175,7 +1225,7 @@ impl CGWConnectionServer { Some(format!("Failed to add infra list to nonexisting group, gid {gid}, uuid {uuid}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!("Failed to construct infra_group_device_add message!"); } @@ -1206,7 +1256,11 @@ impl CGWConnectionServer { None, local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!( "Failed to construct infra_group_device_add message!" @@ -1247,6 +1301,7 @@ impl CGWConnectionServer { self.enqueue_mbox_message_from_cgw_to_nb_api( dev_gid, resp, + CGWKafkaProducerTopic::Connection ); } else { error!( @@ -1300,7 +1355,7 @@ impl CGWConnectionServer { Some(format!("Failed to create few MACs from infras list (partial create), gid {gid}, uuid {uuid}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!( "Failed to construct infra_group_device_add message!" @@ -1334,7 +1389,7 @@ impl CGWConnectionServer { Some(format!("Failed to delete MACs from infra list, gid {gid}, uuid {uuid}: group does not exist")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!( "Failed to construct infra_group_device_del message!" 
@@ -1369,7 +1424,11 @@ impl CGWConnectionServer { None, local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!( "Failed to construct infra_group_device_del message!" @@ -1404,7 +1463,7 @@ impl CGWConnectionServer { Some(format!("Failed to destroy few MACs from infras list (partial delete), gid {gid}, uuid {uuid}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!( "Failed to construct infra_group_device_del message!" @@ -1440,7 +1499,7 @@ impl CGWConnectionServer { Some(format!("Failed to sink down msg to device of nonexisting group, gid {gid}, uuid {uuid}: group does not exist")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!("Failed to construct device_enqueue message!"); } @@ -1464,9 +1523,11 @@ impl CGWConnectionServer { Ok(()) => { // 3. Add message to queue self.enqueue_infrastructure_request( - device_mac, - infra.get_device_state(), - infra.get_device_group_id(), + ( + device_mac, + infra.get_device_state(), + infra.get_device_group_id(), + ), parsed_cmd, msg, uuid, @@ -1484,7 +1545,7 @@ impl CGWConnectionServer { Some(format!("Failed to validate config message! Invalid configure message for device: {device_mac}, uuid {uuid}\nError: {e}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!("Failed to construct device_enqueue message!"); } @@ -1493,9 +1554,11 @@ impl CGWConnectionServer { } } else { self.enqueue_infrastructure_request( - device_mac, - infra.get_device_state(), - infra.get_device_group_id(), + ( + device_mac, + infra.get_device_state(), + infra.get_device_group_id(), + ), parsed_cmd, msg, uuid, @@ -1518,7 +1581,7 @@ impl CGWConnectionServer { Some(format!("Failed to parse command message to device: {device_mac}, uuid {uuid}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp, CGWKafkaProducerTopic::CnCRes); } else { error!("Failed to construct device_enqueue message!"); } @@ -1540,7 +1603,11 @@ impl CGWConnectionServer { true, None, ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct rebalance_group message!"); } @@ -1559,7 +1626,11 @@ impl CGWConnectionServer { false, Some(format!("Failed to rebalance groups! 
Error: {e}")), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(gid, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + gid, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct rebalance_group message!"); } @@ -1582,7 +1653,11 @@ impl CGWConnectionServer { Some(format!("Failed to parse NB API message with key {key}")), local_shard_partition_key.clone(), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(-1, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + -1, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct device_enqueue message!"); } @@ -1713,7 +1788,7 @@ impl CGWConnectionServer { .get_infra_group_owner_id(device_group_id) .await { - foreign_infra_join = self.local_cgw_id == group_owner_id; + foreign_infra_join = self.local_cgw_id != group_owner_id; group_owner_shard_id = group_owner_id; } @@ -1778,7 +1853,11 @@ impl CGWConnectionServer { self.local_cgw_id, group_owner_shard_id, ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(device_group_id, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + device_group_id, + resp, + CGWKafkaProducerTopic::Connection, + ); } else { error!("Failed to construct foreign_infra_connection message!"); } @@ -1815,7 +1894,11 @@ impl CGWConnectionServer { }; if let Ok(resp) = join_message { - self.enqueue_mbox_message_from_cgw_to_nb_api(device_group_id, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + device_group_id, + resp, + CGWKafkaProducerTopic::Connection, + ); } else { error!("Failed to construct [un]assigned_infra_join message!"); } @@ -1828,7 +1911,11 @@ impl CGWConnectionServer { &diff, self.local_cgw_id, ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(device_group_id, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + device_group_id, + resp, + CGWKafkaProducerTopic::Connection, + ); } else { error!("Failed to construct device_capabilities_changed message!"); } @@ -1922,7 +2009,11 @@ impl CGWConnectionServer { }; if let Ok(resp) = leave_message { - self.enqueue_mbox_message_from_cgw_to_nb_api(device_group_id, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + device_group_id, + resp, + CGWKafkaProducerTopic::Connection, + ); } else { error!("Failed to construct [un]assigned_infra_leave message!"); } @@ -1982,17 +2073,15 @@ impl CGWConnectionServer { async fn enqueue_infrastructure_request( &self, - mac: MacAddress, - infra_state: CGWDeviceState, - infra_gid: i32, + infra: (MacAddress, CGWDeviceState, i32), command: CGWUCentralCommand, message: String, uuid: Uuid, timeout: Option, local_shard_partition_key: Option, ) { - if (infra_state == CGWDeviceState::CGWDeviceConnected) - || (infra_state == CGWDeviceState::CGWDeviceDisconnected + if (infra.1 == CGWDeviceState::CGWDeviceConnected) + || (infra.1 == CGWDeviceState::CGWDeviceDisconnected && (command.cmd_type == CGWUCentralCommandType::Configure || command.cmd_type == CGWUCentralCommandType::Upgrade)) { @@ -2000,7 +2089,7 @@ impl CGWConnectionServer { CGWUCentralMessagesQueueItem::new(command, message, uuid, timeout); let queue_lock = CGW_MESSAGES_QUEUE.read().await; - let resp_result = match queue_lock.push_device_message(mac, queue_msg).await { + let resp_result = match queue_lock.push_device_message(infra.0, queue_msg).await { Ok(replaced_item) => match replaced_item { Some(req) => cgw_construct_infra_enqueue_response( self.local_cgw_id, @@ -2027,25 +2116,32 @@ impl CGWConnectionServer { }; match resp_result { - Ok(resp) => self.enqueue_mbox_message_from_cgw_to_nb_api(infra_gid, resp), 
+ Ok(resp) => self.enqueue_mbox_message_from_cgw_to_nb_api( + infra.2, + resp, + CGWKafkaProducerTopic::CnCRes, + ), Err(e) => { error!("Failed to construct infra_request_result message! Error: {e}") } } + } else if let Ok(resp) = cgw_construct_infra_enqueue_response( + self.local_cgw_id, + uuid, + false, + Some(format!( + "Device {} is disconnected! Accepting only Configure and Upgrade requests!", + infra.0 + )), + local_shard_partition_key, + ) { + self.enqueue_mbox_message_from_cgw_to_nb_api( + infra.2, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { - if let Ok(resp) = cgw_construct_infra_enqueue_response( - self.local_cgw_id, - uuid, - false, - Some(format!( - "Device {mac} is disconnected! Accepting only Configure and Upgrade requests!" - )), - local_shard_partition_key, - ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(infra_gid, resp); - } else { - error!("Failed to construct infra_request_result message!"); - } + error!("Failed to construct infra_request_result message!"); } } @@ -2067,7 +2163,11 @@ impl CGWConnectionServer { false, Some(format!("Request failed due to infra {} disconnect", infra)), ) { - self.enqueue_mbox_message_from_cgw_to_nb_api(req.0, resp); + self.enqueue_mbox_message_from_cgw_to_nb_api( + req.0, + resp, + CGWKafkaProducerTopic::CnCRes, + ); } else { error!("Failed to construct message!"); } @@ -2146,7 +2246,7 @@ mod tests { let event: CGWUCentralEvent = cgw_ucentral_ap_parse_message(false, msg, 0)?; match event.evt_type { - CGWUCentralEventType::Log(_) => { + CGWUCentralEventType::Log => { debug!("Assertion passed") } _ => { diff --git a/src/cgw_device.rs b/src/cgw_device.rs index 8423d0b..03620cd 100644 --- a/src/cgw_device.rs +++ b/src/cgw_device.rs @@ -39,7 +39,6 @@ pub struct OldNew { #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct CGWDeviceCapabilities { pub firmware: String, - pub uuid: u64, pub compatible: String, pub model: String, pub platform: String, @@ -49,7 +48,6 @@ pub struct CGWDeviceCapabilities { impl CGWDeviceCapabilities { pub fn update_device_capabilities(&mut self, new_capabilities: &CGWDeviceCapabilities) { self.firmware.clone_from(&new_capabilities.firmware); - self.uuid = new_capabilities.uuid; self.compatible.clone_from(&new_capabilities.compatible); self.model.clone_from(&new_capabilities.model); self.platform.clone_from(&new_capabilities.platform); @@ -142,15 +140,6 @@ pub fn cgw_detect_device_changes( ); } - if cur_capabilities.uuid != new_capabilities.uuid { - diff.insert( - "uuid".to_string(), - OldNew { - old_value: cur_capabilities.uuid.to_string(), - new_value: new_capabilities.uuid.to_string(), - }, - ); - } if cur_capabilities.compatible != new_capabilities.compatible { diff.insert( "compatible".to_string(), @@ -160,6 +149,7 @@ pub fn cgw_detect_device_changes( }, ); } + if cur_capabilities.model != new_capabilities.model { diff.insert( "model".to_string(), @@ -169,6 +159,7 @@ pub fn cgw_detect_device_changes( }, ); } + if cur_capabilities.platform != new_capabilities.platform { diff.insert( "platform".to_string(), diff --git a/src/cgw_errors.rs b/src/cgw_errors.rs index 80601a3..d5d563e 100644 --- a/src/cgw_errors.rs +++ b/src/cgw_errors.rs @@ -32,6 +32,8 @@ pub enum Error { Runtime(String), + KafkaInit(String), + // -- Externals #[from] Io(std::io::Error), @@ -89,6 +91,7 @@ impl std::fmt::Display for Error { | Error::Tls(message) | Error::ConnectionServer(message) | Error::Runtime(message) + | Error::KafkaInit(message) | Error::Redis(message) | Error::Tcp(message) | 
Error::UCentralMessagesQueue(message) diff --git a/src/cgw_kafka_init.rs b/src/cgw_kafka_init.rs new file mode 100644 index 0000000..d15bf43 --- /dev/null +++ b/src/cgw_kafka_init.rs @@ -0,0 +1,234 @@ +use crate::cgw_app_args::{CGWKafkaArgs, CGWRedisArgs}; +use crate::cgw_errors::{Error, Result}; +use crate::cgw_remote_discovery::cgw_create_redis_client; + +use rdkafka::admin::{AdminClient, AdminOptions, NewPartitions, NewTopic, TopicReplication}; +use rdkafka::client::DefaultClientContext; +use rdkafka::config::ClientConfig; + +use std::time::Duration; + +const CGW_KAFKA_TOPICS_LIST: [&str; 6] = [ + "CnC", + "CnC_Res", + "Connection", + "Infra_Realtime", + "State", + "Topology", +]; + +async fn cgw_get_active_cgw_number(redis_args: &CGWRedisArgs) -> Result<usize> { + let redis_client = match cgw_create_redis_client(redis_args).await { + Ok(client) => client, + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to create redis client! Error: {e}" + ))); + } + }; + + let mut redis_connection = match redis_client + .get_multiplexed_tokio_connection_with_response_timeouts( + Duration::from_secs(1), + Duration::from_secs(5), + ) + .await + { + Ok(conn) => conn, + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to get redis connection! Error: {e}" + ))); + } + }; + + let redis_keys: Vec<String> = match redis::cmd("KEYS") + .arg("shard_id_*".to_string()) + .query_async(&mut redis_connection) + .await + { + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to get shard_id list from REDIS! Error: {e}" + ))); + } + Ok(keys) => keys, + }; + + Ok(redis_keys.len()) +} + +fn cgw_create_kafka_admin(kafka_args: &CGWKafkaArgs) -> Result<AdminClient<DefaultClientContext>> { + let admin_client: AdminClient<DefaultClientContext> = match ClientConfig::new() + .set( + "bootstrap.servers", + format!("{}:{}", kafka_args.kafka_host, kafka_args.kafka_port), + ) + .create() + { + Ok(client) => client, + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to create kafka admin client! Error: {e}" + ))); + } + }; + + Ok(admin_client) +} + +fn cgw_get_kafka_topics( + admin_client: &AdminClient<DefaultClientContext>, +) -> Result<Vec<(String, usize)>> { + let metadata = match admin_client + .inner() + .fetch_metadata(None, Duration::from_millis(2000)) + { + Ok(data) => data, + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to get kafka topics metadata! Error: {e}" + ))); + } + }; + + let existing_topics: Vec<(String, usize)> = metadata + .topics() + .iter() + .map(|t| (t.name().to_string(), t.partitions().len())) + .collect(); + + Ok(existing_topics) +} + +async fn cgw_create_kafka_topics(admin_client: &AdminClient<DefaultClientContext>) -> Result<()> { + let mut new_topics: Vec<NewTopic<'_>> = Vec::new(); + let default_replication: i32 = 1; + let default_topic_partitions_num: i32 = 2; + let default_cnc_topic_partitions_num: i32 = 1; + + for topic_name in CGW_KAFKA_TOPICS_LIST { + new_topics.push(NewTopic::new( + topic_name, + if topic_name == "CnC" { + default_cnc_topic_partitions_num + } else { + default_topic_partitions_num + }, + TopicReplication::Fixed(default_replication), + )); + } + + match admin_client + .create_topics(&new_topics, &AdminOptions::new()) + .await + { + Ok(results) => { + for result in results { + match result { + Ok(topic) => info!("Successfully created topic: {}", topic), + Err((topic, err)) => { + return Err(Error::KafkaInit(format!( + "Failed to create topic {topic}! Error: {err}" + ))); + } + } + } + } + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to create kafka topics! 
Error: {e}" + ))); + } + } + + Ok(()) +} + +async fn cgw_update_kafka_topics_partitions( + admin_client: &AdminClient, + topic_name: &str, + partitions_num: usize, +) -> Result<()> { + match admin_client + .create_partitions( + &[NewPartitions::new(topic_name, partitions_num)], + &AdminOptions::new(), + ) + .await + { + Ok(results) => { + for result in results { + match result { + Ok(topic) => { + info!("Successfully increased partitions for topic: {}", topic) + } + Err((topic, e)) => { + return Err(Error::KafkaInit(format!( + "Failed to update partitions num for {topic} topic! Error: {e}" + ))); + } + } + } + } + Err(e) => { + return Err(Error::KafkaInit(format!( + "Failed to update topic partitions num for! Error: {e}" + ))); + } + } + + Ok(()) +} + +pub async fn cgw_init_kafka_topics( + kafka_args: &CGWKafkaArgs, + redis_args: &CGWRedisArgs, +) -> Result<()> { + // Kafka topics creation is done at CGW start early begin + // At that moment of time we do not create shard info record in Redis + // So, just simply add 1 to received number of CGW instances + let active_cgw_number = cgw_get_active_cgw_number(redis_args).await? + 1; + let admin_client = cgw_create_kafka_admin(kafka_args)?; + let existing_topics: Vec<(String, usize)> = cgw_get_kafka_topics(&admin_client)?; + + if existing_topics.is_empty() { + error!("Creating kafka topics"); + cgw_create_kafka_topics(&admin_client).await?; + } else { + // Find missing topics + let missing_topics: Vec<&str> = CGW_KAFKA_TOPICS_LIST + .iter() + .filter(|topic| !existing_topics.iter().any(|(name, _)| name == *topic)) + .copied() + .collect(); + + if !missing_topics.is_empty() { + return Err(Error::KafkaInit(format!( + "Failed to init kafka topics! Missed kafka topics: {}", + missing_topics.join(", ") + ))); + } + + match existing_topics.iter().find(|(key, _)| key == "CnC") { + Some((topic_name, partitions_num)) => { + if active_cgw_number > *partitions_num { + error!("Updating number of partitions for CnC topic!"); + cgw_update_kafka_topics_partitions( + &admin_client, + topic_name, + active_cgw_number, + ) + .await?; + } + } + None => { + return Err(Error::KafkaInit( + "Failed to find CnC topic in existing topics list!".to_string(), + )); + } + } + } + + Ok(()) +} diff --git a/src/cgw_nb_api_listener.rs b/src/cgw_nb_api_listener.rs index 8b5b7b1..70180e8 100644 --- a/src/cgw_nb_api_listener.rs +++ b/src/cgw_nb_api_listener.rs @@ -16,6 +16,7 @@ use rdkafka::message::Message; use rdkafka::topic_partition_list::TopicPartitionList; use rdkafka::{ consumer::{stream_consumer::StreamConsumer, Consumer, ConsumerContext, Rebalance}, + producer::future_producer::OwnedDeliveryResult, producer::{FutureProducer, FutureRecord}, }; use serde::{Deserialize, Serialize}; @@ -23,6 +24,8 @@ use std::collections::HashMap; use std::net::SocketAddr; use std::ops::Range; use std::sync::Arc; +use strum::IntoEnumIterator; +use strum_macros::EnumIter; use tokio::{ runtime::{Builder, Runtime}, sync::mpsc::UnboundedSender, @@ -31,8 +34,29 @@ use tokio::{ use uuid::Uuid; type CGWConnectionServerMboxTx = UnboundedSender; -type CGWCNCConsumerType = StreamConsumer; -type CGWCNCProducerType = FutureProducer; +type CGWKafkaConsumerType = StreamConsumer; +type CGWKafkaProducerType = FutureProducer; + +#[derive(EnumIter, Eq, Hash, PartialEq)] +pub enum CGWKafkaProducerTopic { + CnCRes, + Connection, + State, + InfraRealtime, + Topology, +} + +impl std::fmt::Display for CGWKafkaProducerTopic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match *self { + 
CGWKafkaProducerTopic::CnCRes => write!(f, "CnC_Res"), + CGWKafkaProducerTopic::Connection => write!(f, "Connection"), + CGWKafkaProducerTopic::State => write!(f, "State"), + CGWKafkaProducerTopic::InfraRealtime => write!(f, "Infra_Realtime"), + CGWKafkaProducerTopic::Topology => write!(f, "Topology"), + } + } +} #[derive(Debug, Serialize)] pub struct InfraGroupCreateResponse { @@ -190,6 +214,52 @@ pub struct InfraLeaveMessage { pub reporter_shard_id: i32, } +#[derive(Debug, Serialize, Deserialize)] +pub struct InfraStateEventMessage { + pub r#type: &'static str, + pub event_type: String, + pub payload: String, + pub reporter_shard_id: i32, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct InfraRealtimeEventMessage { + pub r#type: &'static str, + pub event_type: String, + pub payload: String, + pub reporter_shard_id: i32, +} + +pub fn cgw_construct_infra_state_event_message( + event_type: String, + payload: String, + reporter_shard_id: i32, +) -> Result<String> { + let state_message = InfraStateEventMessage { + r#type: "infrastructure_state_event_message", + event_type, + payload, + reporter_shard_id, + }; + + Ok(serde_json::to_string(&state_message)?) +} + +pub fn cgw_construct_infra_realtime_event_message( + event_type: String, + payload: String, + reporter_shard_id: i32, +) -> Result<String> { + let realtime_message = InfraRealtimeEventMessage { + r#type: "infrastructure_realtime_event_message", + event_type, + payload, + reporter_shard_id, + }; + + Ok(serde_json::to_string(&realtime_message)?) +} + pub fn cgw_construct_infra_group_create_response( infra_group_id: i32, reporter_shard_id: i32, @@ -524,7 +594,7 @@ struct CGWConsumerContextData { // A bit ugly, but we need a way to get // consumer (to retrieve partition num) whenever // client->context rebalance callback is being called. - consumer_client: Option<Arc<CGWCNCConsumerType>>, + consumer_client: Option<Arc<CGWKafkaConsumerType>>, } struct CustomContext { @@ -561,10 +631,10 @@ impl CGWConsumerContextData { let hash_res = murmur2(key_bytes, DEFAULT_HASH_SEED) & 0x7fffffff; let part_idx = hash_res.rem_euclid(partition_num as u32); - if !key_map.contains_key(&part_idx) { + key_map.entry(part_idx).or_insert_with(|| { debug!("Inserted key '{key_str}' for '{part_idx}' partition"); - key_map.insert(part_idx, key_str); - } + key_str + }); } info!( @@ -710,24 +780,33 @@ impl ConsumerContext for CustomContext { } static GROUP_ID: &str = "CGW"; -const CONSUMER_TOPICS: [&str; 1] = ["CnC"]; -const PRODUCER_TOPICS: &str = "CnC_Res"; +const CONSUMER_TOPICS: &[&str] = &["CnC"]; -struct CGWCNCProducer { - p: CGWCNCProducerType, +struct CGWKafkaConsumer { + consumer: Arc<CGWKafkaConsumerType>, } -struct CGWCNCConsumer { - c: Arc<CGWCNCConsumerType>, -} +impl CGWKafkaConsumer { + pub fn new(cgw_id: i32, kafka_args: &CGWKafkaArgs, topics: &[&str]) -> Result<Self> { + let consumer = Self::create_consumer(cgw_id, kafka_args, topics)?; -impl CGWCNCConsumer { - pub fn new(cgw_id: i32, kafka_args: &CGWKafkaArgs) -> Result<Self> { - let consumer = Self::create_consumer(cgw_id, kafka_args)?; - Ok(CGWCNCConsumer { c: consumer }) + debug!( + "(consumer) Created lazy connection to kafka broker ({}:{}). 
Topics: {}", + kafka_args.kafka_host, + kafka_args.kafka_port, + topics.join(", ") + ); + + Ok(CGWKafkaConsumer { consumer }) } - fn create_consumer(cgw_id: i32, kafka_args: &CGWKafkaArgs) -> Result> { + fn create_consumer( + cgw_id: i32, + kafka_args: &CGWKafkaArgs, + topics: &[&str], + ) -> Result> { let context = CustomContext { ctx_data: std::sync::RwLock::new(CGWConsumerContextData { partition_mapping: HashMap::new(), @@ -738,7 +817,7 @@ impl CGWCNCConsumer { }), }; - let consumer: CGWCNCConsumerType = match ClientConfig::new() + let consumer: CGWKafkaConsumerType = match ClientConfig::new() .set("group.id", GROUP_ID) .set("client.id", GROUP_ID.to_string() + &cgw_id.to_string()) .set("group.instance.id", cgw_id.to_string()) @@ -765,12 +844,7 @@ impl CGWCNCConsumer { // Need to set this guy for context let consumer_clone = consumer.clone(); - debug!( - "(consumer) Created lazy connection to kafka broker ({}:{})...", - kafka_args.kafka_host, kafka_args.kafka_port, - ); - - if let Err(e) = consumer.subscribe(&CONSUMER_TOPICS) { + if let Err(e) = consumer.subscribe(topics) { error!( "Kafka consumer was unable to subscribe to {:?}! Error: {e}", CONSUMER_TOPICS @@ -786,13 +860,24 @@ impl CGWCNCConsumer { } } -impl CGWCNCProducer { - pub fn new(kafka_args: &CGWKafkaArgs) -> Result { - let prod: CGWCNCProducerType = Self::create_producer(kafka_args)?; - Ok(CGWCNCProducer { p: prod }) +struct CGWKafkaProducer { + producer: CGWKafkaProducerType, + topic: String, +} + +impl CGWKafkaProducer { + fn new(kafka_args: &CGWKafkaArgs, topic: String) -> Result { + let producer: CGWKafkaProducerType = Self::create_producer(kafka_args)?; + + debug!( + "(producer) Created lazy connection to kafka broker ({}:{}). Topic: {topic}", + kafka_args.kafka_host, kafka_args.kafka_port, + ); + + Ok(CGWKafkaProducer { producer, topic }) } - fn create_producer(kafka_args: &CGWKafkaArgs) -> Result { + fn create_producer(kafka_args: &CGWKafkaArgs) -> Result { let producer: FutureProducer = match ClientConfig::new() .set( "bootstrap.servers", @@ -808,20 +893,52 @@ impl CGWCNCProducer { } }; - debug!( - "(producer) Created lazy connection to kafka broker ({}:{})...", - kafka_args.kafka_host, kafka_args.kafka_port, - ); - Ok(producer) } + + async fn send(&self, key: String, payload: String) -> OwnedDeliveryResult { + self.producer + .send( + FutureRecord::to(&self.topic).key(&key).payload(&payload), + Duration::from_secs(0), + ) + .await + } +} + +struct CGWKafkaProducersMap { + kafka_producer_map: HashMap, +} + +impl CGWKafkaProducersMap { + fn new(kafka_args: &CGWKafkaArgs) -> Result { + let mut map: HashMap = HashMap::new(); + + for topic in CGWKafkaProducerTopic::iter() { + match CGWKafkaProducer::new(kafka_args, topic.to_string()) { + Ok(producer) => map.insert(topic, producer), + Err(e) => { + error!("Failed to create Kafka producer for topic: {topic}. Error: {e}"); + return Err(e); + } + }; + } + + Ok(CGWKafkaProducersMap { + kafka_producer_map: map, + }) + } + + fn get(&self, key: CGWKafkaProducerTopic) -> Option<&CGWKafkaProducer> { + self.kafka_producer_map.get(&key) + } } pub struct CGWNBApiClient { working_runtime_handle: Runtime, cgw_server_tx_mbox: CGWConnectionServerMboxTx, - prod: CGWCNCProducer, - consumer: Arc, + producers: CGWKafkaProducersMap, + consumer: Arc, // TBD: split different implementations through a defined trait, // that implements async R W operations? 
} @@ -839,12 +956,14 @@ impl CGWNBApiClient { .enable_all() .build()?; - let consumer: Arc<CGWCNCConsumer> = Arc::new(CGWCNCConsumer::new(cgw_id, kafka_args)?); + let producers = CGWKafkaProducersMap::new(kafka_args)?; + let consumer: Arc<CGWKafkaConsumer> = + Arc::new(CGWKafkaConsumer::new(cgw_id, kafka_args, CONSUMER_TOPICS)?); let consumer_clone = consumer.clone(); let cl = Arc::new(CGWNBApiClient { working_runtime_handle: working_runtime_h, cgw_server_tx_mbox: cgw_tx.clone(), - prod: CGWCNCProducer::new(kafka_args)?, + producers, consumer: consumer_clone, }); let cl_clone = cl.clone(); cl.working_runtime_handle.spawn(async move { loop { let cl_clone = cl_clone.clone(); - let stream_processor = consumer.c.stream().try_for_each(|borrowed_message| { - let cl_clone = cl_clone.clone(); - async move { - // Process each message - // Borrowed messages can't outlive the consumer they are received from, so they need to - // be owned in order to be sent to a separate thread. - //record_owned_message_receipt(&owned_message).await; - let owned = borrowed_message.detach(); - - let key = match owned.key_view::<str>() { - None => "", - Some(Ok(s)) => s, - Some(Err(e)) => { - warn!("Error while deserializing message payload! Error: {e}"); - "" - } - }; - - let payload = match owned.payload_view::<str>() { - None => "", - Some(Ok(s)) => s, - Some(Err(e)) => { - warn!("Deserializing message payload failed! Error: {e}"); - "" - } - }; - cl_clone - .enqueue_mbox_message_to_cgw_server( - key.to_string(), - payload.to_string(), - ) - .await; - Ok(()) - } - }); + let stream_processor = + consumer.consumer.stream().try_for_each(|borrowed_message| { + let cl_clone = cl_clone.clone(); + async move { + // Process each message + // Borrowed messages can't outlive the consumer they are received from, so they need to + // be owned in order to be sent to a separate thread. + //record_owned_message_receipt(&owned_message).await; + let owned = borrowed_message.detach(); + + let key = match owned.key_view::<str>() { + None => "", + Some(Ok(s)) => s, + Some(Err(e)) => { + warn!("Error while deserializing message payload! Error: {e}"); + "" + } + }; + + let payload = match owned.payload_view::<str>() { + None => "", + Some(Ok(s)) => s, + Some(Err(e)) => { + warn!("Deserializing message payload failed! Error: {e}"); + "" + } + }; + cl_clone + .enqueue_mbox_message_to_cgw_server( + key.to_string(), + payload.to_string(), + ) + .await; + Ok(()) + } + }); if let Err(e) = stream_processor.await { error!("Failed to create NB API Client! 
Error: {e}"); @@ -899,7 +1019,7 @@ impl CGWNBApiClient { pub fn get_partition_to_local_shard_mapping(&self) -> Vec<(u32, String)> { let mut return_vec: Vec<(u32, String)> = Vec::new(); - if let Ok(mut ctx) = self.consumer.c.context().ctx_data.write() { + if let Ok(mut ctx) = self.consumer.consumer.context().ctx_data.write() { let (assigned_partition_list, mut partition_mapping) = ctx.get_partition_info(); if !partition_mapping.is_empty() @@ -917,16 +1037,23 @@ impl CGWNBApiClient { return_vec } - pub async fn enqueue_mbox_message_from_cgw_server(&self, key: String, payload: String) { - let produce_future = self.prod.p.send( - FutureRecord::to(PRODUCER_TOPICS) - .key(&key) - .payload(&payload), - Duration::from_secs(0), - ); - - if let Err((e, _)) = produce_future.await { - error!("{e}") + pub async fn enqueue_mbox_message_from_cgw_server( + &self, + key: String, + payload: String, + topic: CGWKafkaProducerTopic, + ) { + if let Some(producer) = self.producers.get(topic) { + let produce_future = producer.send(key, payload); + + if let Err((e, _)) = produce_future.await { + error!("{e}") + } + } else { + error!( + "Failed to get kafka producer for {} topic!", + CGWKafkaProducerTopic::CnCRes + ); } } diff --git a/src/cgw_remote_discovery.rs b/src/cgw_remote_discovery.rs index b893521..4e824ea 100644 --- a/src/cgw_remote_discovery.rs +++ b/src/cgw_remote_discovery.rs @@ -157,7 +157,7 @@ pub struct CGWRemoteDiscovery { local_shard_id: i32, } -async fn cgw_create_redis_client(redis_args: &CGWRedisArgs) -> Result { +pub async fn cgw_create_redis_client(redis_args: &CGWRedisArgs) -> Result { let redis_client_info = ConnectionInfo { addr: match redis_args.redis_tls { true => redis::ConnectionAddr::TcpTls { @@ -549,7 +549,7 @@ impl CGWRemoteDiscovery { + ":" + &shard.server_port.to_string(); let cgw_iface = CGWRemoteIface { - shard: shard, + shard, client: CGWRemoteClient::new(endpoint_str)?, }; lock.insert(cgw_iface.shard.id, cgw_iface); @@ -916,7 +916,7 @@ impl CGWRemoteDiscovery { if device.get_device_state() == CGWDeviceState::CGWDeviceConnected { device.set_device_remains_in_db(false); device.set_device_group_id(0); - devices_to_update.push((key.clone(), device.clone())); + devices_to_update.push((*key, device.clone())); } else { devices_to_remove.push(*key); } @@ -1447,7 +1447,7 @@ impl CGWRemoteDiscovery { } }; - let mut splitted_key = key.split_terminator("|"); + let mut splitted_key = key.split_terminator('|'); let _shard_id = splitted_key.next(); let device_mac = match splitted_key.next() { Some(mac) => match MacAddress::from_str(mac) { @@ -1526,13 +1526,12 @@ impl CGWRemoteDiscovery { } for key in redis_keys { - let res: RedisResult<()> = redis::cmd("DEL").arg(&key).query_async(&mut con).await; - if res.is_err() { - warn!( - "Failed to delete cache entry {}! Error: {}", - key, - res.err().unwrap() - ); + if let Err(res) = redis::cmd("DEL") + .arg(&key) + .query_async::(&mut con) + .await + { + warn!("Failed to delete cache entry {}! 
Error: {}", key, res); } } } diff --git a/src/cgw_ucentral_ap_parser.rs b/src/cgw_ucentral_ap_parser.rs index 4e9c0de..3c88377 100644 --- a/src/cgw_ucentral_ap_parser.rs +++ b/src/cgw_ucentral_ap_parser.rs @@ -9,7 +9,7 @@ use crate::cgw_errors::{Error, Result}; use crate::cgw_ucentral_parser::{ CGWUCentralEvent, CGWUCentralEventConnect, CGWUCentralEventConnectParamsCaps, - CGWUCentralEventLog, CGWUCentralEventRealtimeEvent, CGWUCentralEventRealtimeEventType, + CGWUCentralEventRealtimeEvent, CGWUCentralEventRealtimeEventType, CGWUCentralEventRealtimeEventWClientJoin, CGWUCentralEventRealtimeEventWClientLeave, CGWUCentralEventReply, CGWUCentralEventState, CGWUCentralEventStateClients, CGWUCentralEventStateClientsData, CGWUCentralEventStateClientsType, @@ -832,29 +832,8 @@ pub fn cgw_ucentral_ap_parse_message( warn!("Received malformed JSONRPC msg!"); Error::UCentralParser("JSONRPC field is missing in message") })?; - if method == "log" { - let params = map.get("params").ok_or_else(|| { - warn!("Received JRPC without params!"); - Error::UCentralParser("Received JRPC without params") - })?; - let serial = MacAddress::from_str( - params["serial"] - .as_str() - .ok_or_else(|| Error::UCentralParser("Failed to parse serial from params"))?, - )?; - - let log_event = CGWUCentralEvent { - serial, - evt_type: CGWUCentralEventType::Log(CGWUCentralEventLog { - serial, - log: params["log"].to_string(), - severity: serde_json::from_value(params["severity"].clone())?, - }), - decompressed: None, - }; - - return Ok(log_event); - } else if method == "connect" { + let mut event_type: CGWUCentralEventType = CGWUCentralEventType::Unknown; + if method == "connect" { let params = map .get("params") .ok_or_else(|| Error::UCentralParser("Params are missing"))?; @@ -867,7 +846,12 @@ pub fn cgw_ucentral_ap_parse_message( .as_str() .ok_or_else(|| Error::UCentralParser("Failed to parse firmware from params"))? 
.to_string(); - let caps: CGWUCentralEventConnectParamsCaps = + + let uuid = params["uuid"] + .as_u64() + .ok_or_else(|| Error::UCentralParser("Failed to parse uuid from params"))?; + + let capabilities: CGWUCentralEventConnectParamsCaps = serde_json::from_value(params["capabilities"].clone())?; let connect_event = CGWUCentralEvent { @@ -875,8 +859,8 @@ pub fn cgw_ucentral_ap_parse_message( evt_type: CGWUCentralEventType::Connect(CGWUCentralEventConnect { serial, firmware, - uuid: 1, - capabilities: caps, + uuid, + capabilities, }), decompressed: None, }; @@ -892,7 +876,35 @@ pub fn cgw_ucentral_ap_parse_message( "Received unexpected event while topomap feature is disabled", )); } + } else if method == "log" { + event_type = CGWUCentralEventType::Log; + } else if method == "healthcheck" { + event_type = CGWUCentralEventType::Healthcheck; + } else if method == "alarm" { + event_type = CGWUCentralEventType::Alarm; + } else if method == "wifiscan" { + event_type = CGWUCentralEventType::WifiScan; + } else if method == "crashlog" { + event_type = CGWUCentralEventType::CrashLog; + } else if method == "rebootLog" { + event_type = CGWUCentralEventType::RebootLog; + } else if method == "cfgpending" { + event_type = CGWUCentralEventType::CfgPending; + } else if method == "deviceupdate" { + event_type = CGWUCentralEventType::DeviceUpdate; + } else if method == "ping" { + event_type = CGWUCentralEventType::Ping; + } else if method == "recovery" { + event_type = CGWUCentralEventType::Recovery; + } else if method == "venue_broadcast" { + event_type = CGWUCentralEventType::VenueBroadcast; } + + return Ok(CGWUCentralEvent { + serial: MacAddress::default(), + evt_type: event_type, + decompressed: None, + }); } else if map.contains_key("result") { if let Value::Object(result) = &map["result"] { if !result.contains_key("id") { diff --git a/src/cgw_ucentral_parser.rs b/src/cgw_ucentral_parser.rs index 7fe1a37..ad8d268 100644 --- a/src/cgw_ucentral_parser.rs +++ b/src/cgw_ucentral_parser.rs @@ -231,7 +231,7 @@ pub enum CGWUCentralEventType { Connect(CGWUCentralEventConnect), State(CGWUCentralEventState), Healthcheck, - Log(CGWUCentralEventLog), + Log, Event, Alarm, WifiScan, @@ -244,6 +244,33 @@ pub enum CGWUCentralEventType { VenueBroadcast, RealtimeEvent(CGWUCentralEventRealtimeEvent), Reply(CGWUCentralEventReply), + Unknown, +} + +impl std::fmt::Display for CGWUCentralEventType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + CGWUCentralEventType::Connect(_) => write!(f, "connect"), + CGWUCentralEventType::State(_) => write!(f, "state"), + CGWUCentralEventType::Healthcheck => write!(f, "healthcheck"), + CGWUCentralEventType::Log => write!(f, "log"), + CGWUCentralEventType::Event => write!(f, "event"), + CGWUCentralEventType::Alarm => write!(f, "alarm"), + CGWUCentralEventType::WifiScan => write!(f, "wifiscan"), + CGWUCentralEventType::CrashLog => write!(f, "crashlog"), + CGWUCentralEventType::RebootLog => write!(f, "rebootLog"), + CGWUCentralEventType::CfgPending => write!(f, "cfgpending"), + CGWUCentralEventType::DeviceUpdate => write!(f, "deviceupdate"), + CGWUCentralEventType::Ping => write!(f, "ping"), + CGWUCentralEventType::Recovery => write!(f, "recovery"), + CGWUCentralEventType::VenueBroadcast => write!(f, "venue_broadcast"), + CGWUCentralEventType::RealtimeEvent(_) => { + write!(f, "realtime_event") + } + CGWUCentralEventType::Reply(_) => write!(f, "reply"), + CGWUCentralEventType::Unknown => write!(f, "unknown"), + } + } } #[derive(Debug, Deserialize, Serialize)] 
@@ -352,11 +379,17 @@ pub fn cgw_ucentral_parse_connect_event(message: Message) -> Result<CGWUCentralEvent> {
diff --git a/src/cgw_ucentral_switch_parser.rs b/src/cgw_ucentral_switch_parser.rs
--- a/src/cgw_ucentral_switch_parser.rs
+++ b/src/cgw_ucentral_switch_parser.rs
@@ ... @@ pub fn cgw_ucentral_switch_parse_message(
             warn!("Received JRPC without params!");
             Error::UCentralParser("Received JRPC without params")
         })?;
-        if method == "log" {
-            let params = map
-                .get("params")
-                .ok_or_else(|| Error::UCentralParser("Params are missing"))?;
-            let serial = MacAddress::from_str(
-                params["serial"]
-                    .as_str()
-                    .ok_or_else(|| Error::UCentralParser("Failed to parse serial from params"))?,
-            )?;
-
-            let log_event = CGWUCentralEvent {
-                serial,
-                evt_type: CGWUCentralEventType::Log(CGWUCentralEventLog {
-                    serial,
-                    log: params["log"].to_string(),
-                    severity: serde_json::from_value(params["severity"].clone())?,
-                }),
-                decompressed: None,
-            };
-
-            return Ok(log_event);
-        } else if method == "state" {
+        if method == "state" {
             let params = map
                 .get("params")
                 .ok_or_else(|| Error::UCentralParser("Params are missing"))?;
@@ -251,7 +231,37 @@ pub fn cgw_ucentral_switch_parse_message(
                 return Ok(state_event);
             }
+        } else if method == "log" {
+            event_type = CGWUCentralEventType::Log;
+        } else if method == "healthcheck" {
+            event_type = CGWUCentralEventType::Healthcheck;
+        } else if method == "event" {
+            event_type = CGWUCentralEventType::Event;
+        } else if method == "alarm" {
+            event_type = CGWUCentralEventType::Alarm;
+        } else if method == "wifiscan" {
+            event_type = CGWUCentralEventType::WifiScan;
+        } else if method == "crashlog" {
+            event_type = CGWUCentralEventType::CrashLog;
+        } else if method == "rebootLog" {
+            event_type = CGWUCentralEventType::RebootLog;
+        } else if method == "cfgpending" {
+            event_type = CGWUCentralEventType::CfgPending;
+        } else if method == "deviceupdate" {
+            event_type = CGWUCentralEventType::DeviceUpdate;
+        } else if method == "ping" {
+            event_type = CGWUCentralEventType::Ping;
+        } else if method == "recovery" {
+            event_type = CGWUCentralEventType::Recovery;
+        } else if method == "venue_broadcast" {
+            event_type = CGWUCentralEventType::VenueBroadcast;
         }
+
+        return Ok(CGWUCentralEvent {
+            serial: MacAddress::default(),
+            evt_type: event_type,
+            decompressed: None,
+        });
     } else if map.contains_key("result") {
         // For now, let's mimic AP's basic reply / result
         // format.
diff --git a/src/cgw_ucentral_topology_map.rs b/src/cgw_ucentral_topology_map.rs
index 533743f..6cb370d 100644
--- a/src/cgw_ucentral_topology_map.rs
+++ b/src/cgw_ucentral_topology_map.rs
@@ -3,7 +3,7 @@ use crate::{
     cgw_device::CGWDeviceType,
     cgw_nb_api_listener::{
         cgw_construct_client_join_msg, cgw_construct_client_leave_msg,
-        cgw_construct_client_migrate_msg,
+        cgw_construct_client_migrate_msg, CGWKafkaProducerTopic,
     },
     cgw_ucentral_parser::{
         CGWUCentralEvent, CGWUCentralEventRealtimeEventType, CGWUCentralEventStateClientsType,
@@ -39,6 +39,38 @@ type ClientsMigrateList = Vec<(MacAddress, MacAddress, String, String)>;
 // Last seen, ssid, band
 type ClientsConnectedList = (ClientLastSeenTimestamp, String, String);
+// Topology map item
+type TopologyMapItem = HashMap<
+    i32,
+    (
+        CGWUCentralTopologyMapData,
+        // This hashmap is needed to keep track of _all_ topomap nodes
+        // connected (directly reported) by this device, to detect _migration_
+        // process:
+        // we need to keep track whenever WiFi client of AP_1, for example,
+        // 'silently' migrates to AP_2.
+        //
+        // We should also track the last time seen value of this
+        // client / node, to make appropriate decision
+        // whenever leave/join/migrate happens.
+        //
+        // LIMITATION:
+        // * Works only on a per-group basis (if wifi-client migrates to
+        //   another GID, this event would be missed)
+        //   (as per current implementation).
+        // Track key:client mac, values:parent AP mac, last seen timestamp, ssid and band
+        HashMap<MacAddress, (MacAddress, ClientLastSeenTimestamp, String, String)>,
+    ),
+>;
+
+type TopologyMapNode = (
+    MacAddress,
+    (
+        CGWUCentralTopologyMapNodeOrigin,
+        CGWUCentralTopologyMapConnections,
+        HashMap<MacAddress, ClientsConnectedList>,
+    ),
+);
 
 struct CGWTopologyMapQueueMessage {
     evt: CGWUCentralEvent,
     dev_type: CGWDeviceType,
@@ -149,32 +181,7 @@ struct CGWUCentralTopologyMapData {
 #[derive(Debug)]
 pub struct CGWUCentralTopologyMap {
     // Stored on a per-gid basis
-    data: Arc<
-        RwLock<
-            HashMap<
-                i32,
-                (
-                    CGWUCentralTopologyMapData,
-                    // This hashmap is needed to keep track of _all_ topomap nodes
-                    // connected (directly reported) by this device, to detect _migration_
-                    // process:
-                    // we need to keep track whenever WiFi client of AP_1, for example,
-                    // 'silently' migrates to AP_2.
-                    //
-                    // We should also track the last time seen value of this
-                    // client / node, to make appropriate decision
-                    // whenever leave/join/migrate happens.
-                    //
-                    // LIMITATION:
-                    // * Works only on a per-group basis (if wifi-client migrates to
-                    //   another GID, this event would be missed)
-                    //   (as per current implementation).
-                    // Track key:client mac, values:parent AP mac, last seen timestamp, ssid and band
-                    HashMap<MacAddress, (MacAddress, ClientLastSeenTimestamp, String, String)>,
-                ),
-            >,
-        >,
-    >,
+    data: Arc<RwLock<TopologyMapItem>>,
     queue: (
         Arc,
         Arc>,
@@ -439,7 +446,11 @@ impl CGWUCentralTopologyMap {
         for (client_mac, new_ssid, new_band) in clients_list {
             let msg = cgw_construct_client_join_msg(gid, client_mac, node_mac, new_ssid, new_band);
             if let Ok(r) = msg {
-                let _ = conn_server.enqueue_mbox_message_from_device_to_nb_api_c(gid, r);
+                let _ = conn_server.enqueue_mbox_message_from_device_to_nb_api_c(
+                    gid,
+                    r,
+                    CGWKafkaProducerTopic::Topology,
+                );
             } else {
                 warn!("Failed to convert client leave event to string!");
             }
@@ -469,7 +480,11 @@ impl CGWUCentralTopologyMap {
         for (client_mac, band) in clients_list {
             let msg = cgw_construct_client_leave_msg(gid, client_mac, node_mac, band);
             if let Ok(r) = msg {
-                let _ = conn_server.enqueue_mbox_message_from_device_to_nb_api_c(gid, r);
+                let _ = conn_server.enqueue_mbox_message_from_device_to_nb_api_c(
+                    gid,
+                    r,
+                    CGWKafkaProducerTopic::Topology,
+                );
             } else {
                 warn!("Failed to convert client leave event to string!");
             }
@@ -504,7 +519,11 @@ impl CGWUCentralTopologyMap {
                 new_band,
             );
             if let Ok(r) = msg {
-                let _ = conn_server.enqueue_mbox_message_from_device_to_nb_api_c(gid, r);
+                let _ = conn_server.enqueue_mbox_message_from_device_to_nb_api_c(
+                    gid,
+                    r,
+                    CGWKafkaProducerTopic::Topology,
+                );
             } else {
                 warn!("Failed to convert client leave event to string!");
             }
@@ -556,14 +575,7 @@ impl CGWUCentralTopologyMap {
         let mut upstream_lldp_node: Option<(MacAddress, CGWUCentralEventStatePort)> = None;
         let mut downstream_lldp_nodes: HashMap<MacAddress, CGWUCentralEventStatePort> = HashMap::new();
-        let mut nodes_to_create: Vec<(
-            MacAddress,
-            (
-                CGWUCentralTopologyMapNodeOrigin,
-                CGWUCentralTopologyMapConnections,
-                HashMap<MacAddress, ClientsConnectedList>,
-            ),
-        )> = Vec::new();
+        let mut nodes_to_create: Vec<TopologyMapNode> = Vec::new();
 
         // Map connections that will be populated on behalf of device
         // that sent the state data itself.
diff --git a/src/main.rs b/src/main.rs index 4763308..d1220ae 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,6 +6,7 @@ mod cgw_db_accessor; mod cgw_device; mod cgw_devices_cache; mod cgw_errors; +mod cgw_kafka_init; mod cgw_metrics; mod cgw_nb_api_listener; mod cgw_remote_client; @@ -26,6 +27,7 @@ extern crate log; extern crate lazy_static; use cgw_app_args::AppArgs; +use cgw_kafka_init::cgw_init_kafka_topics; use cgw_runtime::cgw_initialize_runtimes; use nix::sys::socket::{setsockopt, sockopt}; @@ -356,6 +358,12 @@ async fn main() -> Result<()> { // Configure logger setup_logger(args.log_level); + // Initialize Kafka topics + if let Err(e) = cgw_init_kafka_topics(&args.kafka_args, &args.redis_args).await { + error!("Failed to initialize kafka topics! Error: {e}"); + return Err(e); + } + // Initialize runtimes if let Err(e) = cgw_initialize_runtimes(args.wss_args.wss_t_num) { error!("Failed to initialize CGW runtimes! Error: {e}"); diff --git a/tests/conftest.py b/tests/conftest.py index dcde3bb..481dbfa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -59,7 +59,7 @@ def __init__(self): producer = KafkaProducer(db='localhost:9092', topic='CnC') consumer = KafkaConsumer( - db='localhost:9092', topic='CnC_Res', consumer_timeout=12000) + db='localhost:9092', topics=['CnC_Res', 'Connection', 'State', 'Infra_Realtime', 'Topology'], consumer_timeout=12000) admin = KafkaAdmin(host='localhost', port=9092) self.kafka_producer = producer diff --git a/tests/test_cgw_infra_events.py b/tests/test_cgw_infra_events.py new file mode 100644 index 0000000..2bed679 --- /dev/null +++ b/tests/test_cgw_infra_events.py @@ -0,0 +1,4173 @@ +import pytest +import uuid +import time + +from metrics import cgw_metrics_get_groups_assigned_num, \ + cgw_metrics_get_group_infras_assigned_num, \ + cgw_metrics_get_connections_num + + +class TestCgwInfraEvents: + @pytest.mark.usefixtures("test_context", + "cgw_probe", + "kafka_probe", + "redis_probe", + "psql_probe") + def test_infra_state_event(self, test_context): + assert test_context.kafka_producer.is_connected(), \ + f'Cannot create default group: kafka producer is not connected to Kafka' + + assert test_context.kafka_consumer.is_connected(), \ + f'Cannot create default group: kafka consumer is not connected to Kafka' + + default_shard_id = test_context.default_shard_id() + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + uuid_val = uuid.uuid4() + group_id = 100 + + # Create single group + test_context.kafka_producer.handle_single_group_create( + str(group_id), uuid_val.int, default_shard_id) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive create group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive create group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_create_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group create failed!') + + # Get group 
info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra group create failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra group create failed!') + + # Validate group + assert group_info_psql[0] == int( + group_info_redis.get('gid')) == group_id + + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 1 + + # Infra add + infra_mac = test_context.default_dev_sim_mac() + test_context.kafka_producer.handle_single_device_assign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra assign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra assign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL 
after reconnect." + + test_context.device_sim.send_state_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_state_event_message') + if ret_msg is None: + print('Failed to receive infra state event message!') + raise Exception( + 'Failed to receive infra state event message!') + + assert ret_msg.topic == 'State' + assert ret_msg.value['type'] == 'infrastructure_state_event_message' + assert ret_msg.value['event_type'] == 'state' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infra number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + group_info_redis = 
test_context.redis_client.get_infrastructure_group(
+            group_id)
+        assert group_info_redis == {}
+
+        # Validate group removed from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        assert group_info_psql == None
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+    @pytest.mark.usefixtures("test_context",
+                             "cgw_probe",
+                             "kafka_probe",
+                             "redis_probe",
+                             "psql_probe")
+    def test_infra_healthcheck_event(self, test_context):
+        assert test_context.kafka_producer.is_connected(), \
+            f'Cannot create default group: kafka producer is not connected to Kafka'
+
+        assert test_context.kafka_consumer.is_connected(), \
+            f'Cannot create default group: kafka consumer is not connected to Kafka'
+
+        default_shard_id = test_context.default_shard_id()
+
+        # Get shard info from Redis
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+        uuid_val = uuid.uuid4()
+        group_id = 100
+
+        # Create single group
+        test_context.kafka_producer.handle_single_group_create(
+            str(group_id), uuid_val.int, default_shard_id)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if not ret_msg:
+            print('Failed to receive create group result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive create group result when expected')
+
+        assert (ret_msg.value['type'] ==
+                'infrastructure_group_create_response')
+        assert (int(ret_msg.value['infra_group_id']) == group_id)
+        assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+        if ret_msg.value['success'] is False:
+            print(ret_msg.value['error_message'])
+            raise Exception('Infra group create failed!')
+
+        # Get group info from Redis
+        group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        if not group_info_redis:
+            print(f'Failed to get group {group_id} info from Redis!')
+            raise Exception('Infra group create failed!')
+
+        # Get group info from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        if not group_info_psql:
+            print(f'Failed to get group {group_id} info from PSQL!')
+            raise Exception('Infra group create failed!')
+
+        # Validate group
+        assert group_info_psql[0] == int(
+            group_info_redis.get('gid')) == group_id
+
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 1
+
+        # Infra add
+        infra_mac = test_context.default_dev_sim_mac()
+        test_context.kafka_producer.handle_single_device_assign(
+            str(group_id), infra_mac, uuid_val.int)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if ret_msg is None:
+            print('Failed to receive infra assign result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive infra assign result when expected')
+
+        if ret_msg.value['success'] is False:
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
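Every test in this file repeats the same create-group / assign-infra / connect preamble and the mirrored teardown. A hedged sketch of a pytest fixture that could fold that boilerplate away (the fixture name `connected_infra` is illustrative; every call it makes mirrors the test bodies in this file):

```python
import time
import uuid

import pytest


@pytest.fixture
def connected_infra(test_context):
    """Sketch: create group 100, assign the sim infra, connect it to CGW."""
    group_id = 100
    uuid_val = uuid.uuid4()
    shard_id = test_context.default_shard_id()

    test_context.kafka_producer.handle_single_group_create(
        str(group_id), uuid_val.int, shard_id)
    assert test_context.kafka_consumer.get_result_msg(uuid_val.int)

    infra_mac = test_context.default_dev_sim_mac()
    test_context.kafka_producer.handle_single_device_assign(
        str(group_id), infra_mac, uuid_val.int)
    assert test_context.kafka_consumer.get_result_msg(uuid_val.int)

    test_context.device_sim.connect()
    test_context.device_sim.send_hello(test_context.device_sim._socket)
    time.sleep(1)  # give CGW time to register the connection in metrics

    yield group_id, infra_mac

    # Teardown mirrors the tail of each test: disconnect, deassign, delete.
    test_context.device_sim.disconnect()
    u = uuid.uuid4().int
    test_context.kafka_producer.handle_single_device_deassign(
        str(group_id), infra_mac, u)
    test_context.kafka_consumer.get_result_msg(u)
    u = uuid.uuid4().int
    test_context.kafka_producer.handle_single_group_delete(str(group_id), u)
    test_context.kafka_consumer.get_result_msg(u)
```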
+ + test_context.device_sim.send_healthcheck_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_state_event_message') + if ret_msg is None: + print('Failed to receive infra state event message!') + raise Exception( + 'Failed to receive infra state event message!') + + assert ret_msg.topic == 'State' + assert ret_msg.value['type'] == 'infrastructure_state_event_message' + assert ret_msg.value['event_type'] == 'healthcheck' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infra number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + group_info_redis = 
test_context.redis_client.get_infrastructure_group(
+            group_id)
+        assert group_info_redis == {}
+
+        # Validate group removed from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        assert group_info_psql == None
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+    @pytest.mark.usefixtures("test_context",
+                             "cgw_probe",
+                             "kafka_probe",
+                             "redis_probe",
+                             "psql_probe")
+    def test_infra_crashlog_event(self, test_context):
+        assert test_context.kafka_producer.is_connected(), \
+            f'Cannot create default group: kafka producer is not connected to Kafka'
+
+        assert test_context.kafka_consumer.is_connected(), \
+            f'Cannot create default group: kafka consumer is not connected to Kafka'
+
+        default_shard_id = test_context.default_shard_id()
+
+        # Get shard info from Redis
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+        uuid_val = uuid.uuid4()
+        group_id = 100
+
+        # Create single group
+        test_context.kafka_producer.handle_single_group_create(
+            str(group_id), uuid_val.int, default_shard_id)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if not ret_msg:
+            print('Failed to receive create group result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive create group result when expected')
+
+        assert (ret_msg.value['type'] ==
+                'infrastructure_group_create_response')
+        assert (int(ret_msg.value['infra_group_id']) == group_id)
+        assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+        if ret_msg.value['success'] is False:
+            print(ret_msg.value['error_message'])
+            raise Exception('Infra group create failed!')
+
+        # Get group info from Redis
+        group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        if not group_info_redis:
+            print(f'Failed to get group {group_id} info from Redis!')
+            raise Exception('Infra group create failed!')
+
+        # Get group info from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        if not group_info_psql:
+            print(f'Failed to get group {group_id} info from PSQL!')
+            raise Exception('Infra group create failed!')
+
+        # Validate group
+        assert group_info_psql[0] == int(
+            group_info_redis.get('gid')) == group_id
+
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 1
+
+        # Infra add
+        infra_mac = test_context.default_dev_sim_mac()
+        test_context.kafka_producer.handle_single_device_assign(
+            str(group_id), infra_mac, uuid_val.int)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if ret_msg is None:
+            print('Failed to receive infra assign result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive infra assign result when expected')
+
+        if ret_msg.value['success'] is False:
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
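`get_msg_by_type` is relied on here to scan every subscribed topic (conftest.py subscribes the consumer to `CnC_Res`, `Connection`, `State`, `Infra_Realtime` and `Topology`) until a payload with the requested `type` arrives. A minimal sketch of such a filter, assuming a kafka-python style consumer; the function below is an assumption for illustration, not the wrapper's actual implementation:

```python
import json
import time


def get_msg_by_type(consumer, wanted_type, timeout_s=12.0):
    """Poll all subscribed topics until a message whose JSON body carries
    value['type'] == wanted_type arrives, or the timeout expires."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        # kafka-python's poll() returns {TopicPartition: [records]}
        for records in consumer.poll(timeout_ms=500).values():
            for record in records:
                value = json.loads(record.value)
                if value.get("type") == wanted_type:
                    return record
    return None
```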
+ + test_context.device_sim.send_crashlog_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_realtime_event_message') + if ret_msg is None: + print('Failed to receive infra realtime event message!') + raise Exception( + 'Failed to receive infra realtime event message!') + + assert ret_msg.topic == 'Infra_Realtime' + assert ret_msg.value['type'] == 'infrastructure_realtime_event_message' + assert ret_msg.value['event_type'] == 'crashlog' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infra number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + 
group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        assert group_info_redis == {}
+
+        # Validate group removed from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        assert group_info_psql == None
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+    @pytest.mark.usefixtures("test_context",
+                             "cgw_probe",
+                             "kafka_probe",
+                             "redis_probe",
+                             "psql_probe")
+    def test_infra_rebootlog_event(self, test_context):
+        assert test_context.kafka_producer.is_connected(), \
+            f'Cannot create default group: kafka producer is not connected to Kafka'
+
+        assert test_context.kafka_consumer.is_connected(), \
+            f'Cannot create default group: kafka consumer is not connected to Kafka'
+
+        default_shard_id = test_context.default_shard_id()
+
+        # Get shard info from Redis
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+        uuid_val = uuid.uuid4()
+        group_id = 100
+
+        # Create single group
+        test_context.kafka_producer.handle_single_group_create(
+            str(group_id), uuid_val.int, default_shard_id)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if not ret_msg:
+            print('Failed to receive create group result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive create group result when expected')
+
+        assert (ret_msg.value['type'] ==
+                'infrastructure_group_create_response')
+        assert (int(ret_msg.value['infra_group_id']) == group_id)
+        assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+        if ret_msg.value['success'] is False:
+            print(ret_msg.value['error_message'])
+            raise Exception('Infra group create failed!')
+
+        # Get group info from Redis
+        group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        if not group_info_redis:
+            print(f'Failed to get group {group_id} info from Redis!')
+            raise Exception('Infra group create failed!')
+
+        # Get group info from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        if not group_info_psql:
+            print(f'Failed to get group {group_id} info from PSQL!')
+            raise Exception('Infra group create failed!')
+
+        # Validate group
+        assert group_info_psql[0] == int(
+            group_info_redis.get('gid')) == group_id
+
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 1
+
+        # Infra add
+        infra_mac = test_context.default_dev_sim_mac()
+        test_context.kafka_producer.handle_single_device_assign(
+            str(group_id), infra_mac, uuid_val.int)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if ret_msg is None:
+            print('Failed to receive infra assign result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive infra assign result when expected')
+
+        if ret_msg.value['success'] is False:
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
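The assertions below pin down exactly three properties of the northbound payload: the carrying topic, the `type` envelope field, and the `event_type`. A small helper capturing just that contract (the helper name is illustrative; any other payload fields should be treated as implementation-defined):

```python
def check_realtime_event(msg, expected_event_type):
    """Validate the only fields these tests actually assert on."""
    assert msg.topic == 'Infra_Realtime'
    assert msg.value['type'] == 'infrastructure_realtime_event_message'
    assert msg.value['event_type'] == expected_event_type
```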
+ + test_context.device_sim.send_rebootlog_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_realtime_event_message') + if ret_msg is None: + print('Failed to receive infra realtime event message!') + raise Exception( + 'Failed to receive infra realtime event message!') + + assert ret_msg.topic == 'Infra_Realtime' + assert ret_msg.value['type'] == 'infrastructure_realtime_event_message' + assert ret_msg.value['event_type'] == 'rebootLog' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infra number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + 
group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        assert group_info_redis == {}
+
+        # Validate group removed from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        assert group_info_psql == None
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+    @pytest.mark.usefixtures("test_context",
+                             "cgw_probe",
+                             "kafka_probe",
+                             "redis_probe",
+                             "psql_probe")
+    def test_infra_cfgpending_event(self, test_context):
+        assert test_context.kafka_producer.is_connected(), \
+            f'Cannot create default group: kafka producer is not connected to Kafka'
+
+        assert test_context.kafka_consumer.is_connected(), \
+            f'Cannot create default group: kafka consumer is not connected to Kafka'
+
+        default_shard_id = test_context.default_shard_id()
+
+        # Get shard info from Redis
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+        uuid_val = uuid.uuid4()
+        group_id = 100
+
+        # Create single group
+        test_context.kafka_producer.handle_single_group_create(
+            str(group_id), uuid_val.int, default_shard_id)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if not ret_msg:
+            print('Failed to receive create group result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive create group result when expected')
+
+        assert (ret_msg.value['type'] ==
+                'infrastructure_group_create_response')
+        assert (int(ret_msg.value['infra_group_id']) == group_id)
+        assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+        if ret_msg.value['success'] is False:
+            print(ret_msg.value['error_message'])
+            raise Exception('Infra group create failed!')
+
+        # Get group info from Redis
+        group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        if not group_info_redis:
+            print(f'Failed to get group {group_id} info from Redis!')
+            raise Exception('Infra group create failed!')
+
+        # Get group info from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        if not group_info_psql:
+            print(f'Failed to get group {group_id} info from PSQL!')
+            raise Exception('Infra group create failed!')
+
+        # Validate group
+        assert group_info_psql[0] == int(
+            group_info_redis.get('gid')) == group_id
+
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 1
+
+        # Infra add
+        infra_mac = test_context.default_dev_sim_mac()
+        test_context.kafka_producer.handle_single_device_assign(
+            str(group_id), infra_mac, uuid_val.int)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if ret_msg is None:
+            print('Failed to receive infra assign result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive infra assign result when expected')
+
+        if ret_msg.value['success'] is False:
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
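Taken together, these tests encode a routing contract: `state` and `healthcheck` events surface on the State topic as `infrastructure_state_event_message`, while `crashlog`, `rebootLog`, `cfgpending` and `ping` surface on Infra_Realtime as `infrastructure_realtime_event_message`. That contract, tabulated (derived purely from the assertions in this file):

```python
# uCentral method -> (Kafka topic, northbound message type),
# as asserted by the tests in this file.
EVENT_ROUTING = {
    'state':       ('State', 'infrastructure_state_event_message'),
    'healthcheck': ('State', 'infrastructure_state_event_message'),
    'crashlog':    ('Infra_Realtime', 'infrastructure_realtime_event_message'),
    'rebootLog':   ('Infra_Realtime', 'infrastructure_realtime_event_message'),
    'cfgpending':  ('Infra_Realtime', 'infrastructure_realtime_event_message'),
    'ping':        ('Infra_Realtime', 'infrastructure_realtime_event_message'),
}
```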
+ + test_context.device_sim.send_cfgpending_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_realtime_event_message') + if ret_msg is None: + print('Failed to receive infra realtime event message!') + raise Exception( + 'Failed to receive infra realtime event message!') + + assert ret_msg.topic == 'Infra_Realtime' + assert ret_msg.value['type'] == 'infrastructure_realtime_event_message' + assert ret_msg.value['event_type'] == 'cfgpending' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infra number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + 
group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        assert group_info_redis == {}
+
+        # Validate group removed from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        assert group_info_psql == None
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+    @pytest.mark.usefixtures("test_context",
+                             "cgw_probe",
+                             "kafka_probe",
+                             "redis_probe",
+                             "psql_probe")
+    def test_infra_ping_event(self, test_context):
+        assert test_context.kafka_producer.is_connected(), \
+            f'Cannot create default group: kafka producer is not connected to Kafka'
+
+        assert test_context.kafka_consumer.is_connected(), \
+            f'Cannot create default group: kafka consumer is not connected to Kafka'
+
+        default_shard_id = test_context.default_shard_id()
+
+        # Get shard info from Redis
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 0
+
+        uuid_val = uuid.uuid4()
+        group_id = 100
+
+        # Create single group
+        test_context.kafka_producer.handle_single_group_create(
+            str(group_id), uuid_val.int, default_shard_id)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if not ret_msg:
+            print('Failed to receive create group result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive create group result when expected')
+
+        assert (ret_msg.value['type'] ==
+                'infrastructure_group_create_response')
+        assert (int(ret_msg.value['infra_group_id']) == group_id)
+        assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+        if ret_msg.value['success'] is False:
+            print(ret_msg.value['error_message'])
+            raise Exception('Infra group create failed!')
+
+        # Get group info from Redis
+        group_info_redis = test_context.redis_client.get_infrastructure_group(
+            group_id)
+        if not group_info_redis:
+            print(f'Failed to get group {group_id} info from Redis!')
+            raise Exception('Infra group create failed!')
+
+        # Get group info from PSQL
+        group_info_psql = test_context.psql_client.get_infrastructure_group(
+            group_id)
+        if not group_info_psql:
+            print(f'Failed to get group {group_id} info from PSQL!')
+            raise Exception('Infra group create failed!')
+
+        # Validate group
+        assert group_info_psql[0] == int(
+            group_info_redis.get('gid')) == group_id
+
+        shard_info = test_context.redis_client.get_shard(default_shard_id)
+        if not shard_info:
+            print(f'Failed to get shard {default_shard_id} info from Redis!')
+            raise Exception(
+                f'Failed to get shard {default_shard_id} info from Redis!')
+
+        # Validate number of assigned groups
+        assert int(shard_info.get('assigned_groups_num')
+                   ) == cgw_metrics_get_groups_assigned_num() == 1
+
+        # Infra add
+        infra_mac = test_context.default_dev_sim_mac()
+        test_context.kafka_producer.handle_single_device_assign(
+            str(group_id), infra_mac, uuid_val.int)
+        ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+        if ret_msg is None:
+            print('Failed to receive infra assign result, was expecting ' +
+                  str(uuid_val.int) + ' uuid reply')
+            raise Exception(
+                'Failed to receive infra assign result when expected')
+
+        if ret_msg.value['success'] is False:
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
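+
+ # The simulator pushes a 'ping' event over the established websocket;
+ # CGW is expected to republish it to the 'Infra_Realtime' Kafka topic,
+ # which the consumer below validates.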
+
+ test_context.device_sim.send_ping_event(
+ test_context.device_sim._socket)
+
+ # Get message from Kafka
+ ret_msg = test_context.kafka_consumer.get_msg_by_type(
+ 'infrastructure_realtime_event_message')
+ if ret_msg is None:
+ print('Failed to receive infra realtime event message!')
+ raise Exception(
+ 'Failed to receive infra realtime event message!')
+
+ assert ret_msg.topic == 'Infra_Realtime'
+ assert ret_msg.value['type'] == 'infrastructure_realtime_event_message'
+ assert ret_msg.value['event_type'] == 'ping'
+
+ # Simulate infra leave
+ test_context.device_sim.disconnect()
+
+ # Infra del
+ uuid_val = uuid.uuid4()
+ test_context.kafka_producer.handle_single_device_deassign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra deassign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra deassign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra deassign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_del_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Validate infra removed from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ assert infra_info_redis is None
+
+ # Validate infra removed from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ assert infra_info_psql is None
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra deassign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra deassign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
+
+ # Delete single group
+ uuid_val = uuid.uuid4()
+
+ test_context.kafka_producer.handle_single_group_delete(
+ str(group_id), uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive delete group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive delete group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_delete_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group delete failed!')
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate group removed from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ assert group_info_redis == {}
+
+ # Validate group removed from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ assert not group_info_psql
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ @pytest.mark.usefixtures("test_context",
+ "cgw_probe",
+ "kafka_probe",
+ "redis_probe",
+ "psql_probe")
+ def test_infra_recovery_event(self, test_context):
+ assert test_context.kafka_producer.is_connected(), \
+ 'Cannot create default group: kafka producer is not connected to Kafka'
+
+ assert test_context.kafka_consumer.is_connected(), \
+ 'Cannot create default group: kafka consumer is not connected to Kafka'
+
+ default_shard_id = test_context.default_shard_id()
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ uuid_val = uuid.uuid4()
+ group_id = 100
+
+ # Create single group
+ test_context.kafka_producer.handle_single_group_create(
+ str(group_id), uuid_val.int, default_shard_id)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive create group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive create group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_create_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group create failed!')
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra group create failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra group create failed!')
+
+ # Validate group
+ assert group_info_psql[0] == int(
+ group_info_redis.get('gid')) == group_id
+
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 1
+
+ # Infra add
+ infra_mac = test_context.default_dev_sim_mac()
+ test_context.kafka_producer.handle_single_device_assign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra assign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra assign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ print(ret_msg.value["failed_infras"])
+ raise Exception('Infra assign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_add_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
+
+ # Get infra info from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ if not infra_info_redis:
+ print(f'Failed to get infra {infra_mac} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get infra info from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ if not infra_info_psql:
+ print(f'Failed to get infra {infra_mac} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate infra assigned group id
+ assert infra_info_psql[1] == int(
+ infra_info_redis.get('group_id')) == group_id
+
+ # Connect infra to CGW
+ test_context.device_sim.connect()
+ test_context.device_sim.send_hello(test_context.device_sim._socket)
+
+ # Sleep at least 1 sec before checking metrics
+ time.sleep(1)
+ assert cgw_metrics_get_connections_num() == 1
+ assert test_context.device_sim._socket is not None, \
+ "Expected websocket connection NOT to be NULL after connect."
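+
+ # Same flow as the ping test: a 'recovery' event sent over the websocket
+ # should surface on the 'Infra_Realtime' topic with a matching event_type.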
+
+ test_context.device_sim.send_recovery_event(
+ test_context.device_sim._socket)
+
+ # Get message from Kafka
+ ret_msg = test_context.kafka_consumer.get_msg_by_type(
+ 'infrastructure_realtime_event_message')
+ if ret_msg is None:
+ print('Failed to receive infra realtime event message!')
+ raise Exception(
+ 'Failed to receive infra realtime event message!')
+
+ assert ret_msg.topic == 'Infra_Realtime'
+ assert ret_msg.value['type'] == 'infrastructure_realtime_event_message'
+ assert ret_msg.value['event_type'] == 'recovery'
+
+ # Simulate infra leave
+ test_context.device_sim.disconnect()
+
+ # Infra del
+ uuid_val = uuid.uuid4()
+ test_context.kafka_producer.handle_single_device_deassign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra deassign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra deassign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra deassign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_del_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Validate infra removed from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ assert infra_info_redis is None
+
+ # Validate infra removed from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ assert infra_info_psql is None
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra deassign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra deassign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
+
+ # Delete single group
+ uuid_val = uuid.uuid4()
+
+ test_context.kafka_producer.handle_single_group_delete(
+ str(group_id), uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive delete group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive delete group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_delete_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group delete failed!')
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate group removed from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ assert group_info_redis == {}
+
+ # Validate group removed from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ assert not group_info_psql
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ @pytest.mark.usefixtures("test_context",
+ "cgw_probe",
+ "kafka_probe",
+ "redis_probe",
+ "psql_probe")
+ def test_infra_log_event(self, test_context):
+ assert test_context.kafka_producer.is_connected(), \
+ 'Cannot create default group: kafka producer is not connected to Kafka'
+
+ assert test_context.kafka_consumer.is_connected(), \
+ 'Cannot create default group: kafka consumer is not connected to Kafka'
+
+ default_shard_id = test_context.default_shard_id()
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ uuid_val = uuid.uuid4()
+ group_id = 100
+
+ # Create single group
+ test_context.kafka_producer.handle_single_group_create(
+ str(group_id), uuid_val.int, default_shard_id)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive create group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive create group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_create_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group create failed!')
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra group create failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra group create failed!')
+
+ # Validate group
+ assert group_info_psql[0] == int(
+ group_info_redis.get('gid')) == group_id
+
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 1
+
+ # Infra add
+ infra_mac = test_context.default_dev_sim_mac()
+ test_context.kafka_producer.handle_single_device_assign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra assign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra assign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ print(ret_msg.value["failed_infras"])
+ raise Exception('Infra assign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_add_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
+
+ # Get infra info from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ if not infra_info_redis:
+ print(f'Failed to get infra {infra_mac} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get infra info from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ if not infra_info_psql:
+ print(f'Failed to get infra {infra_mac} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate infra assigned group id
+ assert infra_info_psql[1] == int(
+ infra_info_redis.get('group_id')) == group_id
+
+ # Connect infra to CGW
+ test_context.device_sim.connect()
+ test_context.device_sim.send_hello(test_context.device_sim._socket)
+
+ # Sleep at least 1 sec before checking metrics
+ time.sleep(1)
+ assert cgw_metrics_get_connections_num() == 1
+ assert test_context.device_sim._socket is not None, \
+ "Expected websocket connection NOT to be NULL after connect."
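+
+ # A 'log' event should likewise be forwarded by CGW to Kafka as an
+ # 'infrastructure_realtime_event_message' on the 'Infra_Realtime' topic.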
+
+ test_context.device_sim.send_log_event(
+ test_context.device_sim._socket)
+
+ # Get message from Kafka
+ ret_msg = test_context.kafka_consumer.get_msg_by_type(
+ 'infrastructure_realtime_event_message')
+ if ret_msg is None:
+ print('Failed to receive infra realtime event message!')
+ raise Exception(
+ 'Failed to receive infra realtime event message!')
+
+ assert ret_msg.topic == 'Infra_Realtime'
+ assert ret_msg.value['type'] == 'infrastructure_realtime_event_message'
+ assert ret_msg.value['event_type'] == 'log'
+
+ # Simulate infra leave
+ test_context.device_sim.disconnect()
+
+ # Infra del
+ uuid_val = uuid.uuid4()
+ test_context.kafka_producer.handle_single_device_deassign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra deassign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra deassign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra deassign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_del_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Validate infra removed from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ assert infra_info_redis is None
+
+ # Validate infra removed from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ assert infra_info_psql is None
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra deassign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra deassign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
+
+ # Delete single group
+ uuid_val = uuid.uuid4()
+
+ test_context.kafka_producer.handle_single_group_delete(
+ str(group_id), uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive delete group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive delete group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_delete_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group delete failed!')
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate group removed from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ assert group_info_redis == {}
+
+ # Validate group removed from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ assert not group_info_psql
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ @pytest.mark.usefixtures("test_context",
+ "cgw_probe",
+ "kafka_probe",
+ "redis_probe",
+ "psql_probe")
+ def test_infra_event(self, test_context):
+ assert test_context.kafka_producer.is_connected(), \
+ 'Cannot create default group: kafka producer is not connected to Kafka'
+
+ assert test_context.kafka_consumer.is_connected(), \
+ 'Cannot create default group: kafka consumer is not connected to Kafka'
+
+ default_shard_id = test_context.default_shard_id()
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ uuid_val = uuid.uuid4()
+ group_id = 100
+
+ # Create single group
+ test_context.kafka_producer.handle_single_group_create(
+ str(group_id), uuid_val.int, default_shard_id)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive create group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive create group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_create_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group create failed!')
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra group create failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra group create failed!')
+
+ # Validate group
+ assert group_info_psql[0] == int(
+ group_info_redis.get('gid')) == group_id
+
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 1
+
+ # Infra add
+ infra_mac = test_context.default_dev_sim_mac()
+ test_context.kafka_producer.handle_single_device_assign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra assign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra assign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ print(ret_msg.value["failed_infras"])
+ raise Exception('Infra assign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_add_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
+
+ # Get infra info from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ if not infra_info_redis:
+ print(f'Failed to get infra {infra_mac} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get infra info from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ if not infra_info_psql:
+ print(f'Failed to get infra {infra_mac} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate infra assigned group id
+ assert infra_info_psql[1] == int(
+ infra_info_redis.get('group_id')) == group_id
+
+ # Connect infra to CGW
+ test_context.device_sim.connect()
+ test_context.device_sim.send_hello(test_context.device_sim._socket)
+
+ # Sleep at least 1 sec before checking metrics
+ time.sleep(1)
+ assert cgw_metrics_get_connections_num() == 1
+ assert test_context.device_sim._socket is not None, \
+ "Expected websocket connection NOT to be NULL after connect."
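+
+ # The client.join message is reused below, presumably because the
+ # simulator has no dedicated helper for a generic realtime event; CGW
+ # reports it with the catch-all 'realtime_event' event_type.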
+
+ # Reuse existing event: client.join
+ test_context.device_sim.send_join(
+ test_context.device_sim._socket)
+
+ # Get message from Kafka
+ ret_msg = test_context.kafka_consumer.get_msg_by_type(
+ 'infrastructure_realtime_event_message')
+ if ret_msg is None:
+ print('Failed to receive infra realtime event message!')
+ raise Exception(
+ 'Failed to receive infra realtime event message!')
+
+ assert ret_msg.topic == 'Infra_Realtime'
+ assert ret_msg.value['type'] == 'infrastructure_realtime_event_message'
+ assert ret_msg.value['event_type'] == 'realtime_event'
+
+ # Simulate infra leave
+ test_context.device_sim.disconnect()
+
+ # Infra del
+ uuid_val = uuid.uuid4()
+ test_context.kafka_producer.handle_single_device_deassign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra deassign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra deassign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra deassign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_del_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Validate infra removed from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ assert infra_info_redis is None
+
+ # Validate infra removed from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ assert infra_info_psql is None
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra deassign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra deassign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
+
+ # Delete single group
+ uuid_val = uuid.uuid4()
+
+ test_context.kafka_producer.handle_single_group_delete(
+ str(group_id), uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive delete group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive delete group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_delete_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group delete failed!')
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate group removed from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ assert group_info_redis == {}
+
+ # Validate group removed from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ assert not group_info_psql
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ @pytest.mark.usefixtures("test_context",
+ "cgw_probe",
+ "kafka_probe",
+ "redis_probe",
+ "psql_probe")
+ def test_infra_alarm_event(self, test_context):
+ assert test_context.kafka_producer.is_connected(), \
+ 'Cannot create default group: kafka producer is not connected to Kafka'
+
+ assert test_context.kafka_consumer.is_connected(), \
+ 'Cannot create default group: kafka consumer is not connected to Kafka'
+
+ default_shard_id = test_context.default_shard_id()
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ uuid_val = uuid.uuid4()
+ group_id = 100
+
+ # Create single group
+ test_context.kafka_producer.handle_single_group_create(
+ str(group_id), uuid_val.int, default_shard_id)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive create group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive create group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_create_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group create failed!')
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra group create failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra group create failed!')
+
+ # Validate group
+ assert group_info_psql[0] == int(
+ group_info_redis.get('gid')) == group_id
+
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 1
+
+ # Infra add
+ infra_mac = test_context.default_dev_sim_mac()
+ test_context.kafka_producer.handle_single_device_assign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra assign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra assign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ print(ret_msg.value["failed_infras"])
+ raise Exception('Infra assign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_add_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
+
+ # Get infra info from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ if not infra_info_redis:
+ print(f'Failed to get infra {infra_mac} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get infra info from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ if not infra_info_psql:
+ print(f'Failed to get infra {infra_mac} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate infra assigned group id
+ assert infra_info_psql[1] == int(
+ infra_info_redis.get('group_id')) == group_id
+
+ # Connect infra to CGW
+ test_context.device_sim.connect()
+ test_context.device_sim.send_hello(test_context.device_sim._socket)
+
+ # Sleep at least 1 sec before checking metrics
+ time.sleep(1)
+ assert cgw_metrics_get_connections_num() == 1
+ assert test_context.device_sim._socket is not None, \
+ "Expected websocket connection NOT to be NULL after connect."
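+
+ # An 'alarm' event follows the same websocket-to-Kafka path and should be
+ # consumable from 'Infra_Realtime' with event_type 'alarm'.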
+
+ test_context.device_sim.send_alarm_event(
+ test_context.device_sim._socket)
+
+ # Get message from Kafka
+ ret_msg = test_context.kafka_consumer.get_msg_by_type(
+ 'infrastructure_realtime_event_message')
+ if ret_msg is None:
+ print('Failed to receive infra realtime event message!')
+ raise Exception(
+ 'Failed to receive infra realtime event message!')
+
+ assert ret_msg.topic == 'Infra_Realtime'
+ assert ret_msg.value['type'] == 'infrastructure_realtime_event_message'
+ assert ret_msg.value['event_type'] == 'alarm'
+
+ # Simulate infra leave
+ test_context.device_sim.disconnect()
+
+ # Infra del
+ uuid_val = uuid.uuid4()
+ test_context.kafka_producer.handle_single_device_deassign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra deassign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra deassign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra deassign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_del_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Validate infra removed from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ assert infra_info_redis is None
+
+ # Validate infra removed from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ assert infra_info_psql is None
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra deassign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra deassign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
+
+ # Delete single group
+ uuid_val = uuid.uuid4()
+
+ test_context.kafka_producer.handle_single_group_delete(
+ str(group_id), uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive delete group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive delete group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_delete_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group delete failed!')
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate group removed from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ assert group_info_redis == {}
+
+ # Validate group removed from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ assert not group_info_psql
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ @pytest.mark.usefixtures("test_context",
+ "cgw_probe",
+ "kafka_probe",
+ "redis_probe",
+ "psql_probe")
+ def test_infra_wifiscan_event(self, test_context):
+ assert test_context.kafka_producer.is_connected(), \
+ 'Cannot create default group: kafka producer is not connected to Kafka'
+
+ assert test_context.kafka_consumer.is_connected(), \
+ 'Cannot create default group: kafka consumer is not connected to Kafka'
+
+ default_shard_id = test_context.default_shard_id()
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ uuid_val = uuid.uuid4()
+ group_id = 100
+
+ # Create single group
+ test_context.kafka_producer.handle_single_group_create(
+ str(group_id), uuid_val.int, default_shard_id)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive create group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive create group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_create_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group create failed!')
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra group create failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra group create failed!')
+
+ # Validate group
+ assert group_info_psql[0] == int(
+ group_info_redis.get('gid')) == group_id
+
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 1
+
+ # Infra add
+ infra_mac = test_context.default_dev_sim_mac()
+ test_context.kafka_producer.handle_single_device_assign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra assign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra assign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ print(ret_msg.value["failed_infras"])
+ raise Exception('Infra assign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_add_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
+
+ # Get infra info from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ if not infra_info_redis:
+ print(f'Failed to get infra {infra_mac} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get infra info from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ if not infra_info_psql:
+ print(f'Failed to get infra {infra_mac} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate infra assigned group id
+ assert infra_info_psql[1] == int(
+ infra_info_redis.get('group_id')) == group_id
+
+ # Connect infra to CGW
+ test_context.device_sim.connect()
+ test_context.device_sim.send_hello(test_context.device_sim._socket)
+
+ # Sleep at least 1 sec before checking metrics
+ time.sleep(1)
+ assert cgw_metrics_get_connections_num() == 1
+ assert test_context.device_sim._socket is not None, \
+ "Expected websocket connection NOT to be NULL after connect."
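+
+ # 'wifiscan' results are treated as realtime events as well and should
+ # reach the 'Infra_Realtime' topic.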
+
+ test_context.device_sim.send_wifiscan_event(
+ test_context.device_sim._socket)
+
+ # Get message from Kafka
+ ret_msg = test_context.kafka_consumer.get_msg_by_type(
+ 'infrastructure_realtime_event_message')
+ if ret_msg is None:
+ print('Failed to receive infra realtime event message!')
+ raise Exception(
+ 'Failed to receive infra realtime event message!')
+
+ assert ret_msg.topic == 'Infra_Realtime'
+ assert ret_msg.value['type'] == 'infrastructure_realtime_event_message'
+ assert ret_msg.value['event_type'] == 'wifiscan'
+
+ # Simulate infra leave
+ test_context.device_sim.disconnect()
+
+ # Infra del
+ uuid_val = uuid.uuid4()
+ test_context.kafka_producer.handle_single_device_deassign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra deassign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra deassign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra deassign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_del_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Validate infra removed from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ assert infra_info_redis is None
+
+ # Validate infra removed from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ assert infra_info_psql is None
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra deassign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra deassign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
+
+ # Delete single group
+ uuid_val = uuid.uuid4()
+
+ test_context.kafka_producer.handle_single_group_delete(
+ str(group_id), uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive delete group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive delete group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_delete_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group delete failed!')
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate group removed from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ assert group_info_redis == {}
+
+ # Validate group removed from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ assert not group_info_psql
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ @pytest.mark.usefixtures("test_context",
+ "cgw_probe",
+ "kafka_probe",
+ "redis_probe",
+ "psql_probe")
+ def test_infra_deviceupdate_event(self, test_context):
+ assert test_context.kafka_producer.is_connected(), \
+ 'Cannot create default group: kafka producer is not connected to Kafka'
+
+ assert test_context.kafka_consumer.is_connected(), \
+ 'Cannot create default group: kafka consumer is not connected to Kafka'
+
+ default_shard_id = test_context.default_shard_id()
+
+ # Get shard info from Redis
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 0
+
+ uuid_val = uuid.uuid4()
+ group_id = 100
+
+ # Create single group
+ test_context.kafka_producer.handle_single_group_create(
+ str(group_id), uuid_val.int, default_shard_id)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if not ret_msg:
+ print('Failed to receive create group result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive create group result when expected')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_create_response')
+ assert (int(ret_msg.value['infra_group_id']) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ raise Exception('Infra group create failed!')
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra group create failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra group create failed!')
+
+ # Validate group
+ assert group_info_psql[0] == int(
+ group_info_redis.get('gid')) == group_id
+
+ shard_info = test_context.redis_client.get_shard(default_shard_id)
+ if not shard_info:
+ print(f'Failed to get shard {default_shard_id} info from Redis!')
+ raise Exception(
+ f'Failed to get shard {default_shard_id} info from Redis!')
+
+ # Validate number of assigned groups
+ assert int(shard_info.get('assigned_groups_num')
+ ) == cgw_metrics_get_groups_assigned_num() == 1
+
+ # Infra add
+ infra_mac = test_context.default_dev_sim_mac()
+ test_context.kafka_producer.handle_single_device_assign(
+ str(group_id), infra_mac, uuid_val.int)
+ ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
+ if ret_msg is None:
+ print('Failed to receive infra assign result, was expecting ' +
+ str(uuid_val.int) + ' uuid reply')
+ raise Exception(
+ 'Failed to receive infra assign result when expected')
+
+ if ret_msg.value['success'] is False:
+ print(ret_msg.value['error_message'])
+ print(ret_msg.value["failed_infras"])
+ raise Exception('Infra assign failed!')
+
+ assert (ret_msg.value['type'] ==
+ 'infrastructure_group_infras_add_response')
+ assert (int(ret_msg.value["infra_group_id"]) == group_id)
+ assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
+ # We don't expect to have even a single 'failed_infra',
+ # because the overall command succeeded
+ assert (len(list(ret_msg.value["failed_infras"])) == 0)
+
+ # Get group info from Redis
+ group_info_redis = test_context.redis_client.get_infrastructure_group(
+ group_id)
+ if not group_info_redis:
+ print(f'Failed to get group {group_id} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get group info from PSQL
+ group_info_psql = test_context.psql_client.get_infrastructure_group(
+ group_id)
+ if not group_info_psql:
+ print(f'Failed to get group {group_id} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate number of assigned infras
+ assert int(group_info_redis.get('infras_assigned')
+ ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
+
+ # Get infra info from Redis Infra Cache
+ infra_info_redis = test_context.redis_client.get_infra(
+ default_shard_id, infra_mac)
+ if not infra_info_redis:
+ print(f'Failed to get infra {infra_mac} info from Redis!')
+ raise Exception('Infra assign failed!')
+
+ # Get infra info from PSQL
+ infra_info_psql = test_context.psql_client.get_infra(infra_mac)
+ if not infra_info_psql:
+ print(f'Failed to get infra {infra_mac} info from PSQL!')
+ raise Exception('Infra assign failed!')
+
+ # Validate infra assigned group id
+ assert infra_info_psql[1] == int(
+ infra_info_redis.get('group_id')) == group_id
+
+ # Connect infra to CGW
+ test_context.device_sim.connect()
+ test_context.device_sim.send_hello(test_context.device_sim._socket)
+
+ # Sleep at least 1 sec before checking metrics
+ time.sleep(1)
+ assert cgw_metrics_get_connections_num() == 1
+ assert test_context.device_sim._socket is not None, \
+ "Expected websocket connection NOT to be NULL after connect."
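+
+ # A 'deviceupdate' event should be republished to 'Infra_Realtime' just
+ # like the other realtime events above.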
+ + test_context.device_sim.send_deviceupdate_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_realtime_event_message') + if ret_msg is None: + print('Failed to receive infra realtime event message!') + raise Exception( + 'Failed to receive infra realtime event message!') + + assert ret_msg.topic == 'Infra_Realtime' + assert ret_msg.value['type'] == 'infrastructure_realtime_event_message' + assert ret_msg.value['event_type'] == 'deviceupdate' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infras + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + 
group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + assert group_info_redis == {} + + # Validate group removed from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + assert group_info_psql == None + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + @pytest.mark.usefixtures("test_context", + "cgw_probe", + "kafka_probe", + "redis_probe", + "psql_probe") + def test_infra_venue_broadcast_event(self, test_context): + assert test_context.kafka_producer.is_connected(), \ + f'Cannot create default group: kafka producer is not connected to Kafka' + + assert test_context.kafka_consumer.is_connected(), \ + f'Cannot create default group: kafka consumer is not connected to Kafka' + + default_shard_id = test_context.default_shard_id() + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + uuid_val = uuid.uuid4() + group_id = 100 + + # Create single group + test_context.kafka_producer.handle_single_group_create( + str(group_id), uuid_val.int, default_shard_id) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive create group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive create group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_create_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group create failed!') + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra group create failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra group create failed!') + + # Validate group + assert group_info_psql[0] == int( + group_info_redis.get('gid')) == group_id + + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 1 + + # Infra add + infra_mac = test_context.default_dev_sim_mac() + test_context.kafka_producer.handle_single_device_assign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra assign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra assign result when expected') + + if ret_msg.value['success'] is False: + 
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
+ + test_context.device_sim.send_venue_broadcast_event( + test_context.device_sim._socket) + + # Get message from Kafka + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_realtime_event_message') + if ret_msg is None: + print('Failed to receive infra realtime event message!') + raise Exception( + 'Failed to receive infra realtime event message!') + + assert ret_msg.topic == 'Infra_Realtime' + assert ret_msg.value['type'] == 'infrastructure_realtime_event_message' + assert ret_msg.value['event_type'] == 'venue_broadcast' + + # Simulate infra leave + test_context.device_sim.disconnect() + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infras + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis 
+ group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + assert group_info_redis == {} + + # Validate group removed from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + assert group_info_psql == None + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + @pytest.mark.usefixtures("test_context", + "cgw_probe", + "kafka_probe", + "redis_probe", + "psql_probe") + def test_infra_join_leave_events(self, test_context): + assert test_context.kafka_producer.is_connected(), \ + f'Kafka producer is not connected to Kafka' + + assert test_context.kafka_consumer.is_connected(), \ + f'Kafka consumer is not connected to Kafka' + + default_shard_id = test_context.default_shard_id() + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + uuid_val = uuid.uuid4() + group_id = 100 + + # Create single group + test_context.kafka_producer.handle_single_group_create( + str(group_id), uuid_val.int, default_shard_id) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive create group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive create group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_create_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + assert (ret_msg.topic == 'CnC_Res') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group create failed!') + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra group create failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra group create failed!') + + # Validate group + assert group_info_psql[0] == int( + group_info_redis.get('gid')) == group_id + + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 1 + + # Infra add + infra_mac = test_context.default_dev_sim_mac() + test_context.kafka_producer.handle_single_device_assign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra assign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra assign result when expected') + + if ret_msg.value['success'] is False: + 
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + assert (ret_msg.topic == 'CnC_Res') + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Get message from Kafka + # Expected to get 2 events + # 1. Infra Join + # 2. Capabilities change event + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infra_join') + if ret_msg is None: + print('Failed to receive infra join message!') + raise Exception( + 'Failed to receive infra join message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infra_join' + + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_group_infra_capabilities_changed') + if ret_msg is None: + print('Failed to receive infra capabilities change event message!') + raise Exception( + 'Failed to receive infra capabilities change event message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infrastructure_group_infra_capabilities_changed' + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
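Every event check above repeats the same three steps: fetch by type, bail out on None, then assert topic and type. A small assertion helper could collapse that pattern; a sketch, assuming the kafka_consumer.get_msg_by_type() call already used in these tests (the expect_event name and signature are illustrative only):

    def expect_event(kafka_consumer, msg_type, topic):
        # Fetch the next message of the requested type and validate its envelope
        ret_msg = kafka_consumer.get_msg_by_type(msg_type)
        if ret_msg is None:
            raise Exception(f'Failed to receive {msg_type} message!')
        assert ret_msg.topic == topic
        assert ret_msg.value['type'] == msg_type
        return ret_msg

    # Usage equivalent to the checks above:
    # expect_event(test_context.kafka_consumer, 'infra_join', 'Connection')
    # expect_event(test_context.kafka_consumer,
    #              'infrastructure_group_infra_capabilities_changed', 'Connection')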
+ + # Simulate infra leave + test_context.device_sim.disconnect() + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infra_leave') + if ret_msg is None: + print('Failed to receive infra leave message!') + raise Exception( + 'Failed to receive infra leave message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infra_leave' + + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert ret_msg.topic == 'CnC_Res' + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra removed from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis == None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infras + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert ret_msg.topic == 'CnC_Res' + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + assert group_info_redis == {} + + # Validate group removed from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + assert group_info_psql == None + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + @pytest.mark.usefixtures("test_context", + "cgw_probe", + "kafka_probe", + "redis_probe", + "psql_probe") + def test_unassigned_infra_join_leave_events(self, test_context): + assert test_context.kafka_producer.is_connected(), \ + f'Kafka producer is not connected to Kafka' + + assert test_context.kafka_consumer.is_connected(), \ + f'Kafka consumer is not connected to Kafka' + + default_shard_id = test_context.default_shard_id() + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Get message from Kafka + # Expected to get single event - unassigned infra join + # Capabilities change event - MUST NOT BE SENT - infra was unknown! + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'unassigned_infra_join') + if ret_msg is None: + print('Failed to receive unassigned infra join message!') + raise Exception( + 'Failed to receive unassigned infra join message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'unassigned_infra_join' + + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_group_infra_capabilities_changed') + if ret_msg is not None: + print('Received unexpected infra capabilities change event message!') + raise Exception( + 'Received unexpected infra capabilities change event message!') + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
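The fixed time.sleep(1) above gives CGW time to publish updated metrics before they are asserted, which can be flaky on a loaded CI host. An illustrative, less timing-sensitive alternative is to poll the metric until it reaches the expected value or a deadline passes; the wait_for_connections name is hypothetical, while cgw_metrics_get_connections_num() is the helper these tests already import:

    import time

    def wait_for_connections(expected, timeout=5.0, interval=0.2):
        # Poll the CGW connections metric until it matches or the deadline expires
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if cgw_metrics_get_connections_num() == expected:
                return True
            time.sleep(interval)
        return False

    # assert wait_for_connections(1), 'CGW never reported the expected connection'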
+ + # Simulate infra leave + test_context.device_sim.disconnect() + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'unassigned_infra_leave') + if ret_msg is None: + print('Failed to receive unassigned infra leave message!') + raise Exception( + 'Failed to receive unassigned infra leave message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'unassigned_infra_leave' + + @pytest.mark.usefixtures("test_context", + "cgw_probe", + "kafka_probe", + "redis_probe", + "psql_probe") + def test_infra_becomes_unassigned_events(self, test_context): + assert test_context.kafka_producer.is_connected(), \ + f'Kafka producer is not connected to Kafka' + + assert test_context.kafka_consumer.is_connected(), \ + f'Kafka consumer is not connected to Kafka' + + default_shard_id = test_context.default_shard_id() + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + uuid_val = uuid.uuid4() + group_id = 100 + + # Create single group + test_context.kafka_producer.handle_single_group_create( + str(group_id), uuid_val.int, default_shard_id) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive create group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive create group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_create_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + assert (ret_msg.topic == 'CnC_Res') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group create failed!') + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra group create failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra group create failed!') + + # Validate group + assert group_info_psql[0] == int( + group_info_redis.get('gid')) == group_id + + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 1 + + # Infra add + infra_mac = test_context.default_dev_sim_mac() + test_context.kafka_producer.handle_single_device_assign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra assign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra assign result when expected') + + if ret_msg.value['success'] is False: + 
print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + assert (ret_msg.topic == 'CnC_Res') + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Get message from Kafka + # Expected to get 2 events + # 1. Infra Join + # 2. Capabilities change event + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infra_join') + if ret_msg is None: + print('Failed to receive infra join message!') + raise Exception( + 'Failed to receive infra join message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infra_join' + + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_group_infra_capabilities_changed') + if ret_msg is None: + print('Failed to receive infra capabilities change event message!') + raise Exception( + 'Failed to receive infra capabilities change event message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infrastructure_group_infra_capabilities_changed' + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
+ + # Infra del + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_device_deassign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to receive infra deassign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra deassign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra deassign failed!') + + assert ret_msg.topic == 'CnC_Res' + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_del_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Validate infra still exists in Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + assert infra_info_redis != None + + # Validate infra removed from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + assert infra_info_psql == None + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra deassign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra deassign failed!') + + # Validate number of assigned infras + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0 + + # Expected to receive Unassigned infra join message as connection still alive + # While infra was removed from group + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'unassigned_infra_join') + if ret_msg is None: + print('Failed to receive unassigned infra join message!') + raise Exception( + 'Failed to receive unassigned infra join message!') + + # Simulate infra leave + test_context.device_sim.disconnect() + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'unassigned_infra_leave') + if ret_msg is None: + print('Failed to receive unassigned infra leave message!') + raise Exception( + 'Failed to receive unassigned infra leave message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'unassigned_infra_leave' + + # Delete single group + uuid_val = uuid.uuid4() + + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert ret_msg.topic == 'CnC_Res' + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + assert group_info_redis == {} + + # Validate group removed from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + assert group_info_psql == None + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + @pytest.mark.usefixtures("test_context", + "cgw_probe", + "kafka_probe", + "redis_probe", + "psql_probe") + def test_infra_becomes_unassigned_group_removed_events(self, test_context): + assert test_context.kafka_producer.is_connected(), \ + f'Kafka producer is not connected to Kafka' + + assert test_context.kafka_consumer.is_connected(), \ + f'Kafka consumer is not connected to Kafka' + + default_shard_id = test_context.default_shard_id() + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + uuid_val = uuid.uuid4() + group_id = 100 + + # Create single group + test_context.kafka_producer.handle_single_group_create( + str(group_id), uuid_val.int, default_shard_id) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive create group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive create group result when expected') + + assert (ret_msg.value['type'] == + 'infrastructure_group_create_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + assert (ret_msg.topic == 'CnC_Res') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group create failed!') + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra group create failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra group create failed!') + + # Validate group + assert group_info_psql[0] == int( + group_info_redis.get('gid')) == group_id + + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 1 + + # Infra add + infra_mac = test_context.default_dev_sim_mac() + test_context.kafka_producer.handle_single_device_assign( + str(group_id), infra_mac, uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if ret_msg is None: + print('Failed to 
receive infra assign result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive infra assign result when expected') + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + print(ret_msg.value["failed_infras"]) + raise Exception('Infra assign failed!') + + assert (ret_msg.value['type'] == + 'infrastructure_group_infras_add_response') + assert (int(ret_msg.value["infra_group_id"]) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + assert (ret_msg.topic == 'CnC_Res') + # We don't expect to have even a single 'failed_infra', + # because the overall command succeeded + assert (len(list(ret_msg.value["failed_infras"])) == 0) + + # Get group info from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + if not group_info_redis: + print(f'Failed to get group {group_id} info from Redis!') + raise Exception('Infra assign failed!') + + # Get group info from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + if not group_info_psql: + print(f'Failed to get group {group_id} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infras assigned number + assert int(group_info_redis.get('infras_assigned') + ) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1 + + # Get infra info from Redis Infra Cache + infra_info_redis = test_context.redis_client.get_infra( + default_shard_id, infra_mac) + if not infra_info_redis: + print(f'Failed to get infra {infra_mac} info from Redis!') + raise Exception('Infra assign failed!') + + # Get infra info from PSQL + infra_info_psql = test_context.psql_client.get_infra(infra_mac) + if not infra_info_psql: + print(f'Failed to get infra {infra_mac} info from PSQL!') + raise Exception('Infra assign failed!') + + # Validate infra assigned group id + assert infra_info_psql[1] == int( + infra_info_redis.get('group_id')) == group_id + + # Connect infra to CGW + test_context.device_sim.connect() + test_context.device_sim.send_hello(test_context.device_sim._socket) + + # Get message from Kafka + # Expected to get 2 events + # 1. Infra Join + # 2. Capabilities change event + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infra_join') + if ret_msg is None: + print('Failed to receive infra join message!') + raise Exception( + 'Failed to receive infra join message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infra_join' + + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'infrastructure_group_infra_capabilities_changed') + if ret_msg is None: + print('Failed to receive infra capabilities change event message!') + raise Exception( + 'Failed to receive infra capabilities change event message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'infrastructure_group_infra_capabilities_changed' + + # Simulate at least 1 sec sleep before checking metrics + time.sleep(1) + assert cgw_metrics_get_connections_num() == 1 + assert test_context.device_sim._socket is not None, \ + f"Expected websocket connection NOT to be NULL after reconnect." 
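Each test in this file creates and deletes group 100 inline, so a failing assertion mid-test can leak the group into Redis/PSQL and break the next test's "zero assigned groups" precondition. One possible shape for fixture-based cleanup, assuming the same producer/consumer handles used above; the infra_group fixture name and its wiring are illustrative, not part of this change:

    import uuid
    import pytest

    @pytest.fixture
    def infra_group(test_context):
        # Setup: create the group and wait for the correlated reply
        group_id = 100
        uuid_val = uuid.uuid4()
        test_context.kafka_producer.handle_single_group_create(
            str(group_id), uuid_val.int, test_context.default_shard_id())
        assert test_context.kafka_consumer.get_result_msg(uuid_val.int) is not None
        yield group_id
        # Teardown: always attempt group deletion, even if the test body failed
        uuid_val = uuid.uuid4()
        test_context.kafka_producer.handle_single_group_delete(
            str(group_id), uuid_val.int)
        test_context.kafka_consumer.get_result_msg(uuid_val.int)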
+ + # Delete single group + uuid_val = uuid.uuid4() + test_context.kafka_producer.handle_single_group_delete( + str(group_id), uuid_val.int) + ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int) + if not ret_msg: + print('Failed to receive delete group result, was expecting ' + + str(uuid_val.int) + ' uuid reply') + raise Exception( + 'Failed to receive delete group result when expected') + + assert ret_msg.topic == 'CnC_Res' + assert (ret_msg.value['type'] == + 'infrastructure_group_delete_response') + assert (int(ret_msg.value['infra_group_id']) == group_id) + assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int)) + + if ret_msg.value['success'] is False: + print(ret_msg.value['error_message']) + raise Exception('Infra group delete failed!') + + # Get shard info from Redis + shard_info = test_context.redis_client.get_shard(default_shard_id) + if not shard_info: + print(f'Failed to get shard {default_shard_id} info from Redis!') + raise Exception( + f'Failed to get shard {default_shard_id} info from Redis!') + + # Validate group removed from Redis + group_info_redis = test_context.redis_client.get_infrastructure_group( + group_id) + assert group_info_redis == {} + + # Validate group removed from PSQL + group_info_psql = test_context.psql_client.get_infrastructure_group( + group_id) + assert group_info_psql == None + + # Validate number of assigned groups + assert int(shard_info.get('assigned_groups_num') + ) == cgw_metrics_get_groups_assigned_num() == 0 + + # Expected to receive Unassigned infra join message as connection still alive + # While group was removed + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'unassigned_infra_join') + if ret_msg is None: + print('Failed to receive unassigned infra join message!') + raise Exception( + 'Failed to receive unassigned infra join message!') + + # Simulate infra leave + test_context.device_sim.disconnect() + ret_msg = test_context.kafka_consumer.get_msg_by_type( + 'unassigned_infra_leave') + if ret_msg is None: + print('Failed to receive unassigned infra leave message!') + raise Exception( + 'Failed to receive unassigned infra leave message!') + + assert ret_msg.topic == 'Connection' + assert ret_msg.value['type'] == 'unassigned_infra_leave' diff --git a/utils/client_simulator/sim_data/message_templates.json b/utils/client_simulator/sim_data/message_templates.json index f2fb3c1..f45cfa7 100644 --- a/utils/client_simulator/sim_data/message_templates.json +++ b/utils/client_simulator/sim_data/message_templates.json @@ -44,7 +44,7 @@ "data": { "event": [ 1716801245, - { + { "type": "client.join", "payload": { "client": "38:ba:f8:11:25:38", @@ -97,6 +97,1753 @@ } } }, - "state": 
{"jsonrpc":"2.0","method":"state","params":{"serial":"MAC","state":{"serial":"MAC","interfaces":[{"counters":{"collisions":6,"multicast":649,"rx_bytes":11731812,"rx_dropped":11,"rx_errors":15,"rx_packets":9938,"tx_bytes":13585045,"tx_dropped":6,"tx_errors":19,"tx_packets":12135},"name":"up0v0","ssids":[{"associations":[{"ack_signal":-54,"ack_signal_avg":-52,"bssid":"53:49:4d:01:10:b1","connected":2684,"inactive":2984,"ipaddr_v4":"158.136.112.167","rssi":-58,"rx_bytes":5589601,"rx_duration":9742,"rx_packets":7621,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"10:fa:00:73:81:d5","tx_bytes":1551269,"tx_duration":12500,"tx_failed":9400,"tx_packets":2019,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":18},{"ack_signal":-52,"ack_signal_avg":-48,"bssid":"53:49:4d:01:10:b1","connected":2882,"inactive":2257,"ipaddr_v4":"221.0.192.150","rssi":-46,"rx_bytes":10064957,"rx_duration":10262,"rx_packets":13110,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"7c:9c:8e:2d:cb:f3","tx_bytes":1383674,"tx_duration":8582,"tx_failed":18716,"tx_packets":2016,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":9},{"ack_signal":-50,"ack_signal_avg":-54,"bssid":"53:49:4d:01:10:b1","connected":2451,"inactive":2059,"ipaddr_v4":"227.239.60.27","rssi":-52,"rx_bytes":9627292,"rx_duration":12802,"rx_packets":12499,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"a8:bb:f9:93:a8:57","tx_bytes":1366023,"tx_duration":10335,"tx_failed":12842,"tx_packets":1812,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":12},{"ack_signal":-47,"ack_signal_avg":-46,"bssid":"53:49:4d:01:10:b1","connected":2401,"inactive":3189,"ipaddr_v4":"246.140.147.135","rssi":-84,"rx_bytes":5965402,"rx_duration":10511,"rx_packets":8720,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"54:b3:f9:6b:cc:85","tx_bytes":1616171,"tx_duration":9908,"tx_failed":10717,"tx_packets":1895,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":11},{"ack_signal":-48,"ack_signal_avg":-46,"bssid":"53:49:4d:01:10:b1","connected":2375,"inactive":3252,"ipaddr_v4":"129.0.5.214","rssi":-67,"rx_bytes":7032879,"rx_duration":12317,"rx_packets":9960,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"7d:62:0b:94:55:d8","tx_bytes":1147001,"tx_duration":11423,"tx_failed":17132,"tx_packets":1686,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":14},{"ack_signal":-41,"ack_signal_avg":-42,"bssid":"53:49:4d:01:10:b1","connected":2996,"inactive":2533,"ipaddr_v4":"7.53.125.147","rssi":-44,"rx_bytes":5672084,"rx_duration":9137,"rx_packets":8103,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"c6:6f:5f:6d:78:dc","tx_bytes":1593251,"tx_duration":10493,"tx_failed":11153,"tx_packets":2110,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":20},{"ack_signal":-54,"ack_signal_avg":-55,"bssid":"53:49:4d:01:10:b1","connected":2229,"inactive":2928,"ipaddr_v4":"156.195.124.175","rssi":-46,"rx_bytes":9754994,"rx_duration":11168,"rx_packets":12367,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"10:0c:68:18:4a:fd","tx_bytes":1435959,"tx_duration":10337,"tx_failed":13551,"tx_packets":1952,"tx_rate":{"bitrate":200000,"chwidth":
40,"ht":true,"mcs":9,"sgi":true},"tx_retries":22},{"ack_signal":-49,"ack_signal_avg":-52,"bssid":"53:49:4d:01:10:b1","connected":3497,"inactive":2485,"ipaddr_v4":"139.174.21.11","rssi":-86,"rx_bytes":9952624,"rx_duration":11988,"rx_packets":12446,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"c3:53:93:52:5c:ce","tx_bytes":1547519,"tx_duration":10429,"tx_failed":10624,"tx_packets":1969,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":16},{"ack_signal":-48,"ack_signal_avg":-43,"bssid":"53:49:4d:01:10:b1","connected":3043,"inactive":3131,"ipaddr_v4":"196.64.95.194","rssi":-83,"rx_bytes":7407670,"rx_duration":9666,"rx_packets":10438,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"95:d7:94:32:c7:85","tx_bytes":1492670,"tx_duration":9357,"tx_failed":11643,"tx_packets":2198,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":14},{"ack_signal":-46,"ack_signal_avg":-49,"bssid":"53:49:4d:01:10:b1","connected":3463,"inactive":2774,"ipaddr_v4":"94.15.157.229","rssi":-83,"rx_bytes":8531713,"rx_duration":11760,"rx_packets":10810,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"1b:a4:d8:7b:53:9c","tx_bytes":1686882,"tx_duration":10391,"tx_failed":19925,"tx_packets":2176,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":19},{"ack_signal":-51,"ack_signal_avg":-52,"bssid":"53:49:4d:01:10:b1","connected":3081,"inactive":2882,"ipaddr_v4":"157.229.209.55","rssi":-77,"rx_bytes":8943663,"rx_duration":11956,"rx_packets":12362,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"0e:f4:a5:ef:f2:53","tx_bytes":1512199,"tx_duration":12646,"tx_failed":16607,"tx_packets":2157,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":18},{"ack_signal":-50,"ack_signal_avg":-55,"bssid":"53:49:4d:01:10:b1","connected":3205,"inactive":3267,"ipaddr_v4":"160.214.198.209","rssi":-61,"rx_bytes":7906873,"rx_duration":10711,"rx_packets":10330,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"34:79:f7:5c:a2:c7","tx_bytes":1440133,"tx_duration":10401,"tx_failed":15131,"tx_packets":1985,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":16},{"ack_signal":-36,"ack_signal_avg":-40,"bssid":"53:49:4d:01:10:b1","connected":3330,"inactive":3466,"ipaddr_v4":"2.105.55.62","rssi":-47,"rx_bytes":9222300,"rx_duration":9271,"rx_packets":12688,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"2a:4d:df:9f:b8:d8","tx_bytes":1506577,"tx_duration":10637,"tx_failed":14098,"tx_packets":1930,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":18},{"ack_signal":-50,"ack_signal_avg":-55,"bssid":"53:49:4d:01:10:b1","connected":3597,"inactive":2563,"ipaddr_v4":"13.79.198.161","rssi":-47,"rx_bytes":8655128,"rx_duration":12529,"rx_packets":11457,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"81:99:0a:05:14:80","tx_bytes":1435123,"tx_duration":11477,"tx_failed":18398,"tx_packets":2127,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":15}],"band":"5G","bssid":"53:49:4d:01:10:b1","counters":{"collisions":6,"multicast":649,"rx_bytes":11731812,"rx_dropped":11,"rx_errors":15,"rx_packets":9938,"tx_bytes":13585045,"tx_dropped":6,"tx_errors":19,"tx_packets":1
2135},"frequency":[5180,5260],"iface":"eth0","location":"/interfaces/0/ssids/0","mode":"ap","name":"up0v0","phy":"platform/soc/c000000.wifi+1","radio":{"$ref":"#/radios/1"},"ssid":"OpenWifi-test5"}]},{"clients":[{"ipv4_addresses":["239.95.16.229"],"ipv6_addresses":["2a5b:80bd:0652:d339:acb9:c2f9:894d:a396"],"mac":"6e:af:b0:a3:39:28","ports":["eth1"]},{"ipv4_addresses":["73.122.81.23"],"ipv6_addresses":["31d0:664c:54c3:75ce:ee6b:b92d:5b76:3845"],"mac":"09:57:cb:72:c8:2a","ports":["eth1"]},{"ipv4_addresses":["226.38.233.160"],"ipv6_addresses":["af3c:def0:35b6:7dc3:f540:1e55:de6f:486e"],"mac":"74:07:8e:06:76:6f","ports":["eth1"]},{"ipv4_addresses":["190.133.147.76"],"ipv6_addresses":["703c:384f:378f:7f4b:d68d:37ca:c0b2:48d3"],"mac":"ed:d5:a8:82:94:de","ports":["eth1"]},{"ipv4_addresses":["21.35.45.33"],"ipv6_addresses":["ecf6:2803:507c:1315:cb11:d7cc:4e5f:af17"],"mac":"73:b2:a4:a5:66:e0","ports":["eth1"]},{"ipv4_addresses":["29.90.235.125"],"ipv6_addresses":["e6c0:ecb2:0403:dd2f:99f5:9bc8:3848:f250"],"mac":"20:ad:27:9f:dd:31","ports":["eth1"]},{"ipv4_addresses":["142.216.212.44"],"ipv6_addresses":["ef69:df4b:25b6:253c:d699:2101:bc55:b4ac"],"mac":"9a:fd:99:e2:48:6b","ports":["eth1"]},{"ipv4_addresses":["158.142.109.105"],"ipv6_addresses":["b694:1a6a:89f6:0323:704d:0b91:a6e6:f961"],"mac":"b9:a0:9a:d4:ac:07","ports":["eth1"]},{"ipv4_addresses":["220.112.212.237"],"ipv6_addresses":["41a2:2799:0ce1:5710:73cf:54c5:a106:5e83"],"mac":"f5:c5:4a:7e:b8:b4","ports":["eth1"]},{"ipv4_addresses":["122.8.242.157"],"ipv6_addresses":["02e1:a6e4:c03f:d18f:66d5:f0bd:7183:365e"],"mac":"e7:74:ed:aa:42:c5","ports":["eth1"]},{"ipv4_addresses":["20.14.89.131"],"ipv6_addresses":["90b0:ef86:8b75:f7aa:9d59:3a89:b263:cddd"],"mac":"d1:c8:5a:19:0b:4b","ports":["eth1"]},{"ipv4_addresses":["254.126.232.106"],"ipv6_addresses":["d293:456c:4f52:df54:3715:265d:6e6c:1c35"],"last_seen":0,"mac":"bc:7e:4c:00:c1:73","ports":["wlan0"]},{"ipv4_addresses":["104.195.23.213"],"ipv6_addresses":["cd59:f149:76ef:da29:2a34:42e2:8704:c266"],"last_seen":0,"mac":"7e:54:9f:a7:e5:85","ports":["wlan0"]},{"ipv4_addresses":["74.191.165.53"],"ipv6_addresses":["4c26:b1b0:1a91:01ab:b40f:4f2b:4239:07cb"],"last_seen":0,"mac":"2c:91:8d:d1:ee:5f","ports":["wlan0"]},{"ipv4_addresses":["179.131.71.19"],"ipv6_addresses":["43f0:66d1:1317:25f9:b31c:0a7b:cc47:6cdd"],"last_seen":0,"mac":"51:50:2e:73:aa:ef","ports":["wlan0"]},{"ipv4_addresses":["89.170.21.171"],"ipv6_addresses":["43c9:2686:7a69:9e18:d699:9479:7a7a:7ecd"],"last_seen":0,"mac":"cf:12:cb:4e:db:2b","ports":["wlan0"]},{"ipv4_addresses":["191.145.135.130"],"ipv6_addresses":["3df6:99c4:6149:66fc:8292:bd6b:d485:7d87"],"last_seen":0,"mac":"5a:06:ca:00:38:dc","ports":["wlan0"]},{"ipv4_addresses":["249.216.233.16"],"ipv6_addresses":["8e00:16a6:f5b1:3fb6:f42e:abdf:8350:3fca"],"last_seen":0,"mac":"c0:d9:d9:96:e5:98","ports":["wlan0"]},{"ipv4_addresses":["123.247.151.99"],"ipv6_addresses":["2972:30c4:7c5e:41ea:689b:4006:48fe:b104"],"last_seen":0,"mac":"98:42:45:8a:cd:fc","ports":["wlan0"]},{"ipv4_addresses":["200.236.88.54"],"ipv6_addresses":["bea5:7218:1cfc:b999:6a8a:3593:edca:6b8e"],"last_seen":0,"mac":"3a:61:47:67:55:1e","ports":["wlan0"]},{"ipv4_addresses":["29.79.207.182"],"ipv6_addresses":["a52a:2708:92c2:1f53:6b12:30e6:2d78:cc2e"],"last_seen":0,"mac":"14:1c:ba:1f:06:a6","ports":["wlan0"]},{"ipv4_addresses":["194.224.148.122"],"ipv6_addresses":["ed60:c6ee:08c9:ff0f:b7fd:e35b:0030:de38"],"last_seen":0,"mac":"21:be:37:50:7c:8d","ports":["wlan0"]},{"ipv4_addresses":["248.18.202.62"],"ipv6_addresses":["2f9c:03b0
:f56b:6c57:063e:128f:2190:ba5e"],"last_seen":0,"mac":"ab:dc:e1:75:c9:c1","ports":["wlan0"]},{"ipv4_addresses":["176.73.79.37"],"ipv6_addresses":["77f4:df1c:70db:9f29:d8af:f744:9c25:1c09"],"last_seen":0,"mac":"f7:f2:ce:ea:6c:d2","ports":["wlan0"]},{"ipv4_addresses":["21.43.226.17"],"ipv6_addresses":["bd81:7507:3a54:b4cf:c521:c097:c1fe:ca51"],"last_seen":0,"mac":"3f:42:15:82:8c:15","ports":["wlan0"]},{"ipv4_addresses":["116.143.69.120"],"ipv6_addresses":["9486:6f6f:f083:5ce9:41ce:36e2:781e:14f3"],"last_seen":0,"mac":"21:77:5c:18:8b:2e","ports":["wlan0"]},{"ipv4_addresses":["122.57.158.178"],"ipv6_addresses":["4a0e:d500:f4d4:8b89:8172:2304:ad5e:4cc1"],"last_seen":0,"mac":"b8:d8:a2:a9:b3:ec","ports":["wlan0"]},{"ipv4_addresses":["114.219.181.48"],"ipv6_addresses":["32a2:a31a:a2c8:fd03:62d8:b9dc:9ebb:4bd3"],"last_seen":0,"mac":"8a:d2:5b:83:4b:96","ports":["wlan0"]},{"ipv4_addresses":["211.16.112.65"],"ipv6_addresses":["db11:a1db:68cd:4e63:350a:1d95:ff8e:a392"],"last_seen":0,"mac":"a9:cc:69:81:22:0e","ports":["wlan0"]}],"counters":{"collisions":6,"multicast":513,"rx_bytes":14501248,"rx_dropped":8,"rx_errors":18,"rx_packets":12250,"tx_bytes":13167426,"tx_dropped":8,"tx_errors":17,"tx_packets":11450},"name":"down0v0","ssids":[{"associations":[{"ack_signal":-43,"ack_signal_avg":-42,"bssid":"53:49:4d:01:10:b2","connected":2506,"inactive":3349,"ipaddr_v4":"254.126.232.106","rssi":-58,"rx_bytes":7441827,"rx_duration":10391,"rx_packets":10301,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"bc:7e:4c:00:c1:73","tx_bytes":1429848,"tx_duration":12148,"tx_failed":17189,"tx_packets":2243,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":20},{"ack_signal":-54,"ack_signal_avg":-51,"bssid":"53:49:4d:01:10:b2","connected":2845,"inactive":2915,"ipaddr_v4":"104.195.23.213","rssi":-75,"rx_bytes":7766221,"rx_duration":9772,"rx_packets":8987,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"7e:54:9f:a7:e5:85","tx_bytes":1442311,"tx_duration":11076,"tx_failed":17714,"tx_packets":1869,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":18},{"ack_signal":-36,"ack_signal_avg":-40,"bssid":"53:49:4d:01:10:b2","connected":2959,"inactive":2335,"ipaddr_v4":"74.191.165.53","rssi":-51,"rx_bytes":8356941,"rx_duration":14067,"rx_packets":11553,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"2c:91:8d:d1:ee:5f","tx_bytes":1169808,"tx_duration":12023,"tx_failed":14581,"tx_packets":1642,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":16},{"ack_signal":-49,"ack_signal_avg":-46,"bssid":"53:49:4d:01:10:b2","connected":3364,"inactive":3283,"ipaddr_v4":"179.131.71.19","rssi":-76,"rx_bytes":7880344,"rx_duration":10294,"rx_packets":9896,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"51:50:2e:73:aa:ef","tx_bytes":1640048,"tx_duration":11983,"tx_failed":18469,"tx_packets":2072,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":13},{"ack_signal":-56,"ack_signal_avg":-55,"bssid":"53:49:4d:01:10:b2","connected":2830,"inactive":3233,"ipaddr_v4":"89.170.21.171","rssi":-88,"rx_bytes":10409522,"rx_duration":10710,"rx_packets":13210,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"cf:12:cb:4e:db:2b","tx_bytes":1430414,"tx_duration":10178,"tx_failed":13294,"tx_packets":2102,"tx_rate":{"bitrate":200000,"chwidth"
:40,"ht":true,"mcs":9,"sgi":true},"tx_retries":11},{"ack_signal":-54,"ack_signal_avg":-50,"bssid":"53:49:4d:01:10:b2","connected":2390,"inactive":2893,"ipaddr_v4":"191.145.135.130","rssi":-83,"rx_bytes":9129050,"rx_duration":10668,"rx_packets":11259,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"5a:06:ca:00:38:dc","tx_bytes":1402006,"tx_duration":8175,"tx_failed":15807,"tx_packets":1843,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":18},{"ack_signal":-49,"ack_signal_avg":-52,"bssid":"53:49:4d:01:10:b2","connected":2831,"inactive":2552,"ipaddr_v4":"249.216.233.16","rssi":-82,"rx_bytes":9350442,"rx_duration":11336,"rx_packets":11815,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"c0:d9:d9:96:e5:98","tx_bytes":1586105,"tx_duration":12075,"tx_failed":17631,"tx_packets":2107,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":13},{"ack_signal":-49,"ack_signal_avg":-44,"bssid":"53:49:4d:01:10:b2","connected":3411,"inactive":3368,"ipaddr_v4":"123.247.151.99","rssi":-88,"rx_bytes":10037327,"rx_duration":13889,"rx_packets":12016,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"98:42:45:8a:cd:fc","tx_bytes":1369675,"tx_duration":9078,"tx_failed":22409,"tx_packets":2129,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":17},{"ack_signal":-49,"ack_signal_avg":-53,"bssid":"53:49:4d:01:10:b2","connected":3120,"inactive":2495,"ipaddr_v4":"200.236.88.54","rssi":-48,"rx_bytes":6781758,"rx_duration":10932,"rx_packets":9979,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"3a:61:47:67:55:1e","tx_bytes":1605006,"tx_duration":10187,"tx_failed":14891,"tx_packets":2036,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":9},{"ack_signal":-49,"ack_signal_avg":-49,"bssid":"53:49:4d:01:10:b2","connected":2824,"inactive":2311,"ipaddr_v4":"29.79.207.182","rssi":-57,"rx_bytes":9920330,"rx_duration":11135,"rx_packets":13610,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"14:1c:ba:1f:06:a6","tx_bytes":1487516,"tx_duration":10616,"tx_failed":18205,"tx_packets":2082,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":14},{"ack_signal":-56,"ack_signal_avg":-51,"bssid":"53:49:4d:01:10:b2","connected":3620,"inactive":3135,"ipaddr_v4":"194.224.148.122","rssi":-79,"rx_bytes":8711145,"rx_duration":11817,"rx_packets":13496,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"21:be:37:50:7c:8d","tx_bytes":1492336,"tx_duration":9205,"tx_failed":13520,"tx_packets":1956,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":15}],"band":"2G","bssid":"53:49:4d:01:10:b2","counters":{"collisions":6,"multicast":513,"rx_bytes":14501248,"rx_dropped":8,"rx_errors":18,"rx_packets":12250,"tx_bytes":13167426,"tx_dropped":8,"tx_errors":17,"tx_packets":11450},"frequency":[2401,2423],"iface":"eth1","location":"/interfaces/0/ssids/0","mode":"ap","name":"down0v0","phy":"platform/soc/c000000.wifi","radio":{"$ref":"#/radios/0"},"ssid":"OpenWifi-test2"},{"associations":[{"ack_signal":-52,"ack_signal_avg":-48,"bssid":"53:49:4d:01:10:b3","connected":2939,"inactive":2710,"ipaddr_v4":"248.18.202.62","rssi":-60,"rx_bytes":8722031,"rx_duration":11747,"rx_packets":10995,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss
":9,"sgi":true,"vht":true},"station":"ab:dc:e1:75:c9:c1","tx_bytes":1342264,"tx_duration":11585,"tx_failed":21055,"tx_packets":1836,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":13},{"ack_signal":-41,"ack_signal_avg":-40,"bssid":"53:49:4d:01:10:b3","connected":3155,"inactive":3030,"ipaddr_v4":"176.73.79.37","rssi":-56,"rx_bytes":8577501,"rx_duration":12375,"rx_packets":12133,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"f7:f2:ce:ea:6c:d2","tx_bytes":1738314,"tx_duration":12382,"tx_failed":17392,"tx_packets":2255,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":14},{"ack_signal":-60,"ack_signal_avg":-57,"bssid":"53:49:4d:01:10:b3","connected":2848,"inactive":2229,"ipaddr_v4":"21.43.226.17","rssi":-37,"rx_bytes":9336935,"rx_duration":11209,"rx_packets":11991,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"3f:42:15:82:8c:15","tx_bytes":1519775,"tx_duration":8118,"tx_failed":10278,"tx_packets":2015,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":19},{"ack_signal":-53,"ack_signal_avg":-52,"bssid":"53:49:4d:01:10:b3","connected":3375,"inactive":2893,"ipaddr_v4":"116.143.69.120","rssi":-76,"rx_bytes":9662448,"rx_duration":10661,"rx_packets":12961,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"21:77:5c:18:8b:2e","tx_bytes":1602040,"tx_duration":10461,"tx_failed":15715,"tx_packets":2089,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":20},{"ack_signal":-45,"ack_signal_avg":-44,"bssid":"53:49:4d:01:10:b3","connected":3407,"inactive":3291,"ipaddr_v4":"122.57.158.178","rssi":-40,"rx_bytes":9327445,"rx_duration":11662,"rx_packets":12254,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"b8:d8:a2:a9:b3:ec","tx_bytes":1384912,"tx_duration":9887,"tx_failed":11064,"tx_packets":1981,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":19},{"ack_signal":-49,"ack_signal_avg":-45,"bssid":"53:49:4d:01:10:b3","connected":3035,"inactive":2970,"ipaddr_v4":"114.219.181.48","rssi":-82,"rx_bytes":7910744,"rx_duration":10614,"rx_packets":9743,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"8a:d2:5b:83:4b:96","tx_bytes":1549043,"tx_duration":10498,"tx_failed":14219,"tx_packets":1929,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":14},{"ack_signal":-44,"ack_signal_avg":-40,"bssid":"53:49:4d:01:10:b3","connected":3193,"inactive":2271,"ipaddr_v4":"211.16.112.65","rssi":-60,"rx_bytes":7965726,"rx_duration":10481,"rx_packets":10096,"rx_rate":{"bitrate":200000,"chwidth":40,"mcs":9,"nss":9,"sgi":true,"vht":true},"station":"a9:cc:69:81:22:0e","tx_bytes":1288470,"tx_duration":10518,"tx_failed":10258,"tx_packets":1854,"tx_rate":{"bitrate":200000,"chwidth":40,"ht":true,"mcs":9,"sgi":true},"tx_retries":17}],"band":"5G","bssid":"53:49:4d:01:10:b3","counters":{"collisions":6,"multicast":513,"rx_bytes":14501248,"rx_dropped":8,"rx_errors":18,"rx_packets":12250,"tx_bytes":13167426,"tx_dropped":8,"tx_errors":17,"tx_packets":11450},"frequency":[5180,5260],"iface":"eth1","location":"/interfaces/0/ssids/1","mode":"ap","name":"down0v0","phy":"platform/soc/c000000.wifi+1","radio":{"$ref":"#/radios/1"},"ssid":"OpenWifi-test2"}]}],"link-state":{"downstream":{"eth1":{"carrier":1,"duplex":"full","speed":1000}},"upstream":{"eth0"
:{"carrier":1,"duplex":"full","speed":1000}}},"radios":[{"active_ms":14274,"band":["2G"],"busy_ms":12274,"channel":1,"channel_width":20,"channels":[1],"frequency":[2401,2423],"noise":-97,"phy":"platform/soc/c000000.wifi","receive_ms":10092,"temperature":48,"transmit_ms":1526,"tx_power":23},{"active_ms":10208,"band":["5G"],"busy_ms":17360,"channel":44,"channel_width":80,"channels":[44,50],"frequency":[5180,5260],"noise":-98,"phy":"platform/soc/c000000.wifi+1","receive_ms":11722,"temperature":50,"transmit_ms":1446,"tx_power":23}],"unit":{"load":[1.1156589147286822,1.1373643410852714,0.4951937984496124],"localtime":1726828161,"memory":{"buffered":53182510,"cached":89456440,"free":815413706,"total":973131776},"temperature":[57,48],"uptime":360},"version":1},"uuid":1726827801}}, - "state_obf": {"jsonrpc":"2.0","method":"state", "params":{"compress_64": "eJzVWU1v48gRvedXGMgxHLq/P+oW7GFPQY45LAZGs9k9JkaWtCJlrzHQf89rSrTIkWasDXzYGIZMUmyyqt6rV1XtPu26sKJ//fOXqh/CkOhbtx7SLoeYevrtW9zsy2lPOFqtur7brHuS1dN+NXQx9ANJbqvdHw/N64D7jWNCW1EutLvNdpta8uUk7XYbPGM83ob4NQ24VzhXDdNKqzxT3pQL00pbTk4rxy+mlVYId6jW4SnRfsueWdX3XVuMDX2/iV0YRiNxGr8+9N2XNdz7pGV1Pn0Iz19wyVVNWUlakvKkWmJs/OVV3KzXKQ4wghulqm4d4tA9J5wJUXXb0La7h2dF3Kmac1k7V3MN5/A0+mT5OR7SCeWUHuOx342GkbVczuOgpTLlfDcGv+mG8UCw8lPFx5euHR5JseoplgCu+/LZf+lo2O1T9fw4jAeHEbzyeIOQK2ojJU/MUSvPMeZMKCnFGOM3ayRX5UIO3aq4y83xhsk6Lrku5+9Zd7LjZOVk32FcmoZdV16vDtUSE3aBCWL1PiaCS3/GRDAw8IyJ4LbmStXK1kKyCRN3xkRpa+WJo1MUjBVuwU2rPxITx0l7somipaBJpTnvlfV+AYn2TMwgUd4u2M+5+ChE/OFz1YQ1ov3rT8L+VxWAvEu/79M6vtJvd7pgfaclZ3efq66IF6XhkVWrTTwG9f6savfsfhSMe+C3aROF7VxLto+vtF2FIW92T/eQk/s4RpbVL13u/oHcDm23oW/09/vxqL/nh1F/6N/btP4P7vkEbwZ9+Ayyx1WX1kORom77rB4KRVPfF1294wLSoaAbjtfc8GL19tksbom5ceSEbyhzH6l1VlCbgJBsG0smGkFaa4+lTyHiIilDUZHCrZlkrLabXXn3HQKB58OcSyPwblUbVwumrlnQKsOJp4xg8hDIpxzISMTW2SwoK6mJmahPFkB2YCuHKTAxQhxvscDJ2ni839RCX7XBiYDU0Ynh+daANBA13SIeooVxRjaenLHpZIPlFGBDJqfJC5DqBhsAgmJ1wePInu8tkBrhFiZzUiECEe85cpkl8rzkim0t5bYVJwsSQICFkRpRjqG/71ugUT54LV1troZAN15T4MYQ996TDE6CFw0AaRpFJkRBQjb2ZIDCdYnoEEKmEIV0SwhABAcrPILgrpmQVJQQJvjkRPaoLUwSD9KQkqpBpHVLwUp1MkEEkohMQzFTbqj1N5ggAIM0CASrjb5mgmj56HumtvWWdHLgYQDdIDiGsuGaRIoTERRCYEgKiC05lEF+ExkRBoEwoHTwq0ioGBs8FOyTOnr41gBtA19jQF5G5gQ1Pk8pCbUHNIHDarKgZLjFBunLywGDKLZc5aPTIGCLPMiZS/CdBwoCdUWgclAEiKQDz1NGBNR9pGlJh9ZRuCkrWREGWxtRW3PNBGZB8BBKZrYNst2WKKemcEIAHykbNBZ5ooOxZJEYRczIYsktKcENyjjeLzir0bdcVYZWBzwScXDZSkijUpRkEQrPwEkLQqB0TmAgVZE7iAbDQQtgbuGkBhBC1uNf5q+CobSg2EoQ3UcNFYYmahsDyOAzjpAYolHtZIQrKckgjgK23SjS1hR9RlYgIFdZGSTEuTFFH1gCIQA+Wj+0gMFAphWkq9B1IoRJJRYQjhZ53EC/btEHVXsPfbS1uhoFlPJMTU5wLONtLmo83gqNhjmjjmvQTwbxlheJmrYkZpbIW2qam6LAagdG8vpqCKLAeySP4H8jSmIKgTcyVKSIZIkGlbTJvixdoWd56FNaE5sqBsulaLSMcMrZmzEvq7BmPyCG1KVscQle+KsZ4jy0Rwq4GkIL9c4KLwnQQ5aQCDw6hCVKecWgIq95lFCUkYaMucUgDvk0yBQrQdhr9ngL6seW45m2BWPRblEKyJMUIaEKdZRcy+IVewLyC+1GRhTRdCDXb7LHQ0FYacF1PXp5WVFig2e3UEctDRInweOYcybv0DzYnEDTEMwVg3B3Kf+SoHYcVLpE7PP7Xavwfta1cuOU8vOu1c261sVooIVX567VQ39Obey0Us+6VjfvWh1n+jS2tpuX9a2Dq78cXO0PunUxH1w1mw+uXrD54HrOptOEpPVsQjLOyOMAdJ6QmBHL5t2yD5yQLrPwLcROKyP8d0Ornw+taEbkYkISSnzQhGS+G1nV5TaC4jeggTI2R0M5Mx9ZF2pyAsTJGSAeQRBqAYjjTi0Acc59ICCXKjTbRjBomM1yZkUeLLYRTtsK56FVsg+CxE1Dq/jR0Cr+wuk/H1qF0rzCp5XzoZX/qaF1UpKfjq2XQyu7NrSKQvYfKxEE4OYtNDlXIvB7rkSI10yJ5oVrov5sQ0EzDPV+qUXaHNXqDRPHxAdS/7LgzagvPVNuKUaK8Tn1DVPfqZH5KDW62ED7E7VhgYhi8w20sncyQ2RRuk+QKDaDRKOY+OUGmpZH0N52aBizH7mpeVHyz+WBMc/tEhHt5og4EHAJiPgoLeLs3R00+X8iRtd30N4RI/4/iNHNe2jiUDq5Vbf++un0n4ny/H7YpfBE34pxiGjYAYgd8ardb1fpD8r71arqt6mwHG88HKr9draE3bLkcDTwqH8lRR6ekHusFPoCNYT7V8So2fev5QuvtAJtApJrhTo3HT4ceSTYdOG44YaFPywB603XJ/oEtXtHzVNMJ6u08+B+etomkHi/Q88MyH
dh3T91Q/kegnXczt+8wGUhRwGZXEJj5SeX9NwlVzJqcuns3cklt3AJX98Z9p1bCzJNbun3eTFzTJUe8GeOHTuQN8c+V/t1N9C31SYUf0p/a+Y/tsI1C+3GHwVNnn9VhgzQHPkIEnPLvZf4BLnT02b3Co3Y55x2ZUcaqaqkBMghPpb80pjMCwRwPiGpvEX2onMeNgP02VuJPshaNJFzTxCeEpwy2uy34ysFxy3PkIiSZ/xv/wUILeA7"}} -} + "state": { + "jsonrpc": "2.0", + "method": "state", + "params": { + "serial": "MAC", + "state": { + "serial": "MAC", + "interfaces": [ + { + "counters": { + "collisions": 6, + "multicast": 649, + "rx_bytes": 11731812, + "rx_dropped": 11, + "rx_errors": 15, + "rx_packets": 9938, + "tx_bytes": 13585045, + "tx_dropped": 6, + "tx_errors": 19, + "tx_packets": 12135 + }, + "name": "up0v0", + "ssids": [ + { + "associations": [ + { + "ack_signal": -54, + "ack_signal_avg": -52, + "bssid": "53:49:4d:01:10:b1", + "connected": 2684, + "inactive": 2984, + "ipaddr_v4": "158.136.112.167", + "rssi": -58, + "rx_bytes": 5589601, + "rx_duration": 9742, + "rx_packets": 7621, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "10:fa:00:73:81:d5", + "tx_bytes": 1551269, + "tx_duration": 12500, + "tx_failed": 9400, + "tx_packets": 2019, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 18 + }, + { + "ack_signal": -52, + "ack_signal_avg": -48, + "bssid": "53:49:4d:01:10:b1", + "connected": 2882, + "inactive": 2257, + "ipaddr_v4": "221.0.192.150", + "rssi": -46, + "rx_bytes": 10064957, + "rx_duration": 10262, + "rx_packets": 13110, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "7c:9c:8e:2d:cb:f3", + "tx_bytes": 1383674, + "tx_duration": 8582, + "tx_failed": 18716, + "tx_packets": 2016, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 9 + }, + { + "ack_signal": -50, + "ack_signal_avg": -54, + "bssid": "53:49:4d:01:10:b1", + "connected": 2451, + "inactive": 2059, + "ipaddr_v4": "227.239.60.27", + "rssi": -52, + "rx_bytes": 9627292, + "rx_duration": 12802, + "rx_packets": 12499, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "a8:bb:f9:93:a8:57", + "tx_bytes": 1366023, + "tx_duration": 10335, + "tx_failed": 12842, + "tx_packets": 1812, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 12 + }, + { + "ack_signal": -47, + "ack_signal_avg": -46, + "bssid": "53:49:4d:01:10:b1", + "connected": 2401, + "inactive": 3189, + "ipaddr_v4": "246.140.147.135", + "rssi": -84, + "rx_bytes": 5965402, + "rx_duration": 10511, + "rx_packets": 8720, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "54:b3:f9:6b:cc:85", + "tx_bytes": 1616171, + "tx_duration": 9908, + "tx_failed": 10717, + "tx_packets": 1895, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 11 + }, + { + "ack_signal": -48, + "ack_signal_avg": -46, + "bssid": "53:49:4d:01:10:b1", + "connected": 2375, + "inactive": 3252, + "ipaddr_v4": "129.0.5.214", + "rssi": -67, + "rx_bytes": 7032879, + "rx_duration": 12317, + "rx_packets": 9960, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "7d:62:0b:94:55:d8", + "tx_bytes": 1147001, + "tx_duration": 11423, + "tx_failed": 17132, + "tx_packets": 1686, + "tx_rate": 
{ + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 14 + }, + { + "ack_signal": -41, + "ack_signal_avg": -42, + "bssid": "53:49:4d:01:10:b1", + "connected": 2996, + "inactive": 2533, + "ipaddr_v4": "7.53.125.147", + "rssi": -44, + "rx_bytes": 5672084, + "rx_duration": 9137, + "rx_packets": 8103, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "c6:6f:5f:6d:78:dc", + "tx_bytes": 1593251, + "tx_duration": 10493, + "tx_failed": 11153, + "tx_packets": 2110, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 20 + }, + { + "ack_signal": -54, + "ack_signal_avg": -55, + "bssid": "53:49:4d:01:10:b1", + "connected": 2229, + "inactive": 2928, + "ipaddr_v4": "156.195.124.175", + "rssi": -46, + "rx_bytes": 9754994, + "rx_duration": 11168, + "rx_packets": 12367, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "10:0c:68:18:4a:fd", + "tx_bytes": 1435959, + "tx_duration": 10337, + "tx_failed": 13551, + "tx_packets": 1952, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 22 + }, + { + "ack_signal": -49, + "ack_signal_avg": -52, + "bssid": "53:49:4d:01:10:b1", + "connected": 3497, + "inactive": 2485, + "ipaddr_v4": "139.174.21.11", + "rssi": -86, + "rx_bytes": 9952624, + "rx_duration": 11988, + "rx_packets": 12446, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "c3:53:93:52:5c:ce", + "tx_bytes": 1547519, + "tx_duration": 10429, + "tx_failed": 10624, + "tx_packets": 1969, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 16 + }, + { + "ack_signal": -48, + "ack_signal_avg": -43, + "bssid": "53:49:4d:01:10:b1", + "connected": 3043, + "inactive": 3131, + "ipaddr_v4": "196.64.95.194", + "rssi": -83, + "rx_bytes": 7407670, + "rx_duration": 9666, + "rx_packets": 10438, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "95:d7:94:32:c7:85", + "tx_bytes": 1492670, + "tx_duration": 9357, + "tx_failed": 11643, + "tx_packets": 2198, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 14 + }, + { + "ack_signal": -46, + "ack_signal_avg": -49, + "bssid": "53:49:4d:01:10:b1", + "connected": 3463, + "inactive": 2774, + "ipaddr_v4": "94.15.157.229", + "rssi": -83, + "rx_bytes": 8531713, + "rx_duration": 11760, + "rx_packets": 10810, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "1b:a4:d8:7b:53:9c", + "tx_bytes": 1686882, + "tx_duration": 10391, + "tx_failed": 19925, + "tx_packets": 2176, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 19 + }, + { + "ack_signal": -51, + "ack_signal_avg": -52, + "bssid": "53:49:4d:01:10:b1", + "connected": 3081, + "inactive": 2882, + "ipaddr_v4": "157.229.209.55", + "rssi": -77, + "rx_bytes": 8943663, + "rx_duration": 11956, + "rx_packets": 12362, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "0e:f4:a5:ef:f2:53", + "tx_bytes": 1512199, + "tx_duration": 12646, + "tx_failed": 16607, + 
"tx_packets": 2157, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 18 + }, + { + "ack_signal": -50, + "ack_signal_avg": -55, + "bssid": "53:49:4d:01:10:b1", + "connected": 3205, + "inactive": 3267, + "ipaddr_v4": "160.214.198.209", + "rssi": -61, + "rx_bytes": 7906873, + "rx_duration": 10711, + "rx_packets": 10330, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "34:79:f7:5c:a2:c7", + "tx_bytes": 1440133, + "tx_duration": 10401, + "tx_failed": 15131, + "tx_packets": 1985, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 16 + }, + { + "ack_signal": -36, + "ack_signal_avg": -40, + "bssid": "53:49:4d:01:10:b1", + "connected": 3330, + "inactive": 3466, + "ipaddr_v4": "2.105.55.62", + "rssi": -47, + "rx_bytes": 9222300, + "rx_duration": 9271, + "rx_packets": 12688, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "2a:4d:df:9f:b8:d8", + "tx_bytes": 1506577, + "tx_duration": 10637, + "tx_failed": 14098, + "tx_packets": 1930, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 18 + }, + { + "ack_signal": -50, + "ack_signal_avg": -55, + "bssid": "53:49:4d:01:10:b1", + "connected": 3597, + "inactive": 2563, + "ipaddr_v4": "13.79.198.161", + "rssi": -47, + "rx_bytes": 8655128, + "rx_duration": 12529, + "rx_packets": 11457, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "81:99:0a:05:14:80", + "tx_bytes": 1435123, + "tx_duration": 11477, + "tx_failed": 18398, + "tx_packets": 2127, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 15 + } + ], + "band": "5G", + "bssid": "53:49:4d:01:10:b1", + "counters": { + "collisions": 6, + "multicast": 649, + "rx_bytes": 11731812, + "rx_dropped": 11, + "rx_errors": 15, + "rx_packets": 9938, + "tx_bytes": 13585045, + "tx_dropped": 6, + "tx_errors": 19, + "tx_packets": 12135 + }, + "frequency": [ + 5180, + 5260 + ], + "iface": "eth0", + "location": "/interfaces/0/ssids/0", + "mode": "ap", + "name": "up0v0", + "phy": "platform/soc/c000000.wifi+1", + "radio": { + "$ref": "#/radios/1" + }, + "ssid": "OpenWifi-test5" + } + ] + }, + { + "clients": [ + { + "ipv4_addresses": [ + "239.95.16.229" + ], + "ipv6_addresses": [ + "2a5b:80bd:0652:d339:acb9:c2f9:894d:a396" + ], + "mac": "6e:af:b0:a3:39:28", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "73.122.81.23" + ], + "ipv6_addresses": [ + "31d0:664c:54c3:75ce:ee6b:b92d:5b76:3845" + ], + "mac": "09:57:cb:72:c8:2a", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "226.38.233.160" + ], + "ipv6_addresses": [ + "af3c:def0:35b6:7dc3:f540:1e55:de6f:486e" + ], + "mac": "74:07:8e:06:76:6f", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "190.133.147.76" + ], + "ipv6_addresses": [ + "703c:384f:378f:7f4b:d68d:37ca:c0b2:48d3" + ], + "mac": "ed:d5:a8:82:94:de", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "21.35.45.33" + ], + "ipv6_addresses": [ + "ecf6:2803:507c:1315:cb11:d7cc:4e5f:af17" + ], + "mac": "73:b2:a4:a5:66:e0", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "29.90.235.125" + ], + "ipv6_addresses": [ + "e6c0:ecb2:0403:dd2f:99f5:9bc8:3848:f250" + ], + "mac": "20:ad:27:9f:dd:31", + "ports": [ + 
"eth1" + ] + }, + { + "ipv4_addresses": [ + "142.216.212.44" + ], + "ipv6_addresses": [ + "ef69:df4b:25b6:253c:d699:2101:bc55:b4ac" + ], + "mac": "9a:fd:99:e2:48:6b", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "158.142.109.105" + ], + "ipv6_addresses": [ + "b694:1a6a:89f6:0323:704d:0b91:a6e6:f961" + ], + "mac": "b9:a0:9a:d4:ac:07", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "220.112.212.237" + ], + "ipv6_addresses": [ + "41a2:2799:0ce1:5710:73cf:54c5:a106:5e83" + ], + "mac": "f5:c5:4a:7e:b8:b4", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "122.8.242.157" + ], + "ipv6_addresses": [ + "02e1:a6e4:c03f:d18f:66d5:f0bd:7183:365e" + ], + "mac": "e7:74:ed:aa:42:c5", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "20.14.89.131" + ], + "ipv6_addresses": [ + "90b0:ef86:8b75:f7aa:9d59:3a89:b263:cddd" + ], + "mac": "d1:c8:5a:19:0b:4b", + "ports": [ + "eth1" + ] + }, + { + "ipv4_addresses": [ + "254.126.232.106" + ], + "ipv6_addresses": [ + "d293:456c:4f52:df54:3715:265d:6e6c:1c35" + ], + "last_seen": 0, + "mac": "bc:7e:4c:00:c1:73", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "104.195.23.213" + ], + "ipv6_addresses": [ + "cd59:f149:76ef:da29:2a34:42e2:8704:c266" + ], + "last_seen": 0, + "mac": "7e:54:9f:a7:e5:85", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "74.191.165.53" + ], + "ipv6_addresses": [ + "4c26:b1b0:1a91:01ab:b40f:4f2b:4239:07cb" + ], + "last_seen": 0, + "mac": "2c:91:8d:d1:ee:5f", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "179.131.71.19" + ], + "ipv6_addresses": [ + "43f0:66d1:1317:25f9:b31c:0a7b:cc47:6cdd" + ], + "last_seen": 0, + "mac": "51:50:2e:73:aa:ef", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "89.170.21.171" + ], + "ipv6_addresses": [ + "43c9:2686:7a69:9e18:d699:9479:7a7a:7ecd" + ], + "last_seen": 0, + "mac": "cf:12:cb:4e:db:2b", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "191.145.135.130" + ], + "ipv6_addresses": [ + "3df6:99c4:6149:66fc:8292:bd6b:d485:7d87" + ], + "last_seen": 0, + "mac": "5a:06:ca:00:38:dc", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "249.216.233.16" + ], + "ipv6_addresses": [ + "8e00:16a6:f5b1:3fb6:f42e:abdf:8350:3fca" + ], + "last_seen": 0, + "mac": "c0:d9:d9:96:e5:98", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "123.247.151.99" + ], + "ipv6_addresses": [ + "2972:30c4:7c5e:41ea:689b:4006:48fe:b104" + ], + "last_seen": 0, + "mac": "98:42:45:8a:cd:fc", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "200.236.88.54" + ], + "ipv6_addresses": [ + "bea5:7218:1cfc:b999:6a8a:3593:edca:6b8e" + ], + "last_seen": 0, + "mac": "3a:61:47:67:55:1e", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "29.79.207.182" + ], + "ipv6_addresses": [ + "a52a:2708:92c2:1f53:6b12:30e6:2d78:cc2e" + ], + "last_seen": 0, + "mac": "14:1c:ba:1f:06:a6", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "194.224.148.122" + ], + "ipv6_addresses": [ + "ed60:c6ee:08c9:ff0f:b7fd:e35b:0030:de38" + ], + "last_seen": 0, + "mac": "21:be:37:50:7c:8d", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "248.18.202.62" + ], + "ipv6_addresses": [ + "2f9c:03b0:f56b:6c57:063e:128f:2190:ba5e" + ], + "last_seen": 0, + "mac": "ab:dc:e1:75:c9:c1", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "176.73.79.37" + ], + "ipv6_addresses": [ + "77f4:df1c:70db:9f29:d8af:f744:9c25:1c09" + ], + "last_seen": 0, + "mac": "f7:f2:ce:ea:6c:d2", + "ports": [ + "wlan0" + ] + }, + { + 
"ipv4_addresses": [ + "21.43.226.17" + ], + "ipv6_addresses": [ + "bd81:7507:3a54:b4cf:c521:c097:c1fe:ca51" + ], + "last_seen": 0, + "mac": "3f:42:15:82:8c:15", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "116.143.69.120" + ], + "ipv6_addresses": [ + "9486:6f6f:f083:5ce9:41ce:36e2:781e:14f3" + ], + "last_seen": 0, + "mac": "21:77:5c:18:8b:2e", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "122.57.158.178" + ], + "ipv6_addresses": [ + "4a0e:d500:f4d4:8b89:8172:2304:ad5e:4cc1" + ], + "last_seen": 0, + "mac": "b8:d8:a2:a9:b3:ec", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "114.219.181.48" + ], + "ipv6_addresses": [ + "32a2:a31a:a2c8:fd03:62d8:b9dc:9ebb:4bd3" + ], + "last_seen": 0, + "mac": "8a:d2:5b:83:4b:96", + "ports": [ + "wlan0" + ] + }, + { + "ipv4_addresses": [ + "211.16.112.65" + ], + "ipv6_addresses": [ + "db11:a1db:68cd:4e63:350a:1d95:ff8e:a392" + ], + "last_seen": 0, + "mac": "a9:cc:69:81:22:0e", + "ports": [ + "wlan0" + ] + } + ], + "counters": { + "collisions": 6, + "multicast": 513, + "rx_bytes": 14501248, + "rx_dropped": 8, + "rx_errors": 18, + "rx_packets": 12250, + "tx_bytes": 13167426, + "tx_dropped": 8, + "tx_errors": 17, + "tx_packets": 11450 + }, + "name": "down0v0", + "ssids": [ + { + "associations": [ + { + "ack_signal": -43, + "ack_signal_avg": -42, + "bssid": "53:49:4d:01:10:b2", + "connected": 2506, + "inactive": 3349, + "ipaddr_v4": "254.126.232.106", + "rssi": -58, + "rx_bytes": 7441827, + "rx_duration": 10391, + "rx_packets": 10301, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "bc:7e:4c:00:c1:73", + "tx_bytes": 1429848, + "tx_duration": 12148, + "tx_failed": 17189, + "tx_packets": 2243, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 20 + }, + { + "ack_signal": -54, + "ack_signal_avg": -51, + "bssid": "53:49:4d:01:10:b2", + "connected": 2845, + "inactive": 2915, + "ipaddr_v4": "104.195.23.213", + "rssi": -75, + "rx_bytes": 7766221, + "rx_duration": 9772, + "rx_packets": 8987, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "7e:54:9f:a7:e5:85", + "tx_bytes": 1442311, + "tx_duration": 11076, + "tx_failed": 17714, + "tx_packets": 1869, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 18 + }, + { + "ack_signal": -36, + "ack_signal_avg": -40, + "bssid": "53:49:4d:01:10:b2", + "connected": 2959, + "inactive": 2335, + "ipaddr_v4": "74.191.165.53", + "rssi": -51, + "rx_bytes": 8356941, + "rx_duration": 14067, + "rx_packets": 11553, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "2c:91:8d:d1:ee:5f", + "tx_bytes": 1169808, + "tx_duration": 12023, + "tx_failed": 14581, + "tx_packets": 1642, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 16 + }, + { + "ack_signal": -49, + "ack_signal_avg": -46, + "bssid": "53:49:4d:01:10:b2", + "connected": 3364, + "inactive": 3283, + "ipaddr_v4": "179.131.71.19", + "rssi": -76, + "rx_bytes": 7880344, + "rx_duration": 10294, + "rx_packets": 9896, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "51:50:2e:73:aa:ef", + "tx_bytes": 1640048, + "tx_duration": 11983, + "tx_failed": 18469, + "tx_packets": 
2072, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 13 + }, + { + "ack_signal": -56, + "ack_signal_avg": -55, + "bssid": "53:49:4d:01:10:b2", + "connected": 2830, + "inactive": 3233, + "ipaddr_v4": "89.170.21.171", + "rssi": -88, + "rx_bytes": 10409522, + "rx_duration": 10710, + "rx_packets": 13210, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "cf:12:cb:4e:db:2b", + "tx_bytes": 1430414, + "tx_duration": 10178, + "tx_failed": 13294, + "tx_packets": 2102, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 11 + }, + { + "ack_signal": -54, + "ack_signal_avg": -50, + "bssid": "53:49:4d:01:10:b2", + "connected": 2390, + "inactive": 2893, + "ipaddr_v4": "191.145.135.130", + "rssi": -83, + "rx_bytes": 9129050, + "rx_duration": 10668, + "rx_packets": 11259, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "5a:06:ca:00:38:dc", + "tx_bytes": 1402006, + "tx_duration": 8175, + "tx_failed": 15807, + "tx_packets": 1843, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 18 + }, + { + "ack_signal": -49, + "ack_signal_avg": -52, + "bssid": "53:49:4d:01:10:b2", + "connected": 2831, + "inactive": 2552, + "ipaddr_v4": "249.216.233.16", + "rssi": -82, + "rx_bytes": 9350442, + "rx_duration": 11336, + "rx_packets": 11815, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "c0:d9:d9:96:e5:98", + "tx_bytes": 1586105, + "tx_duration": 12075, + "tx_failed": 17631, + "tx_packets": 2107, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 13 + }, + { + "ack_signal": -49, + "ack_signal_avg": -44, + "bssid": "53:49:4d:01:10:b2", + "connected": 3411, + "inactive": 3368, + "ipaddr_v4": "123.247.151.99", + "rssi": -88, + "rx_bytes": 10037327, + "rx_duration": 13889, + "rx_packets": 12016, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "98:42:45:8a:cd:fc", + "tx_bytes": 1369675, + "tx_duration": 9078, + "tx_failed": 22409, + "tx_packets": 2129, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 17 + }, + { + "ack_signal": -49, + "ack_signal_avg": -53, + "bssid": "53:49:4d:01:10:b2", + "connected": 3120, + "inactive": 2495, + "ipaddr_v4": "200.236.88.54", + "rssi": -48, + "rx_bytes": 6781758, + "rx_duration": 10932, + "rx_packets": 9979, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "3a:61:47:67:55:1e", + "tx_bytes": 1605006, + "tx_duration": 10187, + "tx_failed": 14891, + "tx_packets": 2036, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 9 + }, + { + "ack_signal": -49, + "ack_signal_avg": -49, + "bssid": "53:49:4d:01:10:b2", + "connected": 2824, + "inactive": 2311, + "ipaddr_v4": "29.79.207.182", + "rssi": -57, + "rx_bytes": 9920330, + "rx_duration": 11135, + "rx_packets": 13610, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "14:1c:ba:1f:06:a6", + "tx_bytes": 1487516, + "tx_duration": 10616, + 
"tx_failed": 18205, + "tx_packets": 2082, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 14 + }, + { + "ack_signal": -56, + "ack_signal_avg": -51, + "bssid": "53:49:4d:01:10:b2", + "connected": 3620, + "inactive": 3135, + "ipaddr_v4": "194.224.148.122", + "rssi": -79, + "rx_bytes": 8711145, + "rx_duration": 11817, + "rx_packets": 13496, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "21:be:37:50:7c:8d", + "tx_bytes": 1492336, + "tx_duration": 9205, + "tx_failed": 13520, + "tx_packets": 1956, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 15 + } + ], + "band": "2G", + "bssid": "53:49:4d:01:10:b2", + "counters": { + "collisions": 6, + "multicast": 513, + "rx_bytes": 14501248, + "rx_dropped": 8, + "rx_errors": 18, + "rx_packets": 12250, + "tx_bytes": 13167426, + "tx_dropped": 8, + "tx_errors": 17, + "tx_packets": 11450 + }, + "frequency": [ + 2401, + 2423 + ], + "iface": "eth1", + "location": "/interfaces/0/ssids/0", + "mode": "ap", + "name": "down0v0", + "phy": "platform/soc/c000000.wifi", + "radio": { + "$ref": "#/radios/0" + }, + "ssid": "OpenWifi-test2" + }, + { + "associations": [ + { + "ack_signal": -52, + "ack_signal_avg": -48, + "bssid": "53:49:4d:01:10:b3", + "connected": 2939, + "inactive": 2710, + "ipaddr_v4": "248.18.202.62", + "rssi": -60, + "rx_bytes": 8722031, + "rx_duration": 11747, + "rx_packets": 10995, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "ab:dc:e1:75:c9:c1", + "tx_bytes": 1342264, + "tx_duration": 11585, + "tx_failed": 21055, + "tx_packets": 1836, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 13 + }, + { + "ack_signal": -41, + "ack_signal_avg": -40, + "bssid": "53:49:4d:01:10:b3", + "connected": 3155, + "inactive": 3030, + "ipaddr_v4": "176.73.79.37", + "rssi": -56, + "rx_bytes": 8577501, + "rx_duration": 12375, + "rx_packets": 12133, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "f7:f2:ce:ea:6c:d2", + "tx_bytes": 1738314, + "tx_duration": 12382, + "tx_failed": 17392, + "tx_packets": 2255, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 14 + }, + { + "ack_signal": -60, + "ack_signal_avg": -57, + "bssid": "53:49:4d:01:10:b3", + "connected": 2848, + "inactive": 2229, + "ipaddr_v4": "21.43.226.17", + "rssi": -37, + "rx_bytes": 9336935, + "rx_duration": 11209, + "rx_packets": 11991, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "3f:42:15:82:8c:15", + "tx_bytes": 1519775, + "tx_duration": 8118, + "tx_failed": 10278, + "tx_packets": 2015, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 19 + }, + { + "ack_signal": -53, + "ack_signal_avg": -52, + "bssid": "53:49:4d:01:10:b3", + "connected": 3375, + "inactive": 2893, + "ipaddr_v4": "116.143.69.120", + "rssi": -76, + "rx_bytes": 9662448, + "rx_duration": 10661, + "rx_packets": 12961, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "21:77:5c:18:8b:2e", + "tx_bytes": 1602040, + "tx_duration": 10461, + "tx_failed": 
15715, + "tx_packets": 2089, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 20 + }, + { + "ack_signal": -45, + "ack_signal_avg": -44, + "bssid": "53:49:4d:01:10:b3", + "connected": 3407, + "inactive": 3291, + "ipaddr_v4": "122.57.158.178", + "rssi": -40, + "rx_bytes": 9327445, + "rx_duration": 11662, + "rx_packets": 12254, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "b8:d8:a2:a9:b3:ec", + "tx_bytes": 1384912, + "tx_duration": 9887, + "tx_failed": 11064, + "tx_packets": 1981, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 19 + }, + { + "ack_signal": -49, + "ack_signal_avg": -45, + "bssid": "53:49:4d:01:10:b3", + "connected": 3035, + "inactive": 2970, + "ipaddr_v4": "114.219.181.48", + "rssi": -82, + "rx_bytes": 7910744, + "rx_duration": 10614, + "rx_packets": 9743, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "8a:d2:5b:83:4b:96", + "tx_bytes": 1549043, + "tx_duration": 10498, + "tx_failed": 14219, + "tx_packets": 1929, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 14 + }, + { + "ack_signal": -44, + "ack_signal_avg": -40, + "bssid": "53:49:4d:01:10:b3", + "connected": 3193, + "inactive": 2271, + "ipaddr_v4": "211.16.112.65", + "rssi": -60, + "rx_bytes": 7965726, + "rx_duration": 10481, + "rx_packets": 10096, + "rx_rate": { + "bitrate": 200000, + "chwidth": 40, + "mcs": 9, + "nss": 9, + "sgi": true, + "vht": true + }, + "station": "a9:cc:69:81:22:0e", + "tx_bytes": 1288470, + "tx_duration": 10518, + "tx_failed": 10258, + "tx_packets": 1854, + "tx_rate": { + "bitrate": 200000, + "chwidth": 40, + "ht": true, + "mcs": 9, + "sgi": true + }, + "tx_retries": 17 + } + ], + "band": "5G", + "bssid": "53:49:4d:01:10:b3", + "counters": { + "collisions": 6, + "multicast": 513, + "rx_bytes": 14501248, + "rx_dropped": 8, + "rx_errors": 18, + "rx_packets": 12250, + "tx_bytes": 13167426, + "tx_dropped": 8, + "tx_errors": 17, + "tx_packets": 11450 + }, + "frequency": [ + 5180, + 5260 + ], + "iface": "eth1", + "location": "/interfaces/0/ssids/1", + "mode": "ap", + "name": "down0v0", + "phy": "platform/soc/c000000.wifi+1", + "radio": { + "$ref": "#/radios/1" + }, + "ssid": "OpenWifi-test2" + } + ] + } + ], + "link-state": { + "downstream": { + "eth1": { + "carrier": 1, + "duplex": "full", + "speed": 1000 + } + }, + "upstream": { + "eth0": { + "carrier": 1, + "duplex": "full", + "speed": 1000 + } + } + }, + "radios": [ + { + "active_ms": 14274, + "band": [ + "2G" + ], + "busy_ms": 12274, + "channel": 1, + "channel_width": 20, + "channels": [ + 1 + ], + "frequency": [ + 2401, + 2423 + ], + "noise": -97, + "phy": "platform/soc/c000000.wifi", + "receive_ms": 10092, + "temperature": 48, + "transmit_ms": 1526, + "tx_power": 23 + }, + { + "active_ms": 10208, + "band": [ + "5G" + ], + "busy_ms": 17360, + "channel": 44, + "channel_width": 80, + "channels": [ + 44, + 50 + ], + "frequency": [ + 5180, + 5260 + ], + "noise": -98, + "phy": "platform/soc/c000000.wifi+1", + "receive_ms": 11722, + "temperature": 50, + "transmit_ms": 1446, + "tx_power": 23 + } + ], + "unit": { + "load": [ + 1.1156589147286822, + 1.1373643410852714, + 0.4951937984496124 + ], + "localtime": 1726828161, + "memory": { + "buffered": 53182510, + "cached": 89456440, + "free": 815413706, + "total": 
973131776 + }, + "temperature": [ + 57, + 48 + ], + "uptime": 360 + }, + "version": 1 + }, + "uuid": 1726827801 + } + }, + "state_obf": { + "jsonrpc": "2.0", + "method": "state", + "params": { + "compress_64": "eJzVWU1v48gRvedXGMgxHLq/P+oW7GFPQY45LAZGs9k9JkaWtCJlrzHQf89rSrTIkWasDXzYGIZMUmyyqt6rV1XtPu26sKJ//fOXqh/CkOhbtx7SLoeYevrtW9zsy2lPOFqtur7brHuS1dN+NXQx9ANJbqvdHw/N64D7jWNCW1EutLvNdpta8uUk7XYbPGM83ob4NQ24VzhXDdNKqzxT3pQL00pbTk4rxy+mlVYId6jW4SnRfsueWdX3XVuMDX2/iV0YRiNxGr8+9N2XNdz7pGV1Pn0Iz19wyVVNWUlakvKkWmJs/OVV3KzXKQ4wghulqm4d4tA9J5wJUXXb0La7h2dF3Kmac1k7V3MN5/A0+mT5OR7SCeWUHuOx342GkbVczuOgpTLlfDcGv+mG8UCw8lPFx5euHR5JseoplgCu+/LZf+lo2O1T9fw4jAeHEbzyeIOQK2ojJU/MUSvPMeZMKCnFGOM3ayRX5UIO3aq4y83xhsk6Lrku5+9Zd7LjZOVk32FcmoZdV16vDtUSE3aBCWL1PiaCS3/GRDAw8IyJ4LbmStXK1kKyCRN3xkRpa+WJo1MUjBVuwU2rPxITx0l7somipaBJpTnvlfV+AYn2TMwgUd4u2M+5+ChE/OFz1YQ1ov3rT8L+VxWAvEu/79M6vtJvd7pgfaclZ3efq66IF6XhkVWrTTwG9f6savfsfhSMe+C3aROF7VxLto+vtF2FIW92T/eQk/s4RpbVL13u/oHcDm23oW/09/vxqL/nh1F/6N/btP4P7vkEbwZ9+Ayyx1WX1kORom77rB4KRVPfF1294wLSoaAbjtfc8GL19tksbom5ceSEbyhzH6l1VlCbgJBsG0smGkFaa4+lTyHiIilDUZHCrZlkrLabXXn3HQKB58OcSyPwblUbVwumrlnQKsOJp4xg8hDIpxzISMTW2SwoK6mJmahPFkB2YCuHKTAxQhxvscDJ2ni839RCX7XBiYDU0Ynh+daANBA13SIeooVxRjaenLHpZIPlFGBDJqfJC5DqBhsAgmJ1wePInu8tkBrhFiZzUiECEe85cpkl8rzkim0t5bYVJwsSQICFkRpRjqG/71ugUT54LV1troZAN15T4MYQ996TDE6CFw0AaRpFJkRBQjb2ZIDCdYnoEEKmEIV0SwhABAcrPILgrpmQVJQQJvjkRPaoLUwSD9KQkqpBpHVLwUp1MkEEkohMQzFTbqj1N5ggAIM0CASrjb5mgmj56HumtvWWdHLgYQDdIDiGsuGaRIoTERRCYEgKiC05lEF+ExkRBoEwoHTwq0ioGBs8FOyTOnr41gBtA19jQF5G5gQ1Pk8pCbUHNIHDarKgZLjFBunLywGDKLZc5aPTIGCLPMiZS/CdBwoCdUWgclAEiKQDz1NGBNR9pGlJh9ZRuCkrWREGWxtRW3PNBGZB8BBKZrYNst2WKKemcEIAHykbNBZ5ooOxZJEYRczIYsktKcENyjjeLzir0bdcVYZWBzwScXDZSkijUpRkEQrPwEkLQqB0TmAgVZE7iAbDQQtgbuGkBhBC1uNf5q+CobSg2EoQ3UcNFYYmahsDyOAzjpAYolHtZIQrKckgjgK23SjS1hR9RlYgIFdZGSTEuTFFH1gCIQA+Wj+0gMFAphWkq9B1IoRJJRYQjhZ53EC/btEHVXsPfbS1uhoFlPJMTU5wLONtLmo83gqNhjmjjmvQTwbxlheJmrYkZpbIW2qam6LAagdG8vpqCKLAeySP4H8jSmIKgTcyVKSIZIkGlbTJvixdoWd56FNaE5sqBsulaLSMcMrZmzEvq7BmPyCG1KVscQle+KsZ4jy0Rwq4GkIL9c4KLwnQQ5aQCDw6hCVKecWgIq95lFCUkYaMucUgDvk0yBQrQdhr9ngL6seW45m2BWPRblEKyJMUIaEKdZRcy+IVewLyC+1GRhTRdCDXb7LHQ0FYacF1PXp5WVFig2e3UEctDRInweOYcybv0DzYnEDTEMwVg3B3Kf+SoHYcVLpE7PP7Xavwfta1cuOU8vOu1c261sVooIVX567VQ39Obey0Us+6VjfvWh1n+jS2tpuX9a2Dq78cXO0PunUxH1w1mw+uXrD54HrOptOEpPVsQjLOyOMAdJ6QmBHL5t2yD5yQLrPwLcROKyP8d0Ornw+taEbkYkISSnzQhGS+G1nV5TaC4jeggTI2R0M5Mx9ZF2pyAsTJGSAeQRBqAYjjTi0Acc59ICCXKjTbRjBomM1yZkUeLLYRTtsK56FVsg+CxE1Dq/jR0Cr+wuk/H1qF0rzCp5XzoZX/qaF1UpKfjq2XQyu7NrSKQvYfKxEE4OYtNDlXIvB7rkSI10yJ5oVrov5sQ0EzDPV+qUXaHNXqDRPHxAdS/7LgzagvPVNuKUaK8Tn1DVPfqZH5KDW62ED7E7VhgYhi8w20sncyQ2RRuk+QKDaDRKOY+OUGmpZH0N52aBizH7mpeVHyz+WBMc/tEhHt5og4EHAJiPgoLeLs3R00+X8iRtd30N4RI/4/iNHNe2jiUDq5Vbf++un0n4ny/H7YpfBE34pxiGjYAYgd8ardb1fpD8r71arqt6mwHG88HKr9draE3bLkcDTwqH8lRR6ekHusFPoCNYT7V8So2fev5QuvtAJtApJrhTo3HT4ceSTYdOG44YaFPywB603XJ/oEtXtHzVNMJ6u08+B+etomkHi/Q88MyHdh3T91Q/kegnXczt+8wGUhRwGZXEJj5SeX9NwlVzJqcuns3cklt3AJX98Z9p1bCzJNbun3eTFzTJUe8GeOHTuQN8c+V/t1N9C31SYUf0p/a+Y/tsI1C+3GHwVNnn9VhgzQHPkIEnPLvZf4BLnT02b3Co3Y55x2ZUcaqaqkBMghPpb80pjMCwRwPiGpvEX2onMeNgP02VuJPshaNJFzTxCeEpwy2uy34ysFxy3PkIiSZ/xv/wUILeA7" + } + }, + "healthcheck": { + "jsonrpc": "2.0", + "method": "healthcheck", + "params": { + "serial": "MAC", + "uuid": 100401, + "request_uuid": 0, + "sanity": 100, + "data": "" + } + }, + "alarm": { + "jsonrpc": "2.0", + "method": "alarm", + "params": { + "serial": "MAC", + "data": "" + } + }, + "wifiscan": { + "jsonrpc": "2.0", + "method": "wifiscan", + "params": { + "serial": "MAC", + "data": "" 
+ } + }, + "crashlog": { + "jsonrpc": "2.0", + "method": "crashlog", + "params": { + "serial": "MAC", + "uuid": 100402, + "loglines": [] + } + }, + "rebootLog": { + "jsonrpc": "2.0", + "method": "rebootLog", + "params": { + "serial": "MAC", + "uuid": 100403, + "date": 1736758447.2709587, + "type": "Dummy", + "info": [ + "info 1", + "info 2" + ] + } + }, + "cfgpending": { + "jsonrpc": "2.0", + "method": "cfgpending", + "params": { + "serial": "MAC", + "active": 112233860, + "uuid": 100404 + } + }, + "deviceupdate": { + "jsonrpc": "2.0", + "method": "deviceupdate", + "params": { + "serial": "MAC", + "currentPassword": "mynewpassword" + } + }, + "ping": { + "jsonrpc": "2.0", + "method": "ping", + "params": { + "serial": "MAC", + "uuid": 100405 + } + }, + "recovery": { + "jsonrpc": "2.0", + "method": "recovery", + "params": { + "serial": "MAC", + "uuid": 100406, + "firmware": "Rel 1.6 build 1", + "reboot": false, + "loglines": [] + } + }, + "venue_broadcast": { + "jsonrpc": "2.0", + "method": "venue_broadcast", + "params": { + "serial": "MAC", + "timestamp": 1736758447.2709587, + "data": "" + } + } +} \ No newline at end of file diff --git a/utils/client_simulator/src/simulation_runner.py b/utils/client_simulator/src/simulation_runner.py index 6346ffc..dc0456c 100644 --- a/utils/client_simulator/src/simulation_runner.py +++ b/utils/client_simulator/src/simulation_runner.py @@ -37,6 +37,26 @@ def __init__(self, mac: str, size: int): self.log = json.dumps(self.log).replace("MAC", mac) self.join = json.dumps(self.templates["join"]).replace("MAC", mac) self.leave = json.dumps(self.templates["leave"]).replace("MAC", mac) + self.healthcheck = json.dumps( + self.templates["healthcheck"]).replace("MAC", mac) + self.alarm = json.dumps( + self.templates["alarm"]).replace("MAC", mac) + self.wifiscan = json.dumps( + self.templates["wifiscan"]).replace("MAC", mac) + self.crashlog = json.dumps( + self.templates["crashlog"]).replace("MAC", mac) + self.rebootLog = json.dumps( + self.templates["rebootLog"]).replace("MAC", mac) + self.cfgpending = json.dumps( + self.templates["cfgpending"]).replace("MAC", mac) + self.deviceupdate = json.dumps( + self.templates["deviceupdate"]).replace("MAC", mac) + self.ping = json.dumps( + self.templates["ping"]).replace("MAC", mac) + self.recovery = json.dumps( + self.templates["recovery"]).replace("MAC", mac) + self.venue_broadcast = json.dumps( + self.templates["venue_broadcast"]).replace("MAC", mac) @staticmethod def to_json(msg) -> str: @@ -78,10 +98,10 @@ def send_hello(self, socket: client.ClientConnection): logger.debug(self.messages.connect) socket.send(self.messages.connect) - def send_log(self, socket: client.ClientConnection): + def send_log_event(self, socket: client.ClientConnection): socket.send(self.messages.log) - def send_state(self, socket: client.ClientConnection): + def send_state_event(self, socket: client.ClientConnection): socket.send(self.messages.state) def send_join(self, socket: client.ClientConnection): @@ -90,6 +110,36 @@ def send_join(self, socket: client.ClientConnection): def send_leave(self, socket: client.ClientConnection): socket.send(self.messages.leave) + def send_healthcheck_event(self, socket: client.ClientConnection): + socket.send(self.messages.healthcheck) + + def send_alarm_event(self, socket: client.ClientConnection): + socket.send(self.messages.alarm) + + def send_wifiscan_event(self, socket: client.ClientConnection): + socket.send(self.messages.wifiscan) + + def send_crashlog_event(self, socket: client.ClientConnection): + 
socket.send(self.messages.crashlog) + + def send_rebootlog_event(self, socket: client.ClientConnection): + socket.send(self.messages.rebootLog) + + def send_cfgpending_event(self, socket: client.ClientConnection): + socket.send(self.messages.cfgpending) + + def send_deviceupdate_event(self, socket: client.ClientConnection): + socket.send(self.messages.deviceupdate) + + def send_ping_event(self, socket: client.ClientConnection): + socket.send(self.messages.ping) + + def send_recovery_event(self, socket: client.ClientConnection): + socket.send(self.messages.recovery) + + def send_venue_broadcast_event(self, socket: client.ClientConnection): + socket.send(self.messages.venue_broadcast) + def get_single_message(self, socket: client.ClientConnection): try: msg = socket.recv(self.interval) @@ -129,7 +179,7 @@ def handle_reboot(self, socket: client.ClientConnection, msg: dict): def connect(self): if self._socket is None: - # 20 seconds is more then enough to establish conne and exchange + # 20 seconds is more than enough to establish a connection and exchange # them handshakes. self._socket = client.connect( self.server_addr, ssl=self.ssl_context, open_timeout=20, close_timeout=20) @@ -153,8 +203,8 @@ def single_run(self): self.connect() if time.time() - start > self.interval: logger.info(f"Device sim heartbeat") - self.send_state(self._socket) - self.send_log(self._socket) + self.send_state_event(self._socket) + self.send_log_event(self._socket) start = time.time() self.handle_messages(self._socket) finally: @@ -178,8 +228,8 @@ def job(self): self.connect() if time.time() - start > self.interval: logger.info(f"Device sim heartbeat") - self.send_state(self._socket) - self.send_log(self._socket) + self.send_state_event(self._socket) + self.send_log_event(self._socket) start = time.time() self.handle_messages(self._socket) finally: @@ -187,7 +237,7 @@ def job(self): logger.debug("simulation done") -def get_avail_mac_addrs(path, mask="XX:XX:XX:XX:XX:XX"): +def get_available_mac_addresses(path, mask="XX:XX:XX:XX:XX:XX"): mask = mask.upper() _mask = "".join(("[0-9a-fA-F]" if c == "X" else c) for c in mask) macs = open(path + '/macs.txt', 'r').read().split() @@ -216,7 +266,7 @@ def process(args: Args, mask: str, start_event: multiprocessing.Event, stop_even signal.signal(signal.SIGINT, signal.SIG_IGN) threading.current_thread().name = mask logger.info(f"process started") - macs = get_avail_mac_addrs(args.cert_path, mask) + macs = get_available_mac_addresses(args.cert_path, mask) if len(macs) < args.number_of_connections: logger.warn(f"expected {args.number_of_connections} certificates, but only found {len(macs)} " f"({mask = })") @@ -235,7 +285,7 @@ def process(args: Args, mask: str, start_event: multiprocessing.Event, stop_even def verify_cert_availability(cert_path: str, masks: List[str], count: int): for mask in masks: - macs = get_avail_mac_addrs(cert_path, mask) + macs = get_available_mac_addresses(cert_path, mask) assert len(macs) >= count, \ f"Simulation requires {count} certificates, but only found {len(macs)}" diff --git a/utils/docker/StartMultiCGW.py b/utils/docker/StartMultiCGW.py index 84a696f..4f464c7 100644 --- a/utils/docker/StartMultiCGW.py +++ b/utils/docker/StartMultiCGW.py @@ -64,8 +64,8 @@ DEFAULT_CERTS_PATH = "../cert_generator/certs/server/" DEFAULT_CLIENT_CERTS_PATH = "../cert_generator/certs/client/" -CONTAINTER_CERTS_VOLUME: Final[str] = "/etc/cgw/certs" -CONTAINTER_NB_INFRA_CERTS_VOLUME: Final[str] = "/etc/cgw/nb_infra/certs" +CONTAINER_CERTS_VOLUME: Final[str] = "/etc/cgw/certs" 
+CONTAINER_NB_INFRA_CERTS_VOLUME: Final[str] = "/etc/cgw/nb_infra/certs" # Cert & key files name DEFAULT_CERT_GENERATOR_PATH = "../cert_generator" @@ -169,7 +169,7 @@ def certificates_update(certs_path: str = DEFAULT_CERTS_PATH, client_certs_path: print(f"Returned to original directory: {os.getcwd()}") -# Jinga2 template generator +# Jinja2 template generator def get_cgw_image_base_name() -> str: """ Returns CGW Docker image base name @@ -314,10 +314,10 @@ def generate_docker_compose_file(instances_num: int, cgw_groups_capacity=DEFAULT_GROUPS_CAPACITY, cgw_groups_threshold=DEFAULT_GROUPS_THRESHOLD, cgw_group_infras_capacity=DEFAULT_GROUP_INFRAS_CAPACITY, - cgw_allow_certs_missmatch=DEFAULT_ALLOW_CERT_MISMATCH, + cgw_allow_certs_mismatch=DEFAULT_ALLOW_CERT_MISMATCH, cgw_nb_infra_tls=DEFAULT_NB_INFRA_TLS, - container_certs_voulume=CONTAINTER_CERTS_VOLUME, - container_nb_infra_certs_voulume=CONTAINTER_NB_INFRA_CERTS_VOLUME, + container_certs_volume=CONTAINER_CERTS_VOLUME, + container_nb_infra_certs_volume=CONTAINER_NB_INFRA_CERTS_VOLUME, default_certs_path=certs_realpath) # 6. Save the rendered template as docker-compose.yml @@ -400,7 +400,7 @@ def docker_compose_down(docker_compose_file: str = "docker-compose.yml"): # 3. Remove old multi cgw docker compose file remove_docker_compose_multi_cgw_file() - # 4. Update Certitifacates + # 4. Update Certificates certificates_update() # 4. Generate new multi cgw docker compose file diff --git a/utils/docker/docker-compose-template.yml.j2 b/utils/docker/docker-compose-template.yml.j2 index 4b6f396..504126f 100644 --- a/utils/docker/docker-compose-template.yml.j2 +++ b/utils/docker/docker-compose-template.yml.j2 @@ -15,7 +15,7 @@ services: - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@docker-broker-1:9093 - ALLOW_PLAINTEXT_LISTENER=yes - KAFKA_CFG_NODE_ID=1 - - KAFKA_AUTO_CREATE_TOPICS_ENABLE=true + - KAFKA_AUTO_CREATE_TOPICS_ENABLE=false - BITNAMI_DEBUG=yes - KAFKA_CFG_NUM_PARTITIONS=2 healthcheck: @@ -54,23 +54,6 @@ services: - ALLOW_EMPTY_PASSWORD=yes networks: - cgw_multi_instances_network - init-broker-container: - image: docker.io/bitnami/kafka:latest - depends_on: - - broker - entrypoint: [ '/bin/sh', '-c' ] - command: | - " - # rather than giving sleep 15 use this - # to block init container to wait for Kafka broker to be ready - kafka-topics --bootstrap-server broker:9092 --list - - # create CnC and CnC_Res topics - kafka-topics.sh --create --partitions {{ cgw_instances_num }} --bootstrap-server broker:9092 --topic CnC - kafka-topics.sh --create --bootstrap-server broker:9092 --partitions 2 --topic CnC_Res - " - networks: - - cgw_multi_instances_network {% for i in range(0, cgw_instances_num) %} cgw_instance_{{ i }}: @@ -109,7 +92,7 @@ services: - CGW_WSS_CERT={{ cgw_wss_cert }} - CGW_WSS_KEY={{ cgw_wss_key }} - DEFAULT_WSS_THREAD_NUM={{ cgw_wss_t_num }} - - CGW_ALLOW_CERT_MISMATCH={{ cgw_allow_certs_missmatch }} + - CGW_ALLOW_CERT_MISMATCH={{ cgw_allow_certs_mismatch }} - CGW_NB_INFRA_TLS={{ cgw_nb_infra_tls }} - CGW_UCENTRAL_AP_DATAMODEL_URI={{ cgw_ucentral_ap_datamodel_uri }} - CGW_UCENTRAL_SWITCH_DATAMODEL_URI={{ cgw_ucentral_switch_datamodel_uri }} @@ -120,11 +103,21 @@ services: depends_on: broker: condition: service_healthy + {% if i != 0 %} + cgw_instance_{{ i - 1 }}: + condition: service_healthy + {% endif %} volumes: - - {{ default_certs_path }}:{{ container_certs_voulume }} - - {{ default_certs_path }}:{{ container_nb_infra_certs_voulume }} + - {{ default_certs_path }}:{{ container_certs_volume }} + - {{ default_certs_path }}:{{ 
container_nb_infra_certs_volume }} networks: - cgw_multi_instances_network + healthcheck: + test: ["CMD-SHELL", "ps -aux | grep cgw_instance_{{ i }}"] + interval: 10s + timeout: 5s + retries: 3 + start_period: 3s {% endfor %} networks: diff --git a/utils/kafka_producer/src/cli_parser.py b/utils/kafka_producer/src/cli_parser.py index 2240400..4eb4031 100644 --- a/utils/kafka_producer/src/cli_parser.py +++ b/utils/kafka_producer/src/cli_parser.py @@ -38,10 +38,10 @@ def parse_args(): help="delete an existing group") parser.add_argument("-d", "--assign-to-group", metavar=("GROUP-ID", "MAC-RANGE"), nargs=2, action="append", - help="add a range of mac addrs to a group") + help="add a range of MAC addresses to a group") parser.add_argument("-D", "--remove-from-group", metavar=("GROUP-ID", "MAC-RANGE"), nargs=2, action="append", - help="remove mac addrs from a group") + help="remove MAC addresses from a group") parser.add_argument("-T", "--topic", default="CnC", help="kafka topic (default: \"CnC\")") parser.add_argument("-s", "--bootstrap-server", metavar="ADDRESS", default="127.0.0.1:9092", @@ -57,7 +57,7 @@ def parse_args(): help="time between messages (default: \"1.0s\")") parser.add_argument("-p", "--send-to-group", metavar="GROUP-ID", type=str) parser.add_argument("-r", "--send-to-mac", metavar="MAC-RANGE", type=MacRange, - help="range of mac addrs that will be receiving the messages") + help="range of MAC addresses that will receive the messages") parsed_args = parser.parse_args() diff --git a/utils/kafka_producer/src/consumer.py b/utils/kafka_producer/src/consumer.py index 5050fbc..4d2ea0d 100644 --- a/utils/kafka_producer/src/consumer.py +++ b/utils/kafka_producer/src/consumer.py @@ -12,10 +12,10 @@ class Consumer: - def __init__(self, db: str, topic: str, consumer_timeout: int) -> None: + def __init__(self, db: str, topics: List[str], consumer_timeout: int) -> None: self.db = db self.conn = None - self.topic = topic + self.topics = topics self.consumer_timeout = consumer_timeout self.message = Message() @@ -27,7 +27,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def connect(self) -> kafka.KafkaConsumer: if self.is_connected() is False: - self.conn = kafka.KafkaConsumer(self.topic, + self.conn = kafka.KafkaConsumer(*self.topics, bootstrap_servers=self.db, client_id="consumer_1", group_id="cgw_tests_consumer", @@ -52,7 +52,7 @@ def is_connected(self) -> bool: def flush(self, timeout_ms: int = 1000): assert self.is_connected(), \ - f"consumer: Cannot flush kafka topic while not connected!" + f"consumer: Cannot flush kafka topics while not connected!" while True: # We explicitly use get_single_msg instead of @@ -106,6 +106,26 @@ def get_infra_request_result_msg(self, uuid_val: int, timeout_ms: int = 12000): return message return None + def get_msg_by_type(self, msg_type: str, timeout_ms: int = 12000): + assert self.is_connected(), \ + f"consumer: Cannot get Kafka msg by type while not connected!" 
+ + while True: + # We poll get_single_msg() here so that we return as soon + # as a message with a matching type is found, without + # waiting out the full timeout. + message = self.get_single_msg(timeout_ms=timeout_ms) + if message is None: + break + + logger.debug("Received kafka msg: %s key=%s value=%s ts=%s" % + (message.topic, message.key, message.value, message.timestamp)) + + if message.value['type'] == msg_type: + return message + + return None + def get_result_msg(self, uuid_val: int, timeout_ms: int = 12000): res_uuid = str(uuid.UUID(int=uuid_val)) diff --git a/utils/kafka_producer/src/utils.py b/utils/kafka_producer/src/utils.py index baff328..fca3659 100644 --- a/utils/kafka_producer/src/utils.py +++ b/utils/kafka_producer/src/utils.py @@ -8,7 +8,7 @@ class MacRange: """ Return an object that produces a sequence of MAC addresses from - START (inclusive) to END (inclusive). START and END are exctracted + START (inclusive) to END (inclusive). START and END are extracted from the input string if it is in the format "11:22:AA:BB:00:00-11:22:AA:BB:00:05" (where START=11:22:AA:BB:00:00, END=11:22:AA:BB:00:05, and the total amount of MACs in the range is 6).
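
The `MacRange` docstring above fully specifies the intended behaviour, so it can be exercised with a few lines of standalone Python. The sketch below is illustrative only and assumes just the `"START-END"` input form named in the docstring; it is not the actual `utils.py` implementation.

```python
# Minimal sketch of the semantics the MacRange docstring describes:
# parse "START-END" and yield every MAC from START to END inclusive.

def mac_to_int(mac: str) -> int:
    # "11:22:AA:BB:00:00" -> 0x1122AABB0000
    return int(mac.replace(":", ""), 16)

def int_to_mac(value: int) -> str:
    raw = f"{value:012x}"
    return ":".join(raw[i:i + 2] for i in range(0, 12, 2))

def mac_range(spec: str):
    start, end = (mac_to_int(part) for part in spec.split("-"))
    for value in range(start, end + 1):  # END is inclusive
        yield int_to_mac(value)

# The docstring's example yields 6 addresses:
macs = list(mac_range("11:22:AA:BB:00:00-11:22:AA:BB:00:05"))
assert len(macs) == 6
assert macs[0] == "11:22:aa:bb:00:00"
assert macs[-1] == "11:22:aa:bb:00:05"
```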
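Similarly, the `Consumer` changes earlier in this diff (multi-topic subscription plus `get_msg_by_type`) are easiest to see in use. A hedged usage sketch follows, assuming a broker at `127.0.0.1:9092`; the import path, topic list, and the `"infra_join"` type string are placeholder assumptions for whatever a given test expects, not values fixed by this diff.

```python
# Hedged usage sketch: subscribe the test Consumer to one or more topics
# and wait for the first message of a given "type". The import path,
# topic names, and message type below are illustrative assumptions.
from src.consumer import Consumer  # as laid out under utils/kafka_producer

consumer = Consumer(db="127.0.0.1:9092",
                    topics=["CnC_Res"],  # any number of topics may be passed
                    consumer_timeout=12000)
consumer.connect()  # expands to kafka.KafkaConsumer(*self.topics, ...)

# Polls until a message whose value["type"] matches, or returns None
# once get_single_msg() stops yielding messages within timeout_ms.
msg = consumer.get_msg_by_type("infra_join", timeout_ms=12000)
if msg is not None:
    print(msg.topic, msg.key, msg.value)
```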