From 999d9f8543693a335bfdc2cd747b119b08eaba12 Mon Sep 17 00:00:00 2001
From: Hanbeom kim
Date: Wed, 15 Jan 2025 16:49:47 +0900
Subject: [PATCH] Modify the `node` structure.

- Modify the node structure so that "TI Container" can be configured
  remotely, like "Datalake".
- Rename `Giganto` to `ConnectionlessAgent`.
- Add a `ti_container` field of type `ConnectionlessAgent` within `node`.
- Modify the migration function `migrate_0_29_node`.
- Add the migration function `migrate_0_35_node`.

Close: #399
---
 CHANGELOG.md                          |  10 +
 Cargo.toml                            |   2 +-
 src/lib.rs                            |  14 +-
 src/migration.rs                      | 265 +++++++++++++++++++++++---
 src/migration/migration_structures.rs |  95 ++++++++-
 src/tables.rs                         |   2 +-
 src/tables/agent.rs                   |   4 +
 src/tables/node.rs                    |  53 ++++--
 8 files changed, 383 insertions(+), 62 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f28f6f2..2206a61 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,15 @@ file is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and
 this project adheres to [Semantic
 Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [Unreleased]
+
+### Changed
+
+- Modified the node structure so that "TI Container" can be configured
+  remotely, like "Datalake".
+  - Renamed `Giganto` to `ConnectionlessAgent`.
+  - Added a `ti_container` field of type `ConnectionlessAgent` within `node`.
+
 ## [0.34.0] - 2025-01-20
 
 ### Added
@@ -800,6 +809,7 @@ AsRef<[u8]>`). This change accommodates scenarios where the information stored
 - Modified `FtpBruteForce` by adding an `is_internal` field which is a boolean
   indicating whether it is internal or not.
 
+[Unreleased]: https://github.com/petabi/review-database/compare/0.34.0...main
 [0.34.0]: https://github.com/petabi/review-database/compare/0.33.1...0.34.0
 [0.33.1]: https://github.com/petabi/review-database/compare/0.33.0...0.33.1
 [0.33.0]: https://github.com/petabi/review-database/compare/0.32.0...0.33.0
diff --git a/Cargo.toml b/Cargo.toml
index ebd1ce6..e315c58 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "review-database"
-version = "0.34.0"
+version = "0.35.0-alpha.1"
 edition = "2021"
 
 [dependencies]
diff --git a/src/lib.rs b/src/lib.rs
index 51a92fb..e2d3345 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -65,13 +65,13 @@ use self::tables::StateDb;
 pub use self::tables::{
     AccessToken, AccountPolicy, Agent, AgentConfig, AgentKind, AgentStatus, AllowNetwork,
     AllowNetworkUpdate, AttrCmpKind, BlockNetwork, BlockNetworkUpdate, Confidence,
-    CsvColumnExtra as CsvColumnExtraConfig, Customer, CustomerNetwork, CustomerUpdate, DataSource,
-    DataSourceUpdate, DataType, Filter, Giganto, IndexedTable, Iterable, ModelIndicator, Network,
-    NetworkUpdate, Node, NodeProfile, NodeTable, NodeUpdate, OutlierInfo, OutlierInfoKey,
-    OutlierInfoValue, PacketAttr, ProtocolPorts, Response, ResponseKind, SamplingInterval,
-    SamplingKind, SamplingPeriod, SamplingPolicy, SamplingPolicyUpdate, Structured,
-    StructuredClusteringAlgorithm, Table, Template, Ti, TiCmpKind, Tidb, TidbKind, TidbRule,
-    TorExitNode, TrafficFilter, TriagePolicy, TriagePolicyUpdate, TriageResponse,
+    ConnectionlessAgent, CsvColumnExtra as CsvColumnExtraConfig, Customer, CustomerNetwork,
+    CustomerUpdate, DataSource, DataSourceUpdate, DataType, Filter, IndexedTable, Iterable,
+    ModelIndicator, Network, NetworkUpdate, Node, NodeProfile, NodeTable, NodeUpdate, OutlierInfo,
+    OutlierInfoKey, OutlierInfoValue, PacketAttr, ProtocolPorts, Response, ResponseKind,
+    SamplingInterval, SamplingKind, SamplingPeriod, SamplingPolicy, SamplingPolicyUpdate,
+    Structured, StructuredClusteringAlgorithm, Table, Template, Ti, TiCmpKind, Tidb, TidbKind,
+    TidbRule, TorExitNode, TrafficFilter, TriagePolicy, TriagePolicyUpdate, TriageResponse,
     TriageResponseUpdate, TrustedDomain, TrustedUserAgent, UniqueKey, Unstructured,
     UnstructuredClusteringAlgorithm, ValueKind,
 };
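The shape change at the heart of the patch is easier to see outside the diff: the per-node Giganto settings struct becomes a generic slot for any connectionless, remotely configured service, and `node` gains a second slot of that type for "TI Container". A minimal standalone sketch follows; the mirrored types track `src/tables/node.rs` below, but `draft` is simplified to a plain `String` here (the crate wraps it in a validated config type), and the TOML draft content is an illustrative assumption:

```rust
// Mirrors of the reshaped types in src/tables/node.rs; simplified stand-ins.
#[derive(Clone, Debug, Default, PartialEq)]
enum AgentStatus {
    #[default]
    Enabled,
}

#[derive(Clone, Debug, Default, PartialEq)]
struct ConnectionlessAgent {
    status: AgentStatus,
    draft: Option<String>, // the crate uses a validated config newtype
}

struct Node {
    giganto: Option<ConnectionlessAgent>,
    ti_container: Option<ConnectionlessAgent>, // new in this patch
}

fn main() {
    // Both remote-configurable services now share one agent type.
    let node = Node {
        giganto: Some(ConnectionlessAgent::default()),
        ti_container: Some(ConnectionlessAgent {
            status: AgentStatus::Enabled,
            // Hypothetical TOML draft; the real schema is whatever the
            // TI Container service accepts.
            draft: Some("log_dir = \"/data/log\"".to_string()),
        }),
    };
    assert!(node.giganto.is_some());
    if let Some(ti) = &node.ti_container {
        println!("TI Container: {:?}, draft: {:?}", ti.status, ti.draft);
    }
}
```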
diff --git a/src/migration.rs b/src/migration.rs
index e01d2f9..96d0d63 100644
--- a/src/migration.rs
+++ b/src/migration.rs
@@ -16,7 +16,7 @@ use semver::{Version, VersionReq};
 use serde::{Deserialize, Serialize};
 use tracing::{info, warn};
 
-use crate::{Agent, AgentStatus, Giganto, Indexed, IterableMap};
+use crate::{Agent, AgentStatus, ConnectionlessAgent, Indexed, IterableMap};
 
 /// The range of versions that use the current database format.
 ///
@@ -99,7 +99,7 @@
 /// // release that involves database format change) to 3.5.0, including
 /// // all alpha changes finalized in 3.5.0.
 /// ```
-const COMPATIBLE_VERSION_REQ: &str = ">=0.34.0,<0.35.0";
+const COMPATIBLE_VERSION_REQ: &str = ">=0.35.0-alpha.1,<0.35.0-alpha.2";
 
 /// Migrates data exists in `PostgresQL` to Rocksdb if necessary.
 ///
@@ -202,6 +202,11 @@ pub fn migrate_data_dir<P: AsRef<Path>>(data_dir: P, backup_dir: P) -> Result<()
             Version::parse("0.34.0")?,
             migrate_0_30_to_0_34_0,
         ),
+        (
+            VersionReq::parse(">=0.34.0,<0.35.0-alpha.1")?,
+            Version::parse("0.35.0-alpha.1")?,
+            migrate_0_34_to_0_35_0,
+        ),
     ];
 
     let mut store = super::Store::new(data_dir, backup_dir)?;
@@ -277,6 +282,28 @@ fn read_version_file(path: &Path) -> Result<Version> {
     Version::parse(&ver).context("cannot parse VERSION")
 }
 
+fn migrate_0_34_to_0_35_0(store: &super::Store) -> Result<()> {
+    migrate_0_35_node(store)
+}
+
+fn migrate_0_35_node(store: &super::Store) -> Result<()> {
+    use bincode::Options;
+    use migration_structures::OldInnerFromV29BeforeV34;
+
+    use crate::{tables::InnerNode, IterableMap};
+
+    let map = store.node_map();
+    let node_raw = map.raw();
+    for (_key, old_value) in node_raw.iter_forward()? {
+        let old_inner_node = bincode::DefaultOptions::new()
+            .deserialize::<OldInnerFromV29BeforeV34>(&old_value)
+            .context("Failed to migrate node database: invalid node value")?;
+        let new_inner_node: InnerNode = old_inner_node.into();
+        node_raw.overwrite(&new_inner_node)?;
+    }
+    Ok(())
+}
+
 fn migrate_0_30_to_0_34_0(store: &super::Store) -> Result<()> {
     migrate_0_34_account(store)?;
     migrate_0_34_events(store)
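The first hunk in `migrate_data_dir` above appends the 0.35.0-alpha.1 step to the version-gated migration table. Stripped of `Store` handling and the real migration body, the dispatch pattern can be sketched standalone like this; `migrate_0_34_to_0_35_0` here is an empty stand-in, and only the `semver` usage mirrors the diff:

```rust
// Standalone sketch of the version-gated dispatch in `migrate_data_dir`.
use anyhow::Result;
use semver::{Version, VersionReq};

fn migrate_0_34_to_0_35_0() -> Result<()> {
    // Would rewrite each stored `node` record in place, as above.
    Ok(())
}

fn main() -> Result<()> {
    // Each entry pairs a requirement on the on-disk format version with the
    // version the migration produces.
    let migrations: Vec<(VersionReq, Version, fn() -> Result<()>)> = vec![(
        VersionReq::parse(">=0.34.0,<0.35.0-alpha.1")?,
        Version::parse("0.35.0-alpha.1")?,
        migrate_0_34_to_0_35_0,
    )];

    let mut version = Version::parse("0.34.0")?; // as recorded in the VERSION file
    for (requirement, to, migrate) in migrations {
        if requirement.matches(&version) {
            migrate()?;
            version = to; // persisted back to the VERSION file in the real code
        }
    }
    println!("database format is now {version}");
    Ok(())
}
```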
@@ -745,14 +772,17 @@ fn migrate_0_29_node(store: &super::Store) -> Result<()> {
     use bincode::Options;
     use chrono::{DateTime, Utc};
+    use migration_structures::{OldInnerFromV29BeforeV34, OldNodeFromV29BeforeV34};
 
-    use crate::IterableMap;
-    use crate::{Node, NodeProfile};
+    use crate::{
+        tables::{UniqueKey, Value},
+        {collections::Indexed, IterableMap, NodeProfile},
+    };
 
     type PortNumber = u16;
 
     #[derive(Clone, Deserialize, Serialize)]
-    pub struct OldNode {
+    pub struct OldNodeBeforeV29 {
         pub id: u32,
         pub name: String,
         pub name_draft: Option<String>,
@@ -824,10 +854,10 @@ fn migrate_0_29_node(store: &super::Store) -> Result<()> {
         }
     }
 
-    impl TryFrom<OldNode> for Node {
+    impl TryFrom<OldNodeBeforeV29> for OldNodeFromV29BeforeV34 {
         type Error = anyhow::Error;
 
-        fn try_from(input: OldNode) -> Result<Self> {
+        fn try_from(input: OldNodeBeforeV29) -> Result<Self> {
             use migration_structures::{
                 DumpHttpContentType, DumpItem, GigantoConfig, HogConfig, PigletConfig,
                 ProtocolForHog,
@@ -966,7 +996,7 @@ fn migrate_0_29_node(store: &super::Store) -> Result<()> {
             }
 
             if s.giganto {
-                giganto = Some(Giganto {
+                giganto = Some(ConnectionlessAgent {
                     status: AgentStatus::Enabled,
                     draft: None,
                 });
@@ -1146,7 +1176,7 @@ fn migrate_0_29_node(store: &super::Store) -> Result<()> {
                 ack_transmission: u16::MAX,
             };
             let draft = Some(toml::to_string(&draft)?.try_into()?);
-            giganto = Some(Giganto {
+            giganto = Some(ConnectionlessAgent {
                 status: AgentStatus::Enabled,
                 draft,
             });
@@ -1166,26 +1196,26 @@ fn migrate_0_29_node(store: &super::Store) -> Result<()> {
     }
 
     let map = store.node_map();
-    let raw = map.raw();
-    let mut nodes = vec![];
-    for (_key, old_value) in raw.iter_forward()? {
+    let node_raw = map.raw();
+    let agent_raw = map.agent_raw();
+    for (_key, old_value) in node_raw.iter_forward()? {
         let old_node = bincode::DefaultOptions::new()
-            .deserialize::<OldNode>(&old_value)
+            .deserialize::<OldNodeBeforeV29>(&old_value)
            .context("Failed to migrate node database: invalid node value")?;
-        match TryInto::<Node>::try_into(old_node) {
+        match TryInto::<OldNodeFromV29BeforeV34>::try_into(old_node) {
             Ok(new_node) => {
-                raw.deactivate(new_node.id)?;
-                nodes.push(new_node);
+                for agent in &new_node.agents {
+                    agent_raw.insert(agent.unique_key().as_ref(), agent.value().as_ref())?;
+                }
+                let inner: OldInnerFromV29BeforeV34 = new_node.into();
+                node_raw.overwrite(&inner)?;
             }
             Err(e) => {
                 warn!("Skip the migration for an item: {e}");
             }
         }
     }
-    raw.clear_inactive()?;
-    for node in nodes {
-        let _ = map.put(node)?;
-    }
+
     Ok(())
 }
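`migrate_0_29_node` no longer buffers converted nodes and re-`put`s them; it now writes each node's agents into the separate agent map and overwrites the node record in place. The agent map's compound key — the node id in big-endian followed by the agent's key string — is what keeps a node's agents adjacent in RocksDB's lexicographic key order. A standalone sketch of that byte layout (the helper name `agent_key` is illustrative; the layout matches the test code below):

```rust
// Compound key for the agents map: big-endian node id, then the agent key.
fn agent_key(node_id: u32, agent: &str) -> Vec<u8> {
    let mut key = node_id.to_be_bytes().to_vec();
    key.extend(agent.as_bytes());
    key
}

fn main() {
    let hog = agent_key(0, "hog");
    let piglet = agent_key(0, "piglet");
    assert_eq!(&hog[..4], &[0, 0, 0, 0]); // node id prefix
    assert_eq!(&hog[4..], b"hog");
    // Big-endian ids mean all of node 0's agents sort before node 1's.
    assert!(piglet < agent_key(1, "hog"));
}
```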
@@ -3455,11 +3485,16 @@ mod tests {
     use serde::{Deserialize, Serialize};
 
     use crate::{
-        collections::Indexed, migration::migration_structures::PigletConfig, Indexable,
+        collections::Indexed,
+        migration::migration_structures::{
+            OldInnerFromV29BeforeV34, OldNodeFromV29BeforeV34, PigletConfig,
+        },
+        types::FromKeyValue,
+        Agent, Indexable,
     };
 
     #[derive(Clone, Deserialize, Serialize)]
-    pub struct OldNode {
+    pub struct OldNodeBeforeV29 {
         pub id: u32,
         pub name: String,
         pub name_draft: Option<String>,
@@ -3521,7 +3556,7 @@ mod tests {
         pub sensor_list: HashMap,
     }
 
-    impl Indexable for OldNode {
+    impl Indexable for OldNodeBeforeV29 {
         fn key(&self) -> Cow<[u8]> {
             Cow::from(self.name.as_bytes())
         }
@@ -3549,7 +3584,7 @@ mod tests {
         let map = settings.store.node_map();
         let node_db = map.raw();
 
-        let old_node = OldNode {
+        let old_node = OldNodeBeforeV29 {
             id: 0,
             name: "name".to_string(),
             name_draft: None,
@@ -3601,19 +3636,42 @@ mod tests {
             }),
         };
 
-        let res = node_db.insert(old_node.clone());
-        assert!(res.is_ok());
-        let id = res.unwrap();
+        assert!(node_db.insert(old_node.clone()).is_ok());
 
         let (db_dir, backup_dir) = settings.close();
         let settings = TestSchema::new_with_dir(db_dir, backup_dir);
 
         assert!(super::migrate_0_29_node(&settings.store).is_ok());
 
         let map = settings.store.node_map();
-        let (new_node, invalid_agent) = map.get_by_id(id).unwrap().unwrap();
+        let node_db = map.raw();
+        let agent_db = map.agent_raw();
+        let raw_data = node_db.get_by_key("name".as_bytes()).unwrap().unwrap();
+        let new_inner_node: OldInnerFromV29BeforeV34 = bincode::DefaultOptions::new()
+            .deserialize(raw_data.as_ref())
+            .expect("deserializable");
 
-        assert!(invalid_agent.is_empty());
-        assert_eq!(new_node.id, id);
+        let mut agents = vec![];
+        for aid in new_inner_node.agents {
+            let mut key = new_inner_node.id.to_be_bytes().to_vec();
+            key.extend(aid.as_bytes());
+            if let Ok(Some(value)) = agent_db.get(&key) {
+                if let Ok(agent) = Agent::from_key_value(&key, value.as_ref()) {
+                    agents.push(agent)
+                }
+            }
+        }
+        let new_node = OldNodeFromV29BeforeV34 {
+            id: new_inner_node.id,
+            name: new_inner_node.name,
+            name_draft: new_inner_node.name_draft,
+            profile: new_inner_node.profile,
+            profile_draft: new_inner_node.profile_draft,
+            agents,
+            giganto: new_inner_node.giganto,
+            creation_time: new_inner_node.creation_time,
+        };
+
+        assert_eq!(new_node.id, 0);
         assert_eq!(new_node.name, "name");
         assert_eq!(new_node.agents.len(), 2);
         assert_eq!(new_node.agents[0].key, "hog");
@@ -4109,4 +4167,151 @@ mod tests {
         let settings = TestSchema::new_with_dir(db_dir, backup_dir);
         assert!(super::migrate_0_30_to_0_34_0(&settings.store).is_ok());
     }
+
+    #[test]
+    fn migrate_0_30_to_0_34_node() {
+        use std::{
+            net::{IpAddr, SocketAddr},
+            str::FromStr,
+        };
+
+        use chrono::Utc;
+
+        use super::migration_structures::{
+            DumpItem, OldInnerFromV29BeforeV34, OldNodeFromV29BeforeV34,
+        };
+        use crate::{
+            collections::Indexed,
+            migration::migration_structures::{GigantoConfig, HogConfig, PigletConfig},
+            tables::{UniqueKey, Value},
+            Agent, AgentKind, AgentStatus, ConnectionlessAgent, NodeProfile,
+        };
+
+        let hog_config = HogConfig {
+            active_protocols: Some(Vec::new()),
+            active_sources: Some(Vec::new()),
+            giganto_publish_srv_addr: Some(SocketAddr::new(
+                IpAddr::from_str("1.1.1.1").unwrap(),
+                3050,
+            )),
+            cryptocurrency_mining_pool: String::new(),
+            log_dir: String::new(),
+            export_dir: String::new(),
+            services_path: String::new(),
+        };
+
+        let hog_agent = Agent {
+            node: 0,
+            key: "hog".to_string(),
+            kind: AgentKind::SemiSupervised,
+            status: AgentStatus::Enabled,
+            config: None,
+            draft: Some(toml::to_string(&hog_config).unwrap().try_into().unwrap()),
+        };
+
+        let piglet_config = PigletConfig {
+            dpdk_args: String::new(),
+            dpdk_input: Vec::new(),
+            dpdk_output: Vec::new(),
+            src_mac: String::new(),
+            dst_mac: String::new(),
+            log_dir: String::new(),
+            dump_dir: String::new(),
+            dump_items: Some(vec![DumpItem::Pcap]),
+            dump_http_content_types: Some(Vec::new()),
+            giganto_ingest_srv_addr: SocketAddr::new(IpAddr::from_str("1.1.1.2").unwrap(), 3030),
+            giganto_name: String::new(),
+            pcap_max_size: 4294967295,
+        };
+
+        let piglet_agent = Agent {
+            node: 0,
+            key: "piglet".to_string(),
+            kind: AgentKind::Sensor,
+            status: AgentStatus::Enabled,
+            config: None,
+            draft: Some(toml::to_string(&piglet_config).unwrap().try_into().unwrap()),
+        };
+
+        let giganto_config = GigantoConfig {
+            ingest_srv_addr: SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), 3030),
+            publish_srv_addr: SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), 3050),
+            graphql_srv_addr: SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), 5050),
+            data_dir: String::new(),
+            log_dir: String::new(),
+            export_dir: String::new(),
+            retention: {
+                let days = u64::from(100_u16);
+                std::time::Duration::from_secs(days * 24 * 60 * 60)
+            },
+            max_open_files: i32::MAX,
+            max_mb_of_level_base: u64::MIN,
+            num_of_thread: i32::MAX,
+            max_sub_compactions: u32::MAX,
+            ack_transmission: u16::MAX,
+        };
+
+        let old_node = OldNodeFromV29BeforeV34 {
+            id: 0,
+            name: "name".to_string(),
+            name_draft: None,
+            profile: None,
+            profile_draft: Some(NodeProfile {
+                customer_id: 20,
+                description: "description".to_string(),
+                hostname: "host".to_string(),
+            }),
+            agents: vec![hog_agent.clone(), piglet_agent.clone()],
+            giganto: Some(ConnectionlessAgent {
+                status: AgentStatus::Enabled,
+                draft: Some(
+                    toml::to_string(&giganto_config)
+                        .unwrap()
+                        .try_into()
+                        .unwrap(),
+                ),
+            }),
+            creation_time: Utc::now(),
+        };
+
+        let settings = TestSchema::new();
+        let map = settings.store.node_map();
+        let node_db = map.raw();
+        let agent_db = map.agent_raw();
+
+        let hog_res = agent_db.insert(hog_agent.unique_key().as_ref(), hog_agent.value().as_ref());
+        assert!(hog_res.is_ok());
+        let piglet_res = agent_db.insert(
+            piglet_agent.unique_key().as_ref(),
+            piglet_agent.value().as_ref(),
+        );
+        assert!(piglet_res.is_ok());
+        let old_inner_node: OldInnerFromV29BeforeV34 = old_node.clone().into();
+        let res = node_db.insert(old_inner_node);
+        assert!(res.is_ok());
+
+        let id = res.unwrap();
+        let (db_dir, backup_dir) = settings.close();
+        let settings = TestSchema::new_with_dir(db_dir, backup_dir);
+
+        assert!(super::migrate_0_35_node(&settings.store).is_ok());
+
+        let map = settings.store.node_map();
+        let (new_node, invalid_agent) = map.get_by_id(id).unwrap().unwrap();
+
+        assert!(invalid_agent.is_empty());
+        assert_eq!(new_node.id, id);
+        assert_eq!(new_node.name, "name");
+        assert_eq!(new_node.agents.len(), 2);
+        assert_eq!(new_node.agents[0].key, "hog");
+        assert!(new_node.agents[0].config.is_none());
+        assert!(new_node.agents[0].draft.is_some());
+        assert_eq!(new_node.agents[1].key, "piglet");
+        assert!(new_node.agents[1].config.is_none());
+        let draft = new_node.agents[1].draft.clone().unwrap();
+        let piglet: PigletConfig = toml::from_str(draft.as_ref()).unwrap();
+        assert!(piglet.dump_items.is_some());
+        assert!(piglet.dump_http_content_types.is_some_and(|v| v.is_empty()));
+        assert!(new_node.ti_container.is_none());
+    }
 }
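Both tests decode stored records with `bincode::DefaultOptions`, matching how `Indexable::value` encodes them in this crate. The distinction matters because `DefaultOptions` uses variable-length integer encoding, while the `bincode::serialize` free function's default is fixed-width, so the two are not byte-compatible. A self-contained round-trip under that assumption (the struct here is an illustrative stand-in, not the crate's `InnerNode`):

```rust
// Round-trip with the bincode configuration the migrations rely on.
use bincode::Options;
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct InnerNodeLike {
    id: u32,
    name: String,
    agents: Vec<String>, // agent keys only; agent bodies live in their own map
}

fn main() -> anyhow::Result<()> {
    let node = InnerNodeLike {
        id: 7,
        name: "name".to_string(),
        agents: vec!["hog".to_string(), "piglet".to_string()],
    };
    let opts = bincode::DefaultOptions::new();
    let bytes = opts.serialize(&node)?;
    let back: InnerNodeLike = opts.deserialize(&bytes)?;
    assert_eq!(node, back);
    // The fixed-width default produces a different, incompatible encoding.
    assert_ne!(bytes, bincode::serialize(&node)?);
    Ok(())
}
```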
diff --git a/src/migration/migration_structures.rs b/src/migration/migration_structures.rs
index a058637..f2f9dba 100644
--- a/src/migration/migration_structures.rs
+++ b/src/migration/migration_structures.rs
@@ -1,4 +1,5 @@
 use std::{
+    borrow::Cow,
     net::{IpAddr, SocketAddr},
     time::Duration,
 };
@@ -8,13 +9,13 @@ use serde::{Deserialize, Serialize};
 use strum_macros::{Display, EnumString};
 
 use crate::{
-    BlockListConnFields, BlockListDnsFields, BlockListHttpFields, BlockListKerberosFields,
-    BlockListNtlmFields, BlockListRdpFields, BlockListSmtpFields, BlockListSshFields,
-    BlockListTlsFields, CryptocurrencyMiningPoolFields, DgaFields, DnsEventFields, EventCategory,
-    ExternalDdosFields, ExtraThreat, FtpBruteForceFields, FtpEventFields, HttpEventFields,
-    HttpThreatFields, LdapBruteForceFields, LdapEventFields, MultiHostPortScanFields,
-    NetworkThreat, PortScanFields, RdpBruteForceFields, RepeatedHttpSessionsFields, TriageScore,
-    WindowsThreat,
+    tables::InnerNode, Agent, BlockListConnFields, BlockListDnsFields, BlockListHttpFields,
+    BlockListKerberosFields, BlockListNtlmFields, BlockListRdpFields, BlockListSmtpFields,
+    BlockListSshFields, BlockListTlsFields, ConnectionlessAgent, CryptocurrencyMiningPoolFields,
+    DgaFields, DnsEventFields, EventCategory, ExternalDdosFields, ExtraThreat, FtpBruteForceFields,
+    FtpEventFields, HttpEventFields, HttpThreatFields, Indexable, LdapBruteForceFields,
+    LdapEventFields, MultiHostPortScanFields, NetworkThreat, NodeProfile, PortScanFields,
+    RdpBruteForceFields, RepeatedHttpSessionsFields, TriageScore, WindowsThreat,
 };
 
 #[derive(Deserialize, Serialize)]
@@ -2102,3 +2103,83 @@ pub struct GigantoConfig {
 
     pub ack_transmission: u16,
 }
+
+#[derive(Clone, Deserialize, Serialize, PartialEq, Debug)]
+pub struct OldNodeFromV29BeforeV34 {
+    pub id: u32,
+    pub name: String,
+    pub name_draft: Option<String>,
+    pub profile: Option<NodeProfile>,
+    pub profile_draft: Option<NodeProfile>,
+    pub agents: Vec<Agent>,
+    pub giganto: Option<ConnectionlessAgent>,
+    pub creation_time: DateTime<Utc>,
+}
+
+#[derive(Clone, Deserialize, Serialize)]
+pub struct OldInnerFromV29BeforeV34 {
+    pub id: u32,
+    pub name: String,
+    pub name_draft: Option<String>,
+    pub profile: Option<NodeProfile>,
+    pub profile_draft: Option<NodeProfile>,
+    pub creation_time: DateTime<Utc>,
+    pub agents: Vec<String>,
+    pub giganto: Option<ConnectionlessAgent>,
+}
+
+impl From<OldNodeFromV29BeforeV34> for OldInnerFromV29BeforeV34 {
+    fn from(input: OldNodeFromV29BeforeV34) -> Self {
+        Self {
+            id: input.id,
+            name: input.name,
+            name_draft: input.name_draft,
+            profile: input.profile,
+            profile_draft: input.profile_draft,
+            creation_time: input.creation_time,
+            agents: input.agents.iter().map(|a| a.key.clone()).collect(),
+            giganto: input.giganto,
+        }
+    }
+}
+
+impl From<OldInnerFromV29BeforeV34> for InnerNode {
+    fn from(input: OldInnerFromV29BeforeV34) -> Self {
+        Self {
+            id: input.id,
+            name: input.name,
+            name_draft: input.name_draft,
+            profile: input.profile,
+            profile_draft: input.profile_draft,
+            agents: input.agents,
+            giganto: input.giganto,
+            ti_container: None,
+            creation_time: input.creation_time,
+        }
+    }
+}
+
+impl Indexable for OldInnerFromV29BeforeV34 {
+    fn key(&self) -> Cow<[u8]> {
+        Cow::from(self.name.as_bytes())
+    }
+
+    fn index(&self) -> u32 {
+        self.id
+    }
+
+    fn make_indexed_key(key: Cow<[u8]>, _index: u32) -> Cow<[u8]> {
+        key
+    }
+
+    fn value(&self) -> Vec<u8> {
+        use bincode::Options;
+        bincode::DefaultOptions::new()
+            .serialize(self)
+            .unwrap_or_default()
+    }
+
+    fn set_index(&mut self, index: u32) {
+        self.id = index;
+    }
+}
diff --git a/src/tables.rs b/src/tables.rs
index 11a56ba..0fbfdb1 100644
--- a/src/tables.rs
+++ b/src/tables.rs
@@ -45,7 +45,7 @@ pub use self::model_indicator::ModelIndicator;
 pub use self::network::{Network, Update as NetworkUpdate};
 pub(crate) use self::node::Inner as InnerNode;
 pub use self::node::{
-    Giganto, Node, Profile as NodeProfile, Table as NodeTable, Update as NodeUpdate,
+    ConnectionlessAgent, Node, Profile as NodeProfile, Table as NodeTable, Update as NodeUpdate,
 };
 pub use self::outlier_info::{Key as OutlierInfoKey, OutlierInfo, Value as OutlierInfoValue};
 pub use self::sampling_policy::{
diff --git a/src/tables/agent.rs b/src/tables/agent.rs
index 0ab2424..b8600ab 100644
--- a/src/tables/agent.rs
+++ b/src/tables/agent.rs
@@ -181,6 +181,10 @@ impl<'d> Table<'d, Agent> {
         Map::open(db, super::AGENTS).map(Table::new)
     }
 
+    pub(crate) fn raw(&self) -> &Map<'_> {
+        &self.map
+    }
+
     /// Returns an agent with the given `node` and `id`.
     ///
     /// # Errors
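The new `raw()` accessor (and its `agent_raw()` counterpart in `src/tables/node.rs` below) exists so migrations can read and write bytes without going through the typed table API: a typed `get` would decode with the *current* `InnerNode` layout and fail on records that are still in an old layout. A standalone sketch of that pattern — `RawMap` is an illustrative stand-in, not the crate's `Map`:

```rust
// The migration, not the map, chooses which historical layout to decode.
use bincode::Options;
use serde::de::DeserializeOwned;

struct RawMap {
    entries: Vec<(Vec<u8>, Vec<u8>)>, // (key, bincode-encoded value)
}

impl RawMap {
    fn decode_all<T: DeserializeOwned>(&self) -> anyhow::Result<Vec<T>> {
        self.entries
            .iter()
            .map(|(_key, value)| {
                bincode::DefaultOptions::new()
                    .deserialize::<T>(value)
                    .map_err(Into::into)
            })
            .collect()
    }
}

fn main() -> anyhow::Result<()> {
    // Pretend this record was written by an older release.
    let old_bytes = bincode::DefaultOptions::new().serialize(&("node0".to_string(), 1_u32))?;
    let map = RawMap {
        entries: vec![(b"node0".to_vec(), old_bytes)],
    };
    // The caller picks the decode type matching the old on-disk layout.
    let decoded: Vec<(String, u32)> = map.decode_all()?;
    assert_eq!(decoded[0].1, 1);
    Ok(())
}
```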
diff --git a/src/tables/node.rs b/src/tables/node.rs
index 76cde77..cf79e6e 100644
--- a/src/tables/node.rs
+++ b/src/tables/node.rs
@@ -14,7 +14,7 @@ use crate::{
 };
 
 #[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq)]
-pub struct Giganto {
+pub struct ConnectionlessAgent {
     pub status: AgentStatus,
     pub draft: Option<Config>,
 }
@@ -27,7 +27,8 @@ pub struct Node {
     pub profile: Option<Profile>,
     pub profile_draft: Option<Profile>,
     pub agents: Vec<Agent>,
-    pub giganto: Option<Giganto>,
+    pub giganto: Option<ConnectionlessAgent>,
+    pub ti_container: Option<ConnectionlessAgent>,
     pub creation_time: DateTime<Utc>,
 }
@@ -38,7 +39,8 @@ pub struct Update {
     pub profile: Option<Profile>,
     pub profile_draft: Option<Profile>,
     pub agents: Vec<Agent>,
-    pub giganto: Option<Giganto>,
+    pub giganto: Option<ConnectionlessAgent>,
+    pub ti_container: Option<ConnectionlessAgent>,
 }
 
 impl UniqueKey for Node {
@@ -58,6 +60,7 @@ impl From<Node> for Update {
             profile_draft: input.profile_draft,
             agents: input.agents,
             giganto: input.giganto,
+            ti_container: input.ti_container,
         }
     }
 }
@@ -112,6 +115,10 @@ impl<'d> Table<'d> {
         self.node.raw()
     }
 
+    pub(crate) fn agent_raw(&self) -> &Map<'_> {
+        self.agent.raw()
+    }
+
     /// Returns the total count of nodes available.
     ///
     /// # Errors
@@ -149,6 +156,7 @@ impl<'d> Table<'d> {
             profile_draft: inner.profile_draft,
             agents,
             giganto: inner.giganto,
+            ti_container: inner.ti_container,
             creation_time: inner.creation_time,
         };
         Ok(Some((node, invalid_agents)))
@@ -169,6 +177,7 @@ impl<'d> Table<'d> {
             creation_time: entry.creation_time,
             agents: entry.agents.iter().map(|a| a.key.clone()).collect(),
             giganto: entry.giganto,
+            ti_container: entry.ti_container,
         };
 
         let node = self.node.put(inner)?;
@@ -256,6 +265,7 @@ impl<'d> Table<'d> {
             profile_draft: old.profile_draft.clone(),
             agents: old.agents.iter().map(|a| a.key.clone()).collect(),
             giganto: old.giganto.clone(),
+            ti_container: old.ti_container.clone(),
         };
 
         let new_inner = InnerUpdate {
@@ -265,6 +275,7 @@ impl<'d> Table<'d> {
             profile_draft: new.profile_draft.clone(),
             agents: new.agents.iter().map(|a| a.key.clone()).collect(),
             giganto: new.giganto.clone(),
+            ti_container: new.ti_container.clone(),
         };
 
         self.node.update(id, &old_inner, &new_inner)
@@ -297,6 +308,7 @@ impl Iterator for TableIter<'_> {
                     profile_draft: inner.profile_draft,
                     agents,
                     giganto: inner.giganto,
+                    ti_container: inner.ti_container,
                     creation_time: inner.creation_time,
                 }
             })
@@ -313,15 +325,15 @@ pub struct Profile {
 
 #[derive(Clone, Deserialize, Serialize)]
 pub(crate) struct Inner {
-    id: u32,
-    name: String,
-    name_draft: Option<String>,
-    profile: Option<Profile>,
-    profile_draft: Option<Profile>,
-    creation_time: DateTime<Utc>,
-
-    agents: Vec<String>,
-    giganto: Option<Giganto>,
+    pub id: u32,
+    pub name: String,
+    pub name_draft: Option<String>,
+    pub profile: Option<Profile>,
+    pub profile_draft: Option<Profile>,
+    pub creation_time: DateTime<Utc>,
+    pub agents: Vec<String>,
+    pub giganto: Option<ConnectionlessAgent>,
+    pub ti_container: Option<ConnectionlessAgent>,
 }
 
 impl FromKeyValue for Inner {
@@ -374,7 +386,8 @@ struct InnerUpdate {
     pub profile: Option<Profile>,
     pub profile_draft: Option<Profile>,
     pub agents: Vec<String>,
-    pub giganto: Option<Giganto>,
+    pub giganto: Option<ConnectionlessAgent>,
+    pub ti_container: Option<ConnectionlessAgent>,
 }
 
 impl From<Update> for InnerUpdate {
@@ -386,6 +399,7 @@ impl From<Update> for InnerUpdate {
             profile_draft: input.profile_draft,
             agents: input.agents,
             giganto: input.giganto,
+            ti_container: input.ti_container,
         }
     }
 }
@@ -406,6 +420,7 @@ impl IndexedMapUpdate for InnerUpdate {
         value.profile_draft.clone_from(&self.profile_draft);
         value.agents.clone_from(&self.agents);
         value.giganto.clone_from(&self.giganto);
+        value.ti_container.clone_from(&self.ti_container);
 
         Ok(value)
     }
@@ -427,7 +442,10 @@ impl IndexedMapUpdate for InnerUpdate {
         if self.agents != value.agents {
             return false;
         }
-        self.giganto == value.giganto
+        if self.giganto != value.giganto {
+            return false;
+        }
+        self.ti_container == value.ti_container
     }
 }
@@ -495,6 +513,7 @@ mod test {
             agents,
             creation_time,
             giganto: None,
+            ti_container: None,
         }
     }
@@ -686,7 +705,8 @@ mod test {
            profile: Some(profile.clone()),
            profile_draft: Some(profile.clone()),
            agents: agents[1..].to_vec(),
-           giganto: Some(Giganto::default()),
+           giganto: Some(ConnectionlessAgent::default()),
+           ti_container: Some(ConnectionlessAgent::default()),
        };
 
        let old = node.clone().into();
@@ -702,7 +722,8 @@ mod test {
        node.profile = Some(profile.clone());
        node.profile_draft = Some(profile.clone());
        node.agents = node.agents.into_iter().skip(1).collect();
-       node.giganto = Some(ConnectionlessAgent::default());
+       node.giganto = Some(ConnectionlessAgent::default());
+       node.ti_container = Some(ConnectionlessAgent::default());
 
        assert_eq!(updated, node);
    }
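With the storage layer in place, a caller flips the new slot on through the same compare-and-update path as any other node edit. A hypothetical sketch — `NodeTable::update(id, &old, &new)` and `Node: Clone` are assumptions inferred from the hunks above, not APIs this patch exhibits directly:

```rust
// Hypothetical usage; see the lead-in for which names are assumed.
use review_database::{ConnectionlessAgent, Node, NodeTable, NodeUpdate};

fn enable_ti_container(table: &NodeTable<'_>, id: u32, node: Node) -> anyhow::Result<()> {
    let old: NodeUpdate = node.clone().into();

    let mut modified = node;
    // The new field added by this patch; `giganto` works the same way.
    modified.ti_container = Some(ConnectionlessAgent::default());
    let new: NodeUpdate = modified.into();

    // Compares `old` against what is stored, then writes `new`.
    table.update(id, &old, &new)?;
    Ok(())
}
```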