diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..d491081 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.pem binary diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d208e5..737f446 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ file is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased] +## [0.16.0] - 2024-01-08 ### Added @@ -19,14 +19,10 @@ Versioning](https://semver.org/spec/v2.0.0.html). - Added `set_ack_transmission_count` GraphQL query to set the ack transmission count. This query changes the `AckTransmissionCount` used in ingest and `ack_transmission` in the config file to the input `count` value. -- Added giganto cluster support for GraphQL requests. - Added documentation for implementing cluster-supported GraphQL APIs in `docs/guide-giganto-cluster-graphql.md`. - Added `ConvertGraphQLEdgesNode` derive macro that implements the `From` trait from GraphQL client structs to project structs. -- Added `request_from_peer: Option` argument to GraphQL endpoints: - `netflow5_raw_events`, `netflow9_raw_events`, `secu_log_raw_events`, - `statistics`. - Supported `log-broker` to send/receive operation logs with the Redis server. - Set the Redis server with `redis_log_address`, `redis_log_agent_id` and `redis_log_fetch_interval` in configuration options. @@ -42,8 +38,13 @@ Versioning](https://semver.org/spec/v2.0.0.html). mode can also be read correctly. - Changed `export` GraphQL query's response value format from `{export_path}` to `{export_path}@{giganto_node_name}` -- Changed `PEER_VERSION_REQ` to ">=0.16.0-alpha.1,<0.17.0" - Changed logging from `tracing` to `log-broker`. +- Changed `PEER_VERSION_REQ` to ">=0.16.0,<0.17.0" +- Changed `PUBLISH_VERSION_REQ` to ">=0.16.0,<0.17.0" +- Added giganto cluster support for GraphQL and publish message requests. +- Added `request_from_peer: Option` argument to GraphQL endpoints: + `netflow5_raw_events`, `netflow9_raw_events`, `secu_log_raw_events`, + `statistics`. ### Fixed @@ -400,7 +401,7 @@ Versioning](https://semver.org/spec/v2.0.0.html). - Initial release.
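Taken together, the cluster entries above mean a caller can now ask one giganto node to forward a request to the peer node that actually ingested the source, via the new `request_from_peer` argument. A minimal client-side sketch; the URL, port, field casing (`requestFromPeer`), and the query shape are illustrative assumptions, and only the endpoint names come from this changelog:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Hypothetical GraphQL call against a giganto node; `requestFromPeer: true`
    // asks this node to relay the query to the peer in charge of the source.
    let body = json!({
        "query": "query { statistics(requestFromPeer: true) }"
    });
    let resp = reqwest::Client::new()
        .post("http://127.0.0.1:8442/graphql") // address is an assumption
        .json(&body)
        .send()
        .await?;
    println!("{}", resp.text().await?);
    Ok(())
}
```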
-[Unreleased]: +[0.16.0]: [0.15.4]: [0.15.3]: [0.15.2]: diff --git a/Cargo.lock b/Cargo.lock index 7bae634..9158d82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -607,6 +607,19 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.3", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" version = "2.5.0" @@ -939,7 +952,7 @@ dependencies = [ [[package]] name = "giganto" -version = "0.16.0-alpha.1" +version = "0.16.0" dependencies = [ "anyhow", "async-graphql", @@ -976,6 +989,7 @@ dependencies = [ "semver", "serde", "serde_json", + "serial_test", "syn 2.0.48", "tempfile", "tokio", @@ -1404,9 +1418,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libloading" @@ -1719,20 +1733,20 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.0.0", "proc-macro2", "quote", "syn 2.0.48", @@ -2082,18 +2096,18 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +checksum = "6b2685dd208a3771337d8d386a89840f0f43cd68be8dae90a5f8c2384effc9cd" dependencies = [ - "toml_edit 0.20.7", + "toml_edit 0.21.0", ] [[package]] name = "proc-macro2" -version = "1.0.75" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" dependencies = [ "unicode-ident", ] @@ -2551,18 +2565,18 @@ checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" [[package]] name = "serde" -version = "1.0.194" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.194" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" dependencies = [ "proc-macro2", "quote", 
@@ -2634,6 +2648,31 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serial_test" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "sha1" version = "0.10.6" @@ -3054,17 +3093,6 @@ dependencies = [ "winnow", ] -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.1.0", - "toml_datetime", - "winnow", -] - [[package]] name = "toml_edit" version = "0.21.0" @@ -3700,9 +3728,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.32" +version = "0.5.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8434aeec7b290e8da5c3f0d628cb0eac6cabcb31d14bb74f779a08109a5914d6" +checksum = "b7520bbdec7211caa7c4e682eb1fbe07abe20cee6756b6e00f537c82c11816aa" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 75d5ff4..d1e0c29 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "giganto" -version = "0.16.0-alpha.1" +version = "0.16.0" edition = "2021" [lib] @@ -52,10 +52,11 @@ warp = { version = "0.3", features = ["tls"] } x509-parser = "0.15" [dev-dependencies] +mockito = "1.2.0" +regex = "1" +serial_test = "2.0.0" tempfile = "3" url = "2" -regex = "1" -mockito = "1.2.0" [features] default = ["benchmark"] diff --git a/src/graphql/packet.rs b/src/graphql/packet.rs index f8c524f..aeb3b27 100644 --- a/src/graphql/packet.rs +++ b/src/graphql/packet.rs @@ -71,6 +71,7 @@ impl RawEventFilter for PacketFilter { #[allow(clippy::struct_field_names)] #[derive(SimpleObject, ConvertGraphQLEdgesNode)] #[graphql_client_type(names = [packets::PacketsPacketsEdgesNode, ])] +#[allow(clippy::struct_field_names)] struct Packet { request_time: DateTime, packet_time: DateTime, diff --git a/src/ingest.rs b/src/ingest.rs index 9a98931..09d9733 100644 --- a/src/ingest.rs +++ b/src/ingest.rs @@ -4,7 +4,7 @@ mod tests; use crate::publish::send_direct_stream; use crate::server::{ - certificate_info, config_server, extract_cert_from_conn, SERVER_CONNNECTION_DELAY, + certificate_info, config_server, extract_cert_from_conn, Certs, SERVER_CONNNECTION_DELAY, SERVER_ENDPOINT_DELAY, }; use crate::storage::{Database, RawEventStore, StorageKey}; @@ -27,7 +27,6 @@ use giganto_client::{ }; use log_broker::{error, info, LogLocation}; use quinn::{Endpoint, RecvStream, SendStream, ServerConfig}; -use rustls::{Certificate, PrivateKey}; use std::sync::atomic::AtomicU16; use std::{ net::SocketAddr, @@ -68,14 +67,9 @@ pub struct Server { } impl Server { - pub fn new( - addr: SocketAddr, - certs: Vec, - key: PrivateKey, - files: Vec>, - ) -> Self { - let server_config = config_server(certs, key, files) - .expect("server configuration error with cert, key or root"); + pub fn new(addr: SocketAddr, certs: &Arc) -> Self { + let server_config = + config_server(certs).expect("server configuration error with cert, key or 
root"); Server { server_config, server_address: addr, diff --git a/src/ingest/tests.rs b/src/ingest/tests.rs index 5b4713e..b963863 100644 --- a/src/ingest/tests.rs +++ b/src/ingest/tests.rs @@ -2,7 +2,7 @@ use super::Server; use crate::{ new_ingest_sources, new_pcap_sources, new_stream_direct_channels, storage::{Database, DbOptions}, - to_cert_chain, to_private_key, + to_cert_chain, to_private_key, to_root_cert, Certs, }; use base64::{engine::general_purpose::STANDARD as base64_engine, Engine}; use chrono::{Duration, Utc}; @@ -22,6 +22,7 @@ use giganto_client::{ RawEventKind, }; use quinn::{Connection, Endpoint}; +use std::path::PathBuf; use std::{ fs, net::{IpAddr, Ipv6Addr, SocketAddr}, @@ -40,10 +41,10 @@ fn get_token() -> &'static Mutex { TOKEN.get_or_init(|| Mutex::new(0)) } -const CERT_PATH: &str = "tests/cert.pem"; -const KEY_PATH: &str = "tests/key.pem"; -const CA_CERT_PATH: &str = "tests/root.pem"; -const HOST: &str = "localhost"; +const CERT_PATH: &str = "tests/certs/node1/cert.pem"; +const KEY_PATH: &str = "tests/certs/node1/key.pem"; +const CA_CERT_PATH: &str = "tests/certs/root.pem"; +const HOST: &str = "node1"; const TEST_PORT: u16 = 60190; const PROTOCOL_VERSION: &str = "0.15.2"; @@ -75,13 +76,18 @@ fn server() -> Server { let cert = to_cert_chain(&cert_pem).unwrap(); let key_pem = fs::read(KEY_PATH).unwrap(); let key = to_private_key(&key_pem).unwrap(); - let ca_cert = fs::read("tests/root.pem").unwrap(); + let ca_cert_path: Vec = vec![PathBuf::from(CA_CERT_PATH)]; + let ca_certs = to_root_cert(&ca_cert_path).unwrap(); + + let certs = Arc::new(Certs { + certs: cert, + key, + ca_certs, + }); Server::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), TEST_PORT), - cert, - key, - vec![ca_cert], + &certs, ) } diff --git a/src/main.rs b/src/main.rs index 51bcc06..7029148 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,13 +11,13 @@ mod web; use crate::{ graphql::NodeName, redis::fetch_and_store_op_logs, - server::{certificate_info, SERVER_REBOOT_DELAY}, + server::{certificate_info, Certs, SERVER_REBOOT_DELAY}, storage::migrate_data_dir, }; use anyhow::{anyhow, Context, Result}; use chrono::{DateTime, Utc}; use log_broker::{error, info, init_redis_connection, init_tracing, warn, LogLocation}; -use peer::{PeerInfo, Peers}; +use peer::{PeerIdentity, PeerIdents, PeerInfo, Peers}; use quinn::Connection; use rocksdb::DB; use rustls::{Certificate, PrivateKey}; @@ -25,6 +25,7 @@ use settings::Settings; use std::{ collections::{HashMap, HashSet}, env, fs, + path::PathBuf, process::exit, sync::Arc, time::{Duration, Instant}, @@ -78,6 +79,12 @@ async fn main() -> Result<()> { ) })?; let key = to_private_key(&key_pem).context("cannot read private key")?; + let root_cert = to_root_cert(&settings.roots)?; + let certs = Arc::new(Certs { + certs: cert.clone(), + key: key.clone(), + ca_certs: root_cert.clone(), + }); let _guard = init_tracing(&settings.log_dir, env!("CARGO_PKG_NAME"))?; @@ -105,12 +112,6 @@ async fn main() -> Result<()> { } let database = storage::Database::open(&db_path, &db_options)?; - let mut files: Vec> = Vec::new(); - for root in &settings.roots { - let file = fs::read(root).expect("Failed to read file"); - files.push(file); - } - if let Err(e) = migrate_data_dir(&settings.data_dir, &database) { error!(LogLocation::Both, "migration failed: {e}"); return Ok(()); @@ -132,7 +133,7 @@ async fn main() -> Result<()> { let pcap_sources = new_pcap_sources(); let ingest_sources = new_ingest_sources(); let stream_direct_channels = new_stream_direct_channels(); - let peers = 
new_peers(); + let (peers, peer_idents) = new_peers_data(settings.peers); let notify_config_reload = Arc::new(Notify::new()); let notify_shutdown = Arc::new(Notify::new()); let mut notify_source_change = None; @@ -166,14 +167,12 @@ async fn main() -> Result<()> { )); if let Some(peer_address) = settings.peer_address { - let peer_server = - peer::Peer::new(peer_address, cert.clone(), key.clone(), files.clone())?; + let peer_server = peer::Peer::new(peer_address, &certs.clone())?; let notify_source = Arc::new(Notify::new()); - let peer_identities = settings.peers.unwrap_or_else(HashSet::new); task::spawn(peer_server.run( - peer_identities, ingest_sources.clone(), peers.clone(), + peer_idents.clone(), notify_source.clone(), notify_shutdown.clone(), settings.cfg_path.clone(), @@ -181,25 +180,19 @@ async fn main() -> Result<()> { notify_source_change = Some(notify_source); } - let publish_server = publish::Server::new( - settings.publish_address, - cert.clone(), - key.clone(), - files.clone(), - ); + let publish_server = publish::Server::new(settings.publish_address, &certs.clone()); task::spawn(publish_server.run( database.clone(), pcap_sources.clone(), stream_direct_channels.clone(), + ingest_sources.clone(), + peers.clone(), + peer_idents.clone(), + certs.clone(), notify_shutdown.clone(), )); - let ingest_server = ingest::Server::new( - settings.ingest_address, - cert.clone(), - key.clone(), - files.clone(), - ); + let ingest_server = ingest::Server::new(settings.ingest_address, &certs.clone()); task::spawn(ingest_server.run( database.clone(), pcap_sources, @@ -305,6 +298,27 @@ fn to_private_key(pem: &[u8]) -> Result { } } +fn to_root_cert(root_cert_paths: &Vec) -> Result { + let mut root_files: Vec> = Vec::new(); + for root in root_cert_paths { + let file = fs::read(root).expect("Failed to read file"); + root_files.push(file); + } + + let mut root_cert = rustls::RootCertStore::empty(); + for file in root_files { + let root_certs: Vec = rustls_pemfile::certs(&mut &*file) + .context("invalid PEM-encoded certificate")? 
+ .into_iter() + .map(rustls::Certificate) + .collect(); + if let Some(cert) = root_certs.first() { + root_cert.add(cert).context("failed to add root cert")?; + } + } + Ok(root_cert) +} + fn to_hms(dur: Duration) -> String { let total_sec = dur.as_secs(); let hours = total_sec / 3600; @@ -332,6 +346,9 @@ fn new_ack_transmission_count(count: u16) -> AckTransmissionCount { Arc::new(RwLock::new(count)) } -fn new_peers() -> Peers { - Arc::new(RwLock::new(HashMap::::new())) +fn new_peers_data(peers_list: Option>) -> (Peers, PeerIdents) { + ( + Arc::new(RwLock::new(HashMap::::new())), + Arc::new(RwLock::new(peers_list.unwrap_or_default())), + ) } diff --git a/src/peer.rs b/src/peer.rs index e7b231a..bea6bc7 100644 --- a/src/peer.rs +++ b/src/peer.rs @@ -4,7 +4,7 @@ use crate::{ TomlPeers, CONFIG_GRAPHQL_ADDRESS, CONFIG_PUBLISH_ADDRESS, }, server::{ - certificate_info, config_client, config_server, extract_cert_from_conn, + certificate_info, config_client, config_server, extract_cert_from_conn, Certs, SERVER_CONNNECTION_DELAY, SERVER_ENDPOINT_DELAY, }, IngestSources, @@ -19,7 +19,6 @@ use num_enum::{IntoPrimitive, TryFromPrimitive}; use quinn::{ ClientConfig, Connection, ConnectionError, Endpoint, RecvStream, SendStream, ServerConfig, }; -use rustls::{Certificate, PrivateKey}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use std::{ collections::{HashMap, HashSet}, @@ -38,10 +37,12 @@ use tokio::{ }; use toml_edit::Document; -const PEER_VERSION_REQ: &str = ">=0.16.0-alpha.1,<0.17.0"; +const PEER_VERSION_REQ: &str = ">=0.16.0,<0.17.0"; const PEER_RETRY_INTERVAL: u64 = 5; pub type Peers = Arc>>; +#[allow(clippy::module_name_repetitions)] +pub type PeerIdents = Arc>>; #[allow(clippy::module_name_repetitions)] #[derive(Deserialize, Serialize, Debug, Default)] @@ -86,7 +87,7 @@ pub struct PeerConns { peer_conns: Arc>>, // `peer_identities` is in sync with config toml's `peers`; // e.g. { PeerIdentity {"node2", "1.2.3.2:38384"}, PeerIdentity {"node1", "1.2.3.1:38384"}, } - peer_identities: Arc>>, + peer_identities: PeerIdents, ingest_sources: IngestSources, // Key string is peer's address(without port); Value is `ingest_sources`, `graphql_port`, // and `publish_port` belonging to that peer; @@ -107,19 +108,14 @@ pub struct Peer { } impl Peer { - pub fn new( - local_address: SocketAddr, - certs: Vec, - key: PrivateKey, - files: Vec>, - ) -> Result { - let (_, local_host_name) = certificate_info(&certs)?; + pub fn new(local_address: SocketAddr, certs: &Arc) -> Result { + let (_, local_host_name) = certificate_info(certs.certs.as_slice())?; - let server_config = config_server(certs.clone(), key.clone(), files.clone()) - .expect("server configuration error with cert, key or root"); + let server_config = + config_server(certs).expect("server configuration error with cert, key or root"); - let client_config = config_client(certs, key, files) - .expect("client configuration error with cert, key or root"); + let client_config = + config_client(certs).expect("client configuration error with cert, key or root"); Ok(Peer { client_config, @@ -131,9 +127,9 @@ impl Peer { pub async fn run( self, - peer_identities: HashSet, ingest_sources: IngestSources, peers: Peers, + peer_idents: PeerIdents, notify_source: Arc, notify_shutdown: Arc, config_path: String, @@ -164,7 +160,7 @@ impl Peer { // A structure of values common to peer connections. 
let peer_conn_info = PeerConns { peer_conns: Arc::new(RwLock::new(HashMap::new())), - peer_identities: Arc::new(RwLock::new(peer_identities)), + peer_identities: peer_idents, peers, ingest_sources, peer_sender: sender, @@ -751,7 +747,8 @@ pub mod tests { use super::Peer; use crate::{ peer::{receive_peer_data, request_init_info, PeerCode, PeerIdentity}, - to_cert_chain, to_private_key, PeerInfo, + server::Certs, + to_cert_chain, to_private_key, to_root_cert, PeerInfo, }; use chrono::Utc; use giganto_client::connection::client_handshake; @@ -760,7 +757,7 @@ pub mod tests { collections::{HashMap, HashSet}, fs::{self, File}, net::{IpAddr, Ipv6Addr, SocketAddr}, - path::Path, + path::{Path, PathBuf}, sync::{Arc, OnceLock}, }; use tempfile::TempDir; @@ -772,12 +769,12 @@ pub mod tests { TOKEN.get_or_init(|| Mutex::new(0)) } - const CERT_PATH: &str = "tests/cert.pem"; - const KEY_PATH: &str = "tests/key.pem"; - const CA_CERT_PATH: &str = "tests/root.pem"; - const HOST: &str = "localhost"; + const CERT_PATH: &str = "tests/certs/node1/cert.pem"; + const KEY_PATH: &str = "tests/certs/node1/key.pem"; + const CA_CERT_PATH: &str = "tests/certs/root.pem"; + const HOST: &str = "node1"; const TEST_PORT: u16 = 60191; - const PROTOCOL_VERSION: &str = "0.16.0-alpha.1"; + const PROTOCOL_VERSION: &str = "0.16.0"; pub struct TestClient { send: SendStream, @@ -884,13 +881,18 @@ pub mod tests { let cert = to_cert_chain(&cert_pem).unwrap(); let key_pem = fs::read(KEY_PATH).unwrap(); let key = to_private_key(&key_pem).unwrap(); - let ca_cert = fs::read("tests/root.pem").unwrap(); + let ca_cert_path: Vec = vec![PathBuf::from(CA_CERT_PATH)]; + let ca_certs = to_root_cert(&ca_cert_path).unwrap(); + + let certs = Arc::new(Certs { + certs: cert, + key, + ca_certs, + }); Peer::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), TEST_PORT), - cert, - key, - vec![ca_cert], + &certs, ) .unwrap() } @@ -907,6 +909,7 @@ pub mod tests { address: peer_addr, host_name: peer_name.clone(), }); + let peer_idents = Arc::new(RwLock::new(peer_identities)); // peer server's source list let source_name = String::from("einsis_source"); @@ -924,9 +927,9 @@ pub mod tests { // run peer tokio::spawn(peer_init().run( - peer_identities, ingest_sources.clone(), peers, + peer_idents, notify_source.clone(), Arc::new(Notify::new()), file_path.to_str().unwrap().to_string(), diff --git a/src/publish.rs b/src/publish.rs index c0e1589..c6cc5c1 100644 --- a/src/publish.rs +++ b/src/publish.rs @@ -5,14 +5,31 @@ mod tests; use self::implement::RequestStreamMessage; use crate::graphql::TIMESTAMP_SIZE; use crate::ingest::{implement::EventFilter, NetworkKey}; +use crate::peer::{PeerIdents, Peers}; use crate::server::{ - certificate_info, config_server, extract_cert_from_conn, SERVER_CONNNECTION_DELAY, - SERVER_ENDPOINT_DELAY, + certificate_info, config_client, config_server, extract_cert_from_conn, Certs, + SERVER_CONNNECTION_DELAY, SERVER_ENDPOINT_DELAY, }; use crate::storage::{Database, Direction, RawEventStore, StorageKey}; -use crate::{PcapSources, StreamDirectChannels}; +use crate::{IngestSources, PcapSources, StreamDirectChannels}; use anyhow::{anyhow, bail, Context, Result}; use chrono::{TimeZone, Utc}; +use giganto_client::connection::client_handshake; +use giganto_client::frame::send_raw; +use giganto_client::ingest::log::Log; +use giganto_client::ingest::netflow::{Netflow5, Netflow9}; +use giganto_client::ingest::network::{ + Conn, DceRpc, Dns, Ftp, Http, Kerberos, Ldap, Mqtt, Nfs, Ntlm, Rdp, Smb, Smtp, Ssh, Tls, +}; +use 
giganto_client::ingest::sysmon::{ + DnsEvent, FileCreate, FileCreateStreamHash, FileCreationTimeChanged, FileDelete, + FileDeleteDetected, ImageLoaded, NetworkConnection, PipeEvent, ProcessCreate, ProcessTampering, + ProcessTerminated, RegistryKeyValueRename, RegistryValueSet, +}; +use giganto_client::ingest::timeseries::PeriodicTimeSeries; +use giganto_client::publish::{ + receive_range_data, recv_ack_response, send_range_data_request, PublishError, +}; use giganto_client::{ connection::server_handshake, frame, @@ -28,8 +45,9 @@ use giganto_client::{ }; use log_broker::{debug, error, info, warn, LogLocation}; use quinn::{Connection, Endpoint, RecvStream, SendStream, ServerConfig}; -use rustls::{Certificate, PrivateKey}; use serde::{de::DeserializeOwned, Serialize}; +use std::collections::{HashMap, HashSet}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; use std::{net::SocketAddr, sync::Arc, time::Duration}; use tokio::{ @@ -38,7 +56,7 @@ use tokio::{ time::sleep, }; -const PUBLISH_VERSION_REQ: &str = ">=0.15.0,<0.17.0"; +const PUBLISH_VERSION_REQ: &str = ">=0.16.0,<0.17.0"; pub struct Server { server_config: ServerConfig, @@ -46,25 +64,25 @@ pub struct Server { } impl Server { - pub fn new( - addr: SocketAddr, - certs: Vec, - key: PrivateKey, - files: Vec>, - ) -> Self { - let server_config = config_server(certs, key, files) - .expect("server configuration error with cert, key or root"); + pub fn new(addr: SocketAddr, certs: &Arc) -> Self { + let server_config = + config_server(certs).expect("server configuration error with cert, key or root"); Server { server_config, server_address: addr, } } + #[allow(clippy::too_many_arguments)] pub async fn run( self, db: Database, pcap_sources: PcapSources, stream_direct_channels: StreamDirectChannels, + ingest_sources: IngestSources, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, notify_shutdown: Arc, ) { let endpoint = Endpoint::server(self.server_config, self.server_address).expect("endpoint"); @@ -81,12 +99,20 @@ impl Server { let pcap_sources = pcap_sources.clone(); let stream_direct_channels = stream_direct_channels.clone(); let notify_shutdown = notify_shutdown.clone(); + let ingest_sources = ingest_sources.clone(); + let peers = peers.clone(); + let peer_idents = peer_idents.clone(); + let certs = certs.clone(); tokio::spawn(async move { if let Err(e) = handle_connection( conn, db, pcap_sources, stream_direct_channels, + ingest_sources, + peers, + peer_idents, + certs, notify_shutdown ) .await @@ -106,11 +132,16 @@ impl Server { } } +#[allow(clippy::too_many_arguments)] async fn handle_connection( conn: quinn::Connecting, db: Database, pcap_sources: PcapSources, stream_direct_channels: StreamDirectChannels, + ingest_sources: IngestSources, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, notify_shutdown: Arc, ) -> Result<()> { let connection = conn.await?; @@ -127,15 +158,22 @@ async fn handle_connection( } }; let (_, source) = certificate_info(&extract_cert_from_conn(&connection)?)?; - tokio::spawn(request_stream( - connection.clone(), - db.clone(), - send, - recv, - source, - pcap_sources.clone(), - stream_direct_channels.clone(), - )); + + tokio::spawn({ + let certs = certs.clone(); + request_stream( + connection.clone(), + db.clone(), + send, + recv, + source, + pcap_sources.clone(), + stream_direct_channels.clone(), + peers.clone(), + peer_idents.clone(), + certs, + ) + }); loop { select! 
{ @@ -152,8 +190,12 @@ let db = db.clone(); let pcap_sources = pcap_sources.clone(); + let ingest_sources = ingest_sources.clone(); + let peers = peers.clone(); + let peer_idents = peer_idents.clone(); + let certs = certs.clone(); tokio::spawn(async move { - if let Err(e) = handle_request(stream, db, pcap_sources).await { + if let Err(e) = handle_request(stream, db, pcap_sources, ingest_sources, peers, peer_idents, certs).await { error!(LogLocation::Both, "failed: {}", e); } }); @@ -168,6 +210,7 @@ } } +#[allow(clippy::too_many_arguments)] async fn request_stream( connection: Connection, stream_db: Database, @@ -176,6 +219,9 @@ conn_source: String, pcap_sources: PcapSources, stream_direct_channels: StreamDirectChannels, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, ) -> Result<()> { loop { match receive_stream_request(&mut recv).await { @@ -185,7 +231,15 @@ let source = conn_source.clone(); let stream_direct_channels = stream_direct_channels.clone(); if record_type == RequestStreamRecord::Pcap { - process_pcap_extract(&raw_data, pcap_sources.clone(), &mut send).await?; + process_pcap_extract( + &raw_data, + pcap_sources.clone(), + peers.clone(), + peer_idents.clone(), + certs.clone(), + &mut send, + ) + .await?; } else { tokio::spawn(async move { match node_type { @@ -257,6 +311,9 @@ async fn process_pcap_extract( filter_data: &[u8], pcap_sources: PcapSources, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, resp_send: &mut SendStream, ) -> Result<()> { let mut buf = Vec::new(); @@ -275,25 +332,69 @@ } }; + let certs = certs.clone(); tokio::spawn(async move { for filter in filters { - if let Some(source_conn) = pcap_sources.read().await.get(&filter.source) { + if let Some(source_conn) = + get_pcap_conn_if_current_giganto_in_charge(pcap_sources.clone(), &filter.source) + .await + { // send/receive extract request from piglet - match pcap_extract_request(source_conn, &filter).await { + match pcap_extract_request(&source_conn, &filter).await { Ok(()) => (), Err(e) => debug!(LogLocation::Local, "failed to relay pcap request, {e}"), } + } else if let Some(peer_addr) = + peer_in_charge_publish_addr(peers.clone(), &filter.source).await + { + let peer_name: String = { + let peer_idents_guard = peer_idents.read().await; + let peer_ident = peer_idents_guard + .iter() + .find(|idents| idents.address.eq(&peer_addr)); + + if let Some(peer_ident) = peer_ident { + peer_ident.host_name.clone() + } else { + error!(LogLocation::Both, "Peer giganto's server name cannot be identified. addr: {peer_addr}, source: {}", filter.source); + continue; + } + }; + if let Ok((mut _peer_send, mut peer_recv)) = request_range_data_to_peer( + peer_addr, + peer_name.as_str(), + certs.clone(), + MessageCode::Pcap, + filter, + ) + .await + { + if let Err(e) = recv_ack_response(&mut peer_recv).await { + error!(LogLocation::Both, "Failed to receive ack response from peer giganto. addr: {peer_addr} name: {peer_name} {e}"); + } + } else { + error!(LogLocation::Both, "Failed to connect to peer giganto's publish module.
addr: {peer_addr} name: {peer_name}"); + } } else { error!( LogLocation::Both, - "Failed to get {}'s connection", filter.source + "Neither current nor peer gigantos are in charge of requested pcap source {}", + filter.source ); } } }); + Ok(()) } +async fn get_pcap_conn_if_current_giganto_in_charge( + pcap_sources: PcapSources, + source: &String, +) -> Option { + pcap_sources.read().await.get(source).cloned() +} + #[allow(clippy::too_many_arguments, clippy::too_many_lines)] async fn process_stream( db: Database, @@ -834,324 +935,461 @@ async fn handle_request( (mut send, mut recv): (SendStream, RecvStream), db: Database, pcap_sources: PcapSources, + ingest_sources: IngestSources, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, ) -> Result<()> { let (msg_type, msg_buf) = receive_range_data_request(&mut recv).await?; match msg_type { MessageCode::ReqRange => { let msg = bincode::deserialize::(&msg_buf) .map_err(|e| anyhow!("Failed to deserialize message: {}", e))?; + match RawEventKind::from_str(msg.kind.as_str()).unwrap_or_default() { RawEventKind::Conn => { - process_range_data( + process_range_data::( &mut send, db.conn_store().context("Failed to open conn store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Dns => { - process_range_data( + process_range_data::( &mut send, db.dns_store().context("Failed to open dns store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Rdp => { - process_range_data( + process_range_data::( &mut send, db.rdp_store().context("Failed to open rdp store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Http => { - process_range_data( + process_range_data::( &mut send, db.http_store().context("Failed to open http store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Smtp => { - process_range_data( + process_range_data::( &mut send, db.smtp_store().context("Failed to open smtp store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Log => { - process_range_data( + process_range_data::( &mut send, db.log_store().context("Failed to open log store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), true, ) .await?; } RawEventKind::Ntlm => { - process_range_data( + process_range_data::( &mut send, db.ntlm_store().context("Failed to open ntlm store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Kerberos => { - process_range_data( + process_range_data::( &mut send, db.kerberos_store() .context("Failed to open kerberos store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Ssh => { - process_range_data( + process_range_data::( &mut send, db.ssh_store().context("Failed to open ssh store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::DceRpc => { - process_range_data( + process_range_data::( &mut send, db.dce_rpc_store().context("Failed to open dce rpc store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Ftp => { - process_range_data( + process_range_data::( &mut send, db.ftp_store().context("Failed to open ftp store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Mqtt => { - process_range_data( + process_range_data::( 
&mut send, db.mqtt_store().context("Failed to open mqtt store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::PeriodicTimeSeries => { - process_range_data( + process_range_data::( &mut send, db.periodic_time_series_store() .context("Failed to open periodic time series storage")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Ldap => { - process_range_data( + process_range_data::( &mut send, db.ldap_store().context("Failed to open ldap store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Tls => { - process_range_data( + process_range_data::( &mut send, db.tls_store().context("Failed to open tls store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Smb => { - process_range_data( + process_range_data::( &mut send, db.smb_store().context("Failed to open smb store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Nfs => { - process_range_data( + process_range_data::( &mut send, db.nfs_store().context("Failed to open nfs store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::ProcessCreate => { - process_range_data( + process_range_data::( &mut send, db.process_create_store() .context("Failed to open process_create store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::FileCreateTime => { - process_range_data( + process_range_data::( &mut send, - db.file_create_store() - .context("Failed to open file_create store")?, + db.file_create_time_store() + .context("Failed to open file_create_time store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::NetworkConnect => { - process_range_data( + process_range_data::( &mut send, db.network_connect_store() .context("Failed to open network_connect store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::ProcessTerminate => { - process_range_data( + process_range_data::( &mut send, db.process_terminate_store() .context("Failed to open process_terminate store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::ImageLoad => { - process_range_data( + process_range_data::( &mut send, db.image_load_store() .context("Failed to open image_load store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::FileCreate => { - process_range_data( + process_range_data::( &mut send, db.file_create_store() .context("Failed to open file_create store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::RegistryValueSet => { - process_range_data( + process_range_data::( &mut send, db.registry_value_set_store() .context("Failed to open registry_value_set store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::RegistryKeyRename => { - process_range_data( + process_range_data::( &mut send, db.registry_key_rename_store() .context("Failed to open registry_key_rename store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::FileCreateStreamHash => { - process_range_data( + process_range_data::( &mut send, db.file_create_stream_hash_store() .context("Failed to open 
file_create_stream_hash store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::PipeEvent => { - process_range_data( + process_range_data::( &mut send, db.pipe_event_store() .context("Failed to open pipe_event store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::DnsQuery => { - process_range_data( + process_range_data::( &mut send, db.dns_query_store() .context("Failed to open dns_query store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::FileDelete => { - process_range_data( + process_range_data::( &mut send, db.file_delete_store() .context("Failed to open file_delete store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::ProcessTamper => { - process_range_data( + process_range_data::( &mut send, db.process_tamper_store() .context("Failed to open process_tamper store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::FileDeleteDetected => { - process_range_data( + process_range_data::( &mut send, db.file_delete_detected_store() .context("Failed to open file_delete_detected store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Netflow5 => { - process_range_data( + process_range_data::( &mut send, db.netflow5_store() .context("Failed to open netflow5 store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; } RawEventKind::Netflow9 => { - process_range_data( + process_range_data::( &mut send, db.netflow9_store() .context("Failed to open netflow9 store")?, msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), false, ) .await?; @@ -1163,116 +1401,416 @@ async fn handle_request( } } MessageCode::Pcap => { - process_pcap_extract(&msg_buf, pcap_sources.clone(), &mut send).await?; + process_pcap_extract( + &msg_buf, + pcap_sources.clone(), + peers, + peer_idents.clone(), + certs.clone(), + &mut send, + ) + .await?; } MessageCode::RawData => { - let msg = bincode::deserialize::(&msg_buf) + let msg: RequestRawData = bincode::deserialize::(&msg_buf) .map_err(|e| anyhow!("Failed to deserialize message: {}", e))?; match RawEventKind::from_str(msg.kind.as_str()).unwrap_or_default() { RawEventKind::Conn => { - process_raw_events(&mut send, db.conn_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.conn_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Dns => { - process_raw_events(&mut send, db.dns_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.dns_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Rdp => { - process_raw_events(&mut send, db.rdp_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.rdp_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Http => { - process_raw_events(&mut send, db.http_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.http_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Smtp => { - process_raw_events(&mut send, db.smtp_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.smtp_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Ntlm => { - 
process_raw_events(&mut send, db.ntlm_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.ntlm_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Kerberos => { - process_raw_events(&mut send, db.kerberos_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.kerberos_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Ssh => { - process_raw_events(&mut send, db.ssh_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.ssh_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::DceRpc => { - process_raw_events(&mut send, db.dce_rpc_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.dce_rpc_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Ftp => { - process_raw_events(&mut send, db.ftp_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.ftp_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Mqtt => { - process_raw_events(&mut send, db.mqtt_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.mqtt_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Ldap => { - process_raw_events(&mut send, db.ldap_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.ldap_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Tls => { - process_raw_events(&mut send, db.tls_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.tls_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Smb => { - process_raw_events(&mut send, db.smb_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.smb_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Nfs => { - process_raw_events(&mut send, db.nfs_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.nfs_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Log => { // For RawEventKind::LOG, the source_kind is required as the source. 
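The comment above is the one kind-specific wrinkle in the `RawData` path: for logs, `kind` selects the log kind, while each `input` entry still pairs a source name with the timestamps to look up. Timestamps are `i64` nanoseconds, matching the `i64::from_be_bytes` key decoding and `Utc.timestamp_nanos` calls elsewhere in this file. A hedged sketch of building such a request; the import path for `RequestRawData` is an assumption about the giganto-client crate layout, though the struct and its fields appear throughout this diff:

```rust
use anyhow::Result;
// Assumed module path for the request type used by MessageCode::RawData.
use giganto_client::publish::range::RequestRawData;

fn build_log_raw_data_request() -> Result<Vec<u8>> {
    let req = RequestRawData {
        kind: "log".to_string(),
        // (source, timestamps-to-fetch) pairs; timestamps are i64 nanoseconds.
        input: vec![("src1".to_string(), vec![1_700_000_000_000_000_000_i64])],
    };
    // The request body travels bincode-encoded, as in handle_request above.
    Ok(bincode::serialize(&req)?)
}
```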
- process_raw_events(&mut send, db.log_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.log_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::PeriodicTimeSeries => { - process_raw_events(&mut send, db.periodic_time_series_store()?, msg.input) - .await?; + process_raw_events::( + &mut send, + db.periodic_time_series_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::ProcessCreate => { - process_raw_events(&mut send, db.process_create_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.process_create_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::FileCreateTime => { - process_raw_events(&mut send, db.file_create_time_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.file_create_time_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::NetworkConnect => { - process_raw_events(&mut send, db.network_connect_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.network_connect_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::ProcessTerminate => { - process_raw_events(&mut send, db.process_terminate_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.process_terminate_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::ImageLoad => { - process_raw_events(&mut send, db.image_load_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.image_load_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::FileCreate => { - process_raw_events(&mut send, db.file_create_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.file_create_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::RegistryValueSet => { - process_raw_events(&mut send, db.registry_value_set_store()?, msg.input) - .await?; + process_raw_events::( + &mut send, + db.registry_value_set_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::RegistryKeyRename => { - process_raw_events(&mut send, db.registry_key_rename_store()?, msg.input) - .await?; + process_raw_events::( + &mut send, + db.registry_key_rename_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::FileCreateStreamHash => { - process_raw_events(&mut send, db.file_create_stream_hash_store()?, msg.input) - .await?; + process_raw_events::( + &mut send, + db.file_create_stream_hash_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::PipeEvent => { - process_raw_events(&mut send, db.pipe_event_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.pipe_event_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::DnsQuery => { - process_raw_events(&mut send, db.dns_query_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.dns_query_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::FileDelete => { - process_raw_events(&mut send, db.file_delete_store()?, msg.input).await?; + process_raw_events::( + &mut send, + 
db.file_delete_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::ProcessTamper => { - process_raw_events(&mut send, db.process_tamper_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.process_tamper_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::FileDeleteDetected => { - process_raw_events(&mut send, db.file_delete_detected_store()?, msg.input) - .await?; + process_raw_events::( + &mut send, + db.file_delete_detected_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Netflow5 => { - process_raw_events(&mut send, db.netflow5_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.netflow5_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } RawEventKind::Netflow9 => { - process_raw_events(&mut send, db.netflow9_store()?, msg.input).await?; + process_raw_events::( + &mut send, + db.netflow9_store()?, + msg, + ingest_sources, + peers, + peer_idents, + certs.clone(), + ) + .await?; } _ => { // do nothing @@ -1283,53 +1821,219 @@ async fn handle_request( } Ok(()) } +#[allow(clippy::too_many_arguments)] +async fn process_range_data<'c, T, I>( + send: &mut SendStream, + store: RawEventStore<'c, T>, + request_range: RequestRange, + ingest_sources: IngestSources, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, + availed_kind: bool, +) -> Result<()> +where + T: DeserializeOwned + ResponseRangeData, + I: DeserializeOwned + Serialize, +{ + if is_current_giganto_in_charge(ingest_sources, &request_range.source).await { + process_range_data_in_current_giganto(send, store, request_range, availed_kind).await?; + } else if let Some(peer_addr) = peer_in_charge_publish_addr(peers, &request_range.source).await + { + process_range_data_in_peer_giganto::( + send, + peer_idents, + peer_addr, + certs.clone(), + request_range, + ) + .await?; + } else { + bail!( + "Neither current nor peer gigantos are in charge of requested source {}", + &request_range.source + ) + } + send_range_data::(send, None).await?; + send.finish().await?; + Ok(()) +} + +async fn is_current_giganto_in_charge(ingest_sources: IngestSources, source: &String) -> bool { + ingest_sources.read().await.contains_key(source) +} -async fn process_range_data<'c, T>( +async fn peer_in_charge_publish_addr(peers: Peers, source: &String) -> Option { + peers.read().await.iter().find_map(|(peer_address, peer_info)| { + peer_info + .ingest_sources + .contains(source) + .then(|| { + SocketAddr::new( + peer_address.parse::().expect("Peer's IP address must be valid, because it is validated when peer giganto started."), + peer_info.publish_port.expect("Peer's publish port must be valid, because it is validated when peer giganto started."), + ) + }) + }) +} + +async fn process_range_data_in_current_giganto<'c, T>( send: &mut SendStream, store: RawEventStore<'c, T>, - msg: RequestRange, + request_range: RequestRange, availed_kind: bool, ) -> Result<()> where T: DeserializeOwned + ResponseRangeData, { - let key_builder = StorageKey::builder().start_key(&msg.source); + let key_builder = StorageKey::builder().start_key(&request_range.source); let key_builder = if availed_kind { - key_builder.mid_key(Some(msg.kind.as_bytes().to_vec())) + key_builder.mid_key(Some(request_range.kind.as_bytes().to_vec())) } else { key_builder }; let from_key = key_builder .clone() - 
.lower_closed_bound_end_key(Some(Utc.timestamp_nanos(msg.start))) + .lower_closed_bound_end_key(Some(Utc.timestamp_nanos(request_range.start))) .build(); let to_key = key_builder - .upper_open_bound_end_key(Some(Utc.timestamp_nanos(msg.end))) + .upper_open_bound_end_key(Some(Utc.timestamp_nanos(request_range.end))) .build(); let iter = store.boundary_iter(&from_key.key(), &to_key.key(), Direction::Forward); - for item in iter.take(msg.count) { + for item in iter.take(request_range.count) { let (key, val) = item.context("Failed to read Database")?; let timestamp = i64::from_be_bytes(key[(key.len() - TIMESTAMP_SIZE)..].try_into()?); - send_range_data(send, Some((val, timestamp, &msg.source))).await?; + send_range_data(send, Some((val, timestamp, &request_range.source))).await?; + } + + Ok(()) +} + +async fn process_range_data_in_peer_giganto( + send: &mut SendStream, + peer_idents: PeerIdents, + peer_addr: SocketAddr, + certs: Arc, + request_range: RequestRange, +) -> Result<()> +where + I: DeserializeOwned + Serialize, +{ + let peer_name = peer_name(peer_idents, &peer_addr).await?; + let (_peer_send, mut peer_recv) = request_range_data_to_peer( + peer_addr, + peer_name.as_str(), + certs.clone(), + MessageCode::ReqRange, + request_range, + ) + .await?; + loop { + let event: Option<(i64, String, Vec)> = receive_range_data(&mut peer_recv).await?; + if let Some(event_data) = event { + let event_data_again: Option<(i64, String, Vec)> = Some(event_data); + let send_buf = bincode::serialize(&event_data_again) + .map_err(PublishError::SerialDeserialFailure)?; + send_raw(send, &send_buf).await?; + } else { + break; + } } + Ok(()) +} + +async fn request_range_data_to_peer( + peer_addr: SocketAddr, + peer_name: &str, + certs: Arc, + message_code: MessageCode, + request_data: T, +) -> Result<(SendStream, RecvStream)> +where + T: Serialize, +{ + let connection = connect(peer_addr, peer_name, certs).await?; + + let (mut send, recv) = connection.open_bi().await?; + send_range_data_request(&mut send, message_code, request_data).await?; + + Ok((send, recv)) +} + +async fn process_raw_events<'c, T, I>( + send: &mut SendStream, + store: RawEventStore<'c, T>, + req: RequestRawData, + ingest_sources: IngestSources, + peers: Peers, + peer_idents: PeerIdents, + certs: Arc, +) -> Result<()> +where + T: DeserializeOwned + ResponseRangeData, + I: DeserializeOwned + Serialize + Clone, +{ + let (handle_by_current_giganto, handle_by_peer_gigantos) = + req_inputs_by_gigantos_in_charge(ingest_sources, req.input).await; + + if !handle_by_current_giganto.is_empty() { + process_raw_event_in_current_giganto(send, store, handle_by_current_giganto).await?; + } + + if !handle_by_peer_gigantos.is_empty() { + process_raw_event_in_peer_gigantos::( + send, + req.kind, + certs, + peers, + peer_idents, + handle_by_peer_gigantos, + ) + .await?; + } + send_range_data::(send, None).await?; send.finish().await?; Ok(()) } -async fn process_raw_events<'c, T>( +async fn req_inputs_by_gigantos_in_charge( + ingest_sources: IngestSources, + req_inputs: Vec<(String, Vec)>, +) -> (Vec<(String, Vec)>, Vec<(String, Vec)>) { + let current_giganto_sources: HashSet = ingest_sources + .read() + .await + .keys() + .cloned() + .collect::>(); + + let mut handle_by_current_giganto = Vec::with_capacity(req_inputs.len()); + let mut handle_by_peer_gigantos = Vec::with_capacity(req_inputs.len()); + for req_input in req_inputs { + if current_giganto_sources.contains(&req_input.0) { + handle_by_current_giganto.push(req_input); + } else { + 
handle_by_peer_gigantos.push(req_input); + } + } + + (handle_by_current_giganto, handle_by_peer_gigantos) +} + +async fn process_raw_event_in_current_giganto<'c, T>( send: &mut SendStream, store: RawEventStore<'c, T>, - msg: Vec<(String, Vec)>, + handle_by_current_giganto: Vec<(String, Vec)>, ) -> Result<()> where T: DeserializeOwned + ResponseRangeData, { let mut output: Vec<(i64, String, Vec)> = Vec::new(); - for (source, timestamps) in msg { + for (source, timestamps) in handle_by_current_giganto { output.extend_from_slice(&store.batched_multi_get_with_source(&source, ×tamps)); } @@ -1338,7 +2042,114 @@ where send_range_data(send, Some((val, timestamp, &source))).await?; } - send_range_data::(send, None).await?; - send.finish().await?; Ok(()) } + +async fn process_raw_event_in_peer_gigantos( + send: &mut SendStream, + kind: String, + certs: Arc, + peers: Peers, + peer_idents: PeerIdents, + handle_by_peer_gigantos: Vec<(String, Vec)>, +) -> Result<()> +where + I: DeserializeOwned + Serialize, +{ + let peer_gigantos_by_source: HashMap)>> = handle_by_peer_gigantos + .into_iter() + .fold(HashMap::new(), |mut acc, (source, timestamps)| { + acc.entry(source.clone()) + .or_default() + .push((source, timestamps)); + acc + }); + + for (source, input) in peer_gigantos_by_source { + if let Some(peer_addr) = peer_in_charge_publish_addr(peers.clone(), &source).await { + let peer_name = peer_name(peer_idents.clone(), &peer_addr).await?; + + let connection = connect(peer_addr, peer_name.as_str(), certs.clone()).await?; + let (mut peer_send, mut peer_recv) = connection.open_bi().await?; + + send_range_data_request( + &mut peer_send, + MessageCode::RawData, + RequestRawData { + kind: kind.clone(), + input, + }, + ) + .await?; + + while let Some(event) = + receive_range_data::)>>(&mut peer_recv).await? 
+ { + let send_buf = bincode::serialize(&Some(event)) + .map_err(PublishError::SerialDeserialFailure)?; + send_raw(send, &send_buf).await?; + } + } + } + + Ok(()) +} + +async fn connect( + server_addr: SocketAddr, + server_name: &str, + certs: Arc, +) -> Result { + let client_addr = if server_addr.is_ipv6() { + IpAddr::V6(Ipv6Addr::UNSPECIFIED) + } else { + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + }; + + let mut endpoint = Endpoint::client(SocketAddr::new(client_addr, 0))?; + endpoint.set_default_client_config(config_client(&certs)?); + + let conn = connect_repeatedly(&endpoint, server_addr, server_name).await; + + client_handshake(&conn, env!("CARGO_PKG_VERSION")).await?; + Ok(conn) +} + +async fn connect_repeatedly( + endpoint: &Endpoint, + server_addr: SocketAddr, + server_name: &str, +) -> Connection { + let max_delay = Duration::from_secs(30); + let mut delay = Duration::from_millis(500); + + loop { + info!(LogLocation::Both, "connecting to {}", server_addr); + match endpoint.connect(server_addr, server_name) { + Ok(connecting) => match connecting.await { + Ok(conn) => { + info!(LogLocation::Both, "connected to {}", server_addr); + return conn; + } + Err(e) => error!(LogLocation::Both, "cannot connect to controller: {:#}", e), + }, + Err(e) => { + error!(LogLocation::Both, "{:#}", e); + } + } + delay = std::cmp::min(max_delay, delay * 2); + tokio::time::sleep(delay).await; + } +} + +async fn peer_name(peer_idents: PeerIdents, peer_addr: &SocketAddr) -> Result { + let peer_idents_guard = peer_idents.read().await; + let peer_ident = peer_idents_guard + .iter() + .find(|idents| idents.address.eq(peer_addr)); + + match peer_ident { + Some(peer_ident) => Ok(peer_ident.host_name.clone()), + None => bail!("Peer giganto's server name cannot be identified"), + } +} diff --git a/src/publish/tests.rs b/src/publish/tests.rs index b8eb16c..b56a570 100644 --- a/src/publish/tests.rs +++ b/src/publish/tests.rs @@ -1,8 +1,10 @@ use super::Server; use crate::{ - new_pcap_sources, new_stream_direct_channels, + new_pcap_sources, new_peers_data, new_stream_direct_channels, + peer::{PeerIdentity, PeerInfo}, + server::Certs, storage::{Database, DbOptions, RawEventStore}, - to_cert_chain, to_private_key, + to_cert_chain, to_private_key, to_root_cert, }; use base64::{engine::general_purpose::STANDARD as base64_engine, Engine}; use chrono::{DateTime, Duration, NaiveDate, Utc}; @@ -25,14 +27,16 @@ }; use quinn::{Connection, Endpoint, SendStream}; use serde::Serialize; +use serial_test::serial; use std::{ cell::RefCell, + collections::{HashMap, HashSet}, fs, net::{IpAddr, Ipv6Addr, SocketAddr}, - path::Path, + path::{Path, PathBuf}, sync::{Arc, OnceLock}, }; -use tokio::sync::{Mutex, Notify}; +use tokio::sync::{Mutex, Notify, RwLock}; fn get_token() -> &'static Mutex { static TOKEN: OnceLock> = OnceLock::new(); TOKEN.get_or_init(|| Mutex::new(0)) } -const CERT_PATH: &str = "tests/cert.pem"; -const KEY_PATH: &str = "tests/key.pem"; -const CA_CERT_PATH: &str = "tests/root.pem"; -const HOST: &str = "localhost"; -const TEST_PORT: u16 = 60191; -const PROTOCOL_VERSION: &str = "0.15.2"; +const CA_CERT_PATH: &str = "tests/certs/root.pem"; +const PROTOCOL_VERSION: &str = "0.16.0"; + +const NODE1_CERT_PATH: &str = "tests/certs/node1/cert.pem"; +const NODE1_KEY_PATH: &str = "tests/certs/node1/key.pem"; +const NODE1_HOST: &str = "node1"; +const NODE1_TEST_PORT: u16 = 60191; + +const NODE2_CERT_PATH: &str = "tests/certs/node2/cert.pem"; +const 
+
+async fn connect(
+    server_addr: SocketAddr,
+    server_name: &str,
+    certs: Arc<Certs>,
+) -> Result<Connection> {
+    let client_addr = if server_addr.is_ipv6() {
+        IpAddr::V6(Ipv6Addr::UNSPECIFIED)
+    } else {
+        IpAddr::V4(Ipv4Addr::UNSPECIFIED)
+    };
+
+    let mut endpoint = Endpoint::client(SocketAddr::new(client_addr, 0))?;
+    endpoint.set_default_client_config(config_client(&certs)?);
+
+    let conn = connect_repeatedly(&endpoint, server_addr, server_name).await;
+
+    client_handshake(&conn, env!("CARGO_PKG_VERSION")).await?;
+    Ok(conn)
+}
+
+async fn connect_repeatedly(
+    endpoint: &Endpoint,
+    server_addr: SocketAddr,
+    server_name: &str,
+) -> Connection {
+    let max_delay = Duration::from_secs(30);
+    let mut delay = Duration::from_millis(500);
+
+    loop {
+        info!(LogLocation::Both, "connecting to {}", server_addr);
+        match endpoint.connect(server_addr, server_name) {
+            Ok(connecting) => match connecting.await {
+                Ok(conn) => {
+                    info!(LogLocation::Both, "connected to {}", server_addr);
+                    return conn;
+                }
+                Err(e) => error!(LogLocation::Both, "cannot connect to peer giganto: {:#}", e),
+            },
+            Err(e) => {
+                error!(LogLocation::Both, "{:#}", e);
+            }
+        }
+        delay = std::cmp::min(max_delay, delay * 2);
+        tokio::time::sleep(delay).await;
+    }
+}
+
+async fn peer_name(peer_idents: PeerIdents, peer_addr: &SocketAddr) -> Result<String> {
+    let peer_idents_guard = peer_idents.read().await;
+    let peer_ident = peer_idents_guard
+        .iter()
+        .find(|idents| idents.address.eq(peer_addr));
+
+    match peer_ident {
+        Some(peer_ident) => Ok(peer_ident.host_name.clone()),
+        None => bail!("Peer giganto's server name cannot be identified"),
+    }
+}
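One behavioral note on `connect_repeatedly`: the delay is doubled before the first sleep, so the first retry waits one second and subsequent gaps double up to the 30-second ceiling (1 s, 2 s, 4 s, 8 s, 16 s, 30 s, 30 s, ...). The schedule in miniature:

    // Retry gaps in milliseconds, as produced by `delay = min(30s, delay * 2)`.
    let delays = std::iter::successors(Some(1_000u64), |d| Some((d * 2).min(30_000)));
    // 1000, 2000, 4000, 8000, 16000, 30000, 30000, ...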
diff --git a/src/publish/tests.rs b/src/publish/tests.rs
index b8eb16c..b56a570 100644
--- a/src/publish/tests.rs
+++ b/src/publish/tests.rs
@@ -1,8 +1,10 @@
 use super::Server;
 use crate::{
-    new_pcap_sources, new_stream_direct_channels,
+    new_pcap_sources, new_peers_data, new_stream_direct_channels,
+    peer::{PeerIdentity, PeerInfo},
+    server::Certs,
     storage::{Database, DbOptions, RawEventStore},
-    to_cert_chain, to_private_key,
+    to_cert_chain, to_private_key, to_root_cert,
 };
 use base64::{engine::general_purpose::STANDARD as base64_engine, Engine};
 use chrono::{DateTime, Duration, NaiveDate, Utc};
@@ -25,14 +27,16 @@ use giganto_client::{
 };
 use quinn::{Connection, Endpoint, SendStream};
 use serde::Serialize;
+use serial_test::serial;
 use std::{
     cell::RefCell,
+    collections::{HashMap, HashSet},
     fs,
     net::{IpAddr, Ipv6Addr, SocketAddr},
-    path::Path,
+    path::{Path, PathBuf},
     sync::{Arc, OnceLock},
 };
-use tokio::sync::{Mutex, Notify};
+use tokio::sync::{Mutex, Notify, RwLock};
 
 fn get_token() -> &'static Mutex<u32> {
     static TOKEN: OnceLock<Mutex<u32>> = OnceLock::new();
@@ -40,12 +44,21 @@ fn get_token() -> &'static Mutex<u32> {
     TOKEN.get_or_init(|| Mutex::new(0))
 }
 
-const CERT_PATH: &str = "tests/cert.pem";
-const KEY_PATH: &str = "tests/key.pem";
-const CA_CERT_PATH: &str = "tests/root.pem";
-const HOST: &str = "localhost";
-const TEST_PORT: u16 = 60191;
-const PROTOCOL_VERSION: &str = "0.15.2";
+const CA_CERT_PATH: &str = "tests/certs/root.pem";
+const PROTOCOL_VERSION: &str = "0.16.0";
+
+const NODE1_CERT_PATH: &str = "tests/certs/node1/cert.pem";
+const NODE1_KEY_PATH: &str = "tests/certs/node1/key.pem";
+const NODE1_HOST: &str = "node1";
+const NODE1_TEST_PORT: u16 = 60191;
+
+const NODE2_CERT_PATH: &str = "tests/certs/node2/cert.pem";
+const NODE2_KEY_PATH: &str = "tests/certs/node2/key.pem";
+const NODE2_HOST: &str = "node2";
+const NODE2_PORT: u16 = 60192;
+
+const NODE1_GIGANTO_INGEST_SOURCES: [&str; 3] = ["src1", "src 1", "ingest src 1"];
+const NODE2_GIGANTO_INGEST_SOURCES: [&str; 3] = ["src2", "src 2", "ingest src 2"];
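The tests below repeat one block almost verbatim: read a node's cert and key, load the root store, and wrap everything in `Certs`. A hypothetical helper capturing that pattern (sketch only; the tests inline it):

    // Sketch: build an Arc<Certs> for a test node from its PEM files.
    fn test_certs(cert_path: &str, key_path: &str) -> Arc<Certs> {
        let cert = to_cert_chain(&fs::read(cert_path).unwrap()).unwrap();
        let key = to_private_key(&fs::read(key_path).unwrap()).unwrap();
        let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
        let ca_certs = to_root_cert(&ca_cert_path).unwrap();
        Arc::new(Certs { certs: cert, key, ca_certs })
    }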
"ingest src 1"; const KIND: &str = "timeseries"; let _lock = get_token().lock().await; @@ -1741,10 +1812,35 @@ async fn request_range_data_with_period_time_series() { let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap(); let pcap_sources = new_pcap_sources(); let stream_direct_channels = new_stream_direct_channels(); + let ingest_sources = Arc::new(tokio::sync::RwLock::new( + NODE1_GIGANTO_INGEST_SOURCES + .into_iter() + .map(|source| (source.to_string(), Utc::now())) + .collect::>>(), + )); + let (peers, peer_idents) = new_peers_data(None); + + let cert_pem = fs::read(NODE1_CERT_PATH).unwrap(); + let cert = to_cert_chain(&cert_pem).unwrap(); + let key_pem = fs::read(NODE1_KEY_PATH).unwrap(); + let key = to_private_key(&key_pem).unwrap(); + let ca_cert_path: Vec = vec![PathBuf::from(CA_CERT_PATH)]; + let ca_certs = to_root_cert(&ca_cert_path).unwrap(); + + let certs = Arc::new(Certs { + certs: cert, + key, + ca_certs, + }); + tokio::spawn(server().run( db.clone(), pcap_sources, stream_direct_channels, + ingest_sources, + peers, + peer_idents, + certs, Arc::new(Notify::new()), )); let publish = TestClient::new().await; @@ -1756,7 +1852,7 @@ async fn request_range_data_with_period_time_series() { let time_series_data = bincode::deserialize::(&insert_periodic_time_series_raw_event( &time_series_store, - SAMPLING_POLICY_ID, + SAMPLING_POLICY_ID_AS_SOURCE, send_time_series_time, )) .unwrap(); @@ -1776,7 +1872,7 @@ async fn request_range_data_with_period_time_series() { Utc, ); let message = RequestRange { - source: String::from(SAMPLING_POLICY_ID), + source: String::from(SAMPLING_POLICY_ID_AS_SOURCE), kind: String::from(KIND), start: start.timestamp_nanos_opt().unwrap(), end: end.timestamp_nanos_opt().unwrap(), @@ -1805,7 +1901,7 @@ async fn request_range_data_with_period_time_series() { ); assert_eq!( time_series_data - .response_data(send_time_series_time, SAMPLING_POLICY_ID) + .response_data(send_time_series_time, SAMPLING_POLICY_ID_AS_SOURCE) .unwrap(), bincode::serialize::)>>(&result_data.pop().unwrap()).unwrap() ); @@ -1862,10 +1958,35 @@ async fn request_network_event_stream() { }; let pcap_sources = new_pcap_sources(); let stream_direct_channels = new_stream_direct_channels(); + let ingest_sources = Arc::new(tokio::sync::RwLock::new( + NODE1_GIGANTO_INGEST_SOURCES + .into_iter() + .map(|source| (source.to_string(), Utc::now())) + .collect::>>(), + )); + let (peers, peer_idents) = new_peers_data(None); + + let cert_pem = fs::read(NODE1_CERT_PATH).unwrap(); + let cert = to_cert_chain(&cert_pem).unwrap(); + let key_pem = fs::read(NODE1_KEY_PATH).unwrap(); + let key = to_private_key(&key_pem).unwrap(); + let ca_cert_path: Vec = vec![PathBuf::from(CA_CERT_PATH)]; + let ca_certs = to_root_cert(&ca_cert_path).unwrap(); + + let certs = Arc::new(Certs { + certs: cert, + key, + ca_certs, + }); + tokio::spawn(server().run( db.clone(), pcap_sources, stream_direct_channels.clone(), + ingest_sources, + peers, + peer_idents, + certs, Arc::new(Notify::new()), )); let mut publish = TestClient::new().await; @@ -3508,10 +3629,35 @@ async fn request_raw_events() { let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap(); let pcap_sources = new_pcap_sources(); let stream_direct_channels = new_stream_direct_channels(); + let ingest_sources = Arc::new(tokio::sync::RwLock::new( + NODE1_GIGANTO_INGEST_SOURCES + .into_iter() + .map(|source| (source.to_string(), Utc::now())) + .collect::>>(), + )); + let (peers, peer_idents) = new_peers_data(None); + + let cert_pem = 
@@ -1642,7 +1688,7 @@
 #[tokio::test]
 async fn request_range_data_with_log() {
     const PUBLISH_RANGE_MESSAGE_CODE: MessageCode = MessageCode::ReqRange;
-    const SOURCE: &str = "einsis";
+    const SOURCE: &str = "src1";
     const KIND: &str = "Hello";
 
     #[derive(Serialize)]
@@ -1659,10 +1705,35 @@
     let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
     let pcap_sources = new_pcap_sources();
     let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+    let (peers, peer_idents) = new_peers_data(None);
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
     tokio::spawn(server().run(
         db.clone(),
         pcap_sources,
         stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
         Arc::new(Notify::new()),
     ));
     let publish = TestClient::new().await;
@@ -1733,7 +1804,7 @@
 #[tokio::test]
 async fn request_range_data_with_period_time_series() {
     const PUBLISH_RANGE_MESSAGE_CODE: MessageCode = MessageCode::ReqRange;
-    const SAMPLING_POLICY_ID: &str = "policy_one";
+    const SAMPLING_POLICY_ID_AS_SOURCE: &str = "ingest src 1";
     const KIND: &str = "timeseries";
 
     let _lock = get_token().lock().await;
@@ -1741,10 +1812,35 @@
     let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
     let pcap_sources = new_pcap_sources();
     let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+    let (peers, peer_idents) = new_peers_data(None);
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
     tokio::spawn(server().run(
         db.clone(),
         pcap_sources,
         stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
         Arc::new(Notify::new()),
     ));
     let publish = TestClient::new().await;
@@ -1756,7 +1852,7 @@
     let time_series_data =
         bincode::deserialize::<PeriodicTimeSeries>(&insert_periodic_time_series_raw_event(
             &time_series_store,
-            SAMPLING_POLICY_ID,
+            SAMPLING_POLICY_ID_AS_SOURCE,
             send_time_series_time,
         ))
         .unwrap();
@@ -1776,7 +1872,7 @@
         Utc,
     );
     let message = RequestRange {
-        source: String::from(SAMPLING_POLICY_ID),
+        source: String::from(SAMPLING_POLICY_ID_AS_SOURCE),
         kind: String::from(KIND),
         start: start.timestamp_nanos_opt().unwrap(),
         end: end.timestamp_nanos_opt().unwrap(),
@@ -1805,7 +1901,7 @@
     );
     assert_eq!(
         time_series_data
-            .response_data(send_time_series_time, SAMPLING_POLICY_ID)
+            .response_data(send_time_series_time, SAMPLING_POLICY_ID_AS_SOURCE)
             .unwrap(),
         bincode::serialize::<Option<(i64, String, Vec<f64>)>>(&result_data.pop().unwrap()).unwrap()
     );
@@ -1862,10 +1958,35 @@ async fn request_network_event_stream() {
     };
     let pcap_sources = new_pcap_sources();
     let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+    let (peers, peer_idents) = new_peers_data(None);
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
     tokio::spawn(server().run(
         db.clone(),
         pcap_sources,
         stream_direct_channels.clone(),
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
         Arc::new(Notify::new()),
     ));
     let mut publish = TestClient::new().await;
@@ -3508,10 +3629,35 @@ async fn request_raw_events() {
     let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
     let pcap_sources = new_pcap_sources();
     let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+    let (peers, peer_idents) = new_peers_data(None);
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
     tokio::spawn(server().run(
         db.clone(),
         pcap_sources,
         stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
         Arc::new(Notify::new()),
     ));
     let publish = TestClient::new().await;
@@ -3554,3 +3700,817 @@ async fn request_raw_events() {
         bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&result_data.pop()).unwrap()
     );
 }
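Each `*_giganto_cluster` test below spawns a second publish server (node2) in a task, seeds its database, and hands the expected wire bytes back to the main task through a oneshot channel. The pattern in miniature (illustrative only, inside a tokio test):

    // The node2 task computes the expected bytes; the client side compares.
    let (tx, rx) = tokio::sync::oneshot::channel::<Vec<u8>>();
    tokio::spawn(async move {
        let _ = tx.send(b"expected response bytes".to_vec());
    });
    let expected = rx.await.expect("node2 task dropped the sender");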
+
+#[tokio::test]
+#[serial]
+async fn request_range_data_with_protocol_giganto_cluster() {
+    const PUBLISH_RANGE_MESSAGE_CODE: MessageCode = MessageCode::ReqRange;
+    const SOURCE: &str = "ingest src 2";
+    const CONN_KIND: &str = "conn";
+
+    let (oneshot_send, oneshot_recv) = tokio::sync::oneshot::channel();
+
+    // spawn node2 publish server
+    tokio::spawn(async {
+        let db_dir = tempfile::tempdir().unwrap();
+        let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+        let pcap_sources = new_pcap_sources();
+        let stream_direct_channels = new_stream_direct_channels();
+        let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+            NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| (source.to_string(), Utc::now()))
+                .collect::<HashMap<String, DateTime<Utc>>>(),
+        ));
+
+        let cert_pem = fs::read(NODE2_CERT_PATH).unwrap();
+        let cert = to_cert_chain(&cert_pem).unwrap();
+        let key_pem = fs::read(NODE2_KEY_PATH).unwrap();
+        let key = to_private_key(&key_pem).unwrap();
+        let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+        let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+        let certs = Arc::new(Certs {
+            certs: cert,
+            key,
+            ca_certs,
+        });
+
+        let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+            Ipv6Addr::LOCALHOST.to_string(),
+            PeerInfo {
+                ingest_sources: NODE1_GIGANTO_INGEST_SOURCES
+                    .into_iter()
+                    .map(|source| source.to_string())
+                    .collect::<HashSet<String>>(),
+                graphql_port: None,
+                publish_port: Some(NODE1_TEST_PORT),
+            },
+        )])));
+
+        let mut peer_identities = HashSet::new();
+        peer_identities.insert(PeerIdentity {
+            address: SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), NODE1_TEST_PORT),
+            host_name: NODE1_HOST.to_string(),
+        });
+        let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+        let notify_shutdown = Arc::new(Notify::new());
+
+        // prepare data in node2 database
+        let conn_store = db.conn_store().unwrap();
+        let send_conn_time = Utc::now().timestamp_nanos_opt().unwrap();
+        let conn_data = bincode::deserialize::<Conn>(&insert_conn_raw_event(
+            &conn_store,
+            SOURCE,
+            send_conn_time,
+        ))
+        .unwrap();
+
+        if oneshot_send
+            .send(conn_data.response_data(send_conn_time, SOURCE).unwrap())
+            .is_err()
+        {
+            eprintln!("the receiver is dropped");
+        }
+
+        let node2_server = Server::new(
+            SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT),
+            &certs,
+        );
+        node2_server
+            .run(
+                db,
+                pcap_sources,
+                stream_direct_channels,
+                ingest_sources,
+                peers,
+                peer_idents,
+                certs,
+                notify_shutdown,
+            )
+            .await
+    });
+
+    let _lock = get_token().lock().await;
+    let db_dir = tempfile::tempdir().unwrap();
+    let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+    let pcap_sources = new_pcap_sources();
+    let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+
+    let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+        "127.0.0.1".to_string(),
+        PeerInfo {
+            ingest_sources: NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| source.to_string())
+                .collect::<HashSet<String>>(),
+            graphql_port: None,
+            publish_port: Some(NODE2_PORT),
+        },
+    )])));
+    let mut peer_identities = HashSet::new();
+    let peer_address = SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT);
+    peer_identities.insert(PeerIdentity {
+        address: peer_address,
+        host_name: NODE2_HOST.to_string(),
+    });
+    let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
+    tokio::spawn(server().run(
+        db.clone(),
+        pcap_sources,
+        stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
+        Arc::new(Notify::new()),
+    ));
+
+    let publish = TestClient::new().await;
+
+    let (mut send_pub_req, mut recv_pub_resp) =
+        publish.conn.open_bi().await.expect("failed to open stream");
+
+    let start = DateTime::<Utc>::from_naive_utc_and_offset(
+        NaiveDate::from_ymd_opt(1970, 1, 1)
+            .expect("valid date")
+            .and_hms_opt(00, 00, 00)
+            .expect("valid time"),
+        Utc,
+    );
+    let end = DateTime::<Utc>::from_naive_utc_and_offset(
+        NaiveDate::from_ymd_opt(2050, 12, 31)
+            .expect("valid date")
+            .and_hms_opt(23, 59, 59)
+            .expect("valid time"),
+        Utc,
+    );
+    let message = RequestRange {
+        source: String::from(SOURCE),
+        kind: String::from(CONN_KIND),
+        start: start.timestamp_nanos_opt().unwrap(),
+        end: end.timestamp_nanos_opt().unwrap(),
+        count: 5,
+    };
+
+    send_range_data_request(&mut send_pub_req, PUBLISH_RANGE_MESSAGE_CODE, message)
+        .await
+        .unwrap();
+
+    let mut result_data = Vec::new();
+    loop {
+        let resp_data = receive_range_data::<Option<(i64, String, Vec<u8>)>>(&mut recv_pub_resp)
+            .await
+            .unwrap();
+
+        result_data.push(resp_data.clone());
+        if resp_data.is_none() {
+            break;
+        }
+    }
+
+    let raw_data = match oneshot_recv.await {
+        Ok(v) => v,
+        Err(_) => {
+            eprintln!("the sender dropped");
+            Vec::new()
+        }
+    };
+
+    assert_eq!(
+        Conn::response_done().unwrap(),
+        bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&result_data.pop().unwrap()).unwrap()
+    );
+    assert_eq!(
+        raw_data,
+        bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&result_data.pop().unwrap()).unwrap()
+    );
+
+    publish.conn.close(0u32.into(), b"publish_time_done");
+    publish.endpoint.wait_idle().await;
+}
+
+#[tokio::test]
+#[serial]
+async fn request_range_data_with_log_giganto_cluster() {
+    const PUBLISH_RANGE_MESSAGE_CODE: MessageCode = MessageCode::ReqRange;
+    const SOURCE: &str = "src2";
+    const KIND: &str = "Hello";
+
+    #[derive(Serialize)]
+    struct RequestRangeMessage {
+        source: String,
+        kind: String,
+        start: i64,
+        end: i64,
+        count: usize,
+    }
+
+    let (oneshot_send, oneshot_recv) = tokio::sync::oneshot::channel();
+
+    // spawn node2 publish server
+    tokio::spawn(async {
+        let db_dir = tempfile::tempdir().unwrap();
+        let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+        let pcap_sources = new_pcap_sources();
+        let stream_direct_channels = new_stream_direct_channels();
+        let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+            NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| (source.to_string(), Utc::now()))
+                .collect::<HashMap<String, DateTime<Utc>>>(),
+        ));
+
+        let cert_pem = fs::read(NODE2_CERT_PATH).unwrap();
+        let cert = to_cert_chain(&cert_pem).unwrap();
+        let key_pem = fs::read(NODE2_KEY_PATH).unwrap();
+        let key = to_private_key(&key_pem).unwrap();
+        let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+        let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+        let certs = Arc::new(Certs {
+            certs: cert,
+            key,
+            ca_certs,
+        });
+
+        let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+            Ipv6Addr::LOCALHOST.to_string(),
+            PeerInfo {
+                ingest_sources: NODE1_GIGANTO_INGEST_SOURCES
+                    .into_iter()
+                    .map(|source| source.to_string())
+                    .collect::<HashSet<String>>(),
+                graphql_port: None,
+                publish_port: Some(NODE1_TEST_PORT),
+            },
+        )])));
+
+        let mut peer_identities = HashSet::new();
+        peer_identities.insert(PeerIdentity {
+            address: SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), NODE1_TEST_PORT),
+            host_name: NODE1_HOST.to_string(),
+        });
+        let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+        let notify_shutdown = Arc::new(Notify::new());
+
+        // prepare data in node2 database
+        let log_store = db.log_store().unwrap();
+        let send_log_time = Utc::now().timestamp_nanos_opt().unwrap();
+        let log_data = bincode::deserialize::<Log>(&insert_log_raw_event(
+            &log_store,
+            SOURCE,
+            KIND,
+            send_log_time,
+        ))
+        .unwrap();
+
+        if oneshot_send
+            .send(log_data.response_data(send_log_time, SOURCE).unwrap())
+            .is_err()
+        {
+            eprintln!("the receiver is dropped");
+        }
+
+        let node2_server = Server::new(
+            SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT),
+            &certs,
+        );
+        node2_server
+            .run(
+                db,
+                pcap_sources,
+                stream_direct_channels,
+                ingest_sources,
+                peers,
+                peer_idents,
+                certs,
+                notify_shutdown,
+            )
+            .await
+    });
+
+    let _lock = get_token().lock().await;
+    let db_dir = tempfile::tempdir().unwrap();
+    let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+    let pcap_sources = new_pcap_sources();
+    let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+
+    let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+        "127.0.0.1".to_string(),
+        PeerInfo {
+            ingest_sources: NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| source.to_string())
+                .collect::<HashSet<String>>(),
+            graphql_port: None,
+            publish_port: Some(NODE2_PORT),
+        },
+    )])));
+    let mut peer_identities = HashSet::new();
+    let peer_address = SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT);
+    peer_identities.insert(PeerIdentity {
+        address: peer_address,
+        host_name: NODE2_HOST.to_string(),
+    });
+    let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
+    tokio::spawn(server().run(
+        db.clone(),
+        pcap_sources,
+        stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
+        Arc::new(Notify::new()),
+    ));
+    let publish = TestClient::new().await;
+    let (mut send_pub_req, mut recv_pub_resp) =
+        publish.conn.open_bi().await.expect("failed to open stream");
+
+    let start = DateTime::<Utc>::from_naive_utc_and_offset(
+        NaiveDate::from_ymd_opt(1970, 1, 1)
+            .expect("valid date")
+            .and_hms_opt(00, 00, 00)
+            .expect("valid time"),
+        Utc,
+    );
+    let end = DateTime::<Utc>::from_naive_utc_and_offset(
+        NaiveDate::from_ymd_opt(2050, 12, 31)
+            .expect("valid date")
+            .and_hms_opt(23, 59, 59)
+            .expect("valid time"),
+        Utc,
+    );
+    let message = RequestRange {
+        source: String::from(SOURCE),
+        kind: String::from(KIND),
+        start: start.timestamp_nanos_opt().unwrap(),
+        end: end.timestamp_nanos_opt().unwrap(),
+        count: 5,
+    };
+
+    send_range_data_request(&mut send_pub_req, PUBLISH_RANGE_MESSAGE_CODE, message)
+        .await
+        .unwrap();
+
+    let mut result_data = Vec::new();
+    loop {
+        let resp_data = receive_range_data::<Option<(i64, String, Vec<u8>)>>(&mut recv_pub_resp)
+            .await
+            .unwrap();
+
+        result_data.push(resp_data.clone());
+        if resp_data.is_none() {
+            break;
+        }
+    }
+
+    let raw_data = match oneshot_recv.await {
+        Ok(v) => v,
+        Err(_) => {
+            eprintln!("the sender dropped");
+            Vec::new()
+        }
+    };
+
+    assert_eq!(
+        Conn::response_done().unwrap(),
+        bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&result_data.pop().unwrap()).unwrap()
+    );
+    assert_eq!(
+        raw_data,
+        bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&result_data.pop().unwrap()).unwrap()
+    );
+
+    publish.conn.close(0u32.into(), b"publish_log_done");
+    publish.endpoint.wait_idle().await;
+}
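A detail worth noting: the log test above checks the terminating record against `Conn::response_done()` even though it requested logs. That works on the assumption that the done marker is simply a serialized `None`, independent of the event type:

    // Assumption, not verified against giganto-client: the sentinel closing a
    // range response is a bincode-encoded None, so every type's
    // response_done() yields the same bytes.
    let done = bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&None).unwrap();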
+
+#[tokio::test]
+#[serial]
+async fn request_range_data_with_period_time_series_giganto_cluster() {
+    const PUBLISH_RANGE_MESSAGE_CODE: MessageCode = MessageCode::ReqRange;
+    const SAMPLING_POLICY_ID_AS_SOURCE: &str = "ingest src 2";
+    const KIND: &str = "timeseries";
+
+    let (oneshot_send, oneshot_recv) = tokio::sync::oneshot::channel();
+
+    // spawn node2 publish server
+    tokio::spawn(async {
+        let db_dir = tempfile::tempdir().unwrap();
+        let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+        let pcap_sources = new_pcap_sources();
+        let stream_direct_channels = new_stream_direct_channels();
+        let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+            NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| (source.to_string(), Utc::now()))
+                .collect::<HashMap<String, DateTime<Utc>>>(),
+        ));
+
+        let cert_pem = fs::read(NODE2_CERT_PATH).unwrap();
+        let cert = to_cert_chain(&cert_pem).unwrap();
+        let key_pem = fs::read(NODE2_KEY_PATH).unwrap();
+        let key = to_private_key(&key_pem).unwrap();
+        let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+        let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+        let certs = Arc::new(Certs {
+            certs: cert,
+            key,
+            ca_certs,
+        });
+
+        let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+            Ipv6Addr::LOCALHOST.to_string(),
+            PeerInfo {
+                ingest_sources: NODE1_GIGANTO_INGEST_SOURCES
+                    .into_iter()
+                    .map(|source| source.to_string())
+                    .collect::<HashSet<String>>(),
+                graphql_port: None,
+                publish_port: Some(NODE1_TEST_PORT),
+            },
+        )])));
+
+        let mut peer_identities = HashSet::new();
+        peer_identities.insert(PeerIdentity {
+            address: SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), NODE1_TEST_PORT),
+            host_name: NODE1_HOST.to_string(),
+        });
+        let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+        let notify_shutdown = Arc::new(Notify::new());
+
+        // prepare data in node2 database
+        let time_series_store = db.periodic_time_series_store().unwrap();
+        let send_time_series_time = Utc::now().timestamp_nanos_opt().unwrap();
+        let time_series_data =
+            bincode::deserialize::<PeriodicTimeSeries>(&insert_periodic_time_series_raw_event(
+                &time_series_store,
+                SAMPLING_POLICY_ID_AS_SOURCE,
+                send_time_series_time,
+            ))
+            .unwrap();
+
+        if oneshot_send
+            .send(
+                time_series_data
+                    .response_data(send_time_series_time, SAMPLING_POLICY_ID_AS_SOURCE)
+                    .unwrap(),
+            )
+            .is_err()
+        {
+            eprintln!("the receiver is dropped");
+        }
+
+        let node2_server = Server::new(
+            SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT),
+            &certs,
+        );
+        node2_server
+            .run(
+                db,
+                pcap_sources,
+                stream_direct_channels,
+                ingest_sources,
+                peers,
+                peer_idents,
+                certs,
+                notify_shutdown,
+            )
+            .await
+    });
+
+    let _lock = get_token().lock().await;
+    let db_dir = tempfile::tempdir().unwrap();
+    let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+    let pcap_sources = new_pcap_sources();
+    let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+
+    let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+        "127.0.0.1".to_string(),
+        PeerInfo {
+            ingest_sources: NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| source.to_string())
+                .collect::<HashSet<String>>(),
+            graphql_port: None,
+            publish_port: Some(NODE2_PORT),
+        },
+    )])));
+
+    let mut peer_identities = HashSet::new();
+    let peer_address = SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT);
+    peer_identities.insert(PeerIdentity {
+        address: peer_address,
+        host_name: NODE2_HOST.to_string(),
+    });
+    let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
+    tokio::spawn(server().run(
+        db.clone(),
+        pcap_sources,
+        stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
+        Arc::new(Notify::new()),
+    ));
+    let publish = TestClient::new().await;
+    let (mut send_pub_req, mut recv_pub_resp) =
+        publish.conn.open_bi().await.expect("failed to open stream");
+
+    let start = DateTime::<Utc>::from_naive_utc_and_offset(
+        NaiveDate::from_ymd_opt(1970, 1, 1)
+            .expect("valid date")
+            .and_hms_opt(00, 00, 00)
+            .expect("valid time"),
+        Utc,
+    );
+    let end = DateTime::<Utc>::from_naive_utc_and_offset(
+        NaiveDate::from_ymd_opt(2050, 12, 31)
+            .expect("valid date")
+            .and_hms_opt(23, 59, 59)
+            .expect("valid time"),
+        Utc,
+    );
+    let message = RequestRange {
+        source: String::from(SAMPLING_POLICY_ID_AS_SOURCE),
+        kind: String::from(KIND),
+        start: start.timestamp_nanos_opt().unwrap(),
+        end: end.timestamp_nanos_opt().unwrap(),
+        count: 5,
+    };
+
+    send_range_data_request(&mut send_pub_req, PUBLISH_RANGE_MESSAGE_CODE, message)
+        .await
+        .unwrap();
+
+    let mut result_data = Vec::new();
+    loop {
+        let resp_data = receive_range_data::<Option<(i64, String, Vec<f64>)>>(&mut recv_pub_resp)
+            .await
+            .unwrap();
+
+        result_data.push(resp_data.clone());
+        if resp_data.is_none() {
+            break;
+        }
+    }
+
+    let raw_data = match oneshot_recv.await {
+        Ok(v) => v,
+        Err(_) => {
+            eprintln!("the sender dropped");
+            Vec::new()
+        }
+    };
+
+    assert_eq!(
+        PeriodicTimeSeries::response_done().unwrap(),
+        bincode::serialize::<Option<(i64, String, Vec<f64>)>>(&result_data.pop().unwrap()).unwrap()
+    );
+    assert_eq!(
+        raw_data,
+        bincode::serialize::<Option<(i64, String, Vec<f64>)>>(&result_data.pop().unwrap()).unwrap()
+    );
+
+    publish.conn.close(0u32.into(), b"publish_time_done");
+    publish.endpoint.wait_idle().await;
+}
+
+#[tokio::test]
+#[serial]
+async fn request_raw_events_giganto_cluster() {
+    const SOURCE: &str = "src 2";
+    const KIND: &str = "conn";
+    const TIMESTAMP: i64 = 100;
+
+    let (oneshot_send, oneshot_recv) = tokio::sync::oneshot::channel();
+
+    // spawn node2 publish server
+    tokio::spawn(async {
+        let db_dir = tempfile::tempdir().unwrap();
+        let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+        let pcap_sources = new_pcap_sources();
+        let stream_direct_channels = new_stream_direct_channels();
+        let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+            NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| (source.to_string(), Utc::now()))
+                .collect::<HashMap<String, DateTime<Utc>>>(),
+        ));
+
+        let cert_pem = fs::read(NODE2_CERT_PATH).unwrap();
+        let cert = to_cert_chain(&cert_pem).unwrap();
+        let key_pem = fs::read(NODE2_KEY_PATH).unwrap();
+        let key = to_private_key(&key_pem).unwrap();
+        let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+        let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+        let certs = Arc::new(Certs {
+            certs: cert,
+            key,
+            ca_certs,
+        });
+
+        let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+            Ipv6Addr::LOCALHOST.to_string(),
+            PeerInfo {
+                ingest_sources: NODE1_GIGANTO_INGEST_SOURCES
+                    .into_iter()
+                    .map(|source| source.to_string())
+                    .collect::<HashSet<String>>(),
+                graphql_port: None,
+                publish_port: Some(NODE1_TEST_PORT),
+            },
+        )])));
+
+        let mut peer_identities = HashSet::new();
+        peer_identities.insert(PeerIdentity {
+            address: SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), NODE1_TEST_PORT),
+            host_name: NODE1_HOST.to_string(),
+        });
+        let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+        let notify_shutdown = Arc::new(Notify::new());
+
+        // prepare data in node2 database
+        let conn_store = db.conn_store().unwrap();
+        let send_conn_time = TIMESTAMP;
+        let conn_raw_data = insert_conn_raw_event(&conn_store, SOURCE, send_conn_time);
+        let conn_data = bincode::deserialize::<Conn>(&conn_raw_data).unwrap();
+        let raw_data = conn_data.response_data(TIMESTAMP, SOURCE).unwrap();
+
+        if oneshot_send.send(raw_data).is_err() {
+            eprintln!("the receiver is dropped");
+        }
+
+        let node2_server = Server::new(
+            SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT),
+            &certs,
+        );
+        node2_server
+            .run(
+                db,
+                pcap_sources,
+                stream_direct_channels,
+                ingest_sources,
+                peers,
+                peer_idents,
+                certs,
+                notify_shutdown,
+            )
+            .await
+    });
+
+    let _lock = get_token().lock().await;
+    let db_dir = tempfile::tempdir().unwrap();
+    let db = Database::open(db_dir.path(), &DbOptions::default()).unwrap();
+    let pcap_sources = new_pcap_sources();
+    let stream_direct_channels = new_stream_direct_channels();
+    let ingest_sources = Arc::new(tokio::sync::RwLock::new(
+        NODE1_GIGANTO_INGEST_SOURCES
+            .into_iter()
+            .map(|source| (source.to_string(), Utc::now()))
+            .collect::<HashMap<String, DateTime<Utc>>>(),
+    ));
+
+    let peers = Arc::new(tokio::sync::RwLock::new(HashMap::from([(
+        "127.0.0.1".to_string(),
+        PeerInfo {
+            ingest_sources: NODE2_GIGANTO_INGEST_SOURCES
+                .into_iter()
+                .map(|source| source.to_string())
+                .collect::<HashSet<String>>(),
+            graphql_port: None,
+            publish_port: Some(NODE2_PORT),
+        },
+    )])));
+
+    let mut peer_identities = HashSet::new();
+    let peer_address = SocketAddr::new("127.0.0.1".parse::<IpAddr>().unwrap(), NODE2_PORT);
+    peer_identities.insert(PeerIdentity {
+        address: peer_address,
+        host_name: NODE2_HOST.to_string(),
+    });
+    let peer_idents = Arc::new(RwLock::new(peer_identities));
+
+    let cert_pem = fs::read(NODE1_CERT_PATH).unwrap();
+    let cert = to_cert_chain(&cert_pem).unwrap();
+    let key_pem = fs::read(NODE1_KEY_PATH).unwrap();
+    let key = to_private_key(&key_pem).unwrap();
+    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from(CA_CERT_PATH)];
+    let ca_certs = to_root_cert(&ca_cert_path).unwrap();
+
+    let certs = Arc::new(Certs {
+        certs: cert,
+        key,
+        ca_certs,
+    });
+
+    tokio::spawn(server().run(
+        db.clone(),
+        pcap_sources,
+        stream_direct_channels,
+        ingest_sources,
+        peers,
+        peer_idents,
+        certs,
+        Arc::new(Notify::new()),
+    ));
+    let publish = TestClient::new().await;
+
+    let (mut send_pub_req, mut recv_pub_resp) =
+        publish.conn.open_bi().await.expect("failed to open stream");
+
+    let message = RequestRawData {
+        kind: String::from(KIND),
+        input: vec![(String::from(SOURCE), vec![TIMESTAMP])],
+    };
+
+    send_range_data_request(&mut send_pub_req, MessageCode::RawData, message)
+        .await
+        .unwrap();
+
+    let mut result_data = vec![];
+    loop {
+        let resp_data = receive_range_data::<Option<(i64, String, Vec<u8>)>>(&mut recv_pub_resp)
+            .await
+            .unwrap();
+
+        if let Some(data) = resp_data {
+            result_data.push(data);
+        } else {
+            break;
+        }
+    }
+
+    let raw_data = match oneshot_recv.await {
+        Ok(v) => v,
+        Err(_) => {
+            eprintln!("the sender dropped");
+            Vec::new()
+        }
+    };
+
+    assert_eq!(result_data.len(), 1);
+    assert_eq!(result_data[0].0, TIMESTAMP);
+    assert_eq!(&result_data[0].1, SOURCE);
+    assert_eq!(
+        raw_data,
+        bincode::serialize::<Option<(i64, String, Vec<u8>)>>(&result_data.pop()).unwrap()
+    );
+}
diff --git a/src/server.rs b/src/server.rs
index 0f89489..13a44e7 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -1,7 +1,7 @@
 use anyhow::{bail, Context, Result};
 use log_broker::{info, LogLocation};
 use quinn::{ClientConfig, Connection, ServerConfig, TransportConfig};
-use rustls::{Certificate, PrivateKey};
+use rustls::{Certificate, PrivateKey, RootCertStore};
 use std::{sync::Arc, time::Duration};
 use x509_parser::nom::Parser;
 
@@ -10,30 +10,22 @@
 pub const SERVER_ENDPOINT_DELAY: u64 = 300;
 pub const SERVER_CONNNECTION_DELAY: u64 = 200;
 const KEEP_ALIVE_INTERVAL: Duration = Duration::from_millis(5_000);
 
+#[allow(clippy::module_name_repetitions, clippy::struct_field_names)]
+#[derive(Clone)]
+pub struct Certs {
+    pub certs: Vec<Certificate>,
+    pub key: PrivateKey,
+    pub ca_certs: RootCertStore,
+}
+
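For downstream code, the practical change is that certificate material now travels as one `Arc<Certs>` instead of three loose arguments. A sketch of the new call shape (paths illustrative, error handling simplified):

    // Build the shared cert bundle once, then derive both QUIC configs from it.
    let ca_cert_path: Vec<PathBuf> = vec![PathBuf::from("root.pem")];
    let certs = Arc::new(Certs {
        certs: to_cert_chain(&fs::read("cert.pem")?)?,
        key: to_private_key(&fs::read("key.pem")?)?,
        ca_certs: to_root_cert(&ca_cert_path)?,
    });
    let server_config = config_server(&certs)?;
    let client_config = config_client(&certs)?;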
 #[allow(clippy::module_name_repetitions)]
-pub fn config_server(
-    certs: Vec<Certificate>,
-    key: PrivateKey,
-    files: Vec<Vec<u8>>,
-) -> Result<ServerConfig> {
-    let mut client_auth_roots = rustls::RootCertStore::empty();
-    for file in files {
-        let root_cert: Vec<Certificate> = rustls_pemfile::certs(&mut &*file)
-            .context("invalid PEM-encoded certificate")?
-            .into_iter()
-            .map(rustls::Certificate)
-            .collect();
-        if let Some(cert) = root_cert.first() {
-            client_auth_roots
-                .add(cert)
-                .context("failed to add client auth root cert")?;
-        }
-    }
-    let client_auth = rustls::server::AllowAnyAuthenticatedClient::new(client_auth_roots).boxed();
+pub fn config_server(certs: &Arc<Certs>) -> Result<ServerConfig> {
+    let client_auth =
+        rustls::server::AllowAnyAuthenticatedClient::new(certs.ca_certs.clone()).boxed();
     let server_crypto = rustls::ServerConfig::builder()
         .with_safe_defaults()
         .with_client_cert_verifier(client_auth)
-        .with_single_cert(certs, key)
+        .with_single_cert(certs.certs.clone(), certs.key.clone())
         .context("server config error")?;
 
     let mut server_config = ServerConfig::with_crypto(Arc::new(server_crypto));
@@ -83,28 +75,11 @@ pub fn certificate_info(cert_info: &[Certificate]) -> Result<(String, String)> {
     }
 }
 
-pub fn config_client(
-    cert: Vec<Certificate>,
-    key: PrivateKey,
-    files: Vec<Vec<u8>>,
-) -> Result<ClientConfig> {
-    let mut root_store = rustls::RootCertStore::empty();
-    for file in files {
-        let root_cert: Vec<Certificate> = rustls_pemfile::certs(&mut &*file)
-            .context("invalid PEM-encoded certificate")?
-            .into_iter()
-            .map(rustls::Certificate)
-            .collect();
-        if let Some(cert) = root_cert.first() {
-            root_store
-                .add(cert)
-                .context("failed to add client auth root cert")?;
-        }
-    }
+pub fn config_client(certs: &Arc<Certs>) -> Result<ClientConfig> {
     let tls_config = rustls::ClientConfig::builder()
         .with_safe_defaults()
-        .with_root_certificates(root_store)
-        .with_client_auth_cert(cert, key)?;
+        .with_root_certificates(certs.ca_certs.clone())
+        .with_client_auth_cert(certs.certs.clone(), certs.key.clone())?;
 
     let mut transport = TransportConfig::default();
     transport.keep_alive_interval(Some(KEEP_ALIVE_INTERVAL));
diff --git a/src/storage/migration.rs b/src/storage/migration.rs
index 202d243..ed46de5 100644
--- a/src/storage/migration.rs
+++ b/src/storage/migration.rs
@@ -11,7 +11,7 @@ use std::{
     path::Path,
 };
 
-const COMPATIBLE_VERSION_REQ: &str = ">0.13.0-alpha,<0.16.0-alpha.2";
+const COMPATIBLE_VERSION_REQ: &str = ">0.13.0-alpha,<0.17.0-alpha";
 
 /// Migrates the data directory to the up-to-date format if necessary. 
/// diff --git a/tests/cert.pem b/tests/cert.pem deleted file mode 100644 index a33068f..0000000 --- a/tests/cert.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC1jCCAb6gAwIBAgIUG5Ym7P2vyEjCRLnI6g2Pl+1aanIwDQYJKoZIhvcNAQEL -BQAwFDESMBAGA1UEAxMJbG9jYWxob3N0MB4XDTIzMDUxNTAxMzIyMloXDTMzMDUx -MjAxMzIyMlowHDEaMBgGA1UEAwwRZ2lnYW50b0Bsb2NhbGhvc3QwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDYAR+C++ie3BxMQ+p6LFjpLm5Do3rASv9k -beYfIM+hgLOnk63MeirASEVT5OKAukBm0i0u4Neums1Uv7cacRv9lQ/L1cHvIq1q -hxBfRLgarPOMaUwXJ+EwZvIyQqvrqEhzs7vOBW8JLAM5lYGhNCaQRwY+vDB6KCwi -kh4qz4PwV8eKYBYPkSDyw+2uf5MKGpFfIZpMrZqDOHHbXc0+YkPJDzorjSK48QVg -VAU5JVYBK879F5kbtrWp2HVVKEQY9ynpkifX4K6IA5+1kFDGrpfftGVUE7NMAk0L -b1TtlS9m7YCznqkmEUwfai7JHruUkRrcO8hc+s34XA+52+5J3ydnAgMBAAGjGDAW -MBQGA1UdEQQNMAuCCWxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAQEATOg8RDRn -YzFWBcY9mKqkLkqvfXK93GNT3ehoUMJT8B/ddIA0Gx8lFYICzCgVSz8iljrPTSic -am8pw4gVNWhWdQlJIOZsWGptDXPZD289atzZuOoNXMjRvcBelaRht8qqtiYoM5rL -747QC4NwdLOImDne19sDiENpNfKGo2CgL2GN4xkZiLU+6AJkdFYTWUVdOtnpJpP5 -134d0rxBGrvscvh4IDv8yhPz6pCovlKCyuKLWSjEjE5dIrjJu0dbTiWv+XZeYKfs -Y4QII4M7hA5n0Q/CnXzb1t3Ws/xDYCL/K7kSttqqYrxWV2gdO4yQXm3Qjt6K0hXo -u1YmJdVsz6xCig== ------END CERTIFICATE----- diff --git a/tests/certs/node1/cert.pem b/tests/certs/node1/cert.pem new file mode 100644 index 0000000..f3dc02a --- /dev/null +++ b/tests/certs/node1/cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDKTCCAhGgAwIBAgIUZjX4IqX6Cy0rxTplnPI/KrHUiu8wDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIzMTExNzEzNDY0NVoXDTMzMTEx +NDEzNDY0NVowGDEWMBQGA1UEAwwNZ2lnYW50b0Bub2RlMTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAL/vQkWEkreEzt8GFwKI0qtDsdWNkn3JtlcbgIBr +s/qF2GKfWBDYGDOGbmzB+rDj06gSiA6F8OIfUk0Lj/PdzPM9vs3OdC36ReRqOGm+ +XhC7cFQjG2n90bvExp8/OlE2bI5XrURBTr4jrsffdG2s2Hqot+Ki/73iN5sXnE7x +1gRHx1fYQZMIqFDaskLEjkouzTJ1Q1GU9gzQuPbfzO/Clk2wd6MhKURTKSLLxelc +38lJv24E0FBfhfkMr+15GhwXicZP2U90Rwt6GKqybj5JZEFdvPOwYW5OcsHhGSc+ +triXLmAIy9Pf+8Hfy/syc8gDU+9qdJvDBfUs0VOpJ4fd368CAwEAAaNvMG0wEAYD +VR0RBAkwB4IFbm9kZTEwHQYDVR0OBBYEFME4YYJPKgyVv+Kl6yvyRDNjYY2LMDoG +A1UdIwQzMDGhGKQWMBQxEjAQBgNVBAMMCWxvY2FsaG9zdIIVAOxlN3pV8Reb/ftf +b1iXnOG8HzjXMA0GCSqGSIb3DQEBCwUAA4IBAQDFnSeMY5Q+9FwQbekmKdPypIJw +r0pRK3XMea1J811juw5SLgHRNUH8SmHLQ7r/IJMO8sKEp5pNl5+yFqTFanokyBXh +0ykxtSNEUVJHj2YybkH1hS7AHbbOzCUF0G5mnPrbiJMTFpsEhHNn970gXluGCpjd +knO7ABixWZbW2jurjfqhQPZWsttJiAbpc5TdrZXJ8RMulruAv+o8mDhBKXpltnWM +CES7VIkx5TskWj1LUZncvrbL75IMNZBfS+UmNTaw5WCxGfCC0axvyNnnWRcytD6N +/hWEvTaNb/CQdDnlN7OfQYXQttbek0iPIgUeYNtcSoJEYCyUhEqhTeeI02IZ +-----END CERTIFICATE----- diff --git a/tests/config.toml b/tests/certs/node1/config.toml similarity index 75% rename from tests/config.toml rename to tests/certs/node1/config.toml index c157384..a747f82 100644 --- a/tests/config.toml +++ b/tests/certs/node1/config.toml @@ -14,8 +14,7 @@ export_dir = "tests/export" ack_transmission = 1024 max_open_files = 8000 max_mb_of_level_base = 512 -peer_address= "100.101.102.1:38383" +peer_address= "127.0.0.1:48383" peers=[ - { address = "100.101.102.2:38383", host_name = "einsis1"}, - { address = "100.101.102.3:38383", host_name = "einsis2"}, + { address = "127.0.0.1:60192", host_name = "node2"}, ] diff --git a/tests/certs/node1/key.pem b/tests/certs/node1/key.pem new file mode 100644 index 0000000..324cb0e --- /dev/null +++ b/tests/certs/node1/key.pem @@ -0,0 +1,118 @@ +Private-Key: (2048 bit, 2 primes) +modulus: + 00:bf:ef:42:45:84:92:b7:84:ce:df:06:17:02:88: + d2:ab:43:b1:d5:8d:92:7d:c9:b6:57:1b:80:80:6b: + b3:fa:85:d8:62:9f:58:10:d8:18:33:86:6e:6c:c1: + 
fa:b0:e3:d3:a8:12:88:0e:85:f0:e2:1f:52:4d:0b: + 8f:f3:dd:cc:f3:3d:be:cd:ce:74:2d:fa:45:e4:6a: + 38:69:be:5e:10:bb:70:54:23:1b:69:fd:d1:bb:c4: + c6:9f:3f:3a:51:36:6c:8e:57:ad:44:41:4e:be:23: + ae:c7:df:74:6d:ac:d8:7a:a8:b7:e2:a2:ff:bd:e2: + 37:9b:17:9c:4e:f1:d6:04:47:c7:57:d8:41:93:08: + a8:50:da:b2:42:c4:8e:4a:2e:cd:32:75:43:51:94: + f6:0c:d0:b8:f6:df:cc:ef:c2:96:4d:b0:77:a3:21: + 29:44:53:29:22:cb:c5:e9:5c:df:c9:49:bf:6e:04: + d0:50:5f:85:f9:0c:af:ed:79:1a:1c:17:89:c6:4f: + d9:4f:74:47:0b:7a:18:aa:b2:6e:3e:49:64:41:5d: + bc:f3:b0:61:6e:4e:72:c1:e1:19:27:3e:b6:b8:97: + 2e:60:08:cb:d3:df:fb:c1:df:cb:fb:32:73:c8:03: + 53:ef:6a:74:9b:c3:05:f5:2c:d1:53:a9:27:87:dd: + df:af +publicExponent: 65537 (0x10001) +privateExponent: + 44:6a:23:ec:c4:b7:49:7c:29:03:3d:57:f9:5a:cd: + 01:28:35:9d:02:04:3c:1b:29:08:3e:b0:dc:d5:c7: + 47:1e:eb:85:27:f8:4b:b4:c3:57:a6:6c:f9:0e:61: + ee:28:9f:7a:f2:f4:15:7d:2f:e2:62:38:c3:66:c2: + 2e:b2:9a:89:ff:b1:e2:09:5b:c7:41:c5:25:08:a9: + c8:23:87:c3:1e:15:09:a0:a3:f6:71:77:8f:9f:32: + 0f:74:89:b4:86:1a:46:d8:c0:d5:aa:13:c7:d9:cf: + 44:c6:85:69:4c:a9:b3:7b:2d:64:b7:26:42:c8:7e: + 7a:a5:7d:5d:74:7d:8d:73:6b:b8:40:4b:97:b2:8b: + 6f:71:e1:d3:6d:01:eb:2d:b1:c2:67:5b:27:48:bd: + 68:d3:07:da:27:37:51:07:f0:24:0c:1c:7d:01:66: + 64:1c:9f:6a:4d:55:84:22:6d:54:51:3d:0e:33:dd: + da:02:7b:d9:43:4a:42:8b:ad:c9:23:12:cc:82:5e: + 77:d9:50:45:51:6d:82:ff:15:d2:d0:6c:8e:02:c5: + 28:71:d3:e1:41:e9:b2:5a:70:84:db:07:57:d6:46: + 1d:a0:34:14:d2:a7:03:a4:1d:74:69:5e:4c:4e:04: + 00:75:0c:21:df:17:63:60:0a:b1:70:07:e9:b1:be: + ad +prime1: + 00:f4:50:4d:3f:b1:f6:bb:0e:e2:4f:5a:b0:f0:a9: + d5:71:8d:6f:c1:6b:5e:4d:5f:f6:2b:9d:f5:84:c0: + af:2b:09:a2:11:61:42:8b:76:c1:f4:2c:fb:9a:3d: + bc:9e:65:a2:8a:f0:83:d3:7e:9c:53:8b:79:43:61: + f4:91:eb:a3:13:3f:41:87:bc:bc:ca:11:78:50:14: + 8c:84:d6:61:50:2e:e1:dd:8e:cc:64:21:42:b7:5e: + 2e:ee:f2:5b:c6:c9:6e:55:f9:2d:32:2a:2f:c9:8a: + 63:4c:41:0d:fc:d5:78:d2:31:da:c2:09:02:4b:10: + f9:1e:38:b4:35:81:87:ce:9d +prime2: + 00:c9:1d:8f:0d:84:72:23:10:9b:e3:01:29:b0:2d: + 66:ff:29:cf:da:67:ca:3c:b7:15:94:3e:69:2f:06: + 8a:dc:73:df:23:c9:45:4a:11:53:13:8f:1b:14:9f: + ea:d2:f7:8f:c2:a6:f8:e9:c6:4a:62:7a:a5:e6:6b: + 0c:3e:f9:f6:e9:04:71:74:dc:f5:6a:10:e2:f1:36: + 0c:ea:bf:da:47:03:a8:a1:a7:f5:32:4d:37:cc:1c: + 04:ef:e5:d3:1a:f2:ce:b8:c3:e4:b7:30:d1:2a:c7: + 2f:99:d4:93:a1:ef:b7:3b:33:27:1c:cd:50:43:8b: + e0:37:3a:8d:be:68:2d:cf:bb +exponent1: + 1d:eb:7f:aa:5f:47:12:1d:7e:85:ad:5b:a1:fd:8a: + 90:3f:7c:02:39:fd:f0:87:e2:03:f1:72:51:f1:af: + 02:1c:49:e6:ca:fc:75:4b:f3:f0:cd:59:7a:c8:38: + c3:45:3b:24:bc:d3:c8:20:e7:50:2b:f2:8b:d8:01: + 35:a5:9f:f1:09:7c:22:fd:70:39:66:ee:4f:10:55: + 2d:27:7b:ff:6f:de:65:5d:c9:96:31:a4:94:04:4a: + c7:24:93:2c:c3:1a:6a:18:fb:f9:fa:c4:a3:6f:f9: + 3d:2b:72:65:16:57:97:96:57:6e:0f:e0:62:05:10: + 1d:2d:60:94:74:ea:86:d9 +exponent2: + 00:a6:70:54:fc:4d:d9:1b:95:da:3c:d2:b0:8a:81: + f8:94:41:29:28:49:ee:ff:b2:51:24:7f:46:13:49: + e9:cf:aa:01:48:1f:80:bd:5c:7b:11:5b:c4:80:9c: + 1b:fd:7e:2d:b6:5c:f2:f0:9d:4e:e0:13:3a:8a:e7: + 9a:99:df:ca:6c:6b:65:ed:de:13:28:35:53:6b:66: + 5c:e8:45:e0:42:17:10:0e:c0:ee:56:33:bb:72:aa: + d9:97:23:ef:42:85:21:79:9d:7d:01:f2:5f:98:74: + 81:bb:60:46:d1:a4:8e:52:6a:f1:fa:8f:1b:d6:da: + c2:b3:52:3d:0e:10:2f:a1:93 +coefficient: + 6c:69:b9:bb:07:17:78:20:4a:a0:cd:26:98:ad:83: + 08:83:fc:0a:4f:7d:f1:ee:76:52:0b:b1:e9:41:04: + 4e:41:a4:ad:9b:61:34:2e:25:05:03:58:bf:b9:b3: + 9d:67:c5:cb:2b:7f:30:06:8e:f4:80:24:35:6f:0d: + 10:e3:92:e2:81:3d:eb:3a:db:c2:31:e4:73:0b:a3: + 60:f6:23:38:6f:c8:a1:e3:45:fe:09:49:bf:03:98: + 
70:90:fe:23:dd:d6:ee:4f:56:44:9f:e6:a8:fa:46: + 92:40:b1:34:ad:37:fa:cc:e8:f5:d0:af:39:b8:a2: + 39:23:07:14:f4:26:4c:31 +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/70JFhJK3hM7f +BhcCiNKrQ7HVjZJ9ybZXG4CAa7P6hdhin1gQ2Bgzhm5swfqw49OoEogOhfDiH1JN +C4/z3czzPb7NznQt+kXkajhpvl4Qu3BUIxtp/dG7xMafPzpRNmyOV61EQU6+I67H +33RtrNh6qLfiov+94jebF5xO8dYER8dX2EGTCKhQ2rJCxI5KLs0ydUNRlPYM0Lj2 +38zvwpZNsHejISlEUykiy8XpXN/JSb9uBNBQX4X5DK/teRocF4nGT9lPdEcLehiq +sm4+SWRBXbzzsGFuTnLB4RknPra4ly5gCMvT3/vB38v7MnPIA1PvanSbwwX1LNFT +qSeH3d+vAgMBAAECggEARGoj7MS3SXwpAz1X+VrNASg1nQIEPBspCD6w3NXHRx7r +hSf4S7TDV6Zs+Q5h7iifevL0FX0v4mI4w2bCLrKaif+x4glbx0HFJQipyCOHwx4V +CaCj9nF3j58yD3SJtIYaRtjA1aoTx9nPRMaFaUyps3stZLcmQsh+eqV9XXR9jXNr +uEBLl7KLb3Hh020B6y2xwmdbJ0i9aNMH2ic3UQfwJAwcfQFmZByfak1VhCJtVFE9 +DjPd2gJ72UNKQoutySMSzIJed9lQRVFtgv8V0tBsjgLFKHHT4UHpslpwhNsHV9ZG +HaA0FNKnA6QddGleTE4EAHUMId8XY2AKsXAH6bG+rQKBgQD0UE0/sfa7DuJPWrDw +qdVxjW/Ba15NX/YrnfWEwK8rCaIRYUKLdsH0LPuaPbyeZaKK8IPTfpxTi3lDYfSR +66MTP0GHvLzKEXhQFIyE1mFQLuHdjsxkIUK3Xi7u8lvGyW5V+S0yKi/JimNMQQ38 +1XjSMdrCCQJLEPkeOLQ1gYfOnQKBgQDJHY8NhHIjEJvjASmwLWb/Kc/aZ8o8txWU +PmkvBorcc98jyUVKEVMTjxsUn+rS94/CpvjpxkpieqXmaww++fbpBHF03PVqEOLx +Ngzqv9pHA6ihp/UyTTfMHATv5dMa8s64w+S3MNEqxy+Z1JOh77c7MycczVBDi+A3 +Oo2+aC3PuwKBgB3rf6pfRxIdfoWtW6H9ipA/fAI5/fCH4gPxclHxrwIcSebK/HVL +8/DNWXrIOMNFOyS808gg51Ar8ovYATWln/EJfCL9cDlm7k8QVS0ne/9v3mVdyZYx +pJQESsckkyzDGmoY+/n6xKNv+T0rcmUWV5eWV24P4GIFEB0tYJR06obZAoGBAKZw +VPxN2RuV2jzSsIqB+JRBKShJ7v+yUSR/RhNJ6c+qAUgfgL1cexFbxICcG/1+LbZc +8vCdTuATOornmpnfymxrZe3eEyg1U2tmXOhF4EIXEA7A7lYzu3Kq2Zcj70KFIXmd +fQHyX5h0gbtgRtGkjlJq8fqPG9bawrNSPQ4QL6GTAoGAbGm5uwcXeCBKoM0mmK2D +CIP8Ck998e52Ugux6UEETkGkrZthNC4lBQNYv7mznWfFyyt/MAaO9IAkNW8NEOOS +4oE96zrbwjHkcwujYPYjOG/IoeNF/glJvwOYcJD+I93W7k9WRJ/mqPpGkkCxNK03 ++szo9dCvObiiOSMHFPQmTDE= +-----END PRIVATE KEY----- diff --git a/tests/certs/node2/cert.pem b/tests/certs/node2/cert.pem new file mode 100644 index 0000000..3ca94b3 --- /dev/null +++ b/tests/certs/node2/cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDKTCCAhGgAwIBAgIUGxRbSl+jy/nEUHCyWicRzCIAOaMwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIzMTExNzEzNDc1NVoXDTMzMTEx +NDEzNDc1NVowGDEWMBQGA1UEAwwNZ2lnYW50b0Bub2RlMjCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAJ4AsVhZKbjzPP+WNmQCz00ekyDWYNf1iFgyPueY +4YG3W9zwaPfrBP+375DNEpJ78rWonBiISyU58J4+FdrOWSOrOCo2Q+DverCW8H99 +TN8EpqXhzKwYur8vZJPTg+Xa8tETnEzvOlsfweTAxxqKLqUtb4+GoxlXRiM0MfbF +IpAoTXlB0dBQi3au04eUTFKFKJuA+Fxb47RMmT7M4+IKgpREsSGOeKC4fZKjAYhM +b+pthtRwzxpyJNAvbNbDrR48aBcUzxySsx+dPUNMXQuODxWVPcfrawJBO7ZmCAb8 +QwmxwtvFUjSRjL/S1s/datuLKfMXjJGxTIHDhsVlM47I2VECAwEAAaNvMG0wEAYD +VR0RBAkwB4IFbm9kZTIwHQYDVR0OBBYEFP/M2ab7yyM8KzjhPegzCOIACwjjMDoG +A1UdIwQzMDGhGKQWMBQxEjAQBgNVBAMMCWxvY2FsaG9zdIIVAOxlN3pV8Reb/ftf +b1iXnOG8HzjXMA0GCSqGSIb3DQEBCwUAA4IBAQBYcjeliNQTLd8zoSW6zR1yU2vR +8muQXREH+YeZy1qO2LqUhzyzMvLIyotTvpbnZghR2RE0VK96dVZ5tjzAGGaVxnZz +r/3iAsEbcOJL6MGy918CDrp0SZc0tCxjNCOS9Nj2cdsA9WOIyKYx2bCbK0LkfRQV +FLl3yMLdzN8OIG6JQ8PTH8p0wT8hJ4rB6CI7giel0InXG3dWavXeVnK4pcRXx/0t +1fXi9Zy2l0EdhZpbGL5Dsvanrao48GstA5Hb3AF3IYiohZUxqXyQfUUCyixufhHa +C6n5srHqPSe/baJ9m1K4kyOC5rY3toEpRhkKkR0m/wyRfB+w5P+KYqUIsKNO +-----END CERTIFICATE----- diff --git a/tests/certs/node2/key.pem b/tests/certs/node2/key.pem new file mode 100644 index 0000000..468a0c3 --- /dev/null +++ b/tests/certs/node2/key.pem @@ -0,0 +1,118 @@ +Private-Key: (2048 bit, 2 primes) +modulus: + 00:9e:00:b1:58:59:29:b8:f3:3c:ff:96:36:64:02: + cf:4d:1e:93:20:d6:60:d7:f5:88:58:32:3e:e7:98: + 
e1:81:b7:5b:dc:f0:68:f7:eb:04:ff:b7:ef:90:cd: + 12:92:7b:f2:b5:a8:9c:18:88:4b:25:39:f0:9e:3e: + 15:da:ce:59:23:ab:38:2a:36:43:e0:ef:7a:b0:96: + f0:7f:7d:4c:df:04:a6:a5:e1:cc:ac:18:ba:bf:2f: + 64:93:d3:83:e5:da:f2:d1:13:9c:4c:ef:3a:5b:1f: + c1:e4:c0:c7:1a:8a:2e:a5:2d:6f:8f:86:a3:19:57: + 46:23:34:31:f6:c5:22:90:28:4d:79:41:d1:d0:50: + 8b:76:ae:d3:87:94:4c:52:85:28:9b:80:f8:5c:5b: + e3:b4:4c:99:3e:cc:e3:e2:0a:82:94:44:b1:21:8e: + 78:a0:b8:7d:92:a3:01:88:4c:6f:ea:6d:86:d4:70: + cf:1a:72:24:d0:2f:6c:d6:c3:ad:1e:3c:68:17:14: + cf:1c:92:b3:1f:9d:3d:43:4c:5d:0b:8e:0f:15:95: + 3d:c7:eb:6b:02:41:3b:b6:66:08:06:fc:43:09:b1: + c2:db:c5:52:34:91:8c:bf:d2:d6:cf:dd:6a:db:8b: + 29:f3:17:8c:91:b1:4c:81:c3:86:c5:65:33:8e:c8: + d9:51 +publicExponent: 65537 (0x10001) +privateExponent: + 3f:20:c6:ba:a6:51:39:b0:91:80:0a:bb:ab:84:c5: + df:a9:a6:1e:1a:19:32:5f:ca:fc:76:02:d5:14:ab: + 90:e0:ba:26:d1:b5:16:cc:c3:40:0a:75:00:95:36: + e4:e8:f9:e5:a3:ba:ce:40:72:49:aa:d7:1c:da:70: + 6f:ee:3f:06:35:cc:28:6d:6a:fc:6d:a2:46:f8:df: + 99:08:6e:94:b2:6e:84:db:6d:23:9b:73:cd:9c:39: + 62:a8:c1:4b:ae:89:92:88:73:cb:f7:4f:09:06:6c: + b9:21:5e:cb:97:47:20:be:c1:12:86:b8:16:91:5c: + 98:07:2c:29:4c:c5:4c:f3:f4:2e:69:ef:45:24:b1: + 74:c3:f1:67:6e:4c:cb:d8:35:c8:8b:3d:f2:be:d1: + 81:d5:8f:f4:fa:da:69:1a:04:3c:9a:73:5c:9d:a0: + 15:66:25:d9:a8:05:14:a7:21:b1:3d:ce:9c:bc:02: + 55:b6:ce:60:b9:82:a7:8a:c6:a0:11:2c:21:3e:6d: + c6:13:36:36:aa:29:f0:c8:56:4f:1a:0f:33:0e:20: + 4a:8c:2d:d6:20:cd:79:93:52:5d:7b:6c:c1:3b:51: + f4:30:cb:5f:86:68:7f:de:da:c5:31:2b:7b:3f:ff: + a9:a2:5b:af:d1:2a:8b:53:53:36:41:fb:53:31:6d: + 73 +prime1: + 00:d8:80:0f:44:b9:6f:47:c0:51:34:af:42:fd:c8: + 8d:8e:1c:26:99:55:cf:c8:08:04:4d:3a:6b:6a:88: + 13:af:90:28:fe:dc:39:45:0f:c9:30:d9:54:78:12: + d0:30:97:03:b2:e0:00:1b:2b:88:e6:74:85:c4:5c: + 53:44:f8:b7:f1:af:84:82:61:75:88:5a:8d:c3:4d: + 73:75:58:2c:33:11:c8:1b:97:79:9b:b3:3b:59:5d: + 2c:25:e3:09:a7:45:b5:0a:2a:30:31:29:e5:cc:e3: + 43:56:5c:fb:46:92:24:a8:fb:5a:e3:63:57:fd:e0: + 6f:46:77:91:65:27:4c:52:ef +prime2: + 00:ba:d4:6d:06:34:0c:9f:9f:ce:89:24:4b:bb:57: + e0:36:18:e6:e4:93:93:1e:d6:d3:4b:a0:29:2f:70: + 7e:d5:e2:65:62:5f:d6:79:10:6a:b1:e1:95:5f:4c: + be:52:01:6f:64:72:06:d1:d7:d6:27:4d:da:a7:d6: + 5e:ec:9f:97:9d:01:0b:37:d4:00:38:58:95:5b:ca: + 2b:f5:38:fb:b2:ee:84:da:f7:f6:e4:75:2e:2d:9b: + 41:6b:fd:70:63:a6:75:e9:9d:d7:3e:6f:f4:9e:d3: + f7:db:91:5f:71:b0:b3:b7:7b:52:87:e3:b0:9f:f9: + 19:f4:f0:5d:48:16:d4:97:bf +exponent1: + 00:ab:ea:9c:b5:3c:ee:e1:b2:14:89:da:76:3e:e2: + 27:8d:e6:4a:bc:4e:08:ed:d5:77:dc:ff:7b:d6:9f: + 71:ca:62:96:3b:33:13:03:05:33:04:e8:3f:0c:0e: + 1c:65:c9:35:35:b0:6c:45:26:ff:d1:c8:4b:84:8e: + af:cc:c0:18:09:02:10:1e:1c:57:e1:4b:ce:f0:7c: + 2b:27:ce:39:62:e0:46:df:aa:1b:f6:08:90:23:39: + 3a:5a:67:7b:fd:e8:fb:86:b3:d3:8a:e6:b3:45:4c: + 5d:e7:51:74:73:28:8f:95:e3:ea:db:4f:c2:ca:a5: + 92:97:09:51:5a:5a:a7:5f:df +exponent2: + 46:6e:fd:27:20:8a:3d:c7:5e:72:bf:57:39:aa:98: + 61:6c:a3:e7:3a:67:75:71:e1:b8:9a:09:cc:d3:a9: + 4d:73:0b:6b:b9:db:be:4a:5d:1e:f0:7f:82:ad:08: + f7:1b:89:66:21:c2:c6:6b:8b:55:c7:f9:0d:db:38: + 70:16:7e:6e:f7:4c:c6:60:4b:30:f2:dc:63:fd:64: + f3:0e:e7:7c:fe:84:e1:94:27:e4:da:f4:6c:c7:22: + 33:d7:18:fe:c8:6b:9d:86:1e:f6:6f:f8:95:c4:2d: + a5:fe:39:30:40:40:90:84:92:89:9e:62:20:fd:25: + 36:fd:42:fd:81:75:e6:3b +coefficient: + 00:ba:04:b1:fb:4b:47:e3:e2:e7:1f:e4:37:aa:0d: + 9f:99:77:b9:2d:a8:05:9f:10:29:f6:59:b9:bc:1b: + 93:91:1c:a9:bd:f7:26:de:53:b7:eb:4e:32:6b:51: + 6e:b6:70:49:83:aa:b6:4d:d0:5f:88:75:ff:7f:82: + 69:65:cd:91:57:82:94:b3:9f:3c:10:4c:b5:7f:6d: + 
04:cd:8c:cf:14:1e:eb:c2:4b:60:05:ec:e6:b1:f4: + b1:c4:78:a6:1b:29:7d:81:e6:ea:5a:78:c4:73:bb: + 9b:ed:87:9b:f3:54:66:20:a1:a6:d2:5a:52:c6:c8: + 67:81:26:3e:4e:c7:7f:a0:54 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCeALFYWSm48zz/ +ljZkAs9NHpMg1mDX9YhYMj7nmOGBt1vc8Gj36wT/t++QzRKSe/K1qJwYiEslOfCe +PhXazlkjqzgqNkPg73qwlvB/fUzfBKal4cysGLq/L2ST04Pl2vLRE5xM7zpbH8Hk +wMcaii6lLW+PhqMZV0YjNDH2xSKQKE15QdHQUIt2rtOHlExShSibgPhcW+O0TJk+ +zOPiCoKURLEhjniguH2SowGITG/qbYbUcM8aciTQL2zWw60ePGgXFM8ckrMfnT1D +TF0Ljg8VlT3H62sCQTu2ZggG/EMJscLbxVI0kYy/0tbP3WrbiynzF4yRsUyBw4bF +ZTOOyNlRAgMBAAECggEAPyDGuqZRObCRgAq7q4TF36mmHhoZMl/K/HYC1RSrkOC6 +JtG1FszDQAp1AJU25Oj55aO6zkBySarXHNpwb+4/BjXMKG1q/G2iRvjfmQhulLJu +hNttI5tzzZw5YqjBS66Jkohzy/dPCQZsuSFey5dHIL7BEoa4FpFcmAcsKUzFTPP0 +LmnvRSSxdMPxZ25My9g1yIs98r7RgdWP9PraaRoEPJpzXJ2gFWYl2agFFKchsT3O +nLwCVbbOYLmCp4rGoBEsIT5txhM2Nqop8MhWTxoPMw4gSowt1iDNeZNSXXtswTtR +9DDLX4Zof97axTErez//qaJbr9Eqi1NTNkH7UzFtcwKBgQDYgA9EuW9HwFE0r0L9 +yI2OHCaZVc/ICARNOmtqiBOvkCj+3DlFD8kw2VR4EtAwlwOy4AAbK4jmdIXEXFNE ++Lfxr4SCYXWIWo3DTXN1WCwzEcgbl3mbsztZXSwl4wmnRbUKKjAxKeXM40NWXPtG +kiSo+1rjY1f94G9Gd5FlJ0xS7wKBgQC61G0GNAyfn86JJEu7V+A2GObkk5Me1tNL +oCkvcH7V4mViX9Z5EGqx4ZVfTL5SAW9kcgbR19YnTdqn1l7sn5edAQs31AA4WJVb +yiv1OPuy7oTa9/bkdS4tm0Fr/XBjpnXpndc+b/Se0/fbkV9xsLO3e1KH47Cf+Rn0 +8F1IFtSXvwKBgQCr6py1PO7hshSJ2nY+4ieN5kq8Tgjt1Xfc/3vWn3HKYpY7MxMD +BTME6D8MDhxlyTU1sGxFJv/RyEuEjq/MwBgJAhAeHFfhS87wfCsnzjli4Ebfqhv2 +CJAjOTpaZ3v96PuGs9OK5rNFTF3nUXRzKI+V4+rbT8LKpZKXCVFaWqdf3wKBgEZu +/Scgij3HXnK/VzmqmGFso+c6Z3Vx4biaCczTqU1zC2u5275KXR7wf4KtCPcbiWYh +wsZri1XH+Q3bOHAWfm73TMZgSzDy3GP9ZPMO53z+hOGUJ+Ta9GzHIjPXGP7Ia52G +HvZv+JXELaX+OTBAQJCEkomeYiD9JTb9Qv2BdeY7AoGBALoEsftLR+Pi5x/kN6oN +n5l3uS2oBZ8QKfZZubwbk5Ecqb33Jt5Tt+tOMmtRbrZwSYOqtk3QX4h1/3+CaWXN +kVeClLOfPBBMtX9tBM2MzxQe68JLYAXs5rH0scR4phspfYHm6lp4xHO7m+2Hm/NU +ZiChptJaUsbIZ4EmPk7Hf6BU +-----END PRIVATE KEY----- diff --git a/tests/certs/root.pem b/tests/certs/root.pem new file mode 100644 index 0000000..3397f05 --- /dev/null +++ b/tests/certs/root.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIICzzCCAbegAwIBAgIVAOxlN3pV8Reb/ftfb1iXnOG8HzjXMA0GCSqGSIb3DQEB +CwUAMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0yMzEwMjQwMDAwMDBaFw0yNDEw +MjQwMDAwMDBaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOQ6FGnbD7/KJ6BLjyJBXvXtMM5vubF3DkMXeM7isa9M +91k4iLzkV1zX3apmUYf3WbQJ5yRCFgraycZZRvKAnEDGuSGfJmOiCkoLJBeW9j3+ +6Wz9iAlVveAgp+WDL36KhP3m+DyRLrYFl3VB9dTnQVJuAhuKRM3LzIplpZMUDlHO +wPPyJjBRON37FgGeeu5FQ4z1xVbCGxi3VSwuuSfMGODPArHJ4ND3WMKJgxueP9ya +5X0gW/pU/X5n8PQJYnT8ly59DdGwW6sjuwMYFSlbQQ6y2h8skGXZRWqhBA+w9JDZ +ha9umLGHGAEkiytGn7N3ZmZH+ZHhwg3h5X24qdN6llUCAwEAAaMYMBYwFAYDVR0R +BA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAMwcnNQ0+rXt8zbz7k +15NZkCdWiS5ma/P9G0ZsMor2Pr40zmcvcGV39OaDkVA9cX7N31jLHp/2yZelu8Vs +VKRytgO/i0mUVV7wtqDXCdgG+t6e/Kr+n7Yd7TGjvtS7Pd8SenY/RCL0Jffgr5RS +n66VRxaZ8XSThxpa8Zfr4KbWdj+BIDduQFB3wp9+jAE+xBvuJaFz5KjkmDR4Umc1 +Rgu7go074jHJ14l33UuB3KRKsE7DnLBGVDtM/fZowYe3J7Vo6r8nlvrmuT/T4WEx +71Jvm/iJgdm9fFZZpjXGnWUTt0Ju6+Sw03HX1UvRgrmoEAzqO1wn37GqGfnSw8Jv +3DMw +-----END CERTIFICATE----- diff --git a/tests/key.pem b/tests/key.pem deleted file mode 100644 index 1ead6df..0000000 --- a/tests/key.pem +++ /dev/null @@ -1,117 +0,0 @@ -RSA Private-Key: (2048 bit, 2 primes) -modulus: - 00:d8:01:1f:82:fb:e8:9e:dc:1c:4c:43:ea:7a:2c: - 58:e9:2e:6e:43:a3:7a:c0:4a:ff:64:6d:e6:1f:20: - cf:a1:80:b3:a7:93:ad:cc:7a:2a:c0:48:45:53:e4: - e2:80:ba:40:66:d2:2d:2e:e0:d7:ae:9a:cd:54:bf: - b7:1a:71:1b:fd:95:0f:cb:d5:c1:ef:22:ad:6a:87: - 
10:5f:44:b8:1a:ac:f3:8c:69:4c:17:27:e1:30:66: - f2:32:42:ab:eb:a8:48:73:b3:bb:ce:05:6f:09:2c: - 03:39:95:81:a1:34:26:90:47:06:3e:bc:30:7a:28: - 2c:22:92:1e:2a:cf:83:f0:57:c7:8a:60:16:0f:91: - 20:f2:c3:ed:ae:7f:93:0a:1a:91:5f:21:9a:4c:ad: - 9a:83:38:71:db:5d:cd:3e:62:43:c9:0f:3a:2b:8d: - 22:b8:f1:05:60:54:05:39:25:56:01:2b:ce:fd:17: - 99:1b:b6:b5:a9:d8:75:55:28:44:18:f7:29:e9:92: - 27:d7:e0:ae:88:03:9f:b5:90:50:c6:ae:97:df:b4: - 65:54:13:b3:4c:02:4d:0b:6f:54:ed:95:2f:66:ed: - 80:b3:9e:a9:26:11:4c:1f:6a:2e:c9:1e:bb:94:91: - 1a:dc:3b:c8:5c:fa:cd:f8:5c:0f:b9:db:ee:49:df: - 27:67 -publicExponent: 65537 (0x10001) -privateExponent: - 37:64:22:56:8e:8e:da:9e:0b:4b:ab:66:e7:a9:93: - ab:48:52:d2:59:8b:d2:df:e8:9a:32:6c:7e:b3:05: - 4b:9f:1b:72:ef:b5:f3:e7:a8:2a:e2:dd:94:63:ef: - 89:9f:39:80:a4:89:1d:aa:5e:e4:d1:a1:b2:07:ef: - c6:82:16:f1:72:56:38:a9:3a:30:3e:ae:45:d1:18: - eb:b5:4d:98:d9:9d:22:f6:a5:a2:92:81:76:ce:0f: - 71:c0:13:22:3d:67:46:f4:62:46:b4:b2:02:a2:1c: - b9:99:93:42:69:83:32:20:64:83:95:f2:c0:80:a8: - c0:bc:c3:ab:aa:c7:64:cc:44:d4:7e:5d:6e:72:6f: - 07:85:bf:de:3c:61:db:48:97:57:db:e1:90:c2:6f: - ca:e1:35:20:14:37:1a:35:d9:54:ea:10:c2:68:56: - c9:61:d8:68:5c:b0:ef:6e:e8:8e:cf:dc:1c:cf:76: - 00:da:db:9e:27:92:0e:8e:69:8b:55:3c:3a:01:74: - 79:80:38:0c:50:10:e7:88:db:c8:36:6c:ca:f0:f3: - 24:6d:cf:48:3a:17:dd:de:67:ca:b2:7f:ae:a9:6b: - 3c:fd:0b:88:ad:87:41:45:ff:3d:dd:8a:0a:d9:11: - a6:cb:54:07:b0:a3:32:e3:3c:6c:e7:0e:46:22:a3: - d1 -prime1: - 00:fd:ca:8f:d9:58:2b:0e:71:aa:5f:05:e5:df:5d: - 6a:55:0f:57:0f:ca:e9:c2:84:e5:94:54:a7:ab:ff: - 6b:18:ff:7b:7a:61:5c:d3:50:5b:2a:3c:81:80:15: - 89:78:a1:0d:6d:a1:50:2e:82:90:cb:75:6a:23:69: - 03:88:62:d1:3c:9a:a3:62:89:53:04:51:d6:01:10: - ce:46:5d:af:81:cb:68:b2:a7:4c:1c:51:8f:3d:cc: - 23:83:db:78:99:58:0a:f6:6d:86:74:31:8f:a9:dd: - ce:59:ba:7f:b1:1c:6f:b1:3a:95:2e:8d:d7:9c:22: - 29:9c:0f:33:12:30:29:72:13 -prime2: - 00:d9:e2:5f:93:dd:83:4b:e6:cf:c7:91:69:98:81: - 2d:58:d9:cf:93:9b:34:19:98:7e:4f:0d:03:38:35: - d7:85:49:c8:cb:54:35:d6:30:f9:ff:aa:28:59:ff: - 92:1d:b1:26:68:b8:8d:65:e6:da:1c:27:09:df:66: - c2:5b:f6:82:6d:26:ff:5d:f4:36:92:49:cb:ce:b9: - 53:5e:df:c7:0b:94:12:35:69:39:3a:d1:f8:77:39: - d6:b6:94:60:7c:d0:b0:3e:51:4d:2d:96:33:37:95: - c3:ea:b0:88:50:af:97:46:7a:00:20:34:e1:a6:77: - 13:e7:d4:29:48:99:87:3f:dd -exponent1: - 00:80:58:8b:4c:d8:28:d6:22:27:8d:ec:96:a6:d2: - 40:2f:0a:15:f6:57:a0:c6:0d:9d:68:d9:32:fe:69: - aa:48:b7:13:43:fd:af:0a:f2:ad:57:fb:c7:65:f5: - fe:52:b1:e7:ef:94:4f:cf:65:9a:42:3a:7b:d5:b2: - 8a:e5:d3:4b:8b:81:8c:b5:0d:4b:9d:78:a1:68:62: - 8e:5e:c9:8c:33:70:a1:df:0f:7a:0f:2c:33:eb:85: - ac:44:47:9e:82:b6:22:bd:23:00:87:0c:75:61:eb: - 59:2a:c9:85:40:15:68:6a:d1:7c:96:51:87:43:5f: - 87:79:73:ba:59:2d:60:18:5f -exponent2: - 35:f5:4f:c2:28:ab:69:52:a1:c6:44:86:a8:13:e5: - 8d:09:73:d4:94:2d:14:e1:a4:f6:22:bc:d7:60:ce: - 1c:de:26:f1:e3:ca:e9:5e:7b:ab:0b:12:af:27:11: - a2:74:ca:94:48:90:0b:da:5d:cd:77:05:84:e3:c7: - e2:4a:6c:06:68:0f:f9:ce:8e:ed:2f:85:5f:8e:99: - cc:f7:12:2e:15:df:64:86:c8:7b:7e:a0:4f:b6:6d: - 2c:0e:16:cf:12:47:73:47:63:1b:95:04:de:11:22: - d1:ae:22:34:09:28:27:75:f3:2e:06:aa:93:35:0e: - 22:ed:71:c3:de:f1:95:a9 -coefficient: - 59:b1:eb:da:da:58:3d:40:66:37:36:1b:0c:19:d9: - 37:a0:a7:2b:61:c8:9b:4a:19:43:bd:49:be:9e:4c: - f1:64:32:a6:26:69:ac:46:36:a6:ec:3d:7b:be:c6: - f4:12:bb:d0:a1:24:10:89:0b:1a:7c:cc:46:de:98: - 3f:59:ce:9b:d5:6f:e1:57:2c:2b:8b:66:a9:04:75: - ae:e0:8f:fe:57:b9:fa:8e:4c:a1:f0:3d:dc:ff:5d: - 6d:76:c8:b0:13:85:60:2f:66:fb:d1:2d:30:ce:83: - b5:e0:0b:b9:89:2c:7d:2f:52:3f:b7:10:f7:64:37: - 
57:5f:59:b6:e1:8a:ff:6e ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA2AEfgvvontwcTEPqeixY6S5uQ6N6wEr/ZG3mHyDPoYCzp5Ot -zHoqwEhFU+TigLpAZtItLuDXrprNVL+3GnEb/ZUPy9XB7yKtaocQX0S4GqzzjGlM -FyfhMGbyMkKr66hIc7O7zgVvCSwDOZWBoTQmkEcGPrwweigsIpIeKs+D8FfHimAW -D5Eg8sPtrn+TChqRXyGaTK2agzhx213NPmJDyQ86K40iuPEFYFQFOSVWASvO/ReZ -G7a1qdh1VShEGPcp6ZIn1+CuiAOftZBQxq6X37RlVBOzTAJNC29U7ZUvZu2As56p -JhFMH2ouyR67lJEa3DvIXPrN+FwPudvuSd8nZwIDAQABAoIBADdkIlaOjtqeC0ur -Zuepk6tIUtJZi9Lf6JoybH6zBUufG3LvtfPnqCri3ZRj74mfOYCkiR2qXuTRobIH -78aCFvFyVjipOjA+rkXRGOu1TZjZnSL2paKSgXbOD3HAEyI9Z0b0Yka0sgKiHLmZ -k0JpgzIgZIOV8sCAqMC8w6uqx2TMRNR+XW5ybweFv948YdtIl1fb4ZDCb8rhNSAU -Nxo12VTqEMJoVslh2GhcsO9u6I7P3BzPdgDa254nkg6OaYtVPDoBdHmAOAxQEOeI -28g2bMrw8yRtz0g6F93eZ8qyf66pazz9C4ith0FF/z3digrZEabLVAewozLjPGzn -DkYio9ECgYEA/cqP2VgrDnGqXwXl311qVQ9XD8rpwoTllFSnq/9rGP97emFc01Bb -KjyBgBWJeKENbaFQLoKQy3VqI2kDiGLRPJqjYolTBFHWARDORl2vgctosqdMHFGP -Pcwjg9t4mVgK9m2GdDGPqd3OWbp/sRxvsTqVLo3XnCIpnA8zEjApchMCgYEA2eJf -k92DS+bPx5FpmIEtWNnPk5s0GZh+Tw0DODXXhUnIy1Q11jD5/6ooWf+SHbEmaLiN -ZebaHCcJ32bCW/aCbSb/XfQ2kknLzrlTXt/HC5QSNWk5OtH4dznWtpRgfNCwPlFN -LZYzN5XD6rCIUK+XRnoAIDThpncT59QpSJmHP90CgYEAgFiLTNgo1iInjeyWptJA -LwoV9legxg2daNky/mmqSLcTQ/2vCvKtV/vHZfX+UrHn75RPz2WaQjp71bKK5dNL -i4GMtQ1LnXihaGKOXsmMM3Ch3w96Dywz64WsREeegrYivSMAhwx1YetZKsmFQBVo -atF8llGHQ1+HeXO6WS1gGF8CgYA19U/CKKtpUqHGRIaoE+WNCXPUlC0U4aT2IrzX -YM4c3ibx48rpXnurCxKvJxGidMqUSJAL2l3NdwWE48fiSmwGaA/5zo7tL4VfjpnM -9xIuFd9khsh7fqBPtm0sDhbPEkdzR2MblQTeESLRriI0CSgndfMuBqqTNQ4i7XHD -3vGVqQKBgFmx69raWD1AZjc2GwwZ2TegpythyJtKGUO9Sb6eTPFkMqYmaaxGNqbs -PXu+xvQSu9ChJBCJCxp8zEbemD9ZzpvVb+FXLCuLZqkEda7gj/5XufqOTKHwPdz/ -XW12yLAThWAvZvvRLTDOg7XgC7mJLH0vUj+3EPdkN1dfWbbhiv9u ------END RSA PRIVATE KEY----- diff --git a/tests/root.pem b/tests/root.pem deleted file mode 100644 index e0be906..0000000 --- a/tests/root.pem +++ /dev/null @@ -1,17 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICwzCCAaugAwIBAgIJAKjCzmQfDy6aMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV -BAMTCWxvY2FsaG9zdDAeFw0yMzA1MTUwMDAwMDBaFw0yNDA1MTUwMDAwMDBaMBQx -EjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBALaJilb2vYMPqWjw5M2RzgsWcXTb+AImym2+E35PmX7sEtbxNRdXxR0AMcqT -ZoiKTqFmrnvQUVefpRC6gz6HpM9KXQ91bSPtrP6fhVCW5TIgNyys5Y6V1W74e0qb -b+O8vfaQWjyxOfoknVy/jCEU8W4PBuRPFDPyuedf1+iYWYKUDqcVQu4ylTqtHISZ -o+g37rohUIKKbV7ogF/FNVrQyEvsE2dI0LRda9tz4WC6OXBOlSlpoHKzFWlYd0S3 -TCZBNsoZki9onCEmdy7DnZkmzUYt4Iie8KkRtZNYY0n+tPudsnCxHSs5kX0ST5JP -NLT6wTuAnenluD2HkTzje9VQGLsCAwEAAaMYMBYwFAYDVR0RBA0wC4IJbG9jYWxo -b3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAO+5KAww6ZhzyqfFPs+0y25xDqloaiad1x -Kj4/rf1TJ+0BBYeF2QQSGM6eEMlHcXo7iOTBWF+fGM97WgiudrZgmSrG4V33/MwP -Ds5TiAdr7EmU8eq/tJZEThwhZiZGh49k0q4hvqqyKVX1ohh7/7cvDQylrRtVv5BC -EJJwub2ZTaCnvDOVSVY74sMyRK0CgdJG+meq+79D4qNidnNc3+0V9LdqYn2xbu3+ -t9HYpyO8EwYXMyVxaAxRS7v1udSnVKFfDceW7Irg66foTopUa0oWYZICb4t1AZe/ -+ofekvjpxlQygtafAinfu2MAEPuZTaT0Tq/1XD0xF3SYMxgcd5Ac ------END CERTIFICATE-----
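The `peers` entry in tests/certs/node1/config.toml corresponds one-to-one with the `PeerIdentity` values the tests construct; a sketch of that mapping (illustrative only, not giganto's actual config loader):

    // config.toml: peers = [ { address = "127.0.0.1:60192", host_name = "node2" } ]
    let peer = PeerIdentity {
        address: "127.0.0.1:60192".parse().unwrap(),
        host_name: "node2".to_string(),
    };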