From ec28ecf2a2531048ec241f8ad96235edfda7cb5e Mon Sep 17 00:00:00 2001 From: giangndm <45644921+giangndm@users.noreply.github.com> Date: Wed, 26 Jun 2024 00:44:47 +0700 Subject: [PATCH] feat: connector (#316) * WIP: mock for connector node * WIP: connector server received event from agent * WIP: connector remote rpc and sql storage * WIP: media_core feedback peer events * fixed typos * added stream event and added index to migration * added simple test for connector sql_storage --- Cargo.lock | 1129 ++++++++++++++++- Cargo.toml | 2 +- bin/Cargo.toml | 6 +- bin/connector_z0_n1.sh | 9 + bin/connector_z256_n1.sh | 9 + bin/src/http.rs | 15 +- bin/src/http/api_console.rs | 8 + bin/src/http/api_console/connector.rs | 235 ++++ bin/src/http/api_media.rs | 8 +- bin/src/main.rs | 2 +- bin/src/server/connector.rs | 150 ++- .../server/connector/remote_rpc_handler.rs | 107 ++ bin/src/server/console.rs | 54 +- bin/src/server/gateway.rs | 19 +- bin/src/server/gateway/local_rpc_handler.rs | 134 +- bin/src/server/gateway/remote_rpc_handler.rs | 125 +- bin/src/server/media/rpc_handler.rs | 2 +- packages/media_connector/Cargo.toml | 20 + packages/media_connector/src/agent_service.rs | 201 +++ .../media_connector/src/handler_service.rs | 188 +++ packages/media_connector/src/lib.rs | 75 ++ packages/media_connector/src/msg_queue.rs | 124 ++ packages/media_connector/src/sql_storage.rs | 632 +++++++++ .../media_connector/src/sql_storage/entity.rs | 5 + .../src/sql_storage/entity/event.rs | 29 + .../src/sql_storage/entity/peer.rs | 33 + .../src/sql_storage/entity/peer_session.rs | 37 + .../src/sql_storage/entity/room.rs | 24 + .../src/sql_storage/entity/session.rs | 34 + .../src/sql_storage/migration.rs | 12 + .../migration/m20240626_0001_init.rs | 170 +++ packages/media_core/src/cluster.rs | 2 - packages/media_core/src/endpoint.rs | 8 +- packages/media_core/src/endpoint/internal.rs | 182 ++- .../src/endpoint/internal/local_track.rs | 41 +- .../src/endpoint/internal/remote_track.rs | 47 +- .../media_core/src/endpoint/middleware.rs | 1 - .../src/endpoint/middleware/mix_minus.rs | 1 - packages/media_core/src/transport.rs | 8 +- packages/media_runner/Cargo.toml | 1 + packages/media_runner/src/worker.rs | 69 +- packages/media_utils/src/lib.rs | 2 + packages/media_utils/src/time.rs | 5 + packages/protocol/Cargo.toml | 1 + packages/protocol/build.rs | 17 + .../protocol/proto/cluster/connector.proto | 243 ++++ packages/protocol/proto/cluster/gateway.proto | 3 + packages/protocol/src/cluster.rs | 5 + packages/protocol/src/connector.rs | 1 + packages/protocol/src/lib.rs | 1 + .../src/protobuf/cluster_connector.rs | 691 ++++++++++ .../protocol/src/protobuf/cluster_gateway.rs | 6 + packages/protocol/src/protobuf/mod.rs | 3 + packages/protocol/src/rpc/quinn.rs | 3 + packages/protocol/src/transport/webrtc.rs | 4 +- packages/protocol/src/transport/whep.rs | 3 + packages/protocol/src/transport/whip.rs | 3 + packages/transport_webrtc/src/transport.rs | 12 +- .../transport_webrtc/src/transport/webrtc.rs | 45 +- .../transport_webrtc/src/transport/whep.rs | 15 +- .../transport_webrtc/src/transport/whip.rs | 15 +- packages/transport_webrtc/src/worker.rs | 26 +- 62 files changed, 4874 insertions(+), 188 deletions(-) create mode 100644 bin/connector_z0_n1.sh create mode 100644 bin/connector_z256_n1.sh create mode 100644 bin/src/http/api_console/connector.rs create mode 100644 bin/src/server/connector/remote_rpc_handler.rs create mode 100644 packages/media_connector/Cargo.toml create mode 100644 
packages/media_connector/src/agent_service.rs create mode 100644 packages/media_connector/src/handler_service.rs create mode 100644 packages/media_connector/src/lib.rs create mode 100644 packages/media_connector/src/msg_queue.rs create mode 100644 packages/media_connector/src/sql_storage.rs create mode 100644 packages/media_connector/src/sql_storage/entity.rs create mode 100644 packages/media_connector/src/sql_storage/entity/event.rs create mode 100644 packages/media_connector/src/sql_storage/entity/peer.rs create mode 100644 packages/media_connector/src/sql_storage/entity/peer_session.rs create mode 100644 packages/media_connector/src/sql_storage/entity/room.rs create mode 100644 packages/media_connector/src/sql_storage/entity/session.rs create mode 100644 packages/media_connector/src/sql_storage/migration.rs create mode 100644 packages/media_connector/src/sql_storage/migration/m20240626_0001_init.rs delete mode 100644 packages/media_core/src/endpoint/middleware/mix_minus.rs create mode 100644 packages/media_utils/src/time.rs create mode 100644 packages/protocol/proto/cluster/connector.proto create mode 100644 packages/protocol/src/connector.rs create mode 100644 packages/protocol/src/protobuf/cluster_connector.rs diff --git a/Cargo.lock b/Cargo.lock index 1e8fc72b..a58f99e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -52,6 +52,30 @@ dependencies = [ "subtle", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "getrandom", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -61,6 +85,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "aliasable" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" + +[[package]] +name = "allocator-api2" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -143,6 +179,39 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + +[[package]] +name = "async-trait" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "atm0s-media-server" 
version = "0.1.0" @@ -154,10 +223,12 @@ dependencies = [ "local-ip-address", "log", "maxminddb", + "media-server-connector", "media-server-gateway", "media-server-protocol", "media-server-runner", "media-server-secure", + "media-server-utils", "num_enum", "poem", "poem-openapi", @@ -165,7 +236,7 @@ dependencies = [ "quinn", "rand", "rcgen", - "rustls", + "rustls 0.23.10", "sans-io-runtime", "serde", "sysinfo", @@ -251,6 +322,15 @@ dependencies = [ "serde", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -342,6 +422,17 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "bincode" version = "1.3.3" @@ -357,7 +448,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags", + "bitflags 2.5.0", "cexpr", "clang-sys", "itertools", @@ -380,11 +471,32 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e0d60973d9320722cb1206f412740e162a33b8547ea8d6be75d7cff237c7a85" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +dependencies = [ + "serde", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] [[package]] name = "blake2b_simd" @@ -406,6 +518,30 @@ dependencies = [ "generic-array", ] +[[package]] +name = "borsh" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +dependencies = [ + "borsh-derive", + "cfg_aliases 0.2.1", +] + +[[package]] +name = "borsh-derive" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.66", + "syn_derive", +] + [[package]] name = "bs58" version = "0.5.1" @@ -431,6 +567,28 @@ version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +[[package]] +name = "bytecheck" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" +dependencies = [ + "bytecheck_derive", + "ptr_meta", + "simdutf8", +] + +[[package]] 
+name = "bytecheck_derive" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "byteorder" version = "1.5.0" @@ -481,6 +639,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -490,6 +654,7 @@ dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", + "serde", "windows-targets 0.52.5", ] @@ -564,7 +729,7 @@ version = "4.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.66", @@ -735,6 +900,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.20" @@ -884,6 +1058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -928,6 +1103,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "downcast" version = "0.11.0" @@ -969,6 +1150,9 @@ name = "either" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -1016,6 +1200,23 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "fastrand" version = "2.1.0" @@ -1044,6 +1245,17 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1086,6 +1298,26 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +[[package]] +name = 
"funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.30" @@ -1093,6 +1325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -1101,6 +1334,34 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + [[package]] name = "futures-macro" version = "0.3.30" @@ -1130,10 +1391,13 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ + "futures-channel", "futures-core", + "futures-io", "futures-macro", "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", "slab", @@ -1213,7 +1477,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757" dependencies = [ - "bitflags", + "bitflags 2.5.0", "ignore", "walkdir", ] @@ -1257,11 +1521,33 @@ dependencies = [ "byteorder", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] + [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash 0.8.11", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.5", +] [[package]] name = "headers" @@ -1297,6 +1583,15 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "heck" 
version = "0.5.0" @@ -1315,6 +1610,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "hkdf" version = "0.12.4" @@ -1518,7 +1819,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.14.5", +] + +[[package]] +name = "inherent" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0122b7114117e64a63ac49f752a5ca4624d534c7b1c7de796ac196381cd2d947" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", ] [[package]] @@ -1690,6 +2002,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "libsqlite3-sys" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -1724,6 +2047,15 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +[[package]] +name = "lru" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "matchers" version = "0.1.0" @@ -1746,23 +2078,51 @@ dependencies = [ ] [[package]] -name = "media-server-core" +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "media-server-connector" version = "0.1.0" dependencies = [ + "async-trait", "atm0s-sdn", - "audio-mixer", - "derivative", - "derive_more", - "indexmap", "log", + "lru", "media-server-protocol", "media-server-utils", - "mockall", - "num_enum", - "sans-io-runtime", - "smallmap", - "tracing-subscriber", -] + "prost", + "sea-orm", + "sea-orm-migration", + "serde", + "serde_json", + "tokio", +] + +[[package]] +name = "media-server-core" +version = "0.1.0" +dependencies = [ + "atm0s-sdn", + "audio-mixer", + "derivative", + "derive_more", + "indexmap", + "log", + "media-server-protocol", + "media-server-utils", + "mockall", + "num_enum", + "sans-io-runtime", + "smallmap", + "tracing-subscriber", +] [[package]] name = "media-server-gateway" @@ -1787,6 +2147,7 @@ dependencies = [ "prost", "prost-build", "quinn", + "rand", "serde", "tera", "tokio", @@ -1799,6 +2160,7 @@ dependencies = [ "atm0s-sdn", "convert-enum", "log", + "media-server-connector", "media-server-core", "media-server-gateway", "media-server-protocol", @@ -2003,9 +2365,9 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags", + "bitflags 2.5.0", "cfg-if", - "cfg_aliases", + "cfg_aliases 0.1.1", "libc", ] @@ -2193,7 +2555,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -2241,6 +2603,39 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ouroboros" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2ba07320d39dfea882faa70554b4bd342a5f273ed59ba7c1c6b4c840492c954" +dependencies = [ + "aliasable", + "ouroboros_macro", + "static_assertions", +] + +[[package]] +name = "ouroboros_macro" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec4c6225c69b4ca778c0aea097321a64c421cf4577b331c61b229267edabb6f8" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "overload" version = "0.1.1" @@ -2289,7 +2684,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.2", "smallvec", "windows-targets 0.52.5", ] @@ -2657,6 +3052,30 @@ dependencies = [ "toml_edit", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro2" version = "1.0.85" @@ -2683,7 +3102,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck", + "heck 0.5.0", "itertools", "log", "multimap", @@ -2719,6 +3138,26 @@ dependencies = [ "prost", ] +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -2749,7 +3188,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", + "rustls 0.23.10", "thiserror", "tokio", "tracing", @@ -2765,7 +3204,7 @@ dependencies = [ "rand", "ring", "rustc-hash", - "rustls", + "rustls 0.23.10", "rustls-platform-verifier", "slab", "thiserror", @@ -2795,6 +3234,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.8.5" @@ -2858,13 +3303,22 @@ dependencies = [ "yasna", ] +[[package]] +name = 
"redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ - "bitflags", + "bitflags 2.5.0", ] [[package]] @@ -2911,6 +3365,15 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +[[package]] +name = "rend" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" +dependencies = [ + "bytecheck", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -2945,6 +3408,35 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rkyv" +version = "0.7.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" +dependencies = [ + "bitvec", + "bytecheck", + "bytes", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", + "tinyvec", + "uuid", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "rsa" version = "0.9.6" @@ -2966,6 +3458,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rust_decimal" +version = "1.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a" +dependencies = [ + "arrayvec", + "borsh", + "bytes", + "num-traits", + "rand", + "rkyv", + "serde", + "serde_json", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -2993,13 +3501,24 @@ version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.10" @@ -3011,7 +3530,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -3023,12 +3542,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.1.2", "rustls-pki-types", "schannel", "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + [[package]] name = "rustls-pemfile" version = "2.1.2" @@ -3056,13 +3584,13 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", + "rustls 
0.23.10", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.102.4", "security-framework", "security-framework-sys", - "webpki-roots", + "webpki-roots 0.26.3", "winapi", ] @@ -3072,6 +3600,16 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.102.4" @@ -3129,6 +3667,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sctp-proto" version = "0.2.2" @@ -3144,6 +3692,171 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sea-bae" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bd3534a9978d0aa7edd2808dc1f8f31c4d0ecd31ddf71d997b3c98e9f3c9114" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.66", +] + +[[package]] +name = "sea-orm" +version = "0.12.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8814e37dc25de54398ee62228323657520b7f29713b8e238649385dbe473ee0" +dependencies = [ + "async-stream", + "async-trait", + "bigdecimal", + "chrono", + "futures", + "log", + "ouroboros", + "rust_decimal", + "sea-orm-macros", + "sea-query", + "sea-query-binder", + "serde", + "serde_json", + "sqlx", + "strum", + "thiserror", + "time", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "sea-orm-cli" +version = "0.12.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "620bc560062ae251b1366bde43b3f1508445cab5c2c8cbdb397034638ab1b357" +dependencies = [ + "chrono", + "clap", + "dotenvy", + "glob", + "regex", + "sea-schema", + "tracing", + "tracing-subscriber", + "url", +] + +[[package]] +name = "sea-orm-macros" +version = "0.12.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e115c6b078e013aa963cc2d38c196c2c40b05f03d0ac872fe06b6e0d5265603" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "sea-bae", + "syn 2.0.66", + "unicode-ident", +] + +[[package]] +name = "sea-orm-migration" +version = "0.12.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8269bc6ff71afd6b78aa4333ac237a69eebd2cdb439036291e64fb4b8db23c" +dependencies = [ + "async-trait", + "clap", + "dotenvy", + "futures", + "sea-orm", + "sea-orm-cli", + "sea-schema", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "sea-query" +version = "0.30.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4166a1e072292d46dc91f31617c2a1cdaf55a8be4b5c9f4bf2ba248e3ac4999b" +dependencies = [ + "bigdecimal", + "chrono", + "derivative", + "inherent", + "ordered-float", + "rust_decimal", + "sea-query-derive", + "serde_json", + "time", + "uuid", +] + +[[package]] +name = "sea-query-binder" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "36bbb68df92e820e4d5aeb17b4acd5cc8b5d18b2c36a4dd6f4626aabfa7ab1b9" +dependencies = [ + "bigdecimal", + "chrono", + "rust_decimal", + "sea-query", + "serde_json", + "sqlx", + "time", + "uuid", +] + +[[package]] +name = "sea-query-derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25a82fcb49253abcb45cdcb2adf92956060ec0928635eb21b4f7a6d8f25ab0bc" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.66", + "thiserror", +] + +[[package]] +name = "sea-schema" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d148608012d25222442d1ebbfafd1228dbc5221baf4ec35596494e27a2394e" +dependencies = [ + "futures", + "sea-query", + "sea-schema-derive", +] + +[[package]] +name = "sea-schema-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6f686050f76bffc4f635cda8aea6df5548666b830b52387e8bc7de11056d11e" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] name = "sec1" version = "0.7.3" @@ -3164,7 +3877,7 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -3321,6 +4034,12 @@ dependencies = [ "rand_core", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "siphasher" version = "0.3.11" @@ -3388,6 +4107,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -3399,6 +4121,232 @@ dependencies = [ "der", ] +[[package]] +name = "sqlformat" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" +dependencies = [ + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9a2ccff1a000a5a59cd33da541d9f2fdcd9e6e8229cc200565942bff36d0aaa" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24ba59a9342a3d9bab6c56c118be528b27c9b60e490080e9711a04dccac83ef6" +dependencies = [ + "ahash 0.8.11", + "atoi", + "bigdecimal", + "byteorder", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap", + "log", + "memchr", + "once_cell", + "paste", + "percent-encoding", + "rust_decimal", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "time", + "tokio", + 
"tokio-stream", + "tracing", + "url", + "uuid", + "webpki-roots 0.25.4", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea40e2345eb2faa9e1e5e326db8c34711317d2b5e08d0d5741619048a803127" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5833ef53aaa16d860e92123292f1f6a3d53c34ba8b1969f152ef1a7bb803f3c8" +dependencies = [ + "dotenvy", + "either", + "heck 0.4.1", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 1.0.109", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ed31390216d20e538e447a7a9b959e06ed9fc51c37b514b46eb758016ecd418" +dependencies = [ + "atoi", + "base64 0.21.7", + "bigdecimal", + "bitflags 2.5.0", + "byteorder", + "bytes", + "chrono", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "rust_decimal", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "time", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c824eb80b894f926f89a0b9da0c7f435d27cdd35b8c655b114e58223918577e" +dependencies = [ + "atoi", + "base64 0.21.7", + "bigdecimal", + "bitflags 2.5.0", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "num-bigint", + "once_cell", + "rand", + "rust_decimal", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "time", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b244ef0a8414da0bed4bb1910426e890b19e5e9bccc27ada6b797d05c55ae0aa" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "time", + "tracing", + "url", + "urlencoding", + "uuid", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -3430,12 +4378,29 @@ dependencies = [ "tracing", ] +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" + [[package]] name = "subtle" version = "2.5.0" @@ -3477,6 +4442,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "sync_wrapper" version = "1.0.1" @@ -3501,6 +4478,12 @@ dependencies = [ "windows", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.10.1" @@ -3868,6 +4851,24 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" + +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "universal-hash" version = "0.5.1" @@ -3907,12 +4908,27 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +dependencies = [ + "serde", +] + [[package]] name = "valuable" version = "0.1.0" @@ -3947,6 +4963,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasix" version = "0.12.21" @@ -4010,6 +5032,12 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +[[package]] +name = "webpki-roots" +version = "0.25.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + [[package]] name = "webpki-roots" version = "0.26.3" @@ -4031,6 +5059,16 @@ dependencies = [ "rustix", ] +[[package]] +name = "whoami" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +dependencies = [ + "redox_syscall 0.4.1", + "wasite", +] + [[package]] name = "wildmatch" version = "2.3.4" @@ -4235,6 +5273,15 @@ dependencies 
= [ "memchr", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "x25519-dalek" version = "2.0.1" @@ -4256,6 +5303,26 @@ dependencies = [ "time", ] +[[package]] +name = "zerocopy" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "zeroize" version = "1.8.1" diff --git a/Cargo.toml b/Cargo.toml index 2eeeb7a0..e5d371bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,7 @@ members = [ "packages/transport_webrtc", "packages/media_secure", "packages/media_gateway", - "packages/audio_mixer", + "packages/audio_mixer", "packages/media_connector", ] [workspace.dependencies] diff --git a/bin/Cargo.toml b/bin/Cargo.toml index 5c2f72a8..ce550f34 100644 --- a/bin/Cargo.toml +++ b/bin/Cargo.toml @@ -20,6 +20,8 @@ media-server-protocol = { path = "../packages/protocol", features = ["quinn-rpc" media-server-secure = { path = "../packages/media_secure" } media-server-runner = { path = "../packages/media_runner", optional = true } media-server-gateway = { path = "../packages/media_gateway", optional = true } +media-server-connector = { path = "../packages/media_connector", optional = true } +media-server-utils = { path = "../packages/media_utils", optional = true } local-ip-address = "0.6" serde = { version = "1.0", features = ["derive"] } quinn = { version = "0.11", optional = true } @@ -33,10 +35,10 @@ sysinfo = { version = "0.30", optional = true } [features] default = ["console", "gateway", "media", "connector", "cert_utils"] -gateway = ["media-server-gateway", "quinn_vnet", "node_metrics", "maxminddb"] +gateway = ["media-server-gateway", "media-server-connector", "quinn_vnet", "node_metrics", "maxminddb"] media = ["media-server-runner", "quinn_vnet", "node_metrics"] console = [] -connector = ["quinn_vnet"] +connector = ["quinn_vnet", "media-server-connector", "media-server-utils"] cert_utils = ["rcgen", "rustls"] quinn_vnet = ["rustls", "quinn"] node_metrics = ["sysinfo"] diff --git a/bin/connector_z0_n1.sh b/bin/connector_z0_n1.sh new file mode 100644 index 00000000..0bf0fda4 --- /dev/null +++ b/bin/connector_z0_n1.sh @@ -0,0 +1,9 @@ +RUST_LOG=info \ +RUST_BACKTRACE=1 \ +cargo run -- \ + --http-port 3000 \ + --node-id 4 \ + --sdn-port 10004 \ + --sdn-zone 0 \ + --seeds 0@/ip4/127.0.0.1/udp/10000 \ + connector diff --git a/bin/connector_z256_n1.sh b/bin/connector_z256_n1.sh new file mode 100644 index 00000000..cf8645fd --- /dev/null +++ b/bin/connector_z256_n1.sh @@ -0,0 +1,9 @@ +RUST_LOG=info \ +RUST_BACKTRACE=1 \ +cargo run -- \ + --http-port 3000 \ + --node-id 259 \ + --sdn-port 11003 \ + --sdn-zone 256 \ + --seeds 256@/ip4/127.0.0.1/udp/11000 \ + connector diff --git a/bin/src/http.rs b/bin/src/http.rs index e8790104..3517e422 100644 --- a/bin/src/http.rs +++ b/bin/src/http.rs @@ -2,6 +2,10 @@ use std::net::SocketAddr; use std::sync::Arc; use media_server_protocol::endpoint::ClusterConnId; +#[cfg(feature = "console")] +use 
media_server_protocol::protobuf::cluster_connector::MediaConnectorServiceClient;
+#[cfg(feature = "console")]
+use media_server_protocol::rpc::quinn::{QuinnClient, QuinnStream};
 use media_server_protocol::transport::{RpcReq, RpcRes};
 use media_server_secure::{MediaEdgeSecure, MediaGatewaySecure};
 use poem::endpoint::StaticFilesEndpoint;
@@ -48,6 +52,7 @@ pub async fn run_console_http_server(
     port: u16,
     secure: media_server_secure::jwt::MediaConsoleSecureJwt,
     storage: crate::server::console_storage::StorageShared,
+    connector: MediaConnectorServiceClient<SocketAddr, QuinnClient, QuinnStream>,
 ) -> Result<(), Box<dyn std::error::Error>> {
     let user_service: OpenApiService<_, ()> = OpenApiService::new(api_console::user::Apis, "Console User APIs", env!("CARGO_PKG_VERSION")).server("/api/user/");
     let user_ui = user_service.swagger_ui();
@@ -57,7 +62,11 @@ pub async fn run_console_http_server(
     let cluster_ui = cluster_service.swagger_ui();
     let cluster_spec = cluster_service.spec();
 
-    let ctx = api_console::ConsoleApisCtx { secure, storage };
+    let connector_service: OpenApiService<_, ()> = OpenApiService::new(api_console::connector::Apis, "Console Connector APIs", env!("CARGO_PKG_VERSION")).server("/api/connector/");
+    let connector_ui = connector_service.swagger_ui();
+    let connector_spec = connector_service.spec();
+
+    let ctx = api_console::ConsoleApisCtx { secure, storage, connector };
 
     let route = Route::new()
         //TODO build UI and embed to here
@@ -70,6 +79,10 @@ pub async fn run_console_http_server(
         .nest("/api/cluster/", cluster_service.data(ctx.clone()))
         .nest("/api/cluster/ui", cluster_ui)
         .at("/api/cluster/spec", poem::endpoint::make_sync(move |_| cluster_spec.clone()))
+        //connector
+        .nest("/api/connector/", connector_service.data(ctx.clone()))
+        .nest("/api/connector/ui", connector_ui)
+        .at("/api/connector/spec", poem::endpoint::make_sync(move |_| connector_spec.clone()))
         .with(Cors::new());
 
     Server::new(TcpListener::bind(SocketAddr::new([0, 0, 0, 0].into(), port))).run(route).await?;
diff --git a/bin/src/http/api_console.rs b/bin/src/http/api_console.rs
index d45722f8..757d7108 100644
--- a/bin/src/http/api_console.rs
+++ b/bin/src/http/api_console.rs
@@ -1,3 +1,9 @@
+use std::net::SocketAddr;
+
+use media_server_protocol::{
+    protobuf::cluster_connector::MediaConnectorServiceClient,
+    rpc::quinn::{QuinnClient, QuinnStream},
+};
 use media_server_secure::{jwt::MediaConsoleSecureJwt, MediaConsoleSecure};
 use poem::Request;
 use poem_openapi::{auth::ApiKey, SecurityScheme};
@@ -5,12 +11,14 @@ use poem_openapi::{auth::ApiKey, SecurityScheme};
 use crate::server::console_storage::StorageShared;
 
 pub mod cluster;
+pub mod connector;
 pub mod user;
 
 #[derive(Clone)]
 pub struct ConsoleApisCtx {
     pub secure: MediaConsoleSecureJwt, //TODO make it generic
     pub storage: StorageShared,
+    pub connector: MediaConnectorServiceClient<SocketAddr, QuinnClient, QuinnStream>,
 }
 
 /// ApiKey authorization
diff --git a/bin/src/http/api_console/connector.rs b/bin/src/http/api_console/connector.rs
new file mode 100644
index 00000000..650af84b
--- /dev/null
+++ b/bin/src/http/api_console/connector.rs
@@ -0,0 +1,235 @@
+use super::{super::Response, ConsoleApisCtx, ConsoleAuthorization};
+use media_server_protocol::{
+    connector::CONNECTOR_RPC_PORT,
+    protobuf::cluster_connector::{GetEventParams, GetParams, GetPeerParams},
+    rpc::node_vnet_addr,
+};
+use poem::web::Data;
+use poem_openapi::{
+    param::{Path, Query},
+    payload::Json,
+    OpenApi,
+};
+
+#[derive(poem_openapi::Object)]
+pub struct RoomInfo {
+    pub id: i32,
+    pub room: String,
+}
+
+#[derive(poem_openapi::Object)]
+pub struct PeerSession {
+    pub id: i32,
+    /// u64 causes wrong parsing in JS, so we convert it to a string
+    pub session: String,
+    pub peer_id: i32,
+    pub peer: String,
+    pub created_at: u64,
+    pub joined_at: u64,
+    pub leaved_at: Option<u64>,
+}
+
+#[derive(poem_openapi::Object)]
+pub struct PeerInfo {
+    pub id: i32,
+    pub room_id: i32,
+    pub room: String,
+    pub peer: String,
+    pub created_at: u64,
+    pub sessions: Vec<PeerSession>,
+}
+
+#[derive(poem_openapi::Object)]
+pub struct SessionInfo {
+    /// u64 causes wrong parsing in JS, so we convert it to a string
+    pub id: String,
+    pub ip: Option<String>,
+    pub user_agent: Option<String>,
+    pub sdk: Option<String>,
+    pub created_at: u64,
+    pub sessions: Vec<PeerSession>,
+}
+
+#[derive(poem_openapi::Object)]
+pub struct EventInfo {
+    pub id: i32,
+    /// u64 causes wrong parsing in JS, so we convert it to a string
+    pub session: String,
+    pub node: u32,
+    pub node_ts: u64,
+    pub created_at: u64,
+    pub event: String,
+    pub meta: Option<String>,
+}
+
+pub struct Apis;
+
+#[OpenApi]
+impl Apis {
+    /// get rooms
+    #[oai(path = "/:node/log/rooms", method = "get")]
+    async fn rooms(&self, _auth: ConsoleAuthorization, Data(ctx): Data<&ConsoleApisCtx>, Path(node): Path<u32>, Query(page): Query<u32>, Query(limit): Query<u32>) -> Json<Response<Vec<RoomInfo>>> {
+        match ctx.connector.rooms(node_vnet_addr(node, CONNECTOR_RPC_PORT), GetParams { page, limit }).await {
+            Some(res) => Json(Response {
+                status: true,
+                error: None,
+                data: Some(res.rooms.into_iter().map(|e| RoomInfo { id: e.id, room: e.room }).collect::<Vec<_>>()),
+            }),
+            None => Json(Response {
+                status: false,
+                error: Some("CLUSTER_ERROR".to_string()),
+                data: None,
+            }),
+        }
+    }
+
+    /// get peers
+    #[oai(path = "/:node/log/peers", method = "get")]
+    async fn peers(
+        &self,
+        _auth: ConsoleAuthorization,
+        Data(ctx): Data<&ConsoleApisCtx>,
+        Path(node): Path<u32>,
+        Query(room): Query<Option<String>>,
+        Query(page): Query<u32>,
+        Query(limit): Query<u32>,
+    ) -> Json<Response<Vec<PeerInfo>>> {
+        match ctx.connector.peers(node_vnet_addr(node, CONNECTOR_RPC_PORT), GetPeerParams { room, page, limit }).await {
+            Some(res) => Json(Response {
+                status: true,
+                error: None,
+                data: Some(
+                    res.peers
+                        .into_iter()
+                        .map(|p| PeerInfo {
+                            id: p.id,
+                            room_id: p.room_id,
+                            room: p.room,
+                            peer: p.peer,
+                            created_at: p.created_at,
+                            sessions: p
+                                .sessions
+                                .into_iter()
+                                .map(|s| PeerSession {
+                                    id: s.id,
+                                    session: s.session.to_string(),
+                                    peer_id: s.peer_id,
+                                    peer: s.peer,
+                                    created_at: s.created_at,
+                                    joined_at: s.joined_at,
+                                    leaved_at: s.leaved_at,
+                                })
+                                .collect::<Vec<_>>(),
+                        })
+                        .collect::<Vec<_>>(),
+                ),
+            }),
+            None => Json(Response {
+                status: false,
+                error: Some("CLUSTER_ERROR".to_string()),
+                data: None,
+            }),
+        }
+    }
+
+    /// get sessions
+    #[oai(path = "/:node/log/sessions", method = "get")]
+    async fn sessions(
+        &self,
+        _auth: ConsoleAuthorization,
+        Data(ctx): Data<&ConsoleApisCtx>,
+        Path(node): Path<u32>,
+        Query(page): Query<u32>,
+        Query(limit): Query<u32>,
+    ) -> Json<Response<Vec<SessionInfo>>> {
+        match ctx.connector.sessions(node_vnet_addr(node, CONNECTOR_RPC_PORT), GetParams { page, limit }).await {
+            Some(res) => Json(Response {
+                status: true,
+                error: None,
+                data: Some(
+                    res.sessions
+                        .into_iter()
+                        .map(|p| SessionInfo {
+                            id: p.id.to_string(),
+                            ip: p.ip,
+                            user_agent: p.user_agent,
+                            sdk: p.sdk,
+                            created_at: p.created_at,
+                            sessions: p
+                                .peers
+                                .into_iter()
+                                .map(|s| PeerSession {
+                                    id: s.id,
+                                    session: s.session.to_string(),
+                                    peer_id: s.peer_id,
+                                    peer: s.peer,
+                                    created_at: s.created_at,
+                                    joined_at: s.joined_at,
+                                    leaved_at: s.leaved_at,
+                                })
+                                .collect::<Vec<_>>(),
+                        })
+                        .collect::<Vec<_>>(),
+                ),
+            }),
+            None => Json(Response {
+                status: false,
+                error: Some("CLUSTER_ERROR".to_string()),
+                data: None,
+            }),
+        }
+    }
+
+    /// get events
+    #[oai(path = "/:node/log/events", method = "get")]
+    async fn events(
+        &self,
+        _auth: ConsoleAuthorization,
+        Data(ctx): Data<&ConsoleApisCtx>,
+        Path(node): Path<u32>,
+        Query(session): Query<Option<u64>>,
+        Query(start_ts): Query<Option<u64>>,
+        Query(end_ts): Query<Option<u64>>,
+        Query(page): Query<u32>,
+        Query(limit): Query<u32>,
+    ) -> Json<Response<Vec<EventInfo>>> {
+        match ctx
+            .connector
+            .events(
+                node_vnet_addr(node, CONNECTOR_RPC_PORT),
+                GetEventParams {
+                    session,
+                    start_ts,
+                    end_ts,
+                    page,
+                    limit,
+                },
+            )
+            .await
+        {
+            Some(res) => Json(Response {
+                status: true,
+                error: None,
+                data: Some(
+                    res.events
+                        .into_iter()
+                        .map(|e| EventInfo {
+                            id: e.id,
+                            session: e.session.to_string(),
+                            node: e.node,
+                            node_ts: e.node_ts,
+                            created_at: e.created_at,
+                            event: e.event,
+                            meta: e.meta,
+                        })
+                        .collect::<Vec<_>>(),
+                ),
+            }),
+            None => Json(Response {
+                status: false,
+                error: Some("CLUSTER_ERROR".to_string()),
+                data: None,
+            }),
+        }
+    }
+}
diff --git a/bin/src/http/api_media.rs b/bin/src/http/api_media.rs
index 5744f7ee..d926cb60 100644
--- a/bin/src/http/api_media.rs
+++ b/bin/src/http/api_media.rs
@@ -1,6 +1,7 @@
 use std::{marker::PhantomData, sync::Arc};
 
 use media_server_protocol::{
+    cluster::gen_cluster_session_id,
     endpoint::ClusterConnId,
     protobuf::gateway::{ConnectRequest, ConnectResponse, RemoteIceRequest, RemoteIceResponse},
     tokens::{WebrtcToken, WhepToken, WhipToken},
@@ -59,9 +60,11 @@ impl MediaApis {
         TokenAuthorization(token): TokenAuthorization,
         body: ApplicationSdp<String>,
     ) -> Result>> {
+        let session_id = gen_cluster_session_id();
         let token = ctx.secure.decode_obj::<WhipToken>("whip", &token.token).ok_or(poem::Error::from_status(StatusCode::BAD_REQUEST))?;
         log::info!("[MediaAPIs] create whip endpoint with token {:?}, ip {}, user_agent {}", token, ip_addr, user_agent);
         let (req, rx) = Rpc::new(RpcReq::Whip(whip::RpcReq::Connect(WhipConnectReq {
+            session_id,
             ip: ip_addr,
             sdp: body.0,
             room: token.room.into(),
@@ -153,9 +156,11 @@ impl MediaApis {
         TokenAuthorization(token): TokenAuthorization,
         body: ApplicationSdp<String>,
     ) -> Result>> {
+        let session_id = gen_cluster_session_id();
         let token = ctx.secure.decode_obj::<WhepToken>("whep", &token.token).ok_or(poem::Error::from_status(StatusCode::BAD_REQUEST))?;
         log::info!("[MediaAPIs] create whep endpoint with token {:?}, ip {}, user_agent {}", token, ip_addr, user_agent);
         let (req, rx) = Rpc::new(RpcReq::Whep(whep::RpcReq::Connect(WhepConnectReq {
+            session_id,
             ip: ip_addr,
             sdp: body.0,
             room: token.room.into(),
@@ -247,6 +252,7 @@ impl MediaApis {
         TokenAuthorization(token): TokenAuthorization,
         connect: Protobuf<ConnectRequest>,
     ) -> Result>> {
+        let session_id = gen_cluster_session_id();
         let token = ctx.secure.decode_obj::<WebrtcToken>("webrtc", &token.token).ok_or(poem::Error::from_status(StatusCode::BAD_REQUEST))?;
         log::info!("[MediaAPIs] create webrtc with token {:?}, ip {}, user_agent {}, request {:?}", token, ip_addr, user_agent, connect);
         if let Some(join) = &connect.join {
@@ -258,7 +264,7 @@ impl MediaApis {
                 return Err(poem::Error::from_string("Wrong peer".to_string(), StatusCode::FORBIDDEN));
             }
         }
-        let (req, rx) = Rpc::new(RpcReq::Webrtc(webrtc::RpcReq::Connect(ip_addr, user_agent, connect.0)));
+        let (req, rx) = Rpc::new(RpcReq::Webrtc(webrtc::RpcReq::Connect(session_id, ip_addr, user_agent, connect.0)));
         ctx.sender.send(req).await.map_err(|_e| poem::Error::from_status(StatusCode::INTERNAL_SERVER_ERROR))?;
         let res = rx.await.map_err(|_e| poem::Error::from_status(StatusCode::INTERNAL_SERVER_ERROR))?;
         match res {
diff --git a/bin/src/main.rs b/bin/src/main.rs
index cf787e35..c1211e50 100644
--- a/bin/src/main.rs
+++ b/bin/src/main.rs
@@ -80,7 +80,7 @@ async fn main() {
         #[cfg(feature = "gateway")]
         server::ServerType::Gateway(args) => server::run_media_gateway(workers, http_port, node, args).await,
         #[cfg(feature = "connector")]
-        server::ServerType::Connector(args) => server::run_media_connector(workers, args).await,
+        server::ServerType::Connector(args) => server::run_media_connector(workers, node, args).await,
         #[cfg(feature = "media")]
         server::ServerType::Media(args) => server::run_media_server(workers, http_port, node, args).await,
         #[cfg(feature = "cert_utils")]
diff --git a/bin/src/server/connector.rs b/bin/src/server/connector.rs
index dc3837eb..377f224b 100644
--- a/bin/src/server/connector.rs
+++ b/bin/src/server/connector.rs
@@ -1,8 +1,152 @@
+use std::{sync::Arc, time::Duration};
+
+use atm0s_sdn::{features::FeaturesEvent, secure::StaticKeyAuthorization, services::visualization, SdnBuilder, SdnControllerUtils, SdnExtOut, SdnOwner};
 use clap::Parser;
+use media_server_connector::{
+    handler_service::{self, ConnectorHandlerServiceBuilder},
+    sql_storage::ConnectorStorage,
+    Storage, HANDLER_SERVICE_ID,
+};
+use media_server_protocol::{
+    cluster::{ClusterNodeGenericInfo, ClusterNodeInfo},
+    connector::CONNECTOR_RPC_PORT,
+    protobuf::cluster_connector::MediaConnectorServiceServer,
+    rpc::quinn::QuinnServer,
+};
+use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer};
+use tokio::sync::mpsc::channel;
+
+use crate::{
+    node_metrics::NodeMetricsCollector,
+    quinn::{make_quinn_server, VirtualNetwork},
+    NodeConfig,
+};
+use sans_io_runtime::backend::PollingBackend;
+
+mod remote_rpc_handler;
+
+#[derive(Clone, Debug, convert_enum::From, convert_enum::TryInto)]
+enum SC {
+    Visual(visualization::Control),
+    Connector(media_server_connector::handler_service::Control),
+}
+
+#[derive(Clone, Debug, convert_enum::From, convert_enum::TryInto)]
+enum SE {
+    Visual(visualization::Event),
+    Connector(media_server_connector::handler_service::Event),
+}
+type TC = ();
+type TW = ();
 
 #[derive(Debug, Parser)]
-pub struct Args {}
+pub struct Args {
+    /// DB URI
+    #[arg(env, long, default_value = "sqlite://connector.db?mode=rwc")]
+    db_uri: String,
+}
+
+pub async fn run_media_connector(workers: usize, node: NodeConfig, args: Args) {
+    rustls::crypto::ring::default_provider().install_default().expect("should install ring as default");
+
+    let mut connector_storage = Arc::new(ConnectorStorage::new(&args.db_uri).await);
+
+    let default_cluster_cert_buf = include_bytes!("../../certs/cluster.cert");
+    let default_cluster_key_buf = include_bytes!("../../certs/cluster.key");
+    let default_cluster_cert = CertificateDer::from(default_cluster_cert_buf.to_vec());
+    let default_cluster_key = PrivatePkcs8KeyDer::from(default_cluster_key_buf.to_vec());
+
+    let node_id = node.node_id;
+
+    let mut builder = SdnBuilder::<(), SC, SE, TC, TW, ClusterNodeInfo>::new(node_id, node.udp_port, node.custom_addrs);
+    let node_addr = builder.node_addr();
+    let node_info = ClusterNodeInfo::Connector(ClusterNodeGenericInfo {
+        addr: node_addr.to_string(),
+        cpu: 0,
+        memory: 0,
+        disk: 0,
+    });
+
+    builder.set_authorization(StaticKeyAuthorization::new(&node.secret));
+    builder.set_manual_discovery(vec!["connector".to_string()], vec!["gateway".to_string()]);
+    builder.add_service(Arc::new(ConnectorHandlerServiceBuilder::new()));
+
+    for seed in node.seeds {
+        builder.add_seed(seed);
+    }
+
+    let mut controller = builder.build::<PollingBackend<SdnOwner, 128, 128>>(workers, node_info);
+
+    //
+    // Vnet is a virtual udp layer for creating RPC handlers; we separate the media server into 2 layers:
to 2 layer + // - async for business logic like proxy, logging handling + // - sync with sans-io style for media data + // + let (mut vnet, vnet_tx, mut vnet_rx) = VirtualNetwork::new(node.node_id); + + let media_rpc_socket = vnet.udp_socket(CONNECTOR_RPC_PORT).await.expect("Should open virtual port for gateway rpc"); + let mut media_rpc_server = MediaConnectorServiceServer::new( + QuinnServer::new(make_quinn_server(media_rpc_socket, default_cluster_key, default_cluster_cert.clone()).expect("Should create endpoint for media rpc server")), + remote_rpc_handler::Ctx { storage: connector_storage.clone() }, + remote_rpc_handler::ConnectorRemoteRpcHandlerImpl::default(), + ); + + tokio::task::spawn_local(async move { + media_rpc_server.run().await; + }); + + tokio::task::spawn_local(async move { while vnet.recv().await.is_some() {} }); + + // Collect node metrics for update to gateway agent service, this information is used inside gateway + // for forwarding from other gateway + let mut node_metrics_collector = NodeMetricsCollector::default(); + + // Subscribe ConnectorHandler service + controller.service_control(HANDLER_SERVICE_ID.into(), (), handler_service::Control::Sub.into()); + + let (connector_storage_tx, mut connector_storage_rx) = channel(1024); + tokio::task::spawn_local(async move { + while let Some((from, ts, req_id, event)) = connector_storage_rx.recv().await { + connector_storage.on_event(from, ts, req_id, event).await; + } + }); + + loop { + if controller.process().is_none() { + break; + } + + // Pop from metric collector and pass to Gateway store service + if let Some(metrics) = node_metrics_collector.pop_measure() { + let node_info = ClusterNodeInfo::Connector(ClusterNodeGenericInfo { + addr: node_addr.to_string(), + cpu: metrics.cpu, + memory: metrics.memory, + disk: metrics.disk, + }); + controller.service_control(visualization::SERVICE_ID.into(), (), visualization::Control::UpdateInfo(node_info).into()); + } + while let Ok(control) = vnet_rx.try_recv() { + controller.feature_control((), control.into()); + } -pub async fn run_media_connector(_workers: usize, _args: Args) { - println!("Running media connector"); + while let Some(out) = controller.pop_event() { + match out { + SdnExtOut::ServicesEvent(_, _, SE::Connector(event)) => match event { + media_server_connector::handler_service::Event::Req(from, ts, req_id, event) => { + if let Err(e) = connector_storage_tx.send((from, ts, req_id, event)).await { + log::error!("[MediaConnector] send event to storage error {:?}", e); + } + } + }, + SdnExtOut::FeaturesEvent(_, FeaturesEvent::Socket(event)) => { + if let Err(e) = vnet_tx.try_send(event) { + log::error!("[MediaConnector] forward Sdn SocketEvent error {:?}", e); + } + } + _ => {} + } + } + tokio::time::sleep(Duration::from_millis(10)).await; + } } diff --git a/bin/src/server/connector/remote_rpc_handler.rs b/bin/src/server/connector/remote_rpc_handler.rs new file mode 100644 index 00000000..8c9c8bb3 --- /dev/null +++ b/bin/src/server/connector/remote_rpc_handler.rs @@ -0,0 +1,107 @@ +use std::sync::Arc; + +use media_server_connector::{sql_storage, Querier}; +use media_server_protocol::protobuf::cluster_connector::{ + get_events::EventInfo, get_peers::PeerInfo, get_rooms::RoomInfo, get_sessions::SessionInfo, GetEventParams, GetEvents, GetParams, GetPeerParams, GetPeers, GetRooms, GetSessions, + MediaConnectorServiceHandler, PeerSession, +}; + +#[derive(Clone)] +pub struct Ctx { + pub storage: Arc, //TODO make it generic +} + +#[derive(Default)] +pub struct 
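// Illustrative, self-contained sketch (assumed simplified types) of the hand-off pattern
// used in run_media_connector above: connector events are pushed into a bounded tokio
// channel and persisted by a dedicated task, so slow SQL writes never stall the sans-io
// controller loop that calls controller.process().
async fn sketch_storage_task() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<(u32, u64)>(1024);
    tokio::spawn(async move {
        while let Some((from, req_id)) = rx.recv().await {
            // stand-in for `connector_storage.on_event(from, ts, req_id, event).await`
            log::info!("persist event from node {from}, req {req_id}");
        }
    });
    // producer side, as done inside the controller loop
    let _ = tx.send((1, 0)).await;
}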
ConnectorRemoteRpcHandlerImpl {} + +impl MediaConnectorServiceHandler for ConnectorRemoteRpcHandlerImpl { + async fn rooms(&self, ctx: &Ctx, req: GetParams) -> Option { + let rooms = ctx + .storage + .rooms(req.page as usize, req.limit as usize) + .await? + .into_iter() + .map(|e| RoomInfo { id: e.id, room: e.room }) + .collect::>(); + Some(GetRooms { rooms }) + } + + async fn peers(&self, ctx: &Ctx, req: GetPeerParams) -> Option { + let peers = ctx + .storage + .peers(req.room, req.page as usize, req.limit as usize) + .await? + .into_iter() + .map(|p| PeerInfo { + id: p.id, + room_id: p.room_id, + room: p.room, + peer: p.peer, + created_at: p.created_at, + sessions: p + .sessions + .into_iter() + .map(|p| PeerSession { + id: p.id, + peer_id: p.peer_id, + peer: p.peer, + session: p.session, + created_at: p.session, + joined_at: p.joined_at, + leaved_at: p.leaved_at, + }) + .collect::>(), + }) + .collect::>(); + Some(GetPeers { peers }) + } + + async fn sessions(&self, ctx: &Ctx, req: GetParams) -> Option { + let sessions = ctx + .storage + .sessions(req.page as usize, req.limit as usize) + .await? + .into_iter() + .map(|e| SessionInfo { + id: e.id, + ip: e.ip, + sdk: e.sdk, + user_agent: e.user_agent, + created_at: e.created_at, + peers: e + .peers + .into_iter() + .map(|p| PeerSession { + id: p.id, + peer_id: p.peer_id, + peer: p.peer, + session: p.session, + created_at: p.session, + joined_at: p.joined_at, + leaved_at: p.leaved_at, + }) + .collect::>(), + }) + .collect::>(); + Some(GetSessions { sessions }) + } + + async fn events(&self, ctx: &Ctx, req: GetEventParams) -> Option { + let events = ctx + .storage + .events(req.session, req.start_ts, req.end_ts, req.page as usize, req.limit as usize) + .await? + .into_iter() + .map(|e| EventInfo { + id: e.id, + node: e.node, + node_ts: e.node_ts, + session: e.session, + created_at: e.created_at, + event: e.event, + meta: e.meta.map(|m| m.to_string()), + }) + .collect::>(); + Some(GetEvents { events }) + } +} diff --git a/bin/src/server/console.rs b/bin/src/server/console.rs index e2056d11..2a84cac5 100644 --- a/bin/src/server/console.rs +++ b/bin/src/server/console.rs @@ -1,12 +1,21 @@ use std::time::{Duration, Instant}; -use atm0s_sdn::{secure::StaticKeyAuthorization, services::visualization, SdnBuilder, SdnControllerUtils, SdnExtOut, SdnOwner}; +use atm0s_sdn::{features::FeaturesEvent, secure::StaticKeyAuthorization, services::visualization, SdnBuilder, SdnControllerUtils, SdnExtOut, SdnOwner}; use clap::Parser; -use media_server_protocol::cluster::{ClusterNodeGenericInfo, ClusterNodeInfo}; +use media_server_protocol::{ + cluster::{ClusterNodeGenericInfo, ClusterNodeInfo}, + protobuf::cluster_connector::MediaConnectorServiceClient, + rpc::quinn::QuinnClient, +}; use media_server_secure::jwt::MediaConsoleSecureJwt; use storage::StorageShared; -use crate::{http::run_console_http_server, node_metrics::NodeMetricsCollector, NodeConfig}; +use crate::{ + http::run_console_http_server, + node_metrics::NodeMetricsCollector, + quinn::{make_quinn_client, VirtualNetwork}, + NodeConfig, +}; use sans_io_runtime::backend::PollingBackend; pub mod storage; @@ -27,16 +36,9 @@ type TW = (); pub struct Args {} pub async fn run_console_server(workers: usize, http_port: Option, node: NodeConfig, _args: Args) { + rustls::crypto::ring::default_provider().install_default().expect("should install ring as default"); + let storage = StorageShared::default(); - if let Some(http_port) = http_port { - let secure = MediaConsoleSecureJwt::from(node.secret.as_bytes()); - 
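// Note on paging in these query handlers: `page` is zero-based and the storage layer turns
// (page, limit) into a plain SQL offset. Tiny illustrative helper, not present in the patch:
fn page_to_offset(page: usize, limit: usize) -> usize {
    // page 0 -> offset 0, page 2 with limit 50 -> offset 100
    page * limit
}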
let storage = storage.clone(); - tokio::spawn(async move { - if let Err(e) = run_console_http_server(http_port, secure, storage).await { - log::error!("HTTP Error: {}", e); - } - }); - } let node_id = node.node_id; let mut builder = SdnBuilder::<(), SC, SE, TC, TW, ClusterNodeInfo>::new(node_id, node.udp_port, node.custom_addrs); @@ -60,6 +62,23 @@ pub async fn run_console_server(workers: usize, http_port: Option, node: No let mut controller = builder.build::>(workers, node_info); controller.service_control(visualization::SERVICE_ID.into(), (), visualization::Control::Subscribe.into()); + let (mut vnet, vnet_tx, mut vnet_rx) = VirtualNetwork::new(node.node_id); + + let connector_rpc_socket = vnet.udp_socket(0).await.expect("Should open virtual port for gateway rpc"); + let connector_rpc_client = MediaConnectorServiceClient::new(QuinnClient::new(make_quinn_client(connector_rpc_socket, &[]).expect("Should create endpoint for media rpc client"))); + + tokio::task::spawn_local(async move { while vnet.recv().await.is_some() {} }); + + if let Some(http_port) = http_port { + let secure = MediaConsoleSecureJwt::from(node.secret.as_bytes()); + let storage = storage.clone(); + tokio::spawn(async move { + if let Err(e) = run_console_http_server(http_port, secure, storage, connector_rpc_client).await { + log::error!("HTTP Error: {}", e); + } + }); + } + let mut node_metrics_collector = NodeMetricsCollector::default(); loop { @@ -67,6 +86,10 @@ pub async fn run_console_server(workers: usize, http_port: Option, node: No break; } + while let Ok(control) = vnet_rx.try_recv() { + controller.feature_control((), control.into()); + } + if let Some(metrics) = node_metrics_collector.pop_measure() { let node_info = ClusterNodeInfo::Console(ClusterNodeGenericInfo { addr: node_addr.to_string(), @@ -92,7 +115,12 @@ pub async fn run_console_server(workers: usize, http_port: Option, node: No log::info!("Node del: {:?}", node); } }, - SdnExtOut::FeaturesEvent(_, _) => {} + SdnExtOut::FeaturesEvent(_, FeaturesEvent::Socket(event)) => { + if let Err(e) = vnet_tx.try_send(event) { + log::error!("forward sdn SocketEvent error {:?}", e); + } + } + _ => {} } } tokio::time::sleep(Duration::from_millis(10)).await; diff --git a/bin/src/server/gateway.rs b/bin/src/server/gateway.rs index 8c81232f..70ebd5d2 100644 --- a/bin/src/server/gateway.rs +++ b/bin/src/server/gateway.rs @@ -2,6 +2,7 @@ use std::{sync::Arc, time::Duration}; use atm0s_sdn::{features::FeaturesEvent, secure::StaticKeyAuthorization, services::visualization, SdnBuilder, SdnControllerUtils, SdnExtOut, SdnOwner}; use clap::Parser; +use media_server_connector::agent_service::ConnectorAgentServiceBuilder; use media_server_gateway::{store_service::GatewayStoreServiceBuilder, STORE_SERVICE_ID}; use media_server_protocol::{ cluster::{ClusterGatewayInfo, ClusterNodeGenericInfo, ClusterNodeInfo}, @@ -31,12 +32,14 @@ mod remote_rpc_handler; enum SC { Visual(visualization::Control), Gateway(media_server_gateway::store_service::Control), + Connector(media_server_connector::agent_service::Control), } #[derive(Clone, Debug, convert_enum::From, convert_enum::TryInto)] enum SE { Visual(visualization::Event), Gateway(media_server_gateway::store_service::Event), + Connector(media_server_connector::agent_service::Event), } type TC = (); type TW = (); @@ -76,6 +79,9 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod let default_cluster_cert = CertificateDer::from(default_cluster_cert_buf.to_vec()); let default_cluster_key = 
PrivatePkcs8KeyDer::from(default_cluster_key_buf.to_vec()); + // This tx and rx is for sending event to connector in other tasks + let (connector_agent_tx, mut connector_agent_rx) = tokio::sync::mpsc::channel::(1024); + let edge_secure = Arc::new(MediaEdgeSecureJwt::from(node.secret.as_bytes())); let gateway_secure = Arc::new(MediaGatewaySecureJwt::from(node.secret.as_bytes())); let (req_tx, mut req_rx) = tokio::sync::mpsc::channel(1024); @@ -109,6 +115,7 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod builder.set_authorization(StaticKeyAuthorization::new(&node.secret)); builder.set_manual_discovery(vec!["gateway".to_string(), generate_gateway_zone_tag(node.zone)], vec!["gateway".to_string()]); builder.add_service(Arc::new(GatewayStoreServiceBuilder::new(node.zone, args.lat, args.lon, args.max_cpu, args.max_memory, args.max_disk))); + builder.add_service(Arc::new(ConnectorAgentServiceBuilder::new())); for seed in node.seeds { builder.add_seed(seed); @@ -134,6 +141,7 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod let mut media_rpc_server = MediaEdgeServiceServer::new( QuinnServer::new(make_quinn_server(media_rpc_socket, default_cluster_key, default_cluster_cert.clone()).expect("Should create endpoint for media rpc server")), remote_rpc_handler::Ctx { + connector_agent_tx: connector_agent_tx.clone(), selector: selector.clone(), client: media_rpc_client.clone(), ip2location: ip2location.clone(), @@ -141,7 +149,7 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod remote_rpc_handler::MediaRemoteRpcHandlerImpl::default(), ); - let local_rpc_processor = Arc::new(MediaLocalRpcHandler::new(selector, media_rpc_client, ip2location)); + let local_rpc_processor = Arc::new(MediaLocalRpcHandler::new(connector_agent_tx.clone(), selector, media_rpc_client, ip2location)); tokio::task::spawn_local(async move { media_rpc_server.run().await; @@ -155,6 +163,9 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod let mut live_sessions = 0; let mut max_sessions = 0; + // Subscribe ConnectorHandler service + controller.service_control(media_server_connector::AGENT_SERVICE_ID.into(), (), media_server_connector::agent_service::Control::Sub.into()); + loop { if controller.process().is_none() { break; @@ -196,6 +207,9 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod res_tx.send(res).print_err2("answer http request error"); }); } + while let Ok(control) = connector_agent_rx.try_recv() { + controller.service_control(media_server_connector::AGENT_SERVICE_ID.into(), (), control.into()); + } while let Some(out) = controller.pop_event() { match out { @@ -206,6 +220,9 @@ pub async fn run_media_gateway(workers: usize, http_port: Option, node: Nod } media_server_gateway::store_service::Event::FindNodeRes(req_id, res) => requester.on_find_node_res(req_id, res), }, + SdnExtOut::ServicesEvent(_, _, SE::Connector(event)) => match event { + media_server_connector::agent_service::Event::Stats { queue, inflight, acked } => {} + }, SdnExtOut::FeaturesEvent(_, FeaturesEvent::Socket(event)) => { if let Err(e) = vnet_tx.try_send(event) { log::error!("[MediaEdge] forward Sdn SocketEvent error {:?}", e); diff --git a/bin/src/server/gateway/local_rpc_handler.rs b/bin/src/server/gateway/local_rpc_handler.rs index ea628653..f3aed3e5 100644 --- a/bin/src/server/gateway/local_rpc_handler.rs +++ b/bin/src/server/gateway/local_rpc_handler.rs @@ -4,11 +4,21 @@ use std::{ }; use atm0s_sdn::NodeId; +use 
media_server_connector::agent_service::Control as ConnectorControl; use media_server_gateway::ServiceKind; +use media_server_protocol::protobuf::{ + cluster_connector::{ + connector_request::Event as ConnectorEvent, + peer_event::{route_error::ErrorType, Event as PeerEvent2, RouteError, RouteSuccess}, + PeerEvent, + }, + cluster_gateway::WhipConnectRequest, +}; use media_server_protocol::{ endpoint::ClusterConnId, gateway::GATEWAY_RPC_PORT, protobuf::{ + cluster_connector::peer_event::RouteBegin, cluster_gateway::MediaEdgeServiceClient, gateway::{ConnectRequest, ConnectResponse, RemoteIceRequest, RemoteIceResponse}, }, @@ -23,20 +33,81 @@ use media_server_protocol::{ RpcError, RpcReq, RpcRes, RpcResult, }, }; +use media_server_utils::now_ms; +use tokio::sync::mpsc::Sender; use crate::errors::MediaServerError; use super::{dest_selector::GatewayDestSelector, ip_location::Ip2Location}; pub struct MediaLocalRpcHandler { + connector_agent_tx: Sender, selector: GatewayDestSelector, client: MediaEdgeServiceClient, ip2location: Arc, } impl MediaLocalRpcHandler { - pub fn new(selector: GatewayDestSelector, client: MediaEdgeServiceClient, ip2location: Arc) -> Self { - Self { selector, client, ip2location } + async fn feedback_route_begin(&self, session_id: u64, ip: IpAddr) { + self.connector_agent_tx + .send(ConnectorControl::Fire( + now_ms(), + ConnectorEvent::Peer(PeerEvent { + session_id, + event: Some(PeerEvent2::RouteBegin(RouteBegin { remote_ip: ip.to_string() })), + }), + )) + .await + .expect("Should send"); + } + + async fn feedback_route_success(&self, session_id: u64, after_ms: u64, node: NodeId) { + self.connector_agent_tx + .send(ConnectorControl::Fire( + now_ms(), + ConnectorEvent::Peer(PeerEvent { + session_id, + event: Some(PeerEvent2::RouteSuccess(RouteSuccess { + after_ms: after_ms as u32, + dest_node: node, + })), + }), + )) + .await + .expect("Should send"); + } + + async fn feedback_route_error(&self, session_id: u64, after_ms: u64, node: Option, error: ErrorType) { + self.connector_agent_tx + .send(ConnectorControl::Fire( + now_ms(), + ConnectorEvent::Peer(PeerEvent { + session_id, + event: Some(PeerEvent2::RouteError(RouteError { + after_ms: after_ms as u32, + dest_node: node, + error: error as i32, + })), + }), + )) + .await + .expect("Should send"); + } +} + +impl MediaLocalRpcHandler { + pub fn new( + connector_agent_tx: Sender, + selector: GatewayDestSelector, + client: MediaEdgeServiceClient, + ip2location: Arc, + ) -> Self { + Self { + connector_agent_tx, + selector, + client, + ip2location, + } } pub async fn process_req(&self, conn_part: Option<(NodeId, u64)>, param: RpcReq) -> RpcRes { @@ -52,7 +123,7 @@ impl MediaLocalRpcHandler { whep::RpcReq::Delete(param) => RpcRes::Whep(whep::RpcRes::Delete(self.whep_delete(conn_part, param).await)), }, RpcReq::Webrtc(param) => match param { - webrtc::RpcReq::Connect(ip, user_agent, param) => RpcRes::Webrtc(webrtc::RpcRes::Connect(self.webrtc_connect(ip, user_agent, param).await)), + webrtc::RpcReq::Connect(session_id, ip, user_agent, param) => RpcRes::Webrtc(webrtc::RpcRes::Connect(self.webrtc_connect(session_id, ip, user_agent, param).await)), webrtc::RpcReq::RemoteIce(conn, param) => RpcRes::Webrtc(webrtc::RpcRes::RemoteIce(self.webrtc_remote_ice(conn_part, conn, param).await)), webrtc::RpcReq::RestartIce(conn, ip, user_agent, req) => RpcRes::Webrtc(webrtc::RpcRes::RestartIce(self.webrtc_restart_ice(conn_part, conn, ip, user_agent, req).await)), webrtc::RpcReq::Delete(_) => { @@ -68,21 +139,31 @@ impl MediaLocalRpcHandler { */ 
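// The connect handlers below all follow the same reporting sequence, implemented by the
// feedback_* helpers above. Illustrative outline only; `FeedbackEvent` is a hypothetical
// stand-in for the protobuf peer_event types:
//   1. RouteBegin with the caller's ip,
//   2. pick a node via the selector, otherwise RouteError(PoolEmpty),
//   3. forward the RPC; RouteSuccess(after_ms, node) on an answer, RouteError(Timeout) on none.
#[derive(Debug, PartialEq)]
enum FeedbackEvent {
    RouteBegin { remote_ip: String },
    RouteSuccess { after_ms: u32, dest_node: u32 },
    RouteError { after_ms: u32, dest_node: Option<u32>, error: &'static str },
}

fn feedback_sequence(started_at: u64, now: u64, remote_ip: &str, picked: Option<u32>, answered: bool) -> Vec<FeedbackEvent> {
    let mut out = vec![FeedbackEvent::RouteBegin { remote_ip: remote_ip.to_string() }];
    let after_ms = now.saturating_sub(started_at) as u32;
    match (picked, answered) {
        (Some(node), true) => out.push(FeedbackEvent::RouteSuccess { after_ms, dest_node: node }),
        (Some(node), false) => out.push(FeedbackEvent::RouteError { after_ms, dest_node: Some(node), error: "Timeout" }),
        (None, _) => out.push(FeedbackEvent::RouteError { after_ms, dest_node: None, error: "PoolEmpty" }),
    }
    out
}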
async fn whip_connect(&self, param: WhipConnectReq) -> RpcResult> { - if let Some(selected) = self.selector.select(ServiceKind::Webrtc, self.ip2location.get_location(¶m.ip)).await { - let sock_addr = node_vnet_addr(selected, GATEWAY_RPC_PORT); - log::info!("[Gateway] selected node {selected}"); - let rpc_req = param.into(); + let session_id = param.session_id; + let started_at = now_ms(); + self.feedback_route_begin(session_id, param.ip).await; + + if let Some(node_id) = self.selector.select(ServiceKind::Webrtc, self.ip2location.get_location(¶m.ip)).await { + let sock_addr = node_vnet_addr(node_id, GATEWAY_RPC_PORT); + log::info!("[Gateway] selected node {node_id}"); + let mut rpc_req: WhipConnectRequest = param.into(); + rpc_req.session_id = session_id; + let res = self.client.whip_connect(sock_addr, rpc_req).await; - log::info!("[Gateway] response from node {selected} => {:?}", res); + log::info!("[Gateway] response from node {node_id} => {:?}", res); if let Some(res) = res { + self.feedback_route_success(session_id, now_ms() - started_at, node_id).await; + Ok(whip::WhipConnectRes { sdp: res.sdp, conn_id: res.conn.parse().unwrap(), }) } else { + self.feedback_route_error(session_id, now_ms() - started_at, Some(node_id), ErrorType::Timeout).await; Err(RpcError::new2(MediaServerError::GatewayRpcError)) } } else { + self.feedback_route_error(session_id, now_ms() - started_at, None, ErrorType::PoolEmpty).await; Err(RpcError::new2(MediaServerError::NodePoolEmpty)) } } @@ -127,21 +208,27 @@ impl MediaLocalRpcHandler { */ async fn whep_connect(&self, param: WhepConnectReq) -> RpcResult> { - if let Some(selected) = self.selector.select(ServiceKind::Webrtc, self.ip2location.get_location(¶m.ip)).await { - let sock_addr = node_vnet_addr(selected, GATEWAY_RPC_PORT); - log::info!("[Gateway] selected node {selected}"); - let rpc_req = param.into(); - let res = self.client.whep_connect(sock_addr, rpc_req).await; - log::info!("[Gateway] response from node {selected} => {:?}", res); + let started_at = now_ms(); + let session_id = param.session_id; + self.feedback_route_begin(session_id, param.ip).await; + + if let Some(node_id) = self.selector.select(ServiceKind::Webrtc, self.ip2location.get_location(¶m.ip)).await { + let sock_addr = node_vnet_addr(node_id, GATEWAY_RPC_PORT); + log::info!("[Gateway] selected node {node_id}"); + let res = self.client.whep_connect(sock_addr, param.into()).await; + log::info!("[Gateway] response from node {node_id} => {:?}", res); if let Some(res) = res { + self.feedback_route_success(session_id, now_ms() - started_at, node_id).await; Ok(whep::WhepConnectRes { sdp: res.sdp, conn_id: res.conn.parse().unwrap(), }) } else { + self.feedback_route_error(session_id, now_ms() - started_at, Some(node_id), ErrorType::Timeout).await; Err(RpcError::new2(MediaServerError::GatewayRpcError)) } } else { + self.feedback_route_error(session_id, now_ms() - started_at, None, ErrorType::PoolEmpty).await; Err(RpcError::new2(MediaServerError::NodePoolEmpty)) } } @@ -185,31 +272,40 @@ impl MediaLocalRpcHandler { Webrtc part */ - async fn webrtc_connect(&self, ip: IpAddr, user_agent: String, req: ConnectRequest) -> RpcResult<(ClusterConnId, ConnectResponse)> { - if let Some(selected) = self.selector.select(ServiceKind::Webrtc, self.ip2location.get_location(&ip)).await { - let sock_addr = node_vnet_addr(selected, GATEWAY_RPC_PORT); - log::info!("[Gateway] selected node {selected}"); + async fn webrtc_connect(&self, session_id: u64, ip: IpAddr, user_agent: String, req: ConnectRequest) -> 
RpcResult<(ClusterConnId, ConnectResponse)> { + let started_at = now_ms(); + self.feedback_route_begin(session_id, ip).await; + + if let Some(node_id) = self.selector.select(ServiceKind::Webrtc, self.ip2location.get_location(&ip)).await { + let sock_addr = node_vnet_addr(node_id, GATEWAY_RPC_PORT); + log::info!("[Gateway] selected node {node_id}"); let rpc_req = media_server_protocol::protobuf::cluster_gateway::WebrtcConnectRequest { + session_id, user_agent, ip: ip.to_string(), req: Some(req), }; let res = self.client.webrtc_connect(sock_addr, rpc_req).await; - log::info!("[Gateway] response from node {selected} => {:?}", res); + log::info!("[Gateway] response from node {node_id} => {:?}", res); if let Some(res) = res { if let Some(res) = res.res { if let Ok(conn) = res.conn_id.parse() { + self.feedback_route_success(session_id, now_ms() - started_at, node_id).await; Ok((conn, res)) } else { + self.feedback_route_error(session_id, now_ms() - started_at, Some(node_id), ErrorType::MediaError).await; Err(RpcError::new2(MediaServerError::MediaResError)) } } else { + self.feedback_route_error(session_id, now_ms() - started_at, Some(node_id), ErrorType::GatewayError).await; Err(RpcError::new2(MediaServerError::GatewayRpcError)) } } else { + self.feedback_route_error(session_id, now_ms() - started_at, Some(node_id), ErrorType::Timeout).await; Err(RpcError::new2(MediaServerError::GatewayRpcError)) } } else { + self.feedback_route_error(session_id, now_ms() - started_at, None, ErrorType::PoolEmpty).await; Err(RpcError::new2(MediaServerError::NodePoolEmpty)) } } diff --git a/bin/src/server/gateway/remote_rpc_handler.rs b/bin/src/server/gateway/remote_rpc_handler.rs index 390528ef..c7b1422e 100644 --- a/bin/src/server/gateway/remote_rpc_handler.rs +++ b/bin/src/server/gateway/remote_rpc_handler.rs @@ -1,13 +1,22 @@ use std::{net::SocketAddr, sync::Arc}; +use atm0s_sdn::NodeId; +use media_server_connector::agent_service::Control as ConnectorControl; use media_server_gateway::ServiceKind; use media_server_protocol::{ endpoint::ClusterConnId, gateway::GATEWAY_RPC_PORT, - protobuf::cluster_gateway::{ - MediaEdgeServiceClient, MediaEdgeServiceHandler, WebrtcConnectRequest, WebrtcConnectResponse, WebrtcRemoteIceRequest, WebrtcRemoteIceResponse, WebrtcRestartIceRequest, - WebrtcRestartIceResponse, WhepCloseRequest, WhepCloseResponse, WhepConnectRequest, WhepConnectResponse, WhepRemoteIceRequest, WhepRemoteIceResponse, WhipCloseRequest, WhipCloseResponse, - WhipConnectRequest, WhipConnectResponse, WhipRemoteIceRequest, WhipRemoteIceResponse, + protobuf::{ + cluster_connector::{ + connector_request::Event as ConnectorEvent, + peer_event::{route_error::ErrorType, Event as PeerEvent2, RouteBegin, RouteError, RouteSuccess}, + PeerEvent, + }, + cluster_gateway::{ + MediaEdgeServiceClient, MediaEdgeServiceHandler, WebrtcConnectRequest, WebrtcConnectResponse, WebrtcRemoteIceRequest, WebrtcRemoteIceResponse, WebrtcRestartIceRequest, + WebrtcRestartIceResponse, WhepCloseRequest, WhepCloseResponse, WhepConnectRequest, WhepConnectResponse, WhepRemoteIceRequest, WhepRemoteIceResponse, WhipCloseRequest, WhipCloseResponse, + WhipConnectRequest, WhipConnectResponse, WhipRemoteIceRequest, WhipRemoteIceResponse, + }, }, rpc::{ node_vnet_addr, @@ -15,11 +24,14 @@ use media_server_protocol::{ }, transport::ConnLayer, }; +use media_server_utils::now_ms; +use tokio::sync::mpsc::Sender; use super::{dest_selector::GatewayDestSelector, ip_location::Ip2Location}; #[derive(Clone)] pub struct Ctx { + pub(crate) connector_agent_tx: 
Sender, pub(crate) selector: GatewayDestSelector, pub(crate) client: MediaEdgeServiceClient, pub(crate) ip2location: Arc, @@ -28,13 +40,74 @@ pub struct Ctx { #[derive(Default)] pub struct MediaRemoteRpcHandlerImpl {} +impl MediaRemoteRpcHandlerImpl { + async fn feedback_route_begin(ctx: &Ctx, session_id: u64, remote_ip: String) { + ctx.connector_agent_tx + .send(ConnectorControl::Fire( + now_ms(), + ConnectorEvent::Peer(PeerEvent { + session_id, + event: Some(PeerEvent2::RouteBegin(RouteBegin { remote_ip })), + }), + )) + .await + .expect("Should send"); + } + + async fn feedback_route_success(ctx: &Ctx, session_id: u64, after_ms: u64, node: NodeId) { + ctx.connector_agent_tx + .send(ConnectorControl::Fire( + now_ms(), + ConnectorEvent::Peer(PeerEvent { + session_id, + event: Some(PeerEvent2::RouteSuccess(RouteSuccess { + after_ms: after_ms as u32, + dest_node: node, + })), + }), + )) + .await + .expect("Should send"); + } + + async fn feedback_route_error(ctx: &Ctx, session_id: u64, after_ms: u64, node: Option, error: ErrorType) { + ctx.connector_agent_tx + .send(ConnectorControl::Fire( + now_ms(), + ConnectorEvent::Peer(PeerEvent { + session_id, + event: Some(PeerEvent2::RouteError(RouteError { + after_ms: after_ms as u32, + dest_node: node, + error: error as i32, + })), + }), + )) + .await + .expect("Should send"); + } +} + impl MediaEdgeServiceHandler for MediaRemoteRpcHandlerImpl { async fn whip_connect(&self, ctx: &Ctx, req: WhipConnectRequest) -> Option { + let started_at = now_ms(); + let session_id = req.session_id; log::info!("On whip_connect from other gateway"); + Self::feedback_route_begin(ctx, session_id, req.ip.clone()).await; let location = req.ip.parse().ok().and_then(|ip| ctx.ip2location.get_location(&ip)); - let dest = ctx.selector.select(ServiceKind::Webrtc, location).await?; - let dest_addr = node_vnet_addr(dest, GATEWAY_RPC_PORT); - ctx.client.whip_connect(dest_addr, req).await + if let Some(node_id) = ctx.selector.select(ServiceKind::Webrtc, location).await { + let node_addr = node_vnet_addr(node_id, GATEWAY_RPC_PORT); + if let Some(res) = ctx.client.whip_connect(node_addr, req).await { + Self::feedback_route_success(ctx, session_id, now_ms() - started_at, node_id).await; + Some(res) + } else { + Self::feedback_route_error(ctx, session_id, now_ms() - started_at, Some(node_id), ErrorType::Timeout).await; + None + } + } else { + Self::feedback_route_error(ctx, session_id, now_ms() - started_at, None, ErrorType::PoolEmpty).await; + None + } } async fn whip_remote_ice(&self, ctx: &Ctx, req: WhipRemoteIceRequest) -> Option { @@ -54,11 +127,24 @@ impl MediaEdgeServiceHandler for MediaRemoteRpcHandlerImpl { } async fn whep_connect(&self, ctx: &Ctx, req: WhepConnectRequest) -> Option { + let started_at = now_ms(); + let session_id = req.session_id; log::info!("On whep_connect from other gateway"); + Self::feedback_route_begin(ctx, session_id, req.ip.clone()).await; let location = req.ip.parse().ok().and_then(|ip| ctx.ip2location.get_location(&ip)); - let dest = ctx.selector.select(ServiceKind::Webrtc, location).await?; - let dest_addr = node_vnet_addr(dest, GATEWAY_RPC_PORT); - ctx.client.whep_connect(dest_addr, req).await + if let Some(node_id) = ctx.selector.select(ServiceKind::Webrtc, location).await { + let dest_addr = node_vnet_addr(node_id, GATEWAY_RPC_PORT); + if let Some(res) = ctx.client.whep_connect(dest_addr, req).await { + Self::feedback_route_success(ctx, session_id, now_ms() - started_at, node_id).await; + Some(res) + } else { + Self::feedback_route_error(ctx, 
session_id, now_ms() - started_at, Some(node_id), ErrorType::Timeout).await; + None + } + } else { + Self::feedback_route_error(ctx, session_id, now_ms() - started_at, None, ErrorType::PoolEmpty).await; + None + } } async fn whep_remote_ice(&self, ctx: &Ctx, req: WhepRemoteIceRequest) -> Option { @@ -78,11 +164,24 @@ impl MediaEdgeServiceHandler for MediaRemoteRpcHandlerImpl { } async fn webrtc_connect(&self, ctx: &Ctx, req: WebrtcConnectRequest) -> Option { + let started_at = now_ms(); + let session_id = req.session_id; log::info!("On webrtc_connect from other gateway"); + Self::feedback_route_begin(ctx, session_id, req.ip.clone()).await; let location = req.ip.parse().ok().and_then(|ip| ctx.ip2location.get_location(&ip)); - let dest = ctx.selector.select(ServiceKind::Webrtc, location).await?; - let dest_addr = node_vnet_addr(dest, GATEWAY_RPC_PORT); - ctx.client.webrtc_connect(dest_addr, req).await + if let Some(node_id) = ctx.selector.select(ServiceKind::Webrtc, location).await { + let dest_addr = node_vnet_addr(node_id, GATEWAY_RPC_PORT); + if let Some(res) = ctx.client.webrtc_connect(dest_addr, req).await { + Self::feedback_route_success(ctx, session_id, now_ms() - started_at, node_id).await; + Some(res) + } else { + Self::feedback_route_error(ctx, session_id, now_ms() - started_at, Some(node_id), ErrorType::Timeout).await; + None + } + } else { + Self::feedback_route_error(ctx, session_id, now_ms() - started_at, None, ErrorType::PoolEmpty).await; + None + } } async fn webrtc_remote_ice(&self, ctx: &Ctx, req: WebrtcRemoteIceRequest) -> Option { diff --git a/bin/src/server/media/rpc_handler.rs b/bin/src/server/media/rpc_handler.rs index e704dedc..4387a95d 100644 --- a/bin/src/server/media/rpc_handler.rs +++ b/bin/src/server/media/rpc_handler.rs @@ -122,7 +122,7 @@ impl MediaEdgeServiceHandler for MediaRpcHandlerImpl { /* Start of sdk */ async fn webrtc_connect(&self, ctx: &Ctx, req: WebrtcConnectRequest) -> Option { log::info!("On webrtc_connect from gateway"); - let (req, rx) = Rpc::new(RpcReq::Webrtc(webrtc::RpcReq::Connect(req.ip.parse().ok()?, req.user_agent, req.req?))); + let (req, rx) = Rpc::new(RpcReq::Webrtc(webrtc::RpcReq::Connect(req.session_id, req.ip.parse().ok()?, req.user_agent, req.req?))); ctx.req_tx.send(req).await.ok()?; let res = rx.await.ok()?; match res { diff --git a/packages/media_connector/Cargo.toml b/packages/media_connector/Cargo.toml new file mode 100644 index 00000000..92f25ac9 --- /dev/null +++ b/packages/media_connector/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "media-server-connector" +version = "0.1.0" +edition = "2021" + +[dependencies] +log = { workspace = true } +serde = { version = "1.0", features = ["derive"] } +media-server-protocol = { path = "../protocol" } +media-server-utils = { path = "../media_utils" } +atm0s-sdn = { workspace = true } +prost = { workspace = true } +lru = "0.12" +async-trait = "0.1" +sea-orm-migration = "0.12" +sea-orm = { version = "0.12", features = ["sqlx-sqlite", "runtime-tokio-rustls"] } +serde_json = "1.0" + +[dev-dependencies] +tokio = { version = "1", features = ["full"] } diff --git a/packages/media_connector/src/agent_service.rs b/packages/media_connector/src/agent_service.rs new file mode 100644 index 00000000..192fedd5 --- /dev/null +++ b/packages/media_connector/src/agent_service.rs @@ -0,0 +1,201 @@ +use std::{collections::VecDeque, fmt::Debug}; + +use atm0s_sdn::{ + base::{ + NetOutgoingMeta, Service, ServiceBuilder, ServiceControlActor, ServiceCtx, ServiceInput, ServiceOutput, ServiceSharedInput, 
ServiceWorker, ServiceWorkerCtx, ServiceWorkerInput, + ServiceWorkerOutput, + }, + features::{data, FeaturesControl, FeaturesEvent}, + RouteRule, +}; + +use media_server_protocol::protobuf::cluster_connector::{connector_request, ConnectorRequest, ConnectorResponse}; +use prost::Message; + +use crate::{msg_queue::MessageQueue, AGENT_SERVICE_ID, AGENT_SERVICE_NAME, DATA_PORT, HANDLER_SERVICE_ID}; + +#[derive(Debug, Clone)] +pub enum Control { + Fire(u64, connector_request::Event), + Sub, +} + +#[derive(Debug, Clone)] +pub enum Event { + Stats { queue: usize, inflight: usize, acked: usize }, +} + +pub struct ConnectorAgentService { + req_id_seed: u64, + subscriber: Option>, + msg_queue: MessageQueue, + queue: Option>, + _tmp: std::marker::PhantomData<(UserData, SC, SE, TC, TW)>, +} + +impl ConnectorAgentService { + pub fn new() -> Self { + Self { + req_id_seed: 0, + subscriber: None, + queue: Some(ServiceOutput::FeatureControl(data::Control::DataListen(DATA_PORT).into())), + msg_queue: MessageQueue::default(), + _tmp: std::marker::PhantomData, + } + } +} + +impl Service for ConnectorAgentService +where + SC: From + TryInto + Debug, + SE: From + TryInto, +{ + fn service_id(&self) -> u8 { + AGENT_SERVICE_ID + } + + fn service_name(&self) -> &str { + AGENT_SERVICE_NAME + } + + fn on_shared_input<'a>(&mut self, _ctx: &ServiceCtx, _now: u64, input: ServiceSharedInput) { + match input { + ServiceSharedInput::Tick(_) => { + if let Some(subscriber) = self.subscriber { + self.queue = Some(ServiceOutput::Event( + subscriber, + Event::Stats { + queue: self.msg_queue.waits(), + inflight: self.msg_queue.inflight(), + acked: self.msg_queue.acked(), + } + .into(), + )); + } + } + ServiceSharedInput::Connection(_) => {} + } + } + + fn on_input(&mut self, _ctx: &ServiceCtx, _now: u64, input: ServiceInput) { + match input { + ServiceInput::Control(owner, control) => { + if let Ok(control) = control.try_into() { + match control { + Control::Fire(ts, event) => { + let req_id = self.req_id_seed; + self.req_id_seed += 1; + let req = ConnectorRequest { req_id, ts, event: Some(event) }; + log::info!("[ConnectorAgent] push msg to queue {:?}", req); + self.msg_queue.push(req); + } + Control::Sub => { + self.subscriber = Some(owner); + } + } + } + } + ServiceInput::FromWorker(_data) => {} + ServiceInput::FeatureEvent(FeaturesEvent::Data(event)) => match event { + data::Event::Pong(_, _) => {} + data::Event::Recv(_port, _meta, buf) => match ConnectorResponse::decode(buf.as_slice()) { + Ok(msg) => { + self.msg_queue.on_ack(msg.req_id); + log::info!("[ConnectorAgent] on msg response {:?}", msg); + } + Err(er) => { + log::error!("[ConnectorAgent] decode data error {}", er); + } + }, + }, + _ => {} + } + } + + fn pop_output2(&mut self, now: u64) -> Option> { + if let Some(out) = self.queue.take() { + return Some(out); + } + let out = self.msg_queue.pop(now)?; + let buf = out.encode_to_vec(); + let mut meta = NetOutgoingMeta::secure(); + meta.source = true; + log::info!("[ConnectorAgent] send msg to net {:?}", out); + Some(ServiceOutput::FeatureControl( + data::Control::DataSendRule(DATA_PORT, RouteRule::ToService(HANDLER_SERVICE_ID), meta, buf).into(), + )) + } +} + +pub struct ConnectorAgentServiceWorker { + queue: VecDeque>, +} + +impl ServiceWorker for ConnectorAgentServiceWorker { + fn service_id(&self) -> u8 { + AGENT_SERVICE_ID + } + + fn service_name(&self) -> &str { + AGENT_SERVICE_NAME + } + + fn on_tick(&mut self, _ctx: &ServiceWorkerCtx, _now: u64, _tick_count: u64) {} + + fn on_input(&mut self, _ctx: 
&ServiceWorkerCtx, _now: u64, input: ServiceWorkerInput) { + match input { + ServiceWorkerInput::Control(owner, control) => self.queue.push_back(ServiceWorkerOutput::ForwardControlToController(owner, control)), + ServiceWorkerInput::FromController(_) => {} + ServiceWorkerInput::FeatureEvent(event) => self.queue.push_back(ServiceWorkerOutput::ForwardFeatureEventToController(event)), + } + } + + fn pop_output2(&mut self, _now: u64) -> Option> { + self.queue.pop_front() + } +} + +pub struct ConnectorAgentServiceBuilder { + _tmp: std::marker::PhantomData<(UserData, SC, SE, TC, TW)>, +} + +impl ConnectorAgentServiceBuilder { + pub fn new() -> Self { + Self { _tmp: std::marker::PhantomData } + } +} + +impl ServiceBuilder for ConnectorAgentServiceBuilder +where + UserData: 'static + Debug + Send + Sync + Copy + Eq, + SC: 'static + Debug + Send + Sync + From + TryInto, + SE: 'static + Debug + Send + Sync + From + TryInto, + TC: 'static + Debug + Send + Sync, + TW: 'static + Debug + Send + Sync, +{ + fn service_id(&self) -> u8 { + AGENT_SERVICE_ID + } + + fn service_name(&self) -> &str { + AGENT_SERVICE_NAME + } + + fn discoverable(&self) -> bool { + false + } + + fn create(&self) -> Box> { + Box::new(ConnectorAgentService::new()) + } + + fn create_worker(&self) -> Box> { + Box::new(ConnectorAgentServiceWorker { queue: Default::default() }) + } +} + +impl crate::msg_queue::Message for ConnectorRequest { + fn msg_id(&self) -> u64 { + self.req_id + } +} diff --git a/packages/media_connector/src/handler_service.rs b/packages/media_connector/src/handler_service.rs new file mode 100644 index 00000000..5f5c98f4 --- /dev/null +++ b/packages/media_connector/src/handler_service.rs @@ -0,0 +1,188 @@ +use std::{collections::VecDeque, fmt::Debug, num::NonZeroUsize}; + +use atm0s_sdn::{ + base::{ + NetOutgoingMeta, Service, ServiceBuilder, ServiceControlActor, ServiceCtx, ServiceInput, ServiceOutput, ServiceSharedInput, ServiceWorker, ServiceWorkerCtx, ServiceWorkerInput, + ServiceWorkerOutput, + }, + features::{data, FeaturesControl, FeaturesEvent}, + NodeId, RouteRule, +}; +use lru::LruCache; +use media_server_protocol::protobuf::cluster_connector::{ + connector_request::Event as ConnectorEvent, + connector_response::{Response, Success}, + ConnectorRequest, ConnectorResponse, +}; +use prost::Message; + +use crate::{DATA_PORT, HANDLER_SERVICE_ID, HANDLER_SERVICE_NAME}; + +#[derive(Debug, Clone)] +pub enum Control { + Sub, +} + +#[derive(Debug, Clone)] +pub enum Event { + Req(NodeId, u64, u64, ConnectorEvent), +} + +type ReqUuid = (NodeId, u64, u64); + +pub struct ConnectorHandlerService { + lru: LruCache, + subscriber: Option>, + queue: VecDeque>, + _tmp: std::marker::PhantomData<(UserData, SC, SE, TC, TW)>, +} + +impl ConnectorHandlerService { + pub fn new() -> Self { + Self { + subscriber: None, + lru: LruCache::new(NonZeroUsize::new(10000).expect("should be non-zero")), + queue: VecDeque::from([ServiceOutput::FeatureControl(data::Control::DataListen(DATA_PORT).into())]), + _tmp: std::marker::PhantomData, + } + } +} + +impl Service for ConnectorHandlerService +where + SC: From + TryInto + Debug, + SE: From + TryInto, +{ + fn service_id(&self) -> u8 { + HANDLER_SERVICE_ID + } + + fn service_name(&self) -> &str { + HANDLER_SERVICE_NAME + } + + fn on_shared_input<'a>(&mut self, _ctx: &ServiceCtx, _now: u64, _input: ServiceSharedInput) {} + + fn on_input(&mut self, _ctx: &ServiceCtx, _now: u64, input: ServiceInput) { + match input { + ServiceInput::Control(owner, control) => { + if let Ok(control) = 
control.try_into() { + match control { + Control::Sub => { + self.subscriber = Some(owner); + } + } + } + } + ServiceInput::FromWorker(_data) => {} + ServiceInput::FeatureEvent(FeaturesEvent::Data(event)) => match event { + data::Event::Pong(_, _) => {} + data::Event::Recv(_port, meta, buf) => match ConnectorRequest::decode(buf.as_slice()) { + Ok(msg) => { + if let Some(source) = meta.source { + if self.lru.put((source, msg.ts, msg.req_id), ()).is_some() { + log::warn!("[ConnectorHandler] duplicate msg {:?}", msg); + return; + } + + log::info!("[ConnectorHandler] on event {:?}", msg); + if let Some(event) = msg.event { + if let Some(actor) = self.subscriber { + self.queue.push_back(ServiceOutput::Event(actor, Event::Req(source, msg.ts, msg.req_id, event).into())); + } else { + log::warn!("[ConnectorHandler] subscriber not found"); + } + } + + let res = ConnectorResponse { + req_id: msg.req_id, + response: Some(Response::Success(Success {})), + }; + log::info!("[ConnectorHandler] reply to net {:?}", res); + self.queue.push_back(ServiceOutput::FeatureControl( + data::Control::DataSendRule(DATA_PORT, RouteRule::ToNode(source), NetOutgoingMeta::secure(), res.encode_to_vec()).into(), + )); + } else { + log::warn!("[ConnectorHandler] reject msg without source"); + } + } + Err(er) => { + log::error!("[ConnectorHandler] decode data error {}", er); + } + }, + }, + _ => {} + } + } + + fn pop_output2(&mut self, _now: u64) -> Option> { + self.queue.pop_front() + } +} + +pub struct ConnectorHandlerServiceWorker { + queue: VecDeque>, +} + +impl ServiceWorker for ConnectorHandlerServiceWorker { + fn service_id(&self) -> u8 { + HANDLER_SERVICE_ID + } + + fn service_name(&self) -> &str { + HANDLER_SERVICE_NAME + } + + fn on_tick(&mut self, _ctx: &ServiceWorkerCtx, _now: u64, _tick_count: u64) {} + + fn on_input(&mut self, _ctx: &ServiceWorkerCtx, _now: u64, input: ServiceWorkerInput) { + match input { + ServiceWorkerInput::Control(owner, control) => self.queue.push_back(ServiceWorkerOutput::ForwardControlToController(owner, control)), + ServiceWorkerInput::FromController(_) => {} + ServiceWorkerInput::FeatureEvent(event) => self.queue.push_back(ServiceWorkerOutput::ForwardFeatureEventToController(event)), + } + } + + fn pop_output2(&mut self, _now: u64) -> Option> { + self.queue.pop_front() + } +} + +pub struct ConnectorHandlerServiceBuilder { + _tmp: std::marker::PhantomData<(UserData, SC, SE, TC, TW)>, +} + +impl ConnectorHandlerServiceBuilder { + pub fn new() -> Self { + Self { _tmp: std::marker::PhantomData } + } +} + +impl ServiceBuilder for ConnectorHandlerServiceBuilder +where + UserData: 'static + Debug + Send + Sync + Copy + Eq, + SC: 'static + Debug + Send + Sync + From + TryInto, + SE: 'static + Debug + Send + Sync + From + TryInto, + TC: 'static + Debug + Send + Sync, + TW: 'static + Debug + Send + Sync, +{ + fn service_id(&self) -> u8 { + HANDLER_SERVICE_ID + } + + fn service_name(&self) -> &str { + HANDLER_SERVICE_NAME + } + + fn discoverable(&self) -> bool { + true + } + + fn create(&self) -> Box> { + Box::new(ConnectorHandlerService::new()) + } + + fn create_worker(&self) -> Box> { + Box::new(ConnectorHandlerServiceWorker { queue: Default::default() }) + } +} diff --git a/packages/media_connector/src/lib.rs b/packages/media_connector/src/lib.rs new file mode 100644 index 00000000..9a8c1d2b --- /dev/null +++ b/packages/media_connector/src/lib.rs @@ -0,0 +1,75 @@ +use atm0s_sdn::NodeId; +use media_server_protocol::protobuf::cluster_connector::connector_request; +use serde_json::Value; + +pub 
mod agent_service; +pub mod handler_service; +mod msg_queue; +pub mod sql_storage; + +pub const DATA_PORT: u16 = 10002; + +pub const AGENT_SERVICE_ID: u8 = 103; +pub const AGENT_SERVICE_NAME: &str = "connector-agent"; +pub const HANDLER_SERVICE_ID: u8 = 104; +pub const HANDLER_SERVICE_NAME: &str = "connector-handler"; + +#[derive(Debug)] +pub struct RoomInfo { + pub id: i32, + pub room: String, + pub created_at: u64, + pub peers: usize, +} + +#[derive(Debug)] +pub struct PeerSession { + pub id: i32, + pub peer_id: i32, + pub peer: String, + pub session: u64, + pub joined_at: u64, + pub leaved_at: Option, +} + +#[derive(Debug)] +pub struct PeerInfo { + pub id: i32, + pub room_id: i32, + pub room: String, + pub peer: String, + pub created_at: u64, + pub sessions: Vec, +} + +#[derive(Debug)] +pub struct SessionInfo { + pub id: u64, + pub created_at: u64, + pub ip: Option, + pub user_agent: Option, + pub sdk: Option, + pub peers: Vec, +} + +#[derive(Debug)] +pub struct EventInfo { + pub id: i32, + pub node: u32, + pub session: u64, + pub node_ts: u64, + pub created_at: u64, + pub event: String, + pub meta: Option, +} + +pub trait Storage { + fn on_event(&self, from: NodeId, ts: u64, req_id: u64, event: connector_request::Event) -> impl std::future::Future> + Send; +} + +pub trait Querier { + fn rooms(&self, page: usize, count: usize) -> impl std::future::Future>> + Send; + fn peers(&self, room: Option, page: usize, count: usize) -> impl std::future::Future>> + Send; + fn sessions(&self, page: usize, count: usize) -> impl std::future::Future>> + Send; + fn events(&self, session: Option, from: Option, to: Option, page: usize, count: usize) -> impl std::future::Future>> + Send; +} diff --git a/packages/media_connector/src/msg_queue.rs b/packages/media_connector/src/msg_queue.rs new file mode 100644 index 00000000..8acad050 --- /dev/null +++ b/packages/media_connector/src/msg_queue.rs @@ -0,0 +1,124 @@ +use std::collections::{BTreeMap, HashMap, VecDeque}; + +const RESEND_AFTER_MS: u64 = 1000; + +pub trait Message { + fn msg_id(&self) -> u64; +} + +#[derive(Default)] +pub struct MessageQueue { + inflight_ts: BTreeMap>, + inflight: HashMap, + queue: VecDeque, + acked: usize, +} + +impl MessageQueue { + pub fn push(&mut self, msg: M) { + self.queue.push_back(msg); + } + + pub fn on_ack(&mut self, id: u64) { + if self.inflight.remove(&id).is_some() { + self.acked += 1; + log::debug!("[ConnectorAgent/MessageQueue] msg for ack {id}"); + } else { + log::warn!("[ConnectorAgent/MessageQueue] msg for ack {id} not found"); + } + } + + pub fn pop(&mut self, now_ms: u64) -> Option<&M> { + if let Some(msg_id) = self.pop_retry_msg_id(now_ms) { + let entry = self.inflight_ts.entry(now_ms).or_insert_with(Default::default); + entry.push(msg_id); + return Some(self.inflight.get(&msg_id).expect("should exist retry_msg_id")); + } + + if self.inflight.len() < MAX_INFLIGHT { + let msg = self.queue.pop_front()?; + let msg_id = msg.msg_id(); + let entry = self.inflight_ts.entry(now_ms).or_insert_with(Default::default); + entry.push(msg_id); + self.inflight.insert(msg_id, msg); + self.inflight.get(&msg_id) + } else { + None + } + } + + pub fn waits(&self) -> usize { + self.queue.len() + } + + pub fn inflight(&self) -> usize { + self.inflight.len() + } + + pub fn acked(&self) -> usize { + self.acked + } + + fn pop_retry_msg_id(&mut self, now_ms: u64) -> Option { + loop { + let mut entry = self.inflight_ts.first_entry()?; + if *entry.key() + RESEND_AFTER_MS <= now_ms { + let msg_id = entry.get_mut().pop().expect("should have 
msg"); + if entry.get().is_empty() { + entry.remove(); + } + if self.inflight.contains_key(&msg_id) { + break Some(msg_id); + } + } else { + break None; + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::msg_queue::RESEND_AFTER_MS; + + use super::{Message, MessageQueue}; + + impl Message for u64 { + fn msg_id(&self) -> u64 { + *self + } + } + + #[test] + fn simple_work() { + let mut queue = MessageQueue::::default(); + queue.push(1); + queue.push(2); + + assert_eq!(queue.pop(0), Some(&1)); + assert_eq!(queue.pop(0), None); + + queue.on_ack(1); + assert_eq!(queue.pop(0), Some(&2)); + assert_eq!(queue.pop(0), None); + + queue.on_ack(2); + assert_eq!(queue.pop(0), None); + assert_eq!(queue.inflight.len(), 0); + assert_eq!(queue.inflight_ts.len(), 1); + + assert_eq!(queue.pop(RESEND_AFTER_MS), None); + assert_eq!(queue.inflight_ts.len(), 0); + } + + #[test] + fn retry_msg() { + let mut queue = MessageQueue::::default(); + queue.push(1); + assert_eq!(queue.pop(0), Some(&1)); + assert_eq!(queue.pop(0), None); + + assert_eq!(queue.pop(RESEND_AFTER_MS), Some(&1)); + assert_eq!(queue.pop(RESEND_AFTER_MS), None); + } +} diff --git a/packages/media_connector/src/sql_storage.rs b/packages/media_connector/src/sql_storage.rs new file mode 100644 index 00000000..08d6eb7a --- /dev/null +++ b/packages/media_connector/src/sql_storage.rs @@ -0,0 +1,632 @@ +use std::time::Duration; + +use atm0s_sdn::NodeId; +use media_server_protocol::protobuf::cluster_connector::{connector_request, peer_event}; +use media_server_utils::now_ms; +use sea_orm::{sea_query::OnConflict, ActiveModelTrait, ColumnTrait, ConnectOptions, Database, DatabaseConnection, EntityTrait, QueryFilter, QueryOrder, QuerySelect, Set}; +use sea_orm_migration::MigratorTrait; + +use crate::{EventInfo, PeerInfo, PeerSession, Querier, RoomInfo, SessionInfo, Storage}; + +mod entity; +mod migration; + +pub struct ConnectorStorage { + db: DatabaseConnection, +} + +impl ConnectorStorage { + pub async fn new(sql_uri: &str) -> Self { + let mut opt = ConnectOptions::new(sql_uri.to_owned()); + opt.max_connections(100) + .min_connections(5) + .connect_timeout(Duration::from_secs(8)) + .acquire_timeout(Duration::from_secs(8)) + .idle_timeout(Duration::from_secs(8)) + .max_lifetime(Duration::from_secs(8)) + .sqlx_logging(false) + .sqlx_logging_level(log::LevelFilter::Info); // Setting default PostgreSQL schema + + let db = Database::connect(opt).await.expect("Should connect to sql server"); + migration::Migrator::up(&db, None).await.expect("Should run migration success"); + + Self { db } + } + + async fn on_peer_event(&self, from: NodeId, ts: u64, session: u64, event: peer_event::Event) -> Option<()> { + match event { + peer_event::Event::RouteBegin(params) => { + entity::session::Entity::insert(entity::session::ActiveModel { + id: Set(session as i64), + created_at: Set(now_ms() as i64), + ip: Set(Some(params.remote_ip.clone())), + ..Default::default() + }) + .exec(&self.db) + .await + .ok()?; + + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("RouteBegin".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + + Some(()) + } + peer_event::Event::RouteSuccess(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: 
Set("RouteSuccess".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::RouteError(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("RouteError".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Connecting(params) => { + entity::session::Entity::insert(entity::session::ActiveModel { + id: Set(session as i64), + created_at: Set(now_ms() as i64), + ..Default::default() + }) + .on_conflict( + // on conflict do nothing + OnConflict::column(entity::session::Column::Id).do_nothing().to_owned(), + ) + .exec(&self.db) + .await + .ok()?; + + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Connecting".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Connected(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Connected".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::ConnectError(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("ConnectError".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Stats(_) => todo!(), + peer_event::Event::Reconnect(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Reconnect".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Reconnected(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Reconnected".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Disconnected(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Disconnected".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Join(params) => { + let room = self.upsert_room(¶ms.room).await?; + let peer = self.upsert_peer(room, ¶ms.peer).await?; + let _peer_session = self.upsert_peer_session(peer, 
session, ts).await?; + + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Join".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::Leave(params) => { + let room = self.upsert_room(¶ms.room).await?; + let peer = self.upsert_peer(room, ¶ms.peer).await?; + let peer_session = entity::peer_session::Entity::find() + .filter(entity::peer_session::Column::Peer.eq(peer)) + .filter(entity::peer_session::Column::Session.eq(session)) + .one(&self.db) + .await + .ok()?; + if let Some(peer_session) = peer_session { + let mut model: entity::peer_session::ActiveModel = peer_session.into(); + model.leaved_at = Set(Some(ts as i64)); + model.save(&self.db).await.ok()?; + } + + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("Leave".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::RemoteTrackStarted(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("RemoteTrackStarted".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::RemoteTrackEnded(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("RemoteTrackEnded".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::LocalTrack(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("LocalTrack".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::LocalTrackAttach(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("LocalTrackAttach".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + peer_event::Event::LocalTrackDetach(params) => { + entity::event::ActiveModel { + node: Set(from), + node_ts: Set(ts as i64), + session: Set(session as i64), + created_at: Set(now_ms() as i64), + event: Set("LocalTrackDetach".to_owned()), + meta: Set(Some(serde_json::to_value(params).expect("Should convert params to Json"))), + ..Default::default() + } + .insert(&self.db) + .await + .ok()?; + Some(()) + } + } + } + + async fn upsert_room(&self, room: &str) -> Option { + let room_row = entity::room::Entity::find().filter(entity::room::Column::Room.eq(room)).one(&self.db).await.ok()?; + if let Some(info) = room_row { + Some(info.id) + 
} else { + entity::room::ActiveModel { + room: Set(room.to_owned()), + created_at: Set(now_ms() as i64), + ..Default::default() + } + .insert(&self.db) + .await + .ok() + .map(|r| r.id) + } + } + + async fn upsert_peer(&self, room: i32, peer: &str) -> Option { + let peer_row = entity::peer::Entity::find() + .filter(entity::peer::Column::Room.eq(room)) + .filter(entity::peer::Column::Peer.eq(peer)) + .one(&self.db) + .await + .ok()?; + if let Some(info) = peer_row { + Some(info.id) + } else { + entity::peer::ActiveModel { + room: Set(room), + peer: Set(peer.to_owned()), + created_at: Set(now_ms() as i64), + ..Default::default() + } + .insert(&self.db) + .await + .ok() + .map(|r| r.id) + } + } + + async fn upsert_peer_session(&self, peer: i32, session: u64, ts: u64) -> Option { + let peer_row = entity::peer_session::Entity::find() + .filter(entity::peer_session::Column::Session.eq(session)) + .filter(entity::peer_session::Column::Peer.eq(peer)) + .one(&self.db) + .await + .ok()?; + if let Some(info) = peer_row { + Some(info.id) + } else { + entity::peer_session::ActiveModel { + session: Set(session as i64), + peer: Set(peer), + created_at: Set(now_ms() as i64), + joined_at: Set(ts as i64), + ..Default::default() + } + .insert(&self.db) + .await + .ok() + .map(|r| r.id) + } + } +} + +impl Storage for ConnectorStorage { + async fn on_event(&self, from: NodeId, ts: u64, _req_id: u64, event: connector_request::Event) -> Option<()> { + match event { + connector_request::Event::Peer(event) => self.on_peer_event(from, ts, event.session_id, event.event?).await, + } + } +} + +impl Querier for ConnectorStorage { + async fn rooms(&self, page: usize, count: usize) -> Option> { + let rooms = entity::room::Entity::find() + .order_by(entity::room::Column::CreatedAt, sea_orm::Order::Desc) + .limit(count as u64) + .offset((page * count) as u64) + .all(&self.db) + .await + .ok()? + .into_iter() + .map(|r| RoomInfo { + id: r.id, + room: r.room, + created_at: r.created_at as u64, + peers: 0, //TODO count peers + }) + .collect::>(); + Some(rooms) + } + + async fn peers(&self, room: Option, page: usize, count: usize) -> Option> { + let peers = entity::peer::Entity::find(); + let peers = if let Some(room) = room { + peers.filter(entity::peer::Column::Room.eq(room)) + } else { + peers + }; + + let peers = peers + .order_by(entity::peer::Column::CreatedAt, sea_orm::Order::Desc) + .limit(count as u64) + .offset((page * count) as u64) + .find_with_related(entity::peer_session::Entity) + .all(&self.db) + .await + .unwrap() + .into_iter() + .map(|(r, sessions)| PeerInfo { + id: r.id, + room_id: r.room, + room: "".to_string(), //TODO get room + peer: r.peer.clone(), + created_at: r.created_at as u64, + sessions: sessions + .into_iter() + .map(|s| PeerSession { + id: s.id, + peer_id: s.peer, + peer: r.peer.clone(), + session: s.session as u64, + joined_at: s.joined_at as u64, + leaved_at: s.leaved_at.map(|l| l as u64), + }) + .collect::>(), + }) + .collect::>(); + + Some(peers) + } + + async fn sessions(&self, page: usize, count: usize) -> Option> { + let sessions = entity::session::Entity::find() + .order_by(entity::session::Column::CreatedAt, sea_orm::Order::Desc) + .limit(count as u64) + .offset((page * count) as u64) + .find_with_related(entity::peer_session::Entity) + .all(&self.db) + .await + .ok()? 
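// upsert_room, upsert_peer and upsert_peer_session above all share a "find by natural key,
// insert if missing, return the id" shape rather than a single SQL upsert. Pseudocode-level
// sketch with hypothetical lookup/insert closures, shown only to name the pattern:
async fn find_or_insert<Fut1, Fut2>(find: impl FnOnce() -> Fut1, insert: impl FnOnce() -> Fut2) -> Option<i32>
where
    Fut1: std::future::Future<Output = Option<i32>>,
    Fut2: std::future::Future<Output = Option<i32>>,
{
    match find().await {
        Some(id) => Some(id),
        None => insert().await,
    }
}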
+ .into_iter() + .map(|(r, peers)| SessionInfo { + id: r.id as u64, + created_at: r.created_at as u64, + ip: r.ip, + user_agent: r.user_agent, + sdk: r.sdk, + peers: peers + .into_iter() + .map(|s| PeerSession { + id: s.id, + peer_id: s.peer, + peer: "_".to_string(), //TODO get peer + session: s.session as u64, + joined_at: s.joined_at as u64, + leaved_at: s.leaved_at.map(|l| l as u64), + }) + .collect::>(), + }) + .collect::>(); + log::info!("{:?}", sessions); + Some(sessions) + } + + async fn events(&self, session: Option, from: Option, to: Option, page: usize, count: usize) -> Option> { + let events = entity::event::Entity::find(); + let events = if let Some(session) = session { + events.filter(entity::event::Column::Session.eq(session as i64)) + } else { + events + }; + + let events = if let Some(from) = from { + events.filter(entity::event::Column::CreatedAt.gte(from as i64)) + } else { + events + }; + + let events = if let Some(to) = to { + events.filter(entity::event::Column::CreatedAt.lte(to as i64)) + } else { + events + }; + + let events = events + .order_by(entity::event::Column::CreatedAt, sea_orm::Order::Desc) + .limit(count as u64) + .offset((page * count) as u64) + .all(&self.db) + .await + .unwrap() + .into_iter() + .map(|r| EventInfo { + id: r.id, + node: r.node, + created_at: r.created_at as u64, + session: r.session as u64, + node_ts: r.node_ts as u64, + event: r.event, + meta: r.meta, + }) + .collect::>(); + Some(events) + } +} + +#[cfg(test)] +mod tests { + use media_server_protocol::protobuf::cluster_connector::{ + connector_request, + peer_event::{Connected, Connecting, Event, Join, RouteBegin}, + PeerEvent, + }; + + use crate::{Querier, Storage}; + + use super::ConnectorStorage; + + #[tokio::test] + async fn test_event() { + let session_id = 10000; + let node = 1; + let ts = 1000; + let req_id = 0; + let remote_ip = "127.0.0.1".to_string(); + let storage = ConnectorStorage::new("sqlite::memory:").await; + storage + .on_event( + node, + ts, + req_id, + connector_request::Event::Peer(PeerEvent { + session_id, + event: Some(Event::RouteBegin(RouteBegin { remote_ip: remote_ip.clone() })), + }), + ) + .await + .expect("Should process event"); + + assert_eq!(storage.sessions(0, 2).await.expect("Should got sessions").len(), 1); + assert_eq!(storage.events(None, None, None, 0, 2).await.expect("Should got events").len(), 1); + assert_eq!(storage.events(Some(session_id), None, None, 0, 2).await.expect("Should got events").len(), 1); + } + + #[tokio::test] + async fn test_room() { + let session_id = 10000; + let node = 1; + let ts = 1000; + let req_id = 0; + let remote_ip = "127.0.0.1".to_string(); + let storage = ConnectorStorage::new("sqlite::memory:").await; + storage + .on_event( + node, + ts, + req_id, + connector_request::Event::Peer(PeerEvent { + session_id, + event: Some(Event::Connecting(Connecting { remote_ip: remote_ip.clone() })), + }), + ) + .await + .expect("Should process event"); + + assert_eq!(storage.sessions(0, 2).await.expect("Should got sessions").len(), 1); + assert_eq!(storage.events(None, None, None, 0, 2).await.expect("Should got events").len(), 1); + assert_eq!(storage.events(Some(session_id), None, None, 0, 2).await.expect("Should got events").len(), 1); + + storage + .on_event( + node, + ts, + req_id, + connector_request::Event::Peer(PeerEvent { + session_id, + event: Some(Event::Connected(Connected { + after_ms: 10, + remote_ip: remote_ip.clone(), + })), + }), + ) + .await + .expect("Should process event"); + + assert_eq!(storage.rooms(0, 
+
+        storage
+            .on_event(
+                node,
+                ts,
+                req_id,
+                connector_request::Event::Peer(PeerEvent {
+                    session_id,
+                    event: Some(Event::Join(Join {
+                        room: "demo".to_string(),
+                        peer: "peer".to_string(),
+                    })),
+                }),
+            )
+            .await
+            .expect("Should process event");
+
+        assert_eq!(storage.rooms(0, 2).await.expect("Should got rooms").len(), 1);
+        assert_eq!(storage.peers(None, 0, 2).await.expect("Should got peers").len(), 1);
+    }
+}
diff --git a/packages/media_connector/src/sql_storage/entity.rs b/packages/media_connector/src/sql_storage/entity.rs
new file mode 100644
index 00000000..1b334a0c
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/entity.rs
@@ -0,0 +1,5 @@
+pub mod event;
+pub mod peer;
+pub mod peer_session;
+pub mod room;
+pub mod session;
diff --git a/packages/media_connector/src/sql_storage/entity/event.rs b/packages/media_connector/src/sql_storage/entity/event.rs
new file mode 100644
index 00000000..160287f1
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/entity/event.rs
@@ -0,0 +1,29 @@
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "event")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i32,
+    pub node: u32,
+    /// This is node timestamp
+    pub node_ts: i64,
+    pub session: i64,
+    pub created_at: i64,
+    pub event: String,
+    pub meta: Option<Json>,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(belongs_to = "super::session::Entity", from = "Column::Session", to = "super::session::Column::Id")]
+    Session,
+}
+
+impl Related<super::session::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Session.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/packages/media_connector/src/sql_storage/entity/peer.rs b/packages/media_connector/src/sql_storage/entity/peer.rs
new file mode 100644
index 00000000..40564100
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/entity/peer.rs
@@ -0,0 +1,33 @@
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "peer")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i32,
+    pub room: i32,
+    pub peer: String,
+    pub created_at: i64,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(belongs_to = "super::room::Entity", from = "Column::Room", to = "super::room::Column::Id")]
+    Room,
+    #[sea_orm(has_many = "super::peer_session::Entity")]
+    Sessions,
+}
+
+impl Related<super::room::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Room.def()
+    }
+}
+
+impl Related<super::peer_session::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Sessions.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/packages/media_connector/src/sql_storage/entity/peer_session.rs b/packages/media_connector/src/sql_storage/entity/peer_session.rs
new file mode 100644
index 00000000..915c1a03
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/entity/peer_session.rs
@@ -0,0 +1,37 @@
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "peer_session")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i32,
+    pub peer: i32,
+    pub session: i64,
+    pub created_at: i64,
+    /// This is node timestamp
+    pub joined_at: i64,
+    /// This is node timestamp
+    pub leaved_at: Option<i64>,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(belongs_to = "super::peer::Entity", from = "Column::Peer", to = "super::peer::Column::Id")]
+    Peer,
+    #[sea_orm(belongs_to = "super::session::Entity", from = "Column::Session", to = "super::session::Column::Id")]
+    Session,
+}
+
+impl Related<super::peer::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Peer.def()
+    }
+}
+
+impl Related<super::session::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Session.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/packages/media_connector/src/sql_storage/entity/room.rs b/packages/media_connector/src/sql_storage/entity/room.rs
new file mode 100644
index 00000000..a2f0c49c
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/entity/room.rs
@@ -0,0 +1,24 @@
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "room")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i32,
+    pub room: String,
+    pub created_at: i64,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(has_many = "super::peer::Entity")]
+    Peers,
+}
+
+impl Related<super::peer::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Peers.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/packages/media_connector/src/sql_storage/entity/session.rs b/packages/media_connector/src/sql_storage/entity/session.rs
new file mode 100644
index 00000000..cba8bf77
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/entity/session.rs
@@ -0,0 +1,34 @@
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "session")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i64,
+    pub created_at: i64,
+    pub ip: Option<String>,
+    pub user_agent: Option<String>,
+    pub sdk: Option<String>,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(has_many = "super::event::Entity")]
+    Events,
+    #[sea_orm(has_many = "super::peer_session::Entity")]
+    Peers,
+}
+
+impl Related<super::event::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Events.def()
+    }
+}
+
+impl Related<super::peer_session::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Peers.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/packages/media_connector/src/sql_storage/migration.rs b/packages/media_connector/src/sql_storage/migration.rs
new file mode 100644
index 00000000..d9b34656
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/migration.rs
@@ -0,0 +1,12 @@
+use sea_orm_migration::{MigrationTrait, MigratorTrait};
+
+mod m20240626_0001_init;
+
+pub struct Migrator;
+
+#[async_trait::async_trait]
+impl MigratorTrait for Migrator {
+    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
+        vec![Box::new(m20240626_0001_init::Migration)]
+    }
+}
diff --git a/packages/media_connector/src/sql_storage/migration/m20240626_0001_init.rs b/packages/media_connector/src/sql_storage/migration/m20240626_0001_init.rs
new file mode 100644
index 00000000..56dd0daf
--- /dev/null
+++ b/packages/media_connector/src/sql_storage/migration/m20240626_0001_init.rs
@@ -0,0 +1,170 @@
+use sea_orm_migration::prelude::*;
+
+#[derive(DeriveMigrationName)]
+pub struct Migration;
+
+#[async_trait::async_trait]
+impl MigrationTrait for Migration {
+    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        manager
+            .create_table(
+                Table::create()
+                    .table(Room::Table)
+                    .if_not_exists()
+                    .col(ColumnDef::new(Room::Id).integer().not_null().auto_increment().primary_key())
+                    .col(ColumnDef::new(Room::Room).string().not_null())
+                    .col(ColumnDef::new(Room::CreatedAt).big_integer().not_null())
+                    .to_owned(),
+            )
.await?; + + manager.create_index(Index::create().name("room_query_room").table(Room::Table).col(Room::Room).to_owned()).await?; + manager.create_index(Index::create().name("room_order").table(Room::Table).col(Room::CreatedAt).to_owned()).await?; + + manager + .create_table( + Table::create() + .table(Peer::Table) + .if_not_exists() + .col(ColumnDef::new(Peer::Id).integer().not_null().auto_increment().primary_key()) + .col(ColumnDef::new(Peer::Room).integer().not_null()) + .col(ColumnDef::new(Peer::Peer).string().not_null()) + .col(ColumnDef::new(Peer::CreatedAt).big_integer().not_null()) + .to_owned(), + ) + .await?; + + manager.create_index(Index::create().name("peer_query_room").table(Peer::Table).col(Peer::Room).to_owned()).await?; + manager.create_index(Index::create().name("peer_query_peer").table(Peer::Table).col(Peer::Peer).to_owned()).await?; + manager.create_index(Index::create().name("peer_order").table(Peer::Table).col(Peer::CreatedAt).to_owned()).await?; + + manager + .create_table( + Table::create() + .table(Session::Table) + .if_not_exists() + .col(ColumnDef::new(Session::Id).big_integer().not_null().primary_key()) + .col(ColumnDef::new(Session::Ip).string()) + .col(ColumnDef::new(Session::UserAgent).string()) + .col(ColumnDef::new(Session::Sdk).string()) + .col(ColumnDef::new(Session::CreatedAt).big_integer().not_null()) + .col(ColumnDef::new(Session::JoinedAt).big_integer()) + .col(ColumnDef::new(Session::LeavedAt).big_integer()) + .to_owned(), + ) + .await?; + + manager + .create_index(Index::create().name("session_order").table(Session::Table).col(Session::CreatedAt).to_owned()) + .await?; + + manager + .create_table( + Table::create() + .table(PeerSession::Table) + .if_not_exists() + .col(ColumnDef::new(PeerSession::Id).integer().not_null().auto_increment().primary_key()) + .col(ColumnDef::new(PeerSession::Peer).integer().not_null()) + .col(ColumnDef::new(PeerSession::Session).big_integer().not_null()) + .col(ColumnDef::new(PeerSession::CreatedAt).big_integer().not_null()) + .col(ColumnDef::new(PeerSession::JoinedAt).big_integer().not_null()) + .col(ColumnDef::new(PeerSession::LeavedAt).big_integer()) + .to_owned(), + ) + .await?; + + manager + .create_index(Index::create().name("peer_session_peer").table(PeerSession::Table).col(PeerSession::Peer).to_owned()) + .await?; + manager + .create_index(Index::create().name("peer_session_session").table(PeerSession::Table).col(PeerSession::Session).to_owned()) + .await?; + manager + .create_index(Index::create().name("peer_session_order").table(PeerSession::Table).col(PeerSession::CreatedAt).to_owned()) + .await?; + + manager + .create_table( + Table::create() + .table(Event::Table) + .if_not_exists() + .col(ColumnDef::new(Event::Id).integer().not_null().auto_increment().primary_key()) + .col(ColumnDef::new(Event::Node).unsigned().not_null()) + .col(ColumnDef::new(Event::NodeTs).big_integer().not_null()) + .col(ColumnDef::new(Event::Session).big_integer().not_null()) + .col(ColumnDef::new(Event::CreatedAt).big_integer().not_null()) + .col(ColumnDef::new(Event::Event).string().not_null()) + .col(ColumnDef::new(Event::Meta).json()) + .to_owned(), + ) + .await?; + + manager + .create_index(Index::create().name("event_session_match").table(Event::Table).col(Event::Session).to_owned()) + .await?; + manager.create_index(Index::create().name("event_order").table(Event::Table).col(Event::CreatedAt).to_owned()).await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + 
manager.drop_table(Table::drop().table(Room::Table).to_owned()).await?; + manager.drop_table(Table::drop().table(Peer::Table).to_owned()).await?; + manager.drop_table(Table::drop().table(Session::Table).to_owned()).await?; + manager.drop_table(Table::drop().table(PeerSession::Table).to_owned()).await?; + manager.drop_table(Table::drop().table(Event::Table).to_owned()).await?; + Ok(()) + } +} + +#[derive(Iden)] +enum Room { + Table, + Id, + Room, + CreatedAt, +} + +#[derive(Iden)] +enum Peer { + Table, + Id, + Room, + Peer, + CreatedAt, +} + +#[derive(Iden)] +enum PeerSession { + Table, + Id, + Peer, + Session, + CreatedAt, + JoinedAt, + LeavedAt, +} + +#[derive(Iden)] +enum Session { + Table, + Id, + Ip, + UserAgent, + Sdk, + CreatedAt, + JoinedAt, + LeavedAt, +} + +#[derive(Iden)] +enum Event { + Table, + Id, + Node, + NodeTs, + Session, + CreatedAt, + Event, + Meta, +} diff --git a/packages/media_core/src/cluster.rs b/packages/media_core/src/cluster.rs index 6f4a3159..f38283dc 100644 --- a/packages/media_core/src/cluster.rs +++ b/packages/media_core/src/cluster.rs @@ -62,11 +62,9 @@ pub enum ClusterLocalTrackControl { #[derive(Clone, Debug, PartialEq, Eq)] pub enum ClusterLocalTrackEvent { - Started, RelayChanged, SourceChanged, Media(u64, MediaPacket), - Ended, } #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/packages/media_core/src/endpoint.rs b/packages/media_core/src/endpoint.rs index 845396c1..46f6b051 100644 --- a/packages/media_core/src/endpoint.rs +++ b/packages/media_core/src/endpoint.rs @@ -5,7 +5,7 @@ use std::{marker::PhantomData, time::Instant}; use media_server_protocol::{ endpoint::{AudioMixerConfig, BitrateControlMode, PeerId, PeerMeta, RoomId, RoomInfoPublish, RoomInfoSubscribe, TrackMeta, TrackName, TrackPriority, TrackSource}, media::MediaPacket, - protobuf, + protobuf::{self, cluster_connector::peer_event}, transport::RpcResult, }; use sans_io_runtime::{ @@ -176,6 +176,7 @@ pub enum EndpointInput { pub enum EndpointOutput { Net(BackendOutgoing), Cluster(ClusterRoomHash, ClusterEndpointControl), + PeerEvent(u64, Instant, peer_event::Event), Ext(Ext), Continue, Destroy, @@ -194,6 +195,7 @@ pub struct EndpointCfg { } pub struct Endpoint, ExtIn, ExtOut> { + session_id: u64, transport: TaskSwitcherBranch>, internal: TaskSwitcherBranch, switcher: TaskSwitcher, @@ -201,8 +203,9 @@ pub struct Endpoint, ExtIn, ExtOut> { } impl, ExtIn, ExtOut> Endpoint { - pub fn new(cfg: EndpointCfg, transport: T) -> Self { + pub fn new(session_id: u64, cfg: EndpointCfg, transport: T) -> Self { Self { + session_id, transport: TaskSwitcherBranch::new(transport, TaskType::Transport), internal: TaskSwitcherBranch::new(EndpointInternal::new(cfg), TaskType::Internal), switcher: TaskSwitcher::new(2), @@ -293,6 +296,7 @@ impl, ExtIn, ExtOut> Endpoint { } InternalOutput::Cluster(room, control) => Some(EndpointOutput::Cluster(room, control)), InternalOutput::Destroy => Some(EndpointOutput::Destroy), + InternalOutput::PeerEvent(ts, event) => Some(EndpointOutput::PeerEvent(self.session_id, ts, event)), } } } diff --git a/packages/media_core/src/endpoint/internal.rs b/packages/media_core/src/endpoint/internal.rs index 42d18666..8ee76529 100644 --- a/packages/media_core/src/endpoint/internal.rs +++ b/packages/media_core/src/endpoint/internal.rs @@ -4,6 +4,7 @@ use std::{collections::VecDeque, time::Instant}; use media_server_protocol::{ endpoint::{AudioMixerConfig, AudioMixerMode, PeerId, PeerMeta, RoomId, RoomInfoPublish, RoomInfoSubscribe}, + protobuf::{cluster_connector::peer_event, shared::Kind}, 
transport::RpcError, }; use media_server_utils::Small2dMap; @@ -31,9 +32,10 @@ enum TaskType { BitrateAllocator, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq)] pub enum InternalOutput { Event(EndpointEvent), + PeerEvent(Instant, peer_event::Event), RpcRes(EndpointReqId, EndpointRes), Cluster(ClusterRoomHash, ClusterEndpointControl), Destroy, @@ -41,7 +43,7 @@ pub enum InternalOutput { pub struct EndpointInternal { cfg: EndpointCfg, - state: TransportState, + state: Option<(Instant, TransportState)>, wait_join: Option<(EndpointReqId, RoomId, PeerId, PeerMeta, RoomInfoPublish, RoomInfoSubscribe, Option)>, joined: Option<(ClusterRoomHash, RoomId, PeerId, Option)>, local_tracks_id: Small2dMap, @@ -57,7 +59,7 @@ pub struct EndpointInternal { impl EndpointInternal { pub fn new(cfg: EndpointCfg) -> Self { Self { - state: TransportState::Connecting, + state: None, wait_join: None, joined: None, local_tracks_id: Default::default(), @@ -113,14 +115,15 @@ impl EndpointInternal { pub fn on_transport_rpc(&mut self, now: Instant, req_id: EndpointReqId, req: EndpointReq) { match req { - EndpointReq::JoinRoom(room, peer, meta, publish, subscribe, mixer) => { - if matches!(self.state, TransportState::Connecting) { + EndpointReq::JoinRoom(room, peer, meta, publish, subscribe, mixer) => match &self.state { + None | Some((_, TransportState::Connecting(_))) => { log::info!("[EndpointInternal] join_room({room}, {peer}) but in Connecting state => wait"); self.wait_join = Some((req_id, room, peer, meta, publish, subscribe, mixer)); - } else { + } + _ => { self.join_room(now, req_id, room, peer, meta, publish, subscribe, mixer); } - } + }, EndpointReq::LeaveRoom => { if let Some((_req_id, room, peer, _meta, _publish, _subscribe, _mixer)) = self.wait_join.take() { log::info!("[EndpointInternal] leave_room({room}, {peer}) but in Connecting state => only clear local"); @@ -186,26 +189,61 @@ impl EndpointInternal { } fn on_transport_state_changed(&mut self, now: Instant, state: TransportState) { - self.state = state; - match &self.state { - TransportState::Connecting => { + let pre_state = self.state.take(); + self.state = Some((now, state)); + match &(self.state.as_ref().expect("Should have state").1) { + TransportState::Connecting(ip) => { log::info!("[EndpointInternal] connecting"); + self.queue + .push_back(InternalOutput::PeerEvent(now, peer_event::Event::Connecting(peer_event::Connecting { remote_ip: ip.to_string() }))); } TransportState::ConnectError(err) => { log::info!("[EndpointInternal] connect error {:?}", err); + let (pre_ts, _pre_event) = pre_state.expect("Should have previous state"); + self.queue.push_back(InternalOutput::PeerEvent( + now, + peer_event::Event::ConnectError(peer_event::ConnectError { + after_ms: (pre_ts - now).as_millis() as u32, + error: 0, + }), + )); self.queue.push_back(InternalOutput::Destroy); } - TransportState::Connected => { + TransportState::Connected(ip) => { log::info!("[EndpointInternal] connected"); + let (pre_ts, pre_event) = pre_state.expect("Should have previous state"); + if matches!(pre_event, TransportState::Reconnecting(_)) { + self.queue.push_back(InternalOutput::PeerEvent( + now, + peer_event::Event::Reconnected(peer_event::Reconnected { + after_ms: (pre_ts - now).as_millis() as u32, + remote_ip: ip.to_string(), + }), + )); + } else { + self.queue.push_back(InternalOutput::PeerEvent( + now, + peer_event::Event::Connected(peer_event::Connected { + after_ms: (pre_ts - now).as_millis() as u32, + remote_ip: ip.to_string(), + }), + )); + } let (req_id, 
room, peer, meta, publish, subscribe, mixer) = return_if_none!(self.wait_join.take()); log::info!("[EndpointInternal] join_room({room}, {peer}) after connected"); self.join_room(now, req_id, room, peer, meta, publish, subscribe, mixer); } - TransportState::Reconnecting => { + TransportState::Reconnecting(ip) => { log::info!("[EndpointInternal] reconnecting"); + self.queue + .push_back(InternalOutput::PeerEvent(now, peer_event::Event::Reconnect(peer_event::Reconnecting { remote_ip: ip.to_string() }))); } TransportState::Disconnected(err) => { log::info!("[EndpointInternal] disconnected {:?}", err); + self.queue.push_back(InternalOutput::PeerEvent( + now, + peer_event::Event::Disconnected(peer_event::Disconnected { duration_ms: 0, reason: 0 }), //TODO provide correct reason + )); self.leave_room(now); self.queue.push_back(InternalOutput::Destroy); } @@ -227,8 +265,18 @@ impl EndpointInternal { if let Some(kind) = event.need_create() { log::info!("[EndpointInternal] create local track {:?}", track); let room = self.joined.as_ref().map(|j| j.0); - let index = self.local_tracks.input(&mut self.switcher).add_task(EndpointLocalTrack::new(kind, room)); + let index = self.local_tracks.input(&mut self.switcher).add_task(EndpointLocalTrack::new(track, kind, room)); self.local_tracks_id.insert(track, index); + + // We need to fire event here because local track never removed. + // Inside local track we only fire attach or detach event + self.queue.push_back(InternalOutput::PeerEvent( + now, + peer_event::Event::LocalTrack(peer_event::LocalTrack { + track: track.0 as i32, + kind: Kind::from(kind) as i32, + }), + )); } let index = return_if_none!(self.local_tracks_id.get1(&track)); self.local_tracks.input(&mut self.switcher).on_event(now, *index, local_track::Input::Event(event)); @@ -246,7 +294,9 @@ impl EndpointInternal { self.joined = Some(((&room).into(), room.clone(), peer.clone(), mixer.as_ref().map(|m| m.mode))); self.queue - .push_back(InternalOutput::Cluster((&room).into(), ClusterEndpointControl::Join(peer, meta, publish, subscribe, mixer))); + .push_back(InternalOutput::Cluster((&room).into(), ClusterEndpointControl::Join(peer.clone(), meta, publish, subscribe, mixer))); + self.queue + .push_back(InternalOutput::PeerEvent(now, peer_event::Event::Join(peer_event::Join { room: room.0, peer: peer.0 }))); for (_track_id, index) in self.local_tracks_id.pairs() { self.local_tracks.input(&mut self.switcher).on_event(now, index, local_track::Input::JoinRoom(room_hash)); @@ -278,6 +328,8 @@ impl EndpointInternal { } self.queue.push_back(InternalOutput::Cluster(hash, ClusterEndpointControl::Leave)); + self.queue + .push_back(InternalOutput::PeerEvent(now, peer_event::Event::Leave(peer_event::Leave { room: room.0, peer: peer.0 }))); } } @@ -343,6 +395,9 @@ impl EndpointInternal { } self.remote_tracks.input(&mut self.switcher).remove_task(index); } + remote_track::Output::PeerEvent(ts, event) => { + self.queue.push_back(InternalOutput::PeerEvent(ts, event)); + } } } @@ -376,6 +431,9 @@ impl EndpointInternal { self.bitrate_allocator.input(&mut self.switcher).del_egress_video_track(id); } } + local_track::Output::PeerEvent(ts, event) => { + self.queue.push_back(InternalOutput::PeerEvent(ts, event)); + } } } @@ -402,9 +460,13 @@ impl EndpointInternal { #[cfg(test)] mod tests { - use std::time::Instant; + use std::{ + net::{IpAddr, Ipv4Addr}, + time::Instant, + }; use media_server_protocol::endpoint::{PeerId, PeerMeta, RoomId, RoomInfoPublish, RoomInfoSubscribe}; + use 
media_server_protocol::protobuf::cluster_connector::peer_event; use sans_io_runtime::TaskSwitcherChild; use crate::{ @@ -422,8 +484,25 @@ mod tests { max_ingress_bitrate: 2_000_000, }); + let remote = IpAddr::V4(Ipv4Addr::LOCALHOST); let now = Instant::now(); - internal.on_transport_event(now, TransportEvent::State(TransportState::Connected)); + internal.on_transport_event(now, TransportEvent::State(TransportState::Connecting(remote))); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent(now, peer_event::Event::Connecting(peer_event::Connecting { remote_ip: remote.to_string() }))) + ); + assert_eq!(internal.pop_output(now), None); + internal.on_transport_event(now, TransportEvent::State(TransportState::Connected(remote))); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Connected(peer_event::Connected { + remote_ip: remote.to_string(), + after_ms: 0 + }) + )) + ); assert_eq!(internal.pop_output(now), None); let room: RoomId = "room".into(); @@ -436,7 +515,17 @@ mod tests { let room_hash = ClusterRoomHash::from(&room); assert_eq!( internal.pop_output(now), - Some(InternalOutput::Cluster(room_hash, ClusterEndpointControl::Join(peer, meta, publish, subscribe, None))) + Some(InternalOutput::Cluster(room_hash, ClusterEndpointControl::Join(peer.clone(), meta, publish, subscribe, None))) + ); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Join(peer_event::Join { + room: room.0.clone(), + peer: peer.0.clone(), + }) + )) ); assert_eq!(internal.pop_output(now), None); @@ -444,6 +533,16 @@ mod tests { internal.on_transport_rpc(now, 1.into(), EndpointReq::LeaveRoom); assert_eq!(internal.pop_output(now), Some(InternalOutput::RpcRes(1.into(), EndpointRes::LeaveRoom(Ok(()))))); assert_eq!(internal.pop_output(now), Some(InternalOutput::Cluster(room_hash, ClusterEndpointControl::Leave))); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Leave(peer_event::Leave { + room: room.0.clone(), + peer: peer.0.clone(), + }) + )) + ); assert_eq!(internal.pop_output(now), None); } @@ -454,8 +553,25 @@ mod tests { max_ingress_bitrate: 2_000_000, }); + let remote = IpAddr::V4(Ipv4Addr::LOCALHOST); let now = Instant::now(); - internal.on_transport_event(now, TransportEvent::State(TransportState::Connected)); + internal.on_transport_event(now, TransportEvent::State(TransportState::Connecting(remote))); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent(now, peer_event::Event::Connecting(peer_event::Connecting { remote_ip: remote.to_string() }))) + ); + assert_eq!(internal.pop_output(now), None); + internal.on_transport_event(now, TransportEvent::State(TransportState::Connected(remote))); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Connected(peer_event::Connected { + remote_ip: remote.to_string(), + after_ms: 0 + }) + )) + ); assert_eq!(internal.pop_output(now), None); let room1: RoomId = "room1".into(); @@ -478,6 +594,16 @@ mod tests { ClusterEndpointControl::Join(peer.clone(), meta.clone(), publish.clone(), subscribe.clone(), None), )) ); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Join(peer_event::Join { + room: room1.0.clone(), + peer: peer.0.clone(), + }) + )) + ); assert_eq!(internal.pop_output(now), None); //now join other room should success @@ -492,6 +618,16 @@ mod tests { 
assert_eq!(internal.pop_output(now), Some(InternalOutput::RpcRes(1.into(), EndpointRes::JoinRoom(Ok(()))))); //it will auto leave room1 assert_eq!(internal.pop_output(now), Some(InternalOutput::Cluster(room1_hash, ClusterEndpointControl::Leave))); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Leave(peer_event::Leave { + room: room1.0.clone(), + peer: peer.0.clone(), + }) + )) + ); //and after that join room2 assert_eq!( @@ -501,6 +637,16 @@ mod tests { ClusterEndpointControl::Join(peer.clone(), meta.clone(), publish.clone(), subscribe.clone(), None), )) ); + assert_eq!( + internal.pop_output(now), + Some(InternalOutput::PeerEvent( + now, + peer_event::Event::Join(peer_event::Join { + room: room2.0.clone(), + peer: peer.0.clone(), + }) + )) + ); assert_eq!(internal.pop_output(now), None); } diff --git a/packages/media_core/src/endpoint/internal/local_track.rs b/packages/media_core/src/endpoint/internal/local_track.rs index c0541a8b..c7bdfd9d 100644 --- a/packages/media_core/src/endpoint/internal/local_track.rs +++ b/packages/media_core/src/endpoint/internal/local_track.rs @@ -8,8 +8,8 @@ use atm0s_sdn::TimePivot; use media_server_protocol::{ endpoint::{PeerId, TrackName, TrackPriority}, media::{MediaKind, MediaMeta}, - protobuf::shared::receiver::Status as ProtoStatus, - transport::RpcError, + protobuf::{cluster_connector::peer_event, shared::receiver::Status as ProtoStatus}, + transport::{LocalTrackId, RpcError}, }; use sans_io_runtime::{return_if_none, Task, TaskSwitcherChild}; @@ -42,6 +42,7 @@ pub enum Input { pub enum Output { Event(EndpointLocalTrackEvent), Cluster(ClusterRoomHash, ClusterLocalTrackControl), + PeerEvent(Instant, peer_event::Event), RpcRes(EndpointReqId, EndpointLocalTrackRes), Started(MediaKind, TrackPriority), Updated(MediaKind, TrackPriority), @@ -56,6 +57,7 @@ enum Status { } pub struct EndpointLocalTrack { + track: LocalTrackId, kind: MediaKind, room: Option, bind: Option<(PeerId, TrackName, Status)>, @@ -66,9 +68,10 @@ pub struct EndpointLocalTrack { } impl EndpointLocalTrack { - pub fn new(kind: MediaKind, room: Option) -> Self { + pub fn new(track: LocalTrackId, kind: MediaKind, room: Option) -> Self { log::info!("[EndpointLocalTrack] track {kind}, room {:?}", room); Self { + track, kind, room, bind: None, @@ -86,18 +89,25 @@ impl EndpointLocalTrack { self.room = Some(room); } - fn on_leave_room(&mut self, _now: Instant) { + fn on_leave_room(&mut self, now: Instant) { assert_ne!(self.room, None); let room = return_if_none!(self.room.take()); log::info!("[EndpointLocalTrack] leave room {room}"); let (peer, track, _) = return_if_none!(self.bind.take()); log::info!("[EndpointLocalTrack] leave room {room} => auto Unsubscribe {peer} {track}"); self.queue.push_back(Output::Cluster(room, ClusterLocalTrackControl::Unsubscribe)); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::LocalTrackDetach(peer_event::LocalTrackDetach { + track: self.track.0 as i32, + remote_peer: peer.0, + remote_track: track.0, + }), + )); } fn on_cluster_event(&mut self, now: Instant, event: ClusterLocalTrackEvent) { match event { - ClusterLocalTrackEvent::Started => todo!(), ClusterLocalTrackEvent::RelayChanged => { if self.kind.is_video() { let room = return_if_none!(self.room.as_ref()); @@ -137,7 +147,6 @@ impl EndpointLocalTrack { self.queue.push_back(Output::Event(EndpointLocalTrackEvent::Media(pkt))); } } - ClusterLocalTrackEvent::Ended => todo!(), } } @@ -177,7 +186,15 @@ impl EndpointLocalTrack { self.bind = 
Some((peer.clone(), track.clone(), Status::Waiting)); self.selector.set_limit_layer(now_ms, config.max_spatial, config.max_temporal); self.queue.push_back(Output::Started(self.kind, config.priority)); - self.queue.push_back(Output::Cluster(*room, ClusterLocalTrackControl::Subscribe(peer, track))); + self.queue.push_back(Output::Cluster(*room, ClusterLocalTrackControl::Subscribe(peer.clone(), track.clone()))); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::LocalTrackAttach(peer_event::LocalTrackAttach { + track: self.track.0 as i32, + remote_peer: peer.0, + remote_track: track.0, + }), + )); self.selector.reset(); } else { log::warn!("[EndpointLocalTrack] track {} view but not in room", self.kind); @@ -189,10 +206,18 @@ impl EndpointLocalTrack { //TODO process config here if let Some(room) = self.room.as_ref() { if let Some((peer, track, _)) = self.bind.take() { + log::info!("[EndpointLocalTrack] unview room {room} peer {peer} track {track}"); self.queue.push_back(Output::RpcRes(req_id, EndpointLocalTrackRes::Detach(Ok(())))); self.queue.push_back(Output::Stopped(self.kind)); self.queue.push_back(Output::Cluster(*room, ClusterLocalTrackControl::Unsubscribe)); - log::info!("[EndpointLocalTrack] unview room {room} peer {peer} track {track}"); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::LocalTrackDetach(peer_event::LocalTrackDetach { + track: self.track.0 as i32, + remote_peer: peer.0, + remote_track: track.0, + }), + )); } else { log::warn!("[EndpointLocalTrack] unview but not bind to any source"); self.queue diff --git a/packages/media_core/src/endpoint/internal/remote_track.rs b/packages/media_core/src/endpoint/internal/remote_track.rs index 823405c4..551f6634 100644 --- a/packages/media_core/src/endpoint/internal/remote_track.rs +++ b/packages/media_core/src/endpoint/internal/remote_track.rs @@ -5,6 +5,7 @@ use std::{collections::VecDeque, time::Instant}; use media_server_protocol::{ endpoint::{BitrateControlMode, TrackMeta, TrackName, TrackPriority}, media::{MediaKind, MediaLayersBitrate}, + protobuf::{cluster_connector::peer_event, shared::Kind}, transport::RpcError, }; use sans_io_runtime::{return_if_none, Task, TaskSwitcherChild}; @@ -31,6 +32,7 @@ pub enum Input { pub enum Output { Event(EndpointRemoteTrackEvent), Cluster(ClusterRoomHash, ClusterRemoteTrackControl), + PeerEvent(Instant, peer_event::Event), RpcRes(EndpointReqId, EndpointRemoteTrackRes), Started(MediaKind, TrackPriority), Update(MediaKind, TrackPriority), @@ -62,21 +64,35 @@ impl EndpointRemoteTrack { } } - fn on_join_room(&mut self, _now: Instant, room: ClusterRoomHash) { + fn on_join_room(&mut self, now: Instant, room: ClusterRoomHash) { assert_eq!(self.room, None); self.room = Some(room); log::info!("[EndpointRemoteTrack] join room {room}"); let name = return_if_none!(self.name.clone()); log::info!("[EndpointRemoteTrack] started as name {name} after join room"); - self.queue.push_back(Output::Cluster(room, ClusterRemoteTrackControl::Started(name, self.meta.clone()))); + self.queue.push_back(Output::Cluster(room, ClusterRemoteTrackControl::Started(name.clone(), self.meta.clone()))); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::RemoteTrackStarted(peer_event::RemoteTrackStarted { + track: name.0, + kind: Kind::from(self.meta.kind) as i32, + }), + )); } - fn on_leave_room(&mut self, _now: Instant) { + fn on_leave_room(&mut self, now: Instant) { let room = self.room.take().expect("Must have room here"); log::info!("[EndpointRemoteTrack] leave room 
{room}"); let name = return_if_none!(self.name.clone()); log::info!("[EndpointRemoteTrack] stopped as name {name} after leave room"); - self.queue.push_back(Output::Cluster(room, ClusterRemoteTrackControl::Ended(name, self.meta.clone()))); + self.queue.push_back(Output::Cluster(room, ClusterRemoteTrackControl::Ended(name.clone(), self.meta.clone()))); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::RemoteTrackEnded(peer_event::RemoteTrackEnded { + track: name.0, + kind: Kind::from(self.meta.kind) as i32, + }), + )); } fn on_cluster_event(&mut self, _now: Instant, event: ClusterRemoteTrackEvent) { @@ -93,14 +109,22 @@ impl EndpointRemoteTrack { } } - fn on_transport_event(&mut self, _now: Instant, event: RemoteTrackEvent) { + fn on_transport_event(&mut self, now: Instant, event: RemoteTrackEvent) { match event { - RemoteTrackEvent::Started { name, priority, meta: _ } => { + RemoteTrackEvent::Started { name, priority, meta } => { self.name = Some(name.clone().into()); let room = return_if_none!(self.room.as_ref()); log::info!("[EndpointRemoteTrack] started as name {name} in room {room}"); - self.queue.push_back(Output::Cluster(*room, ClusterRemoteTrackControl::Started(TrackName(name), self.meta.clone()))); + self.queue + .push_back(Output::Cluster(*room, ClusterRemoteTrackControl::Started(TrackName(name.clone()), self.meta.clone()))); self.queue.push_back(Output::Started(self.meta.kind, priority)); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::RemoteTrackStarted(peer_event::RemoteTrackStarted { + track: name, + kind: Kind::from(meta.kind) as i32, + }), + )); } RemoteTrackEvent::Paused => {} RemoteTrackEvent::Resumed => {} @@ -124,8 +148,15 @@ impl EndpointRemoteTrack { let name = return_if_none!(self.name.take()); let room = return_if_none!(self.room.as_ref()); log::info!("[EndpointRemoteTrack] stopped with name {name} in room {room}"); - self.queue.push_back(Output::Cluster(*room, ClusterRemoteTrackControl::Ended(name, self.meta.clone()))); + self.queue.push_back(Output::Cluster(*room, ClusterRemoteTrackControl::Ended(name.clone(), self.meta.clone()))); self.queue.push_back(Output::Stopped(self.meta.kind)); + self.queue.push_back(Output::PeerEvent( + now, + peer_event::Event::RemoteTrackEnded(peer_event::RemoteTrackEnded { + track: name.0, + kind: Kind::from(self.meta.kind) as i32, + }), + )); } } } diff --git a/packages/media_core/src/endpoint/middleware.rs b/packages/media_core/src/endpoint/middleware.rs index 51343fa8..5248fb21 100644 --- a/packages/media_core/src/endpoint/middleware.rs +++ b/packages/media_core/src/endpoint/middleware.rs @@ -1,4 +1,3 @@ mod logger; -mod mix_minus; pub trait EndpointMiddleware {} diff --git a/packages/media_core/src/endpoint/middleware/mix_minus.rs b/packages/media_core/src/endpoint/middleware/mix_minus.rs deleted file mode 100644 index 8b137891..00000000 --- a/packages/media_core/src/endpoint/middleware/mix_minus.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/packages/media_core/src/transport.rs b/packages/media_core/src/transport.rs index 5d63a3cf..99966765 100644 --- a/packages/media_core/src/transport.rs +++ b/packages/media_core/src/transport.rs @@ -1,5 +1,5 @@ use derive_more::{Display, From}; -use std::time::Instant; +use std::{net::IpAddr, time::Instant}; use media_server_protocol::{ endpoint::{TrackMeta, TrackPriority}, @@ -25,10 +25,10 @@ pub enum TransportError { #[derive(Debug, PartialEq, Eq)] pub enum TransportState { - Connecting, + Connecting(IpAddr), ConnectError(TransportError), - Connected, - 
Reconnecting, + Connected(IpAddr), + Reconnecting(IpAddr), Disconnected(Option), } diff --git a/packages/media_runner/Cargo.toml b/packages/media_runner/Cargo.toml index 019c1c29..1afd53ed 100644 --- a/packages/media_runner/Cargo.toml +++ b/packages/media_runner/Cargo.toml @@ -13,6 +13,7 @@ convert-enum = { workspace = true } media-server-protocol = { path = "../protocol" } media-server-secure = { path = "../media_secure" } media-server-gateway = { path = "../media_gateway" } +media-server-connector = { path = "../media_connector" } media-server-core = { path = "../media_core" } sans-io-runtime = { workspace = true, default-features = false } diff --git a/packages/media_runner/src/worker.rs b/packages/media_runner/src/worker.rs index 2287dc3d..0ba1734a 100644 --- a/packages/media_runner/src/worker.rs +++ b/packages/media_runner/src/worker.rs @@ -6,12 +6,16 @@ use atm0s_sdn::{ services::{manual_discovery, visualization}, ControllerPlaneCfg, DataPlaneCfg, DataWorkerHistory, NetInput, NetOutput, NodeAddr, SdnExtIn, SdnExtOut, SdnWorker, SdnWorkerBusEvent, SdnWorkerCfg, SdnWorkerInput, SdnWorkerOutput, TimePivot, }; +use media_server_connector::agent_service::ConnectorAgentServiceBuilder; use media_server_core::cluster::{self, MediaCluster}; use media_server_gateway::{agent_service::GatewayAgentServiceBuilder, NodeMetrics, ServiceKind, AGENT_SERVICE_ID}; use media_server_protocol::{ cluster::{ClusterMediaInfo, ClusterNodeGenericInfo, ClusterNodeInfo}, gateway::generate_gateway_zone_tag, - protobuf::gateway::{ConnectResponse, RemoteIceResponse}, + protobuf::{ + cluster_connector::{connector_request, PeerEvent}, + gateway::{ConnectResponse, RemoteIceResponse}, + }, transport::{ webrtc, whep::{self, WhepConnectRes, WhepDeleteRes, WhepRemoteIceRes}, @@ -55,12 +59,14 @@ pub enum UserData { pub enum SC { Visual(visualization::Control), Gateway(media_server_gateway::agent_service::Control), + Connector(media_server_connector::agent_service::Control), } #[derive(Clone, Debug, convert_enum::From, convert_enum::TryInto)] pub enum SE { Visual(visualization::Event), Gateway(media_server_gateway::agent_service::Event), + Connector(media_server_connector::agent_service::Event), } pub type TC = (); pub type TW = (); @@ -138,6 +144,7 @@ impl MediaServerWorker { vec![generate_gateway_zone_tag(sdn_zone)], )); let gateway = Arc::new(GatewayAgentServiceBuilder::new(media.max_live)); + let connector = Arc::new(ConnectorAgentServiceBuilder::new()); let sdn_config = SdnConfig { node_id, @@ -147,7 +154,7 @@ impl MediaServerWorker { authorization: Arc::new(StaticKeyAuthorization::new(secret)), handshake_builder: Arc::new(HandshakeBuilderXDA), random: Box::new(OsRng), - services: vec![visualization.clone(), discovery.clone(), gateway.clone()], + services: vec![visualization.clone(), discovery.clone(), gateway.clone(), connector.clone()], }) } else { None @@ -353,6 +360,18 @@ impl MediaServerWorker { self.media_cluster.input(&mut self.switcher).on_endpoint_control(now, session.into(), room, control); Output::Continue } + transport_webrtc::GroupOutput::PeerEvent(_, session_id, ts, event) => { + let now_ms = self.timer.timestamp_ms(now); + self.sdn_worker.input(&mut self.switcher).on_event( + now_ms, + SdnWorkerInput::Ext(SdnExtIn::ServicesControl( + media_server_connector::AGENT_SERVICE_ID.into(), + UserData::Cluster, + media_server_connector::agent_service::Control::Fire(self.timer.timestamp_ms(ts), connector_request::Event::Peer(PeerEvent { session_id, event: Some(event) })).into(), + )), + ); + Output::Continue + } 
transport_webrtc::GroupOutput::Ext(session, ext) => match ext { transport_webrtc::ExtOut::RemoteIce(req_id, variant, res) => match variant { transport_webrtc::Variant::Whip => Output::ExtRpc(req_id, RpcRes::Whip(whip::RpcRes::RemoteIce(res.map(|_| WhipRemoteIceRes {})))), @@ -384,7 +403,11 @@ impl MediaServerWorker { log::info!("[MediaServerWorker] incoming rpc req {req_id}"); match req { RpcReq::Whip(req) => match req { - whip::RpcReq::Connect(req) => match self.media_webrtc.input(&mut self.switcher).spawn(transport_webrtc::VariantParams::Whip(req.room, req.peer), &req.sdp) { + whip::RpcReq::Connect(req) => match self + .media_webrtc + .input(&mut self.switcher) + .spawn(req.ip, req.session_id, transport_webrtc::VariantParams::Whip(req.room, req.peer), &req.sdp) + { Ok((_ice_lite, sdp, conn_id)) => self.queue.push_back(Output::ExtRpc(req_id, RpcRes::Whip(whip::RpcRes::Connect(Ok(WhipConnectRes { conn_id, sdp }))))), Err(e) => self.queue.push_back(Output::ExtRpc(req_id, RpcRes::Whip(whip::RpcRes::Connect(Err(e))))), }, @@ -407,7 +430,7 @@ impl MediaServerWorker { match self .media_webrtc .input(&mut self.switcher) - .spawn(transport_webrtc::VariantParams::Whep(req.room, peer_id.into()), &req.sdp) + .spawn(req.ip, req.session_id, transport_webrtc::VariantParams::Whep(req.room, peer_id.into()), &req.sdp) { Ok((_ice_lite, sdp, conn_id)) => self.queue.push_back(Output::ExtRpc(req_id, RpcRes::Whep(whep::RpcRes::Connect(Ok(WhepConnectRes { conn_id, sdp }))))), Err(e) => self.queue.push_back(Output::ExtRpc(req_id, RpcRes::Whep(whep::RpcRes::Connect(Err(e))))), @@ -427,24 +450,26 @@ impl MediaServerWorker { } }, RpcReq::Webrtc(req) => match req { - webrtc::RpcReq::Connect(ip, user_agent, req) => match self - .media_webrtc - .input(&mut self.switcher) - .spawn(VariantParams::Webrtc(ip, user_agent, req.clone(), self.secure.clone()), &req.sdp) - { - Ok((ice_lite, sdp, conn_id)) => self.queue.push_back(Output::ExtRpc( - req_id, - RpcRes::Webrtc(webrtc::RpcRes::Connect(Ok(( - conn_id, - ConnectResponse { - conn_id: "".to_string(), - sdp, - ice_lite, - }, - )))), - )), - Err(e) => self.queue.push_back(Output::ExtRpc(req_id, RpcRes::Webrtc(webrtc::RpcRes::Connect(Err(e))))), - }, + webrtc::RpcReq::Connect(session_id, ip, user_agent, req) => { + match self + .media_webrtc + .input(&mut self.switcher) + .spawn(ip, session_id, VariantParams::Webrtc(user_agent, req.clone(), self.secure.clone()), &req.sdp) + { + Ok((ice_lite, sdp, conn_id)) => self.queue.push_back(Output::ExtRpc( + req_id, + RpcRes::Webrtc(webrtc::RpcRes::Connect(Ok(( + conn_id, + ConnectResponse { + conn_id: "".to_string(), + sdp, + ice_lite, + }, + )))), + )), + Err(e) => self.queue.push_back(Output::ExtRpc(req_id, RpcRes::Webrtc(webrtc::RpcRes::Connect(Err(e))))), + } + } webrtc::RpcReq::RemoteIce(conn, ice) => { log::info!("on rpc request {req_id}, webrtc::RpcReq::RemoteIce"); self.media_webrtc.input(&mut self.switcher).on_event( diff --git a/packages/media_utils/src/lib.rs b/packages/media_utils/src/lib.rs index 9ba56463..24999f10 100644 --- a/packages/media_utils/src/lib.rs +++ b/packages/media_utils/src/lib.rs @@ -2,10 +2,12 @@ mod f16; mod seq_extend; mod seq_rewrite; mod small_2dmap; +mod time; mod ts_rewrite; pub use f16::{F16i, F16u}; pub use seq_extend::RtpSeqExtend; pub use seq_rewrite::SeqRewrite; pub use small_2dmap::Small2dMap; +pub use time::now_ms; pub use ts_rewrite::TsRewrite; diff --git a/packages/media_utils/src/time.rs b/packages/media_utils/src/time.rs new file mode 100644 index 00000000..693ab3d1 --- /dev/null +++ 
b/packages/media_utils/src/time.rs @@ -0,0 +1,5 @@ +use std::time::{SystemTime, UNIX_EPOCH}; +pub fn now_ms() -> u64 { + let start = SystemTime::now(); + start.duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis() as u64 +} diff --git a/packages/protocol/Cargo.toml b/packages/protocol/Cargo.toml index dd2010e5..1d21cd76 100644 --- a/packages/protocol/Cargo.toml +++ b/packages/protocol/Cargo.toml @@ -11,6 +11,7 @@ log = { workspace = true } convert-enum = { workspace = true } derivative = { workspace = true } derive_more = { workspace = true } +rand = { workspace = true } prost = { workspace = true } serde = { version = "1.0", features = ["derive"] } quinn = { version = "0.11", optional = true } diff --git a/packages/protocol/build.rs b/packages/protocol/build.rs index 8c2e5e07..2c1543c0 100644 --- a/packages/protocol/build.rs +++ b/packages/protocol/build.rs @@ -12,6 +12,22 @@ fn main() -> Result<()> { .service_generator(Box::new(GenericRpcGenerator)) .out_dir("src/protobuf") .include_file("mod.rs") + .type_attribute("cluster_connector.PeerEvent.RouteBegin", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.RouteSuccess", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.RouteError", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Connecting", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.ConnectError", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Connected", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Reconnecting", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Reconnected", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Disconnected", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Join", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.Leave", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.RemoteTrackStarted", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.RemoteTrackEnded", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.LocalTrack", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.LocalTrackAttach", "#[derive(serde::Serialize)]") + .type_attribute("cluster_connector.PeerEvent.LocalTrackDetach", "#[derive(serde::Serialize)]") .compile_protos( &[ "./proto/shared.proto", @@ -20,6 +36,7 @@ fn main() -> Result<()> { "./proto/sdk/features.mixer.proto", "./proto/sdk/gateway.proto", "./proto/cluster/gateway.proto", + "./proto/cluster/connector.proto", ], &["./proto"], )?; diff --git a/packages/protocol/proto/cluster/connector.proto b/packages/protocol/proto/cluster/connector.proto new file mode 100644 index 00000000..00bfcad8 --- /dev/null +++ b/packages/protocol/proto/cluster/connector.proto @@ -0,0 +1,243 @@ +syntax = "proto3"; + +package cluster_connector; + +import "shared.proto"; + +message ConnectorRequest { + uint64 req_id = 1; + uint64 ts = 2; + oneof event { + PeerEvent peer = 3; + } +} + +message ConnectorResponse { + message Success { + + } + + message Error { + uint32 code = 1; + string message = 2; + } + + uint64 req_id = 1; + oneof response { + Success success = 2; + Error error = 3; + } +} + +message PeerEvent { + message RouteBegin { + string remote_ip = 1; + } + + message RouteSuccess { + uint32 after_ms = 1; + uint32 dest_node = 2; + } + + 
message RouteError { + enum ErrorType { + PoolEmpty = 0; + Timeout = 1; + GatewayError = 2; + MediaError = 3; + } + + uint32 after_ms = 1; + optional uint32 dest_node = 2; + ErrorType error = 3; + } + + message Connecting { + string remote_ip = 1; + } + + message ConnectError { + enum ErrorType { + InvalidSdp = 0; + Timeout = 1; + } + + uint32 after_ms = 1; + ErrorType error = 2; + } + + message Join { + string room = 1; + string peer = 2; + } + + message Leave { + string room = 1; + string peer = 2; + } + + message Connected { + uint32 after_ms = 1; + string remote_ip = 2; + } + + message Stats { + uint64 sent_bytes = 1; + uint64 received_bytes = 2; + } + + message Reconnecting { + string remote_ip = 1; + } + + message Reconnected { + uint32 after_ms = 1; + string remote_ip = 2; + } + + message Disconnected { + enum Reason { + UserAction = 0; + Timeout = 1; + NodeShutdown = 2; + KickByAPI = 3; + } + + uint32 duration_ms = 1; + Reason reason = 2; + } + + message RemoteTrackStarted { + string track = 1; + shared.Kind kind = 2; + } + + message RemoteTrackEnded { + string track = 1; + shared.Kind kind = 2; + } + + message LocalTrack { + int32 track = 1; + shared.Kind kind = 2; + } + + message LocalTrackAttach { + int32 track = 1; + string remote_peer = 2; + string remote_track = 3; + } + + message LocalTrackDetach { + int32 track = 1; + string remote_peer = 2; + string remote_track = 3; + } + + uint64 session_id = 1; + + oneof event { + RouteBegin route_begin = 2; + RouteSuccess route_success = 3; + RouteError route_error = 4; + Connecting connecting = 5; + Connected connected = 6; + ConnectError connect_error = 7; + Stats stats = 8; + Reconnecting reconnect = 9; + Reconnected reconnected = 10; + Disconnected disconnected = 11; + Join join = 12; + Leave leave = 13; + RemoteTrackStarted remote_track_started = 14; + RemoteTrackEnded remote_track_ended = 15; + LocalTrack local_track = 16; + LocalTrackAttach local_track_attach = 17; + LocalTrackDetach local_track_detach = 18; + } +} + +message GetParams { + uint32 page = 1; + uint32 limit = 2; +} + +message GetRooms { + message RoomInfo { + int32 id = 1; + string room = 2; + } + + repeated RoomInfo rooms = 1; +} + +message GetPeerParams { + optional int32 room = 1; + uint32 page = 2; + uint32 limit = 3; +} + +message PeerSession { + int32 id = 1; + int32 peer_id = 2; + string peer = 3; + uint64 session = 4; + uint64 created_at = 5; + uint64 joined_at = 6; + optional uint64 leaved_at = 7; +} + +message GetPeers { + message PeerInfo { + int32 id = 1; + int32 room_id = 2; + string room = 3; + string peer = 4; + uint64 created_at = 5; + repeated PeerSession sessions = 6; + } + + repeated PeerInfo peers = 1; +} + +message GetSessions { + message SessionInfo { + uint64 id = 1; + optional string ip = 2; + optional string user_agent = 3; + optional string sdk = 4; + uint64 created_at = 5; + repeated PeerSession peers = 6; + } + + repeated SessionInfo sessions = 1; +} + +message GetEventParams { + optional uint64 session = 1; + optional uint64 start_ts = 2; + optional uint64 end_ts = 3; + uint32 page = 4; + uint32 limit = 5; +} + +message GetEvents { + message EventInfo { + int32 id = 1; + uint32 node = 2; + uint64 node_ts = 3; + uint64 session = 4; + uint64 created_at = 5; + string event = 6; + optional string meta = 7; + } + + repeated EventInfo events = 1; +} + +service MediaConnector { + rpc Rooms (GetParams) returns (GetRooms); + rpc Peers (GetPeerParams) returns (GetPeers); + rpc Sessions (GetParams) returns (GetSessions); + rpc Events 
(GetEventParams) returns (GetEvents);
+}
diff --git a/packages/protocol/proto/cluster/gateway.proto b/packages/protocol/proto/cluster/gateway.proto
index dcd1ca70..3e4e80c4 100644
--- a/packages/protocol/proto/cluster/gateway.proto
+++ b/packages/protocol/proto/cluster/gateway.proto
@@ -66,6 +66,7 @@ message WhipConnectRequest {
   string sdp = 3;
   string room = 4;
   string peer = 5;
+  uint64 session_id = 6;
 }
 
 message WhipConnectResponse {
@@ -97,6 +98,7 @@ message WhepConnectRequest {
   string sdp = 3;
   string room = 4;
   string peer = 5;
+  uint64 session_id = 6;
 }
 
 message WhepConnectResponse {
@@ -126,6 +128,7 @@ message WebrtcConnectRequest {
   string user_agent = 1;
   string ip = 2;
   gateway.ConnectRequest req = 3;
+  uint64 session_id = 4;
 }
 
 message WebrtcConnectResponse {
diff --git a/packages/protocol/src/cluster.rs b/packages/protocol/src/cluster.rs
index 48d762ec..77ae6231 100644
--- a/packages/protocol/src/cluster.rs
+++ b/packages/protocol/src/cluster.rs
@@ -29,3 +29,8 @@ pub enum ClusterNodeInfo {
     Media(ClusterNodeGenericInfo, ClusterMediaInfo),
     Connector(ClusterNodeGenericInfo),
 }
+
+/// Generate a global cluster session_id
+pub fn gen_cluster_session_id() -> u64 {
+    rand::random::<u64>() & 0x7FFF_FFFF_FFFF_FFFF // keep within the i64 range, since some databases reject larger values
+}
diff --git a/packages/protocol/src/connector.rs b/packages/protocol/src/connector.rs
new file mode 100644
index 00000000..4b22ffe0
--- /dev/null
+++ b/packages/protocol/src/connector.rs
@@ -0,0 +1 @@
+pub const CONNECTOR_RPC_PORT: u16 = 10003;
diff --git a/packages/protocol/src/lib.rs b/packages/protocol/src/lib.rs
index 073367b5..32e65994 100644
--- a/packages/protocol/src/lib.rs
+++ b/packages/protocol/src/lib.rs
@@ -1,4 +1,5 @@
 pub mod cluster;
+pub mod connector;
 pub mod endpoint;
 pub mod gateway;
 pub mod media;
diff --git a/packages/protocol/src/protobuf/cluster_connector.rs b/packages/protocol/src/protobuf/cluster_connector.rs
new file mode 100644
index 00000000..e37d6b19
--- /dev/null
+++ b/packages/protocol/src/protobuf/cluster_connector.rs
@@ -0,0 +1,691 @@
+// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConnectorRequest {
+    #[prost(uint64, tag = "1")]
+    pub req_id: u64,
+    #[prost(uint64, tag = "2")]
+    pub ts: u64,
+    #[prost(oneof = "connector_request::Event", tags = "3")]
+    pub event: ::core::option::Option<connector_request::Event>,
+}
+/// Nested message and enum types in `ConnectorRequest`.
+pub mod connector_request {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Event {
+        #[prost(message, tag = "3")]
+        Peer(super::PeerEvent),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConnectorResponse {
+    #[prost(uint64, tag = "1")]
+    pub req_id: u64,
+    #[prost(oneof = "connector_response::Response", tags = "2, 3")]
+    pub response: ::core::option::Option<connector_response::Response>,
+}
+/// Nested message and enum types in `ConnectorResponse`.
+pub mod connector_response { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Success {} + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Error { + #[prost(uint32, tag = "1")] + pub code: u32, + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + #[prost(message, tag = "2")] + Success(Success), + #[prost(message, tag = "3")] + Error(Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PeerEvent { + #[prost(uint64, tag = "1")] + pub session_id: u64, + #[prost( + oneof = "peer_event::Event", + tags = "2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18" + )] + pub event: ::core::option::Option, +} +/// Nested message and enum types in `PeerEvent`. +pub mod peer_event { + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RouteBegin { + #[prost(string, tag = "1")] + pub remote_ip: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RouteSuccess { + #[prost(uint32, tag = "1")] + pub after_ms: u32, + #[prost(uint32, tag = "2")] + pub dest_node: u32, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RouteError { + #[prost(uint32, tag = "1")] + pub after_ms: u32, + #[prost(uint32, optional, tag = "2")] + pub dest_node: ::core::option::Option, + #[prost(enumeration = "route_error::ErrorType", tag = "3")] + pub error: i32, + } + /// Nested message and enum types in `RouteError`. + pub mod route_error { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ErrorType { + PoolEmpty = 0, + Timeout = 1, + GatewayError = 2, + MediaError = 3, + } + impl ErrorType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ErrorType::PoolEmpty => "PoolEmpty", + ErrorType::Timeout => "Timeout", + ErrorType::GatewayError => "GatewayError", + ErrorType::MediaError => "MediaError", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PoolEmpty" => Some(Self::PoolEmpty), + "Timeout" => Some(Self::Timeout), + "GatewayError" => Some(Self::GatewayError), + "MediaError" => Some(Self::MediaError), + _ => None, + } + } + } + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Connecting { + #[prost(string, tag = "1")] + pub remote_ip: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ConnectError { + #[prost(uint32, tag = "1")] + pub after_ms: u32, + #[prost(enumeration = "connect_error::ErrorType", tag = "2")] + pub error: i32, + } + /// Nested message and enum types in `ConnectError`. + pub mod connect_error { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ErrorType { + InvalidSdp = 0, + Timeout = 1, + } + impl ErrorType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ErrorType::InvalidSdp => "InvalidSdp", + ErrorType::Timeout => "Timeout", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "InvalidSdp" => Some(Self::InvalidSdp), + "Timeout" => Some(Self::Timeout), + _ => None, + } + } + } + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Join { + #[prost(string, tag = "1")] + pub room: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub peer: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Leave { + #[prost(string, tag = "1")] + pub room: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub peer: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Connected { + #[prost(uint32, tag = "1")] + pub after_ms: u32, + #[prost(string, tag = "2")] + pub remote_ip: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Stats { + #[prost(uint64, tag = "1")] + pub sent_bytes: u64, + #[prost(uint64, tag = "2")] + pub received_bytes: u64, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Reconnecting { + #[prost(string, tag = "1")] + pub remote_ip: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Reconnected { + #[prost(uint32, tag = "1")] + pub after_ms: u32, + #[prost(string, tag = "2")] + pub remote_ip: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Disconnected { + #[prost(uint32, 
tag = "1")] + pub duration_ms: u32, + #[prost(enumeration = "disconnected::Reason", tag = "2")] + pub reason: i32, + } + /// Nested message and enum types in `Disconnected`. + pub mod disconnected { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Reason { + UserAction = 0, + Timeout = 1, + NodeShutdown = 2, + KickByApi = 3, + } + impl Reason { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Reason::UserAction => "UserAction", + Reason::Timeout => "Timeout", + Reason::NodeShutdown => "NodeShutdown", + Reason::KickByApi => "KickByAPI", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UserAction" => Some(Self::UserAction), + "Timeout" => Some(Self::Timeout), + "NodeShutdown" => Some(Self::NodeShutdown), + "KickByAPI" => Some(Self::KickByApi), + _ => None, + } + } + } + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RemoteTrackStarted { + #[prost(string, tag = "1")] + pub track: ::prost::alloc::string::String, + #[prost(enumeration = "super::super::shared::Kind", tag = "2")] + pub kind: i32, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RemoteTrackEnded { + #[prost(string, tag = "1")] + pub track: ::prost::alloc::string::String, + #[prost(enumeration = "super::super::shared::Kind", tag = "2")] + pub kind: i32, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct LocalTrack { + #[prost(int32, tag = "1")] + pub track: i32, + #[prost(enumeration = "super::super::shared::Kind", tag = "2")] + pub kind: i32, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct LocalTrackAttach { + #[prost(int32, tag = "1")] + pub track: i32, + #[prost(string, tag = "2")] + pub remote_peer: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub remote_track: ::prost::alloc::string::String, + } + #[derive(serde::Serialize)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct LocalTrackDetach { + #[prost(int32, tag = "1")] + pub track: i32, + #[prost(string, tag = "2")] + pub remote_peer: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub remote_track: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Event { + #[prost(message, tag = "2")] + RouteBegin(RouteBegin), + #[prost(message, tag = "3")] + RouteSuccess(RouteSuccess), + #[prost(message, tag = "4")] + RouteError(RouteError), + #[prost(message, tag = "5")] + Connecting(Connecting), + #[prost(message, tag = "6")] + Connected(Connected), + #[prost(message, tag = "7")] + ConnectError(ConnectError), + #[prost(message, tag = "8")] + Stats(Stats), + #[prost(message, tag = "9")] + Reconnect(Reconnecting), + #[prost(message, tag = "10")] + Reconnected(Reconnected), + 
#[prost(message, tag = "11")] + Disconnected(Disconnected), + #[prost(message, tag = "12")] + Join(Join), + #[prost(message, tag = "13")] + Leave(Leave), + #[prost(message, tag = "14")] + RemoteTrackStarted(RemoteTrackStarted), + #[prost(message, tag = "15")] + RemoteTrackEnded(RemoteTrackEnded), + #[prost(message, tag = "16")] + LocalTrack(LocalTrack), + #[prost(message, tag = "17")] + LocalTrackAttach(LocalTrackAttach), + #[prost(message, tag = "18")] + LocalTrackDetach(LocalTrackDetach), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetParams { + #[prost(uint32, tag = "1")] + pub page: u32, + #[prost(uint32, tag = "2")] + pub limit: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetRooms { + #[prost(message, repeated, tag = "1")] + pub rooms: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `GetRooms`. +pub mod get_rooms { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RoomInfo { + #[prost(int32, tag = "1")] + pub id: i32, + #[prost(string, tag = "2")] + pub room: ::prost::alloc::string::String, + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetPeerParams { + #[prost(int32, optional, tag = "1")] + pub room: ::core::option::Option, + #[prost(uint32, tag = "2")] + pub page: u32, + #[prost(uint32, tag = "3")] + pub limit: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PeerSession { + #[prost(int32, tag = "1")] + pub id: i32, + #[prost(int32, tag = "2")] + pub peer_id: i32, + #[prost(string, tag = "3")] + pub peer: ::prost::alloc::string::String, + #[prost(uint64, tag = "4")] + pub session: u64, + #[prost(uint64, tag = "5")] + pub created_at: u64, + #[prost(uint64, tag = "6")] + pub joined_at: u64, + #[prost(uint64, optional, tag = "7")] + pub leaved_at: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetPeers { + #[prost(message, repeated, tag = "1")] + pub peers: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `GetPeers`. +pub mod get_peers { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct PeerInfo { + #[prost(int32, tag = "1")] + pub id: i32, + #[prost(int32, tag = "2")] + pub room_id: i32, + #[prost(string, tag = "3")] + pub room: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub peer: ::prost::alloc::string::String, + #[prost(uint64, tag = "5")] + pub created_at: u64, + #[prost(message, repeated, tag = "6")] + pub sessions: ::prost::alloc::vec::Vec, + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetSessions { + #[prost(message, repeated, tag = "1")] + pub sessions: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `GetSessions`. 
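Editor's note (not part of the patch): each `peer_event::Event` variant above eventually surfaces through the query API as a row carrying an event name plus an optional JSON `meta` string (see `get_events::EventInfo` just below). Here is a minimal sketch of that flattening, assuming `serde_json` is available; the match arms shown are a subset and the connector's real sql_storage mapping, defined elsewhere in this patch, may differ.

```rust
use media_server_protocol::protobuf::cluster_connector::peer_event;

/// Illustrative flattening of a peer event into the (event, meta) pair exposed
/// by get_events::EventInfo; only a few variants are spelled out here.
fn event_to_row(event: &peer_event::Event) -> (&'static str, Option<String>) {
    match event {
        peer_event::Event::Connecting(p) => ("Connecting", serde_json::to_string(p).ok()),
        peer_event::Event::Connected(p) => ("Connected", serde_json::to_string(p).ok()),
        peer_event::Event::Join(p) => ("Join", serde_json::to_string(p).ok()),
        peer_event::Event::Leave(p) => ("Leave", serde_json::to_string(p).ok()),
        peer_event::Event::Disconnected(p) => ("Disconnected", serde_json::to_string(p).ok()),
        // Stats is the one variant without a serde::Serialize derive above,
        // so this sketch records it by name only.
        peer_event::Event::Stats(_) => ("Stats", None),
        // Remaining variants (route, reconnect and track events) are elided here.
        _ => ("Other", None),
    }
}
```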
+pub mod get_sessions { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct SessionInfo { + #[prost(uint64, tag = "1")] + pub id: u64, + #[prost(string, optional, tag = "2")] + pub ip: ::core::option::Option<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub user_agent: ::core::option::Option<::prost::alloc::string::String>, + #[prost(string, optional, tag = "4")] + pub sdk: ::core::option::Option<::prost::alloc::string::String>, + #[prost(uint64, tag = "5")] + pub created_at: u64, + #[prost(message, repeated, tag = "6")] + pub peers: ::prost::alloc::vec::Vec, + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetEventParams { + #[prost(uint64, optional, tag = "1")] + pub session: ::core::option::Option<u64>, + #[prost(uint64, optional, tag = "2")] + pub start_ts: ::core::option::Option<u64>, + #[prost(uint64, optional, tag = "3")] + pub end_ts: ::core::option::Option<u64>, + #[prost(uint32, tag = "4")] + pub page: u32, + #[prost(uint32, tag = "5")] + pub limit: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetEvents { + #[prost(message, repeated, tag = "1")] + pub events: ::prost::alloc::vec::Vec<get_events::EventInfo>, +} +/// Nested message and enum types in `GetEvents`. +pub mod get_events { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct EventInfo { + #[prost(int32, tag = "1")] + pub id: i32, + #[prost(uint32, tag = "2")] + pub node: u32, + #[prost(uint64, tag = "3")] + pub node_ts: u64, + #[prost(uint64, tag = "4")] + pub session: u64, + #[prost(uint64, tag = "5")] + pub created_at: u64, + #[prost(string, tag = "6")] + pub event: ::prost::alloc::string::String, + #[prost(string, optional, tag = "7")] + pub meta: ::core::option::Option<::prost::alloc::string::String>, + } +} +#[allow(async_fn_in_trait)] +pub trait MediaConnectorServiceHandler<CTX> { + async fn rooms(&self, ctx: &CTX, req: GetParams) -> Option<GetRooms>; + async fn peers(&self, ctx: &CTX, req: GetPeerParams) -> Option<GetPeers>; + async fn sessions(&self, ctx: &CTX, req: GetParams) -> Option<GetSessions>; + async fn events(&self, ctx: &CTX, req: GetEventParams) -> Option<GetEvents>; +} +pub struct MediaConnectorServiceClient< + D, + C: crate::rpc::RpcClient<D, S>, + S: crate::rpc::RpcStream, +> { + client: C, + _tmp: std::marker::PhantomData<(D, S)>, +} +impl<D, C: crate::rpc::RpcClient<D, S>, S: crate::rpc::RpcStream> Clone +for MediaConnectorServiceClient<D, C, S> { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + _tmp: Default::default(), + } + } +} +impl< + D, + C: crate::rpc::RpcClient<D, S>, + S: crate::rpc::RpcStream, +> MediaConnectorServiceClient<D, C, S> { + pub fn new(client: C) -> Self { + Self { + client, + _tmp: Default::default(), + } + } + pub async fn rooms(&self, dest: D, req: GetParams) -> Option<GetRooms> { + use prost::Message; + let mut stream = self.client.connect(dest, "rooms.service").await?; + let out_buf = req.encode_to_vec(); + stream.write(&out_buf).await?; + let in_buf = stream.read().await?; + GetRooms::decode(in_buf.as_slice()).ok() + } + pub async fn peers(&self, dest: D, req: GetPeerParams) -> Option<GetPeers> { + use prost::Message; + let mut stream = self.client.connect(dest, "peers.service").await?; + let out_buf = req.encode_to_vec(); + stream.write(&out_buf).await?; + let in_buf = stream.read().await?; + GetPeers::decode(in_buf.as_slice()).ok() + } + pub async fn sessions(&self, dest: D, req: GetParams) -> Option<GetSessions> { + use prost::Message; + let mut stream
= self.client.connect(dest, "sessions.service").await?; + let out_buf = req.encode_to_vec(); + stream.write(&out_buf).await?; + let in_buf = stream.read().await?; + GetSessions::decode(in_buf.as_slice()).ok() + } + pub async fn events(&self, dest: D, req: GetEventParams) -> Option { + use prost::Message; + let mut stream = self.client.connect(dest, "events.service").await?; + let out_buf = req.encode_to_vec(); + stream.write(&out_buf).await?; + let in_buf = stream.read().await?; + GetEvents::decode(in_buf.as_slice()).ok() + } +} +pub struct MediaConnectorServiceServer< + CTX, + H: MediaConnectorServiceHandler, + Sr: crate::rpc::RpcServer, + S: crate::rpc::RpcStream, +> { + ctx: std::sync::Arc, + handler: std::sync::Arc, + server: Sr, + _tmp: std::marker::PhantomData, +} +impl< + CTX: 'static + Clone, + H: 'static + MediaConnectorServiceHandler, + Sr: crate::rpc::RpcServer, + S: 'static + crate::rpc::RpcStream, +> MediaConnectorServiceServer { + pub fn new(server: Sr, ctx: CTX, handler: H) -> Self { + Self { + ctx: std::sync::Arc::new(ctx), + handler: std::sync::Arc::new(handler), + server, + _tmp: Default::default(), + } + } + pub async fn run(&mut self) { + let local = tokio::task::LocalSet::new(); + local + .run_until(async move { + self.run_local().await; + }) + .await; + } + async fn run_local(&mut self) { + use prost::Message; + while let Some((domain, mut stream)) = self.server.accept().await { + let ctx = self.ctx.clone(); + let handler = self.handler.clone(); + match domain.as_str() { + "rooms.service" => { + tokio::task::spawn_local(async move { + if let Some(in_buf) = stream.read().await { + if let Ok(req) = GetParams::decode(in_buf.as_slice()) { + if let Some(res) = handler.rooms(&ctx, req).await { + let out_buf = res.encode_to_vec(); + stream.write(&out_buf).await; + stream.close().await; + } + } + } + }); + } + "peers.service" => { + tokio::task::spawn_local(async move { + if let Some(in_buf) = stream.read().await { + if let Ok(req) = GetPeerParams::decode(in_buf.as_slice()) { + if let Some(res) = handler.peers(&ctx, req).await { + let out_buf = res.encode_to_vec(); + stream.write(&out_buf).await; + stream.close().await; + } + } + } + }); + } + "sessions.service" => { + tokio::task::spawn_local(async move { + if let Some(in_buf) = stream.read().await { + if let Ok(req) = GetParams::decode(in_buf.as_slice()) { + if let Some(res) = handler.sessions(&ctx, req).await { + let out_buf = res.encode_to_vec(); + stream.write(&out_buf).await; + stream.close().await; + } + } + } + }); + } + "events.service" => { + tokio::task::spawn_local(async move { + if let Some(in_buf) = stream.read().await { + if let Ok(req) = GetEventParams::decode(in_buf.as_slice()) { + if let Some(res) = handler.events(&ctx, req).await { + let out_buf = res.encode_to_vec(); + stream.write(&out_buf).await; + stream.close().await; + } + } + } + }); + } + _ => {} + } + } + } +} diff --git a/packages/protocol/src/protobuf/cluster_gateway.rs b/packages/protocol/src/protobuf/cluster_gateway.rs index a776ee70..84ba3e91 100644 --- a/packages/protocol/src/protobuf/cluster_gateway.rs +++ b/packages/protocol/src/protobuf/cluster_gateway.rs @@ -88,6 +88,8 @@ pub struct WhipConnectRequest { pub room: ::prost::alloc::string::String, #[prost(string, tag = "5")] pub peer: ::prost::alloc::string::String, + #[prost(uint64, tag = "6")] + pub session_id: u64, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -137,6 +139,8 @@ pub struct WhepConnectRequest { pub room: 
::prost::alloc::string::String, #[prost(string, tag = "5")] pub peer: ::prost::alloc::string::String, + #[prost(uint64, tag = "6")] + pub session_id: u64, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -182,6 +186,8 @@ pub struct WebrtcConnectRequest { pub ip: ::prost::alloc::string::String, #[prost(message, optional, tag = "3")] pub req: ::core::option::Option, + #[prost(uint64, tag = "4")] + pub session_id: u64, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/packages/protocol/src/protobuf/mod.rs b/packages/protocol/src/protobuf/mod.rs index 53962482..8df7ad5b 100644 --- a/packages/protocol/src/protobuf/mod.rs +++ b/packages/protocol/src/protobuf/mod.rs @@ -1,4 +1,7 @@ // This file is @generated by prost-build. +pub mod cluster_connector { + include!("cluster_connector.rs"); +} pub mod cluster_gateway { include!("cluster_gateway.rs"); } diff --git a/packages/protocol/src/rpc/quinn.rs b/packages/protocol/src/rpc/quinn.rs index ab7fdeb6..a8f8c5a5 100644 --- a/packages/protocol/src/rpc/quinn.rs +++ b/packages/protocol/src/rpc/quinn.rs @@ -122,6 +122,9 @@ impl RpcStream for QuinnStream { let len = self.recv.read_u32().await.ok()?; self.buf_goal = Some(len as usize); log::debug!("Got frame len {}", len); + if len == 0 { + return Some(vec![]); + } } } } diff --git a/packages/protocol/src/transport/webrtc.rs b/packages/protocol/src/transport/webrtc.rs index c7512bf6..eaea1aac 100644 --- a/packages/protocol/src/transport/webrtc.rs +++ b/packages/protocol/src/transport/webrtc.rs @@ -6,7 +6,7 @@ use crate::protobuf::gateway::{ConnectRequest, ConnectResponse, RemoteIceRequest #[derive(Debug, Clone)] pub enum RpcReq { /// Ip, Agent, Req - Connect(IpAddr, String, ConnectRequest), + Connect(u64, IpAddr, String, ConnectRequest), RemoteIce(Conn, RemoteIceRequest), RestartIce(Conn, IpAddr, String, ConnectRequest), Delete(Conn), @@ -15,7 +15,7 @@ pub enum RpcReq { impl RpcReq { pub fn down(self) -> (RpcReq, Option) { match self { - RpcReq::Connect(ip_addr, user_agent, req) => (RpcReq::Connect(ip_addr, user_agent, req), None), + RpcReq::Connect(session_id, ip_addr, user_agent, req) => (RpcReq::Connect(session_id, ip_addr, user_agent, req), None), RpcReq::RemoteIce(conn, req) => { let (down, layer) = conn.down(); (RpcReq::RemoteIce(down, req), Some(layer)) diff --git a/packages/protocol/src/transport/whep.rs b/packages/protocol/src/transport/whep.rs index 00f1328c..3660e2c9 100644 --- a/packages/protocol/src/transport/whep.rs +++ b/packages/protocol/src/transport/whep.rs @@ -9,6 +9,7 @@ use super::{ConnLayer, RpcResult}; #[derive(Debug, Clone)] pub struct WhepConnectReq { + pub session_id: u64, pub sdp: String, pub room: RoomId, pub peer: PeerId, @@ -95,6 +96,7 @@ impl TryFrom for WhepConnectReq { type Error = (); fn try_from(value: protobuf::cluster_gateway::WhepConnectRequest) -> Result { Ok(Self { + session_id: value.session_id, sdp: value.sdp, room: value.room.into(), peer: value.peer.into(), @@ -107,6 +109,7 @@ impl TryFrom for WhepConnectReq { impl From for protobuf::cluster_gateway::WhepConnectRequest { fn from(val: WhepConnectReq) -> Self { protobuf::cluster_gateway::WhepConnectRequest { + session_id: val.session_id, user_agent: val.user_agent, ip: val.ip.to_string(), sdp: val.sdp, diff --git a/packages/protocol/src/transport/whip.rs b/packages/protocol/src/transport/whip.rs index 1953f28b..65d9e06d 100644 --- a/packages/protocol/src/transport/whip.rs +++ 
b/packages/protocol/src/transport/whip.rs @@ -9,6 +9,7 @@ use super::{ConnLayer, RpcResult}; #[derive(Debug, Clone)] pub struct WhipConnectReq { + pub session_id: u64, pub sdp: String, pub room: RoomId, pub peer: PeerId, @@ -95,6 +96,7 @@ impl TryFrom for WhipConnectReq { type Error = (); fn try_from(value: protobuf::cluster_gateway::WhipConnectRequest) -> Result { Ok(Self { + session_id: value.session_id, sdp: value.sdp, room: value.room.into(), peer: value.peer.into(), @@ -107,6 +109,7 @@ impl TryFrom for WhipConnectReq { impl From for protobuf::cluster_gateway::WhipConnectRequest { fn from(val: WhipConnectReq) -> Self { protobuf::cluster_gateway::WhipConnectRequest { + session_id: val.session_id, user_agent: val.user_agent, ip: val.ip.to_string(), sdp: val.sdp, diff --git a/packages/transport_webrtc/src/transport.rs b/packages/transport_webrtc/src/transport.rs index a5344632..22673de6 100644 --- a/packages/transport_webrtc/src/transport.rs +++ b/packages/transport_webrtc/src/transport.rs @@ -45,7 +45,7 @@ mod whip; pub enum VariantParams { Whip(RoomId, PeerId), Whep(RoomId, PeerId), - Webrtc(IpAddr, String, ConnectRequest, Arc), + Webrtc(String, ConnectRequest, Arc), } #[derive(Debug, PartialEq, Eq)] @@ -112,7 +112,7 @@ pub struct TransportWebrtc { } impl TransportWebrtc { - pub fn new(variant: VariantParams, offer: &str, dtls_cert: DtlsCert, local_addrs: Vec<(SocketAddr, usize)>, rtc_ice_lite: bool) -> RpcResult<(Self, String, String)> { + pub fn new(remote: IpAddr, variant: VariantParams, offer: &str, dtls_cert: DtlsCert, local_addrs: Vec<(SocketAddr, usize)>, rtc_ice_lite: bool) -> RpcResult<(Self, String, String)> { let offer = SdpOffer::from_sdp_string(offer).map_err(|_e| RpcError::new2(WebrtcError::InvalidSdp))?; let rtc_config = Rtc::builder() .set_rtp_mode(true) @@ -133,9 +133,9 @@ impl TransportWebrtc { let mut rtc = rtc_config.build(); let mut internal: Box = match variant { - VariantParams::Whip(room, peer) => Box::new(whip::TransportWebrtcWhip::new(room, peer)), - VariantParams::Whep(room, peer) => Box::new(whep::TransportWebrtcWhep::new(room, peer)), - VariantParams::Webrtc(_ip, _user_agent, req, secure) => { + VariantParams::Whip(room, peer) => Box::new(whip::TransportWebrtcWhip::new(room, peer, remote)), + VariantParams::Whep(room, peer) => Box::new(whep::TransportWebrtcWhep::new(room, peer, remote)), + VariantParams::Webrtc(_user_agent, req, secure) => { rtc.direct_api().create_data_channel(ChannelConfig { label: "data".to_string(), negotiated: Some(1000), @@ -144,7 +144,7 @@ impl TransportWebrtc { //we need to start sctp as client side for handling restart-ice in new server //if not, datachannel will not connect successful after reconnect to new server rtc.direct_api().start_sctp(true); - Box::new(webrtc::TransportWebrtcSdk::new(req, secure)) + Box::new(webrtc::TransportWebrtcSdk::new(req, secure, remote)) } }; diff --git a/packages/transport_webrtc/src/transport/webrtc.rs b/packages/transport_webrtc/src/transport/webrtc.rs index 36366f0d..5193a097 100644 --- a/packages/transport_webrtc/src/transport/webrtc.rs +++ b/packages/transport_webrtc/src/transport/webrtc.rs @@ -1,4 +1,5 @@ use std::{ + net::IpAddr, sync::Arc, time::{Duration, Instant}, }; @@ -72,6 +73,7 @@ enum TransportWebrtcError { } pub struct TransportWebrtcSdk { + remote: IpAddr, join: Option<(RoomId, PeerId, Option, RoomInfoPublish, RoomInfoSubscribe)>, state: State, queue: DynamicDeque, @@ -86,12 +88,13 @@ pub struct TransportWebrtcSdk { } impl TransportWebrtcSdk { - pub fn new(req: ConnectRequest, 
secure: Arc) -> Self { + pub fn new(req: ConnectRequest, secure: Arc, remote: IpAddr) -> Self { let tracks = req.tracks.unwrap_or_default(); let local_tracks: Vec = tracks.receivers.into_iter().enumerate().map(|(index, r)| LocalTrack::new((index as u16).into(), r)).collect(); let remote_tracks: Vec = tracks.senders.into_iter().enumerate().map(|(index, s)| RemoteTrack::new((index as u16).into(), s)).collect(); if let Some(j) = req.join { Self { + remote, join: Some((j.room.into(), j.peer.into(), j.metadata, j.publish.unwrap_or_default().into(), j.subscribe.unwrap_or_default().into())), state: State::New, audio_mixer: j.features.and_then(|f| { @@ -119,6 +122,7 @@ impl TransportWebrtcSdk { } } else { Self { + remote, join: None, state: State::New, local_tracks, @@ -190,7 +194,7 @@ impl TransportWebrtcInternal for TransportWebrtcSdk { State::New => { self.state = State::Connecting { at: now }; self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting(self.remote))))); } State::Connecting { at } => { if now - *at >= Duration::from_secs(TIMEOUT_SEC) { @@ -422,7 +426,7 @@ impl TransportWebrtcInternal for TransportWebrtcSdk { self.channel = Some(channel); log::info!("[TransportWebrtcSdk] channel {name} opened, join state {:?}", self.join); self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(self.remote))))); //TODO get paired ip from webrtc if let Some((room, peer, metadata, publish, subscribe)) = &self.join { self.queue.push_back(InternalOutput::TransportOutput(TransportOutput::RpcReq( 0.into(), @@ -534,8 +538,9 @@ impl TransportWebrtcSdk { if let State::Reconnecting { at } = &self.state { log::info!("[TransportWebrtcSdk] switched to reconnected after {:?}", now - *at); self.state = State::Connected; + //TODO get paired ip self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))) + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(self.remote))))); } } IceConnectionState::Disconnected => { @@ -543,7 +548,9 @@ impl TransportWebrtcSdk { self.state = State::Reconnecting { at: now }; log::info!("[TransportWebrtcSdk] switched to reconnecting"); self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Reconnecting)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Reconnecting( + self.remote, + ))))); } } } @@ -784,7 +791,11 @@ impl TransportWebrtcSdk { #[cfg(test)] mod tests { - use std::{sync::Arc, time::Instant}; + use std::{ + net::{IpAddr, Ipv4Addr}, + sync::Arc, + time::Instant, + }; use media_server_core::{ endpoint::EndpointReq, @@ -831,14 +842,21 @@ mod tests { let channel_id = create_channel_id(); let now = Instant::now(); + let ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let secure_jwt = Arc::new(MediaEdgeSecureJwt::from(b"1234".as_slice())); - let mut transport = TransportWebrtcSdk::new(req, secure_jwt.clone()); + let mut transport = TransportWebrtcSdk::new(req, secure_jwt.clone(), ip); assert_eq!(transport.pop_output(now), None); + transport.on_tick(now); + 
assert_eq!( + transport.pop_output(now), + Some(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting(ip))))) + ); + transport.on_str0m_event(now, str0m::Event::ChannelOpen(channel_id, "data".to_string())); assert_eq!( transport.pop_output(now), - Some(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))) + Some(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(ip))))) ); assert_eq!( transport.pop_output(now), @@ -866,15 +884,22 @@ mod tests { let channel_id = create_channel_id(); let now = Instant::now(); + let ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let gateway_jwt = MediaGatewaySecureJwt::from(b"1234".as_slice()); let secure_jwt = Arc::new(MediaEdgeSecureJwt::from(b"1234".as_slice())); - let mut transport = TransportWebrtcSdk::new(req, secure_jwt.clone()); + let mut transport = TransportWebrtcSdk::new(req, secure_jwt.clone(), ip); assert_eq!(transport.pop_output(now), None); + transport.on_tick(now); + assert_eq!( + transport.pop_output(now), + Some(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting(ip))))) + ); + transport.on_str0m_event(now, str0m::Event::ChannelOpen(channel_id, "data".to_string())); assert_eq!( transport.pop_output(now), - Some(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))) + Some(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(ip))))) ); assert_eq!(transport.pop_output(now), None); diff --git a/packages/transport_webrtc/src/transport/whep.rs b/packages/transport_webrtc/src/transport/whep.rs index efa229ca..cab826dd 100644 --- a/packages/transport_webrtc/src/transport/whep.rs +++ b/packages/transport_webrtc/src/transport/whep.rs @@ -1,5 +1,6 @@ use std::{ collections::VecDeque, + net::IpAddr, time::{Duration, Instant}, }; @@ -48,6 +49,7 @@ struct SubscribeStreams { } pub struct TransportWebrtcWhep { + remote: IpAddr, room: RoomId, peer: PeerId, state: State, @@ -61,8 +63,9 @@ pub struct TransportWebrtcWhep { } impl TransportWebrtcWhep { - pub fn new(room: RoomId, peer: PeerId) -> Self { + pub fn new(room: RoomId, peer: PeerId, remote: IpAddr) -> Self { Self { + remote, room, peer, state: State::New, @@ -89,7 +92,7 @@ impl TransportWebrtcInternal for TransportWebrtcWhep { State::New => { self.state = State::Connecting { at: now }; self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting(self.remote))))); } State::Connecting { at } => { if now - *at >= Duration::from_secs(TIMEOUT_SEC) { @@ -178,7 +181,7 @@ impl TransportWebrtcInternal for TransportWebrtcWhep { ), ))); self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(self.remote))))); } Str0mEvent::IceConnectionStateChange(state) => self.on_str0m_state(now, state), Str0mEvent::MediaAdded(media) => self.on_str0m_media_added(now, media), @@ -232,7 +235,7 @@ impl TransportWebrtcWhep { log::info!("[TransportWebrtcWhep] switched to reconnected after {:?}", now - *at); self.state = State::Connected; self.queue - 
.push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))) + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(self.remote))))) } } IceConnectionState::Disconnected => { @@ -240,7 +243,9 @@ impl TransportWebrtcWhep { self.state = State::Reconnecting { at: now }; log::info!("[TransportWebrtcWhep] switched to reconnecting"); self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Reconnecting)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Reconnecting( + self.remote, + ))))); } } } diff --git a/packages/transport_webrtc/src/transport/whip.rs b/packages/transport_webrtc/src/transport/whip.rs index 807d9f6a..fd88c816 100644 --- a/packages/transport_webrtc/src/transport/whip.rs +++ b/packages/transport_webrtc/src/transport/whip.rs @@ -1,5 +1,6 @@ use std::{ collections::VecDeque, + net::IpAddr, time::{Duration, Instant}, }; @@ -44,6 +45,7 @@ enum TransportWebrtcError { } pub struct TransportWebrtcWhip { + remote: IpAddr, room: RoomId, peer: PeerId, state: State, @@ -55,8 +57,9 @@ pub struct TransportWebrtcWhip { } impl TransportWebrtcWhip { - pub fn new(room: RoomId, peer: PeerId) -> Self { + pub fn new(room: RoomId, peer: PeerId, remote: IpAddr) -> Self { Self { + remote, room, peer, state: State::New, @@ -78,7 +81,7 @@ impl TransportWebrtcInternal for TransportWebrtcWhip { State::New => { self.state = State::Connecting { at: now }; self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connecting(self.remote))))); } State::Connecting { at } => { if now - *at >= Duration::from_secs(TIMEOUT_SEC) { @@ -144,7 +147,7 @@ impl TransportWebrtcInternal for TransportWebrtcWhip { self.state = State::Connected; log::info!("[TransportWebrtcWhip] connected"); self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(self.remote))))); self.queue.push_back(InternalOutput::TransportOutput(TransportOutput::RpcReq( 0.into(), EndpointReq::JoinRoom( @@ -214,7 +217,7 @@ impl TransportWebrtcWhip { log::info!("[TransportWebrtcWhip] switched to reconnected after {:?}", now - *at); self.state = State::Connected; self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Connected(self.remote))))); } } IceConnectionState::Disconnected => { @@ -222,7 +225,9 @@ impl TransportWebrtcWhip { self.state = State::Reconnecting { at: now }; log::info!("[TransportWebrtcWhip] switched to reconnecting"); self.queue - .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Reconnecting)))); + .push_back(InternalOutput::TransportOutput(TransportOutput::Event(TransportEvent::State(TransportState::Reconnecting( + self.remote, + ))))); } } } diff --git a/packages/transport_webrtc/src/worker.rs b/packages/transport_webrtc/src/worker.rs index 8b0052ae..d1f8dd76 100644 --- 
a/packages/transport_webrtc/src/worker.rs +++ b/packages/transport_webrtc/src/worker.rs @@ -1,10 +1,19 @@ -use std::{collections::VecDeque, net::SocketAddr, sync::Arc, time::Instant}; +use std::{ + collections::VecDeque, + net::{IpAddr, SocketAddr}, + sync::Arc, + time::Instant, +}; use media_server_core::{ cluster::{ClusterEndpointControl, ClusterEndpointEvent, ClusterRoomHash}, endpoint::{Endpoint, EndpointCfg, EndpointInput, EndpointOutput}, }; -use media_server_protocol::transport::{RpcError, RpcResult}; +use media_server_protocol::{ + cluster::gen_cluster_session_id, + protobuf::cluster_connector::peer_event, + transport::{RpcError, RpcResult}, +}; use media_server_secure::MediaEdgeSecure; use sans_io_runtime::{ backend::{BackendIncoming, BackendOutgoing}, @@ -31,6 +40,7 @@ pub enum GroupInput { pub enum GroupOutput { Net(BackendOutgoing), Cluster(WebrtcSession, ClusterRoomHash, ClusterEndpointControl), + PeerEvent(WebrtcSession, u64, Instant, peer_event::Event), Ext(WebrtcSession, ExtOut), Shutdown(WebrtcSession), Continue, @@ -60,7 +70,7 @@ impl MediaWorkerWebrtc { } } - pub fn spawn(&mut self, variant: VariantParams, offer: &str) -> RpcResult<(bool, String, usize)> { + pub fn spawn(&mut self, remote: IpAddr, session_id: u64, variant: VariantParams, offer: &str) -> RpcResult<(bool, String, usize)> { let cfg = match &variant { VariantParams::Whip(_, _) => EndpointCfg { max_ingress_bitrate: 2_500_000, @@ -70,13 +80,13 @@ impl MediaWorkerWebrtc { max_ingress_bitrate: 2_500_000, max_egress_bitrate: 2_500_000, }, - VariantParams::Webrtc(_, _, _, _) => EndpointCfg { + VariantParams::Webrtc(_, _, _) => EndpointCfg { max_ingress_bitrate: 2_500_000, max_egress_bitrate: 2_500_000, }, }; - let (tran, ufrag, sdp) = TransportWebrtc::new(variant, offer, self.dtls_cert.clone(), self.addrs.clone(), self.ice_lite)?; - let endpoint = Endpoint::new(cfg, tran); + let (tran, ufrag, sdp) = TransportWebrtc::new(remote, variant, offer, self.dtls_cert.clone(), self.addrs.clone(), self.ice_lite)?; + let endpoint = Endpoint::new(session_id, cfg, tran); let index = self.endpoints.add_task(endpoint); log::info!("[TransportWebrtc] create endpoint {index}"); self.shared_port.add_ufrag(ufrag, index); @@ -87,6 +97,7 @@ impl MediaWorkerWebrtc { match out { EndpointOutput::Net(net) => GroupOutput::Net(net), EndpointOutput::Cluster(room, control) => GroupOutput::Cluster(WebrtcSession(index), room, control), + EndpointOutput::PeerEvent(session_id, ts, event) => GroupOutput::PeerEvent(WebrtcSession(index), session_id, ts, event), EndpointOutput::Destroy => { log::info!("[TransportWebrtc] destroy endpoint {index}"); self.endpoints.remove_task(index); @@ -134,7 +145,8 @@ impl MediaWorkerWebrtc { } ExtIn::RestartIce(req_id, variant, remote, useragent, req) => { let sdp = req.sdp.clone(); - if let Ok((ice_lite, sdp, index)) = self.spawn(VariantParams::Webrtc(remote, useragent, req, self.secure.clone()), &sdp) { + let session_id = gen_cluster_session_id(); //TODO need to reuse old session_id + if let Ok((ice_lite, sdp, index)) = self.spawn(remote, session_id, VariantParams::Webrtc(useragent, req, self.secure.clone()), &sdp) { self.queue.push_back(GroupOutput::Ext(index.into(), ExtOut::RestartIce(req_id, variant, Ok((ice_lite, sdp))))); } else { self.queue