diff --git a/.github/workflows/fly-deploy-benchrunner.yml b/.github/workflows/fly-deploy-benchrunner.yml new file mode 100644 index 00000000..f5b0cce1 --- /dev/null +++ b/.github/workflows/fly-deploy-benchrunner.yml @@ -0,0 +1,23 @@ +name: Deploy benchrunner to Fly + +on: + push: + tags: + - 'production/benchrunner-*' + - 'experimental/benchrunner-*' + +env: + FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Setup Fly + uses: superfly/flyctl-actions/setup-flyctl@master + + - name: Deploy solana-lite-rpc-benchrunner + run: flyctl deploy -c cd/solana-lite-rpc-benchrunner.toml --remote-only diff --git a/Cargo.lock b/Cargo.lock index 4e0811c4..d46b09dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +checksum = "72832d73be48bac96a5d7944568f305d829ed55b0ce3b483647089dfaf6cf704" dependencies = [ "cfg-if", "getrandom 0.2.12", @@ -89,9 +89,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -433,18 +433,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] name = "async-trait" -version = "0.1.78" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "461abc97219de0eaaf81fe3ef974a540158f3d079c2ab200f891f1a2ef201e85" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -614,9 +614,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -819,7 +819,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -830,9 +830,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cap" @@ -952,7 +952,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 
2.0.53", + "syn 2.0.55", ] [[package]] @@ -1245,7 +1245,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1256,7 +1256,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1411,7 +1411,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1434,7 +1434,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1522,7 +1522,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1568,9 +1568,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "feature-probe" @@ -1686,7 +1686,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1779,7 +1779,7 @@ dependencies = [ [[package]] name = "geyser-grpc-connector" version = "0.10.1+yellowstone.1.12" -source = "git+https://github.com/blockworks-foundation/geyser-grpc-connector.git?branch=v1.13.0+solana.1.17.25#74c6cb759874ffc8efb3eddb144a8b02e28a3fae" +source = "git+https://github.com/blockworks-foundation/geyser-grpc-connector.git?tag=v0.10.3+yellowstone.1.12+solana.1.17.15-hacked-windowsize3#ae56e0f5f894933bea046e8f220f74df3eab5355" dependencies = [ "anyhow", "async-stream", @@ -1792,6 +1792,7 @@ dependencies = [ "merge-streams", "solana-sdk", "tokio", + "tonic-health", "tracing", "url", "yellowstone-grpc-client", @@ -1873,7 +1874,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.5", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -1910,7 +1911,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.4", ] [[package]] @@ -2169,9 +2170,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -2239,6 +2240,26 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +[[package]] +name = "jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +dependencies = [ + "jemalloc-sys", + "libc", +] + [[package]] name = "jobserver" version = "0.1.28" @@ -2460,7 +2481,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "libc", "redox_syscall", ] @@ -2553,6 +2574,7 @@ dependencies = [ "futures-util", "hyper", "itertools 0.10.5", + "jemallocator", "jsonrpsee", "lazy_static", "log", @@ -2852,7 +2874,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2942,7 +2964,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2954,7 +2976,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2999,7 +3021,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -3016,7 +3038,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3133,7 +3155,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.5", + "indexmap 2.2.6", ] [[package]] @@ -3171,7 +3193,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3227,6 +3249,18 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +[[package]] +name = "postgres-derive" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83145eba741b050ef981a9a1838c843fa7665e154383325aa8b440ae703180a2" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.55", +] + [[package]] name = "postgres-native-tls" version = "0.5.0" @@ -3267,7 +3301,10 @@ dependencies = [ "bytes", "chrono", "fallible-iterator", + "postgres-derive", "postgres-protocol", + "serde", + "serde_json", ] [[package]] @@ -3284,12 +3321,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3371,7 +3408,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.53", + "syn 2.0.55", "tempfile", "which", ] @@ -3386,7 +3423,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3430,7 +3467,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3578,9 
+3615,9 @@ checksum = "e6e97ca3dbabd81e6033cfe09f0cef37c89f34f2a9223cab7cf99305d46a9633" [[package]] name = "rayon" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -3642,9 +3679,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -3686,9 +3723,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.26" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "async-compression", "base64 0.21.7", @@ -3819,11 +3856,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -3917,7 +3954,7 @@ checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3991,7 +4028,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -4036,7 +4073,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -4166,9 +4203,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" @@ -4198,9 +4235,9 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b6faa78521915895cbe52f62c5ba29e0962fff976271ec983a68a6e6b9f6a" +checksum = "22ea4bedfcc8686ae6d01a3d8288f5b9746cd00ec63f0ce9a6415849d35add50" dependencies = [ "Inflector", "base64 0.21.7", @@ -4223,9 +4260,9 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00cfecfcf82ce18010a93fca9b1225c8fc2aed50b39687b83131e115477987b3" +checksum = "d89cd9fd1668735eab8c83407bde1d073a35c0486c0d9f1afc9ef75b9b726f94" dependencies = [ "bincode", "bytemuck", @@ -4244,9 +4281,9 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4b76f277d3c922d15ffcb30e0aab0919fe5691017278038a5d0935481607fc9" +checksum = "eba77d79b1202853954c7a8cc4261bc50b39f99872d09fd6bbd22373df161171" dependencies = [ "chrono", "clap 2.34.0", @@ -4261,16 +4298,16 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab357a45351eeab99539eed5af638af9d577a32fdefa25baf2504fccfb97cab9" +checksum = "6948f1741ea197c04a989510b6810e1593a694848d54f9a128dc15a840484c1f" dependencies = [ "async-trait", "bincode", "dashmap 4.0.2", "futures", "futures-util", - "indexmap 2.2.5", + "indexmap 2.2.6", "indicatif", "log", "quinn", @@ -4294,9 +4331,9 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677b61fe38df5db47589d6d09085baf8792006a268447b8fe0542b462a127f2f" +checksum = "8de23cd0dd8673f4590e90bfa47ff19eb629f4b7dc15a3fb173a62d932801d07" dependencies = [ "bincode", "chrono", @@ -4308,15 +4345,15 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a3f6a921263f29e0a7f808fe9659a3fc7e6dfbd42ce4811ca95436a95aee89b" +checksum = "9300b6a51c990fdd16918a522258c384582ad63e2fadcfb9ad1574e4b315e937" dependencies = [ "async-trait", "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.5", + "indexmap 2.2.6", "log", "rand 0.8.5", "rayon", @@ -4330,11 +4367,11 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de577bb681dfc3afeda6247dbc381f8c74a31eeed141883e6a9a36e93fdcf784" +checksum = "4090f2ac64149ce1fbabd5277f41e278edc1f38121927fe8f6355e67ead3e199" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.4", "blake3", "block-buffer 0.10.4", "bs58", @@ -4360,14 +4397,14 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6373184605334be54d85564b657e7b4d88bdf4e3c011abccce4fd2712c96caf" +checksum = "765bcdc1ecc31ea5d3d7ddb680ffa6645809c122b4ffdc223b161850e6ba352b" dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -4492,6 +4529,37 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-lite-rpc-benchrunner-service" +version = "0.2.4" +dependencies = [ + "anyhow", + "async-trait", + "bench", + "chrono", + "clap 4.5.3", + "futures", + "futures-util", + "itertools 0.10.5", + "lazy_static", + "log", + "native-tls", + "postgres-native-tls", + "postgres-types", + "prometheus", + "serde", + "serde_json", + "solana-lite-rpc-util", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-transaction-status", + "tokio", + "tokio-postgres", + "tokio-util", + "tracing-subscriber", +] + [[package]] name = "solana-lite-rpc-blockstore" version = "0.2.4" @@ -4564,6 +4632,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", + "tonic-health", "tracing", "yellowstone-grpc-client", "yellowstone-grpc-proto", @@ -4775,9 +4844,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6959774302d4407c77d5fbdd4d5e31c2696f5ac1c74bf0cdcac704b474bc6fd" +checksum 
= "9c7f3cad088bc5f00569cb5b4c3aaba8d935f8f7cc25c91cc0c55a8a7de2b137" dependencies = [ "env_logger", "lazy_static", @@ -4786,9 +4855,9 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9327e70f9cb17094077531449f7487677c4d380bd99b9494dca85af5ea5f5e19" +checksum = "2de5041d16120852c0deea047c024e1fad8819e49041491f0cca6c91c243fd5d" dependencies = [ "log", "solana-sdk", @@ -4796,9 +4865,9 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5e3931823a9bdaee5d65d27195804127911578abddaddda3025f6af6647c08" +checksum = "2fd6f25f0076b6eb873f7e2a85e53191ac2affe6782131be1a2867d057307e20" dependencies = [ "crossbeam-channel", "gethostname", @@ -4811,9 +4880,9 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fed17001119742b35ce06b18823b4901313860c5dd495e32bccf070424b2947" +checksum = "12ff6114e678f321b3d421288dc12311e6e5ca5b72eadd962d4239276b3d66d2" dependencies = [ "bincode", "clap 3.2.25", @@ -4833,11 +4902,11 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2a6d77ead9c96aa2a84afd0662d01c55abec61b514d8c9e2dc5627a1d74c7b" +checksum = "34b28f2db62b93cb04b56d610ac8736ee4fb89b4a030c55935b646b7212b6556" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.4", "bincode", "bv", "caps", @@ -4862,9 +4931,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd3bcc37b433d7e8d45236a0f5aa68df462c4d5c6a709a6efd916988ce3ac08" +checksum = "c1141d1dffbe68852128f7bbcc3c43a5d2cb715ecffeeb64eb81bb93cbaf80bb" dependencies = [ "ark-bn254", "ark-ec", @@ -4872,7 +4941,7 @@ dependencies = [ "ark-serialize", "base64 0.21.7", "bincode", - "bitflags 2.4.2", + "bitflags 2.5.0", "blake3", "borsh 0.10.3", "borsh 0.9.3", @@ -4916,9 +4985,9 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618fb4dc2238daa2737805659c4ad380fb61dd27a40cfd14e63d890d1e4335b0" +checksum = "942de577a2865cec28fc174575c9bd6cf7af815832af67fe40ca856075550998" dependencies = [ "base64 0.21.7", "bincode", @@ -4944,9 +5013,9 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0daf3d4daa67ef7550fe1a4b88dad32164263c9b7b7f0477caacae6b0220ceb5" +checksum = "a7ba8af6bfcb21abdde0a9fc2bade99e5e7c8fadab7f113ac7bceb408296ae26" dependencies = [ "crossbeam-channel", "futures-util", @@ -4969,9 +5038,9 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f1c28340a1845d18ab85f96c57f283a588745f4f334a77a0cc7a13a6f2eae4" +checksum = "460b3265aec34d9a746bec2b64e2fb2890d7af694c81a494aceddeb9fb77182c" dependencies = [ "async-mutex", "async-trait", @@ -4996,9 +5065,9 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.17.25" +version = "1.17.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c8f657c79b681bd49b4106242890225ba0df190cc83109394a1fcc8e3c54819" +checksum = "dda21485597afb5edb4080b854d0e2a7d2d6a8e05b0f1f0abf8cb855b44b9b6c" dependencies = [ "lazy_static", "num_cpus", @@ -5006,9 +5075,9 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d8bc9c75495fcf301a85bda5c921213dbff9dc8d6e7708c74eaa9d06e8e395" +checksum = "b2b32421f5ce524405db85b3227e57386a245ac75fbf7e0a2407b2e35c789968" dependencies = [ "console", "dialoguer", @@ -5025,9 +5094,9 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f050027aff888d96c5a659dc164998d6ec25aadde649f1474d2cbb73b2a72de8" +checksum = "09220386ed831960f32119e6b87100588269712e764e4838e01f78dc11b1ec1d" dependencies = [ "async-trait", "base64 0.21.7", @@ -5051,9 +5120,9 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a273e98835985e15e3774267af94a0631b1e27ae187b242e7fbacf1c1fad29fb" +checksum = "b2ef8ffa33755c1e648be2c7433404a275a34b951787770970b77bc682ff12c5" dependencies = [ "base64 0.21.7", "bs58", @@ -5073,9 +5142,9 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458fc8f9d14e42d397e7867288fdff47de6ca9f949b1f11217e807a65bf17c43" +checksum = "f46040b32f0e8b8e215caa7d43e2b879e1c1e2cade205bc3edfb3dcfa0632ac5" dependencies = [ "clap 2.34.0", "solana-clap-utils", @@ -5086,14 +5155,14 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1de78b8c4fa09e4b90d720b2aa3ef3c80c4b956aa3d14616261a7f4bdf64c04" +checksum = "278a95acb99377dd4585599fdbec23d0a6fcb94ec78285283723fdd365fe885e" dependencies = [ "assert_matches", "base64 0.21.7", "bincode", - "bitflags 2.4.2", + "bitflags 2.5.0", "borsh 0.10.3", "bs58", "bytemuck", @@ -5140,15 +5209,15 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5055c4b785cf3e5f2f52d687bdd1a795755105fe4365182396bc8b6bb41cd5" +checksum = "92dbaf563210f61828800f2a3d8c188fa2afede91920d364982e280318db2eb5" dependencies = [ "bs58", "proc-macro2", "quote", "rustversion", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -5159,16 +5228,16 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-streamer" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a31fb5a63f80318a5b03148d9132e3bb1f2125e0bebe9bedfc095d1b16753c" +checksum = "c8315b493d109f89842dcc9df02b4444beb182b810e3256f69a777c6f2a8147e" dependencies = [ "async-channel", "bytes", "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.2.5", + "indexmap 2.2.6", "itertools 0.10.5", "libc", "log", @@ -5191,9 +5260,9 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4b19c4e6d850b0b8598f84513b4b5cdcc36d095df1b99725704c087d4df7e9eb" +checksum = "3db2f09ad842ede0c354a2e870ed8d7660aa9d85eabbff2ced6fc7b8ba409a90" dependencies = [ "bincode", "log", @@ -5206,14 +5275,14 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e6cb310a96dad1a34e7cacd9344800206df21c5f891459240621c96e13c6ee" +checksum = "9263b1c6647258a5c36a54b8d79e1c366180b1eda65150b0cafb7b469c68f5e1" dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.5", + "indexmap 2.2.6", "indicatif", "log", "rayon", @@ -5230,9 +5299,9 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd87fd7b4164cb7cbe047e6376e9585668923ed8072ea32b7e878f25c90fd056" +checksum = "5e2031070cba17802f7108b53f6db01b82cdfb0360b0a8b9d51c584f2e9dd9e4" dependencies = [ "Inflector", "base64 0.21.7", @@ -5255,9 +5324,9 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4886959ef4094af0d9ceda93dc468fa7088f46b22cc7d0c8c18086389e8d63e7" +checksum = "829c54d9706b8da0fe81b5b79c4291c4a7d366c16b3e19bcfb44125e1cc4741b" dependencies = [ "async-trait", "solana-connection-cache", @@ -5270,9 +5339,9 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310500e993127ea009a2c41daf2e004d436d3041cdee6673112804c574a41eda" +checksum = "c1dff08def0cc14d1ab26916ffdc11a1456620d6590c43c569c08a98fac91f7d" dependencies = [ "log", "rustc_version", @@ -5286,9 +5355,9 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04079fab6e48794ec194c9bb44422672fa66594b9979dfc97d62c1556434ee7a" +checksum = "c144aee890e2e62bcfab7079d0a5c5795b84c5cd4cbe904c64b8cd868b131a05" dependencies = [ "bincode", "log", @@ -5308,9 +5377,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.17.25" +version = "1.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e0b222c3aad3df370ae87e993b32419906f00827a1677b7c814c65c9682909" +checksum = "ef26fb44734aa940e6648bbbeead677edc68c7e1ec09128e5f16a8924c389a38" dependencies = [ "aes-gcm-siv", "base64 0.21.7", @@ -5394,9 +5463,9 @@ dependencies = [ [[package]] name = "spl-discriminator" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa600f2fe56f32e923261719bae640d873edadbc5237681a39b8e37bfd4d263" +checksum = "cce5d563b58ef1bb2cdbbfe0dfb9ffdc24903b10ae6a4df2d8f425ece375033f" dependencies = [ "bytemuck", "solana-program", @@ -5411,7 +5480,7 @@ checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -5423,7 +5492,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.53", + "syn 2.0.55", "thiserror", ] @@ -5447,9 +5516,9 @@ dependencies = [ [[package]] name = "spl-pod" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85a5db7e4efb1107b0b8e52a13f035437cdcb36ef99c58f6d467f089d9b2915a" +checksum = "2881dddfca792737c0706fa0175345ab282b1b0879c7d877bad129645737c079" dependencies = [ "borsh 0.10.3", "bytemuck", @@ -5460,9 +5529,9 @@ dependencies = [ [[package]] name = "spl-program-error" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0657b6490196971d9e729520ba934911ff41fbb2cb9004463dbe23cf8b4b4f" +checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" dependencies = [ "num-derive 0.4.2", "num-traits 0.2.18", @@ -5480,14 +5549,14 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] name = "spl-tlv-account-resolution" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f335787add7fa711819f9e7c573f8145a5358a709446fe2d24bf2a88117c90" +checksum = "615d381f48ddd2bb3c57c7f7fb207591a2a05054639b18a62e785117dd7a8683" dependencies = [ "bytemuck", "solana-program", @@ -5581,9 +5650,9 @@ dependencies = [ [[package]] name = "spl-type-length-value" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f9ebd75d29c5f48de5f6a9c114e08531030b75b8ac2c557600ac7da0b73b1e8" +checksum = "a468e6f6371f9c69aae760186ea9f1a01c2908351b06a5e0026d21cfc4d7ecac" dependencies = [ "bytemuck", "solana-program", @@ -5640,9 +5709,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.53" +version = "2.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" +checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" dependencies = [ "proc-macro2", "quote", @@ -5741,7 +5810,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -5856,7 +5925,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -5968,7 +6037,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow", ] @@ -5979,7 +6048,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow", ] @@ -6025,7 +6094,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -6069,7 +6138,7 @@ checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.5.0", "bytes", "futures-core", "futures-util", @@ -6123,7 +6192,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -6306,9 +6375,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom 0.2.12", ] @@ -6391,7 +6460,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", "wasm-bindgen-shared", ] @@ -6425,7 +6494,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6704,8 +6773,8 @@ dependencies = [ [[package]] name = "yellowstone-grpc-client" -version = "1.14.0+solana.1.17.25" -source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.13.0+solana.1.17.25#50ac737dd9dd2f288e3bbed662dcfd310c44591c" +version = "1.13.0+solana.1.17.15" +source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.12.0+solana.1.17.15#c7b72cc8781c2dc48e4a7c94e411f95df495cf2f" dependencies = [ "bytes", "futures", @@ -6718,8 +6787,8 @@ dependencies = [ [[package]] name = "yellowstone-grpc-proto" -version = "1.13.0+solana.1.17.25" -source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.13.0+solana.1.17.25#50ac737dd9dd2f288e3bbed662dcfd310c44591c" +version = "1.12.0+solana.1.17.15" +source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.12.0+solana.1.17.15#c7b72cc8781c2dc48e4a7c94e411f95df495cf2f" dependencies = [ "anyhow", "bincode", @@ -6749,7 +6818,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -6769,7 +6838,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 0657e857..9cc87b93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "blockstore", "prioritization_fees", "bench", + "benchrunner-service", "address-lookup-tables", "accounts", "accounts-on-demand", @@ -27,20 +28,19 @@ license = "AGPL" edition = "2021" [workspace.dependencies] -solana-sdk = "~1.17.25" -solana-rpc-client = "~1.17.25" -solana-rpc-client-api = "~1.17.25" -solana-transaction-status = "~1.17.25" -solana-version = "~1.17.25" -solana-client = "~1.17.25" -solana-net-utils = "~1.17.25" -solana-pubsub-client = "~1.17.25" -solana-streamer = "~1.17.25" -solana-account-decoder = "~1.17.25" -solana-ledger = "~1.17.25" -solana-program = "~1.17.25" -solana-address-lookup-table-program = "~1.17.25" - +solana-sdk = "~1.17.15" +solana-rpc-client = "~1.17.15" +solana-rpc-client-api = "~1.17.15" +solana-transaction-status = "~1.17.15" +solana-version = "~1.17.15" +solana-client = "~1.17.15" +solana-net-utils = "~1.17.15" +solana-pubsub-client = "~1.17.15" +solana-streamer = "~1.17.15" +solana-account-decoder = "~1.17.15" +solana-ledger = "~1.17.15" +solana-program = "~1.17.15" +solana-address-lookup-table-program = "~1.17.15" itertools = "0.10.5" rangetools = "0.1.4" serde = { version = "1.0.160", features = ["derive"] } @@ -69,6 +69,7 @@ lazy_static = "1.4.0" dotenv = "0.15.0" async-channel = "1.8.0" merge-streams = "0.1.2" +jemallocator = "0.5" quinn = "0.10.2" quinn-proto = "0.10.5" @@ -84,7 +85,9 @@ solana-lite-rpc-prioritization-fees = {path = "prioritization_fees", version="0. 
solana-lite-rpc-address-lookup-tables = {path = "address-lookup-tables", version="0.2.4"} solana-lite-rpc-accounts = {path = "accounts", version = "0.2.4"} solana-lite-rpc-accounts-on-demand = {path = "accounts-on-demand", version = "0.2.4"} +bench = { path = "bench", version="0.2.4" } async-trait = "0.1.68" -yellowstone-grpc-client = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.13.0+solana.1.17.25" } -yellowstone-grpc-proto = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.13.0+solana.1.17.25" } +yellowstone-grpc-client = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.12.0+solana.1.17.15" } +yellowstone-grpc-proto = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.12.0+solana.1.17.15" } +tonic-health = "0.10" diff --git a/Dockerfile b/Dockerfile index c4cabeb5..bfcd6d79 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,7 @@ FROM base as build COPY --from=plan /app/recipe.json recipe.json RUN cargo chef cook --release --recipe-path recipe.json COPY . . +ENV RUSTFLAGS="--cfg tokio_unstable" RUN cargo build --release --bin lite-rpc --bin solana-lite-rpc-quic-forward-proxy FROM debian:bookworm-slim as run @@ -21,4 +22,4 @@ RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-de COPY --from=build /app/target/release/solana-lite-rpc-quic-forward-proxy /usr/local/bin/ COPY --from=build /app/target/release/lite-rpc /usr/local/bin/ -CMD lite-rpc \ No newline at end of file +CMD lite-rpc diff --git a/Dockerfile-benchrunner b/Dockerfile-benchrunner new file mode 100644 index 00000000..927fe634 --- /dev/null +++ b/Dockerfile-benchrunner @@ -0,0 +1,31 @@ +# syntax = docker/dockerfile:1.2 +FROM rust:1.75.0 as base +RUN cargo install cargo-chef@0.1.62 --locked +RUN rustup component add rustfmt +RUN apt-get update && apt-get install -y clang cmake ssh +WORKDIR /app + +FROM base AS plan +COPY . . +WORKDIR /app +RUN cargo chef prepare --recipe-path recipe.json + +FROM base as build +COPY --from=plan /app/recipe.json recipe.json +RUN cargo chef cook --release --recipe-path recipe.json +COPY . . 
+RUN cargo build --release --bin solana-lite-rpc-benchrunner-service + +FROM debian:bookworm-slim as run +RUN apt-get update && apt-get -y install ca-certificates libc6 libssl3 libssl-dev openssl + +COPY openssl-legacy.cnf /etc/ssl/openssl-legacy.cnf + +COPY --from=build /app/target/release/solana-lite-rpc-benchrunner-service /usr/local/bin/ + +ENV OPENSSL_CONF=/etc/ssl/openssl-legacy.cnf + +CMD solana-lite-rpc-benchrunner-service \ + --bench-interval 600000 \ + --tx-count 100 \ + --prio-fees 100000 --prio-fees 1000 --prio-fees 0 diff --git a/accounts-on-demand/src/accounts_on_demand.rs b/accounts-on-demand/src/accounts_on_demand.rs index 7d50531c..0370a574 100644 --- a/accounts-on-demand/src/accounts_on_demand.rs +++ b/accounts-on-demand/src/accounts_on_demand.rs @@ -1,7 +1,8 @@ -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc, time::Duration}; use async_trait::async_trait; use dashmap::DashSet; +use futures::lock::Mutex; use itertools::Itertools; use prometheus::{opts, register_int_gauge, IntGauge}; use solana_client::{ @@ -9,7 +10,9 @@ use solana_client::{ rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, rpc_filter::RpcFilterType, }; -use solana_lite_rpc_accounts::account_store_interface::AccountStorageInterface; +use solana_lite_rpc_accounts::account_store_interface::{ + AccountLoadingError, AccountStorageInterface, +}; use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::GrpcSourceConfig; use solana_lite_rpc_core::{ commitment_utils::Commitment, @@ -19,7 +22,7 @@ use solana_lite_rpc_core::{ }, }; use solana_sdk::{clock::Slot, pubkey::Pubkey}; -use tokio::sync::{broadcast::Sender, RwLock}; +use tokio::sync::{broadcast::Sender, Notify, RwLock}; use crate::subscription_manager::SubscriptionManger; @@ -31,12 +34,15 @@ lazy_static::lazy_static! { register_int_gauge!(opts!("literpc_number_of_program_filters_on_demand", "Number of program filters on demand")).unwrap(); } +const RETRY_FETCHING_ACCOUNT: usize = 10; + pub struct AccountsOnDemand { rpc_client: Arc, accounts_storage: Arc, accounts_subscribed: Arc>, program_filters: Arc>, subscription_manager: SubscriptionManger, + accounts_in_loading: Arc>>>, } impl AccountsOnDemand { @@ -56,6 +62,7 @@ impl AccountsOnDemand { accounts_storage, account_notification_sender, ), + accounts_in_loading: Arc::new(Mutex::new(HashMap::new())), } } @@ -102,52 +109,99 @@ impl AccountStorageInterface for AccountsOnDemand { .await } - async fn get_account(&self, account_pk: Pubkey, commitment: Commitment) -> Option { + async fn get_account( + &self, + account_pk: Pubkey, + commitment: Commitment, + ) -> Result, AccountLoadingError> { match self .accounts_storage .get_account(account_pk, commitment) - .await + .await? { - Some(account_data) => Some(account_data), + Some(account_data) => Ok(Some(account_data)), None => { // account does not exist in account store // first check if we have already subscribed to the required account // This is to avoid resetting geyser subscription because of accounts that do not exists. 
- if !self.accounts_subscribed.contains(&account_pk) { - // get account from rpc and create its subscription - self.accounts_subscribed.insert(account_pk); - self.refresh_subscription().await; - let account_response = self - .rpc_client - .get_account_with_commitment( - &account_pk, - commitment.into_commiment_config(), + let mut lk = self.accounts_in_loading.lock().await; + match lk.get(&account_pk).cloned() { + Some(loading_account) => { + drop(lk); + match tokio::time::timeout( + Duration::from_secs(10), + loading_account.notified(), ) - .await; - if let Ok(response) = account_response { - match response.value { - Some(account) => { - // update account in storage and return the account data - let account_data = AccountData { - pubkey: account_pk, - account: Arc::new(account), - updated_slot: response.context.slot, - }; + .await + { + Ok(_) => { self.accounts_storage - .update_account(account_data.clone(), commitment) + .get_account(account_pk, commitment) + .await + } + Err(_timeout) => Err(AccountLoadingError::OperationTimeOut), + } + } + None => { + // account is not loading + if self.accounts_subscribed.contains(&account_pk) { + // account was already tried to be loaded but does not exists + Ok(None) + } else { + // update account loading map + // create a notify for accounts under loading + lk.insert(account_pk, Arc::new(Notify::new())); + self.accounts_subscribed.insert(account_pk); + drop(lk); + self.refresh_subscription().await; + let mut return_value = None; + for _ in 0..RETRY_FETCHING_ACCOUNT { + let account_response = self + .rpc_client + .get_account_with_commitment( + &account_pk, + commitment.into_commiment_config(), + ) .await; - Some(account_data) + match account_response { + Ok(response) => { + if let Some(account) = response.value { + // update account in storage and return the account data + let account_data = AccountData { + pubkey: account_pk, + account: Arc::new(account), + updated_slot: response.context.slot, + }; + self.accounts_storage + .update_account(account_data.clone(), commitment) + .await; + return_value = Some(account_data); + break; + } else { + // account does not exist + break; + } + } + Err(e) => { + log::error!( + "Error fetching account {} {e:?}", + account_pk.to_string() + ); + } + } } - // account does not exist - None => None, + // update loading lock + { + let mut write_lock = self.accounts_in_loading.lock().await; + let notify = write_lock.remove(&account_pk); + drop(write_lock); + if let Some(notify) = notify { + notify.notify_waiters(); + } + } + Ok(return_value) } - } else { - // issue getting account, will then be updated by geyser - None } - } else { - // we have already subscribed to the account and it does not exist - None } } } diff --git a/accounts-on-demand/src/subscription_manager.rs b/accounts-on-demand/src/subscription_manager.rs index 0bd7ac81..908b6472 100644 --- a/accounts-on-demand/src/subscription_manager.rs +++ b/accounts-on-demand/src/subscription_manager.rs @@ -1,33 +1,22 @@ -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; -use futures::StreamExt; use itertools::Itertools; -use merge_streams::MergeStreams; use prometheus::{opts, register_int_gauge, IntGauge}; use solana_lite_rpc_accounts::account_store_interface::AccountStorageInterface; -use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::GrpcSourceConfig; +use solana_lite_rpc_cluster_endpoints::{ + geyser_grpc_connector::GrpcSourceConfig, + 
grpc::grpc_accounts_streaming::start_account_streaming_tasks, +}; use solana_lite_rpc_core::{ - commitment_utils::Commitment, structures::{ - account_data::{AccountData, AccountNotificationMessage, AccountStream}, - account_filter::{AccountFilterType, AccountFilters, MemcmpFilterData}, + account_data::{AccountNotificationMessage, AccountStream}, + account_filter::AccountFilters, }, AnyhowJoinHandle, }; -use solana_sdk::{account::Account, pubkey::Pubkey}; use tokio::sync::{ broadcast::{self, Sender}, - watch, Notify, -}; -use yellowstone_grpc_proto::geyser::{ - subscribe_request_filter_accounts_filter::Filter, - subscribe_request_filter_accounts_filter_memcmp::Data, subscribe_update::UpdateOneof, - SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterAccountsFilter, - SubscribeRequestFilterAccountsFilterMemcmp, + watch, }; lazy_static::lazy_static! { @@ -89,212 +78,6 @@ impl SubscriptionManger { } } -pub fn start_account_streaming_task( - grpc_config: GrpcSourceConfig, - accounts_filters: AccountFilters, - account_stream_sx: broadcast::Sender, - has_started: Arc, -) -> AnyhowJoinHandle { - tokio::spawn(async move { - 'main_loop: loop { - let processed_commitment = yellowstone_grpc_proto::geyser::CommitmentLevel::Processed; - - let mut subscribe_programs: HashMap = - HashMap::new(); - - let mut accounts_to_subscribe = HashSet::new(); - - for (index, accounts_filter) in accounts_filters.iter().enumerate() { - if !accounts_filter.accounts.is_empty() { - accounts_filter.accounts.iter().for_each(|account| { - accounts_to_subscribe.insert(account.clone()); - }); - } - if let Some(program_id) = &accounts_filter.program_id { - let filters = if let Some(filters) = &accounts_filter.filters { - filters - .iter() - .map(|filter| match filter { - AccountFilterType::Datasize(size) => { - SubscribeRequestFilterAccountsFilter { - filter: Some(Filter::Datasize(*size)), - } - } - AccountFilterType::Memcmp(memcmp) => { - SubscribeRequestFilterAccountsFilter { - filter: Some(Filter::Memcmp( - SubscribeRequestFilterAccountsFilterMemcmp { - offset: memcmp.offset, - data: Some(match &memcmp.data { - MemcmpFilterData::Bytes(bytes) => { - Data::Bytes(bytes.clone()) - } - MemcmpFilterData::Base58(data) => { - Data::Base58(data.clone()) - } - MemcmpFilterData::Base64(data) => { - Data::Base64(data.clone()) - } - }), - }, - )), - } - } - AccountFilterType::TokenAccountState => { - SubscribeRequestFilterAccountsFilter { - filter: Some(Filter::TokenAccountState(false)), - } - } - }) - .collect_vec() - } else { - vec![] - }; - subscribe_programs.insert( - format!("program_accounts_on_demand_{}", index), - SubscribeRequestFilterAccounts { - account: vec![], - owner: vec![program_id.clone()], - filters, - }, - ); - } - } - - let program_subscribe_request = SubscribeRequest { - accounts: subscribe_programs, - slots: Default::default(), - transactions: Default::default(), - blocks: Default::default(), - blocks_meta: Default::default(), - entry: Default::default(), - commitment: Some(processed_commitment.into()), - accounts_data_slice: Default::default(), - ping: None, - }; - - log::info!( - "Accounts on demand subscribing to {}", - grpc_config.grpc_addr - ); - let Ok(mut client) = yellowstone_grpc_client::GeyserGrpcClient::connect( - grpc_config.grpc_addr.clone(), - grpc_config.grpc_x_token.clone(), - None, - ) else { - // problem connecting to grpc, retry after a sec - tokio::time::sleep(Duration::from_secs(1)).await; - continue; - }; - - let Ok(account_stream) = 
client.subscribe_once2(program_subscribe_request).await else { - // problem subscribing to geyser stream, retry after a sec - tokio::time::sleep(Duration::from_secs(1)).await; - continue; - }; - - // each account subscription batch will require individual stream - let mut subscriptions = vec![account_stream]; - let mut index = 0; - for accounts_chunk in accounts_to_subscribe.iter().collect_vec().chunks(100) { - let mut accounts_subscription: HashMap = - HashMap::new(); - index += 1; - accounts_subscription.insert( - format!("account_sub_{}", index), - SubscribeRequestFilterAccounts { - account: accounts_chunk - .iter() - .map(|acc| (*acc).clone()) - .collect_vec(), - owner: vec![], - filters: vec![], - }, - ); - let mut client = yellowstone_grpc_client::GeyserGrpcClient::connect( - grpc_config.grpc_addr.clone(), - grpc_config.grpc_x_token.clone(), - None, - ) - .unwrap(); - - let account_request = SubscribeRequest { - accounts: accounts_subscription, - slots: Default::default(), - transactions: Default::default(), - blocks: Default::default(), - blocks_meta: Default::default(), - entry: Default::default(), - commitment: Some(processed_commitment.into()), - accounts_data_slice: Default::default(), - ping: None, - }; - - let account_stream = client.subscribe_once2(account_request).await.unwrap(); - subscriptions.push(account_stream); - } - let mut merged_stream = subscriptions.merge(); - - while let Some(message) = merged_stream.next().await { - let message = match message { - Ok(message) => message, - Err(status) => { - log::error!("Account on demand grpc error : {}", status.message()); - continue; - } - }; - let Some(update) = message.update_oneof else { - continue; - }; - - has_started.notify_one(); - - match update { - UpdateOneof::Account(account) => { - if let Some(account_data) = account.account { - let account_pk_bytes: [u8; 32] = account_data - .pubkey - .try_into() - .expect("Pubkey should be 32 byte long"); - let owner: [u8; 32] = account_data - .owner - .try_into() - .expect("owner pubkey should be deserializable"); - let notification = AccountNotificationMessage { - data: AccountData { - pubkey: Pubkey::new_from_array(account_pk_bytes), - account: Arc::new(Account { - lamports: account_data.lamports, - data: account_data.data, - owner: Pubkey::new_from_array(owner), - executable: account_data.executable, - rent_epoch: account_data.rent_epoch, - }), - updated_slot: account.slot, - }, - // TODO update with processed commitment / check above - commitment: Commitment::Processed, - }; - if account_stream_sx.send(notification).is_err() { - // non recoverable, i.e the whole stream is being restarted - log::error!("Account stream broken, breaking from main loop"); - break 'main_loop; - } - } - } - UpdateOneof::Ping(_) => { - log::trace!("GRPC Ping accounts stream"); - } - _ => { - log::error!("GRPC accounts steam misconfigured"); - } - }; - } - } - Ok(()) - }) -} - pub fn create_grpc_account_streaming_tasks( grpc_sources: Vec, mut account_filter_watch: watch::Receiver, @@ -318,7 +101,7 @@ pub fn create_grpc_account_streaming_tasks( let mut current_tasks = grpc_sources .iter() .map(|grpc_config| { - start_account_streaming_task( + start_account_streaming_tasks( grpc_config.clone(), accounts_filters.clone(), account_sender.clone(), @@ -338,7 +121,7 @@ pub fn create_grpc_account_streaming_tasks( let new_tasks = grpc_sources .iter() .map(|grpc_config| { - start_account_streaming_task( + start_account_streaming_tasks( grpc_config.clone(), accounts_filters.clone(), account_sender.clone(), 
diff --git a/accounts/src/account_service.rs b/accounts/src/account_service.rs index 7b9ab410..e7531881 100644 --- a/accounts/src/account_service.rs +++ b/accounts/src/account_service.rs @@ -4,13 +4,13 @@ use anyhow::bail; use itertools::Itertools; use prometheus::{opts, register_int_gauge, IntGauge}; use solana_account_decoder::{UiAccount, UiDataSliceConfig}; +use solana_lite_rpc_core::types::BlockInfoStream; use solana_lite_rpc_core::{ commitment_utils::Commitment, structures::{ account_data::{AccountData, AccountNotificationMessage, AccountStream}, account_filter::AccountFilters, }, - types::BlockStream, AnyhowJoinHandle, }; use solana_rpc_client::nonblocking::rpc_client::RpcClient; @@ -21,7 +21,7 @@ use solana_rpc_client_api::{ use solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey, slot_history::Slot}; use tokio::sync::broadcast::Sender; -use crate::account_store_interface::AccountStorageInterface; +use crate::account_store_interface::{AccountLoadingError, AccountStorageInterface}; lazy_static::lazy_static! { static ref ACCOUNT_UPDATES: IntGauge = @@ -151,7 +151,7 @@ impl AccountService { pub fn process_account_stream( &self, mut account_stream: AccountStream, - mut block_stream: BlockStream, + mut blockinfo_stream: BlockInfoStream, ) -> Vec { let this = self.clone(); let processed_task = tokio::spawn(async move { @@ -187,19 +187,19 @@ impl AccountService { let this = self.clone(); let block_processing_task = tokio::spawn(async move { loop { - match block_stream.recv().await { - Ok(block_notification) => { - if block_notification.commitment_config.is_processed() { + match blockinfo_stream.recv().await { + Ok(block_info) => { + if block_info.commitment_config.is_processed() { // processed commitment is not processed in this loop continue; } - let commitment = Commitment::from(block_notification.commitment_config); + let commitment = Commitment::from(block_info.commitment_config); let updated_accounts = this .account_store - .process_slot_data(block_notification.slot, commitment) + .process_slot_data(block_info.slot, commitment) .await; - if block_notification.commitment_config.is_finalized() { + if block_info.commitment_config.is_finalized() { ACCOUNT_UPDATES_FINALIZED.add(updated_accounts.len() as i64) } else { ACCOUNT_UPDATES_CONFIRMED.add(updated_accounts.len() as i64); @@ -250,7 +250,7 @@ impl AccountService { &self, account: Pubkey, config: Option, - ) -> anyhow::Result<(Slot, Option)> { + ) -> Result<(Slot, Option), AccountLoadingError> { GET_ACCOUNT_CALLED.inc(); let commitment = config .as_ref() @@ -259,7 +259,7 @@ impl AccountService { let commitment = Commitment::from(commitment); - if let Some(account_data) = self.account_store.get_account(account, commitment).await { + if let Some(account_data) = self.account_store.get_account(account, commitment).await? 
{ // if minimum context slot is not satisfied return Null let minimum_context_slot = config .as_ref() @@ -273,10 +273,7 @@ impl AccountService { Ok((account_data.updated_slot, None)) } } else { - bail!( - "Account {} does not satisfy any configured filters", - account.to_string() - ) + Err(AccountLoadingError::ConfigDoesnotContainRequiredFilters) } } diff --git a/accounts/src/account_store_interface.rs b/accounts/src/account_store_interface.rs index 8574edde..fa64d60b 100644 --- a/accounts/src/account_store_interface.rs +++ b/accounts/src/account_store_interface.rs @@ -5,6 +5,13 @@ use solana_rpc_client_api::filter::RpcFilterType; use solana_sdk::pubkey::Pubkey; use solana_sdk::slot_history::Slot; +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum AccountLoadingError { + AccountNotFound, + ConfigDoesnotContainRequiredFilters, + OperationTimeOut, +} + #[async_trait] pub trait AccountStorageInterface: Send + Sync { // Update account and return true if the account was sucessfylly updated @@ -12,7 +19,11 @@ pub trait AccountStorageInterface: Send + Sync { async fn initilize_or_update_account(&self, account_data: AccountData); - async fn get_account(&self, account_pk: Pubkey, commitment: Commitment) -> Option; + async fn get_account( + &self, + account_pk: Pubkey, + commitment: Commitment, + ) -> Result, AccountLoadingError>; async fn get_program_accounts( &self, diff --git a/accounts/src/inmemory_account_store.rs b/accounts/src/inmemory_account_store.rs index a80852a4..0e534ea1 100644 --- a/accounts/src/inmemory_account_store.rs +++ b/accounts/src/inmemory_account_store.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, sync::Arc}; -use crate::account_store_interface::AccountStorageInterface; +use crate::account_store_interface::{AccountLoadingError, AccountStorageInterface}; use async_trait::async_trait; use dashmap::{DashMap, DashSet}; use itertools::Itertools; @@ -313,11 +313,15 @@ impl AccountStorageInterface for InmemoryAccountStore { } } - async fn get_account(&self, account_pk: Pubkey, commitment: Commitment) -> Option { + async fn get_account( + &self, + account_pk: Pubkey, + commitment: Commitment, + ) -> Result, AccountLoadingError> { if let Some(account_by_commitment) = self.account_store.get(&account_pk) { - account_by_commitment.get_account_data(commitment).clone() + Ok(account_by_commitment.get_account_data(commitment).clone()) } else { - None + Ok(None) } } @@ -331,7 +335,7 @@ impl AccountStorageInterface for InmemoryAccountStore { let mut return_vec = vec![]; for program_account in program_accounts.iter() { let account_data = self.get_account(*program_account, commitment).await; - if let Some(account_data) = account_data { + if let Ok(Some(account_data)) = account_data { // recheck program owner and filters if account_data.account.owner.eq(&program_pubkey) { match &account_filters { @@ -483,28 +487,28 @@ mod tests { assert_eq!( store.get_account(pk1, Commitment::Processed).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Confirmed).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); assert_eq!( store.get_account(pk2, Commitment::Processed).await, - Some(account_data_1.clone()) + Ok(Some(account_data_1.clone())) ); assert_eq!( store.get_account(pk2, Commitment::Confirmed).await, - Some(account_data_1.clone()) + Ok(Some(account_data_1.clone())) ); 
assert_eq!( store.get_account(pk2, Commitment::Finalized).await, - Some(account_data_1.clone()) + Ok(Some(account_data_1.clone())) ); let account_data_2 = create_random_account(&mut rng, 1, pk1, program); @@ -527,60 +531,60 @@ mod tests { assert_eq!( store.get_account(pk1, Commitment::Processed).await, - Some(account_data_5.clone()) + Ok(Some(account_data_5.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Confirmed).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); store.process_slot_data(1, Commitment::Confirmed).await; assert_eq!( store.get_account(pk1, Commitment::Processed).await, - Some(account_data_5.clone()) + Ok(Some(account_data_5.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Confirmed).await, - Some(account_data_2.clone()) + Ok(Some(account_data_2.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); store.process_slot_data(2, Commitment::Confirmed).await; assert_eq!( store.get_account(pk1, Commitment::Processed).await, - Some(account_data_5.clone()) + Ok(Some(account_data_5.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Confirmed).await, - Some(account_data_3.clone()) + Ok(Some(account_data_3.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(account_data_0.clone()) + Ok(Some(account_data_0.clone())) ); store.process_slot_data(1, Commitment::Finalized).await; assert_eq!( store.get_account(pk1, Commitment::Processed).await, - Some(account_data_5.clone()) + Ok(Some(account_data_5.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Confirmed).await, - Some(account_data_3.clone()) + Ok(Some(account_data_3.clone())) ); assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(account_data_2.clone()) + Ok(Some(account_data_2.clone())) ); } @@ -690,7 +694,7 @@ mod tests { assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(last_account.clone()), + Ok(Some(last_account.clone())), ); // check finalizing previous commitment does not affect @@ -698,7 +702,7 @@ mod tests { assert_eq!( store.get_account(pk1, Commitment::Finalized).await, - Some(last_account), + Ok(Some(last_account)), ); } diff --git a/bench/src/bench1.rs b/bench/src/bench1.rs new file mode 100644 index 00000000..cc4deb44 --- /dev/null +++ b/bench/src/bench1.rs @@ -0,0 +1,166 @@ +use crate::{helpers::BenchHelper, metrics::Metric, metrics::TxMetricData}; +use dashmap::DashMap; +use log::warn; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::hash::Hash; +use solana_sdk::signature::Keypair; +use solana_sdk::signature::Signature; +use solana_sdk::slot_history::Slot; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; +use tokio::{ + sync::{mpsc::UnboundedSender, RwLock}, + time::{Duration, Instant}, +}; + +#[derive(Clone, Debug, Copy)] +struct TxSendData { + sent_duration: Duration, + sent_instant: Instant, + sent_slot: Slot, + transaction_bytes: u64, +} + +struct ApiCallerResult { + gross_send_time: Duration, +} + +// called by benchrunner-service +#[allow(clippy::too_many_arguments)] +pub async fn bench( + rpc_client: Arc, + tx_count: usize, + funded_payer: Keypair, + seed: u64, + block_hash: Arc>, + current_slot: Arc, + tx_metric_sx: UnboundedSender, + log_txs: bool, + transaction_size: TransactionSize, + 
cu_price_micro_lamports: u64, +) -> Metric { + let map_of_txs: Arc> = Arc::new(DashMap::new()); + // transaction sender task + let api_caller_result = { + let map_of_txs = map_of_txs.clone(); + let rpc_client = rpc_client.clone(); + let current_slot = current_slot.clone(); + tokio::spawn(async move { + let map_of_txs = map_of_txs.clone(); + let n_chars = match transaction_size { + TransactionSize::Small => 10, + TransactionSize::Large => 232, // 565 is max but we need to lower that to not burn the CUs + }; + let rand_strings = BenchHelper::generate_random_strings(tx_count, Some(seed), n_chars); + + let bench_start_time = Instant::now(); + + for rand_string in &rand_strings { + let blockhash = { *block_hash.read().await }; + let tx = match transaction_size { + TransactionSize::Small => BenchHelper::create_memo_tx_small( + rand_string, + &funded_payer, + blockhash, + cu_price_micro_lamports, + ), + TransactionSize::Large => BenchHelper::create_memo_tx_large( + rand_string, + &funded_payer, + blockhash, + cu_price_micro_lamports, + ), + }; + let start_time = Instant::now(); + match rpc_client.send_transaction(&tx).await { + Ok(signature) => { + map_of_txs.insert( + signature, + TxSendData { + sent_duration: start_time.elapsed(), + sent_instant: Instant::now(), + sent_slot: current_slot.load(std::sync::atomic::Ordering::Relaxed), + transaction_bytes: bincode::serialized_size(&tx).unwrap(), + }, + ); + } + Err(e) => { + warn!("tx send failed with error {}", e); + } + } + } + ApiCallerResult { + gross_send_time: bench_start_time.elapsed(), + } + }) + }; + + let mut metric = Metric::default(); + let confirmation_time = Instant::now(); + let mut confirmed_count = 0; + while confirmation_time.elapsed() < Duration::from_secs(60) + && !(map_of_txs.is_empty() && confirmed_count == tx_count) + { + let signatures = map_of_txs.iter().map(|x| *x.key()).collect::>(); + if signatures.is_empty() { + tokio::time::sleep(Duration::from_millis(1)).await; + continue; + } + + if let Ok(res) = rpc_client.get_signature_statuses(&signatures).await { + for (i, signature) in signatures.iter().enumerate() { + let tx_status = &res.value[i]; + if tx_status.is_some() { + let tx_data = map_of_txs.get(signature).unwrap(); + let time_to_confirm = tx_data.sent_instant.elapsed(); + let transaction_bytes = tx_data.transaction_bytes; + metric.add_successful_transaction( + tx_data.sent_duration, + time_to_confirm, + transaction_bytes, + ); + + if log_txs { + let _ = tx_metric_sx.send(TxMetricData { + signature: signature.to_string(), + sent_slot: tx_data.sent_slot, + confirmed_slot: current_slot.load(Ordering::Relaxed), + time_to_send_in_millis: tx_data.sent_duration.as_millis() as u64, + time_to_confirm_in_millis: time_to_confirm.as_millis() as u64, + }); + } + drop(tx_data); + map_of_txs.remove(signature); + confirmed_count += 1; + } + } + } + } + + for tx in map_of_txs.iter() { + metric.add_unsuccessful_transaction(tx.sent_duration, tx.transaction_bytes); + } + + let api_caller_result = api_caller_result + .await + .expect("api caller task must succeed"); + + metric + .set_total_gross_send_time(api_caller_result.gross_send_time.as_micros() as f64 / 1_000.0); + + metric.finalize(); + metric +} + +// see https://spl.solana.com/memo for sizing of transactions +// As of v1.5.1, an unsigned instruction can support single-byte UTF-8 of up to 566 bytes. +// An instruction with a simple memo of 32 bytes can support up to 12 signers. 
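The `cu_price_micro_lamports` parameter threaded through `bench` above is passed straight into `ComputeBudgetInstruction::set_compute_unit_price`, i.e. it is a price per compute unit in micro-lamports. A rough back-of-the-envelope sketch of the resulting per-transaction cost follows; the helper function is purely illustrative, and the 14_000 CU figure is the limit requested by the small-memo helper elsewhere in this change.

```rust
/// Illustrative only: priority fee in lamports for one transaction, given a
/// compute-unit limit and a per-CU price in micro-lamports, rounded up to a
/// whole lamport.
fn priority_fee_lamports(cu_limit: u64, cu_price_micro_lamports: u64) -> u64 {
    (cu_limit * cu_price_micro_lamports).div_ceil(1_000_000)
}

fn main() {
    // 14_000 CUs at 300 micro-lamports/CU -> ceil(4_200_000 / 1_000_000) = 5 lamports
    // on top of the base signature fee.
    assert_eq!(priority_fee_lamports(14_000, 300), 5);
}
```

(`u64::div_ceil` needs Rust 1.73, which matches the `rust-version` pinned by the new benchrunner crate.)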
+#[derive(Debug, Clone, Copy)] +pub enum TransactionSize { + // 179 bytes, 5237 CUs + Small, + // 1186 bytes, 193175 CUs + Large, +} diff --git a/bench/src/benches/confirmation_rate.rs b/bench/src/benches/confirmation_rate.rs index 845ded88..606d0cd5 100644 --- a/bench/src/benches/confirmation_rate.rs +++ b/bench/src/benches/confirmation_rate.rs @@ -29,6 +29,7 @@ pub async fn confirmation_rate( payer_path: &Path, rpc_url: String, tx_params: BenchmarkTransactionParams, + max_timeout_ms: u64, txs_per_round: usize, num_of_runs: usize, ) -> anyhow::Result<()> { @@ -45,7 +46,7 @@ pub async fn confirmation_rate( let mut rpc_results = Vec::with_capacity(num_of_runs); for _ in 0..num_of_runs { - match send_bulk_txs_and_wait(&rpc, &payer, txs_per_round, &tx_params) + match send_bulk_txs_and_wait(&rpc, &payer, txs_per_round, &tx_params, max_timeout_ms) .await .context("send bulk tx and wait") { @@ -74,6 +75,7 @@ pub async fn send_bulk_txs_and_wait( payer: &Keypair, num_txns: usize, tx_params: &BenchmarkTransactionParams, + max_timeout_ms: u64, ) -> anyhow::Result { trace!("Get latest blockhash and generate transactions"); let hash = rpc.get_latest_blockhash().await.map_err(|err| { @@ -85,7 +87,7 @@ pub async fn send_bulk_txs_and_wait( trace!("Sending {} transactions in bulk ..", txs.len()); let tx_and_confirmations_from_rpc: Vec<(Signature, ConfirmationResponseFromRpc)> = - send_and_confirm_bulk_transactions(rpc, &txs) + send_and_confirm_bulk_transactions(rpc, &txs, max_timeout_ms) .await .context("send and confirm bulk tx")?; trace!("Done sending {} transaction.", txs.len()); diff --git a/bench/src/benches/confirmation_slot.rs b/bench/src/benches/confirmation_slot.rs index c89ae039..17320b67 100644 --- a/bench/src/benches/confirmation_slot.rs +++ b/bench/src/benches/confirmation_slot.rs @@ -98,14 +98,14 @@ pub async fn confirmation_slot( let a_task = tokio::spawn(async move { sleep(Duration::from_secs_f64(a_delay)).await; debug!("(A) sending tx {}", rpc_a_tx.signatures[0]); - send_and_confirm_transaction(&rpc_a, rpc_a_tx) + send_and_confirm_transaction(&rpc_a, rpc_a_tx, max_timeout_ms) .await }); let b_task = tokio::spawn(async move { sleep(Duration::from_secs_f64(b_delay)).await; debug!("(B) sending tx {}", rpc_b_tx.signatures[0]); - send_and_confirm_transaction(&rpc_b, rpc_b_tx) + send_and_confirm_transaction(&rpc_b, rpc_b_tx, max_timeout_ms) .await }); @@ -158,10 +158,10 @@ async fn create_tx( async fn send_and_confirm_transaction( rpc: &RpcClient, tx: Transaction, + max_timeout_ms: u64, ) -> anyhow::Result { - let result_vec: Vec<(Signature, ConfirmationResponseFromRpc)> = - send_and_confirm_bulk_transactions(rpc, &[tx]).await?; + send_and_confirm_bulk_transactions(rpc, &[tx], max_timeout_ms).await?; assert_eq!(result_vec.len(), 1, "expected 1 result"); let (_sig, confirmation_response) = result_vec.into_iter().next().unwrap(); diff --git a/bench/src/benches/rpc_interface.rs b/bench/src/benches/rpc_interface.rs index d62b2af5..00e00a52 100644 --- a/bench/src/benches/rpc_interface.rs +++ b/bench/src/benches/rpc_interface.rs @@ -37,6 +37,7 @@ pub enum ConfirmationResponseFromRpc { pub async fn send_and_confirm_bulk_transactions( rpc_client: &RpcClient, txs: &[Transaction], + max_timeout_ms: u64, ) -> anyhow::Result> { trace!("Polling for next slot .."); let send_slot = poll_next_slot_start(rpc_client) @@ -74,7 +75,8 @@ pub async fn send_and_confirm_bulk_transactions( } else { debug!( "Slot did not advance during sending transactions: {} -> {}", - send_slot, after_send_slot + send_slot, + 
after_send_slot ); } diff --git a/bench/src/benchnew.rs b/bench/src/benchnew.rs index f61d0332..8f3c7622 100644 --- a/bench/src/benchnew.rs +++ b/bench/src/benchnew.rs @@ -40,6 +40,9 @@ enum SubCommand { rpc_url: String, #[clap(short, long)] size_tx: TxSize, + /// Maximum confirmation time in milliseconds. After this, the txn is considered unconfirmed + #[clap(short, long, default_value_t = 15_000)] + max_timeout_ms: u64, #[clap(short, long)] txns_per_round: usize, #[clap(short, long)] @@ -105,6 +108,7 @@ async fn main() { payer_path, rpc_url, size_tx, + max_timeout_ms, txns_per_round, num_of_runs, cu_price, @@ -115,6 +119,7 @@ async fn main() { tx_size: size_tx, cu_price_micro_lamports: cu_price, }, + max_timeout_ms, txns_per_round, num_of_runs, ) diff --git a/bench/src/helpers.rs b/bench/src/helpers.rs index a57af8ac..65c1c626 100644 --- a/bench/src/helpers.rs +++ b/bench/src/helpers.rs @@ -3,6 +3,7 @@ use itertools::Itertools; use lazy_static::lazy_static; use rand::{distributions::Alphanumeric, prelude::Distribution, SeedableRng}; use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::compute_budget; use solana_sdk::instruction::AccountMeta; use solana_sdk::{ commitment_config::CommitmentConfig, @@ -95,6 +96,7 @@ impl BenchHelper { funded_payer: &Keypair, blockhash: Hash, random_seed: Option, + cu_price_micro_lamports: u64, ) -> Vec { let seed = random_seed.map_or(0, |x| x); let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(seed); @@ -102,24 +104,51 @@ impl BenchHelper { .map(|_| { let random_bytes: Vec = Alphanumeric.sample_iter(&mut rng).take(10).collect(); - Self::create_memo_tx_small(&random_bytes, funded_payer, blockhash) + Self::create_memo_tx_small( + &random_bytes, + funded_payer, + blockhash, + cu_price_micro_lamports, + ) }) .collect() } - pub fn create_memo_tx_small(msg: &[u8], payer: &Keypair, blockhash: Hash) -> Transaction { + // note: there is another version of this + pub fn create_memo_tx_small( + msg: &[u8], + payer: &Keypair, + blockhash: Hash, + cu_price_micro_lamports: u64, + ) -> Transaction { let memo = Pubkey::from_str(MEMO_PROGRAM_ID).unwrap(); - let instruction = Instruction::new_with_bytes(memo, msg, vec![]); - let message = Message::new(&[instruction], Some(&payer.pubkey())); + + let cu_request: Instruction = + compute_budget::ComputeBudgetInstruction::set_compute_unit_limit(14000); + + let instructions = if cu_price_micro_lamports > 0 { + let cu_budget_ix: Instruction = + compute_budget::ComputeBudgetInstruction::set_compute_unit_price( + cu_price_micro_lamports, + ); + vec![cu_request, cu_budget_ix, instruction] + } else { + vec![cu_request, instruction] + }; + + let message = Message::new(&instructions, Some(&payer.pubkey())); Transaction::new(&[payer], message, blockhash) } - pub fn create_memo_tx_large(msg: &[u8], payer: &Keypair, blockhash: Hash) -> Transaction { + pub fn create_memo_tx_large( + msg: &[u8], + payer: &Keypair, + blockhash: Hash, + cu_price_micro_lamports: u64, + ) -> Transaction { let accounts = (0..8).map(|_| Keypair::new()).collect_vec(); - let memo = Pubkey::from_str(MEMO_PROGRAM_ID).unwrap(); - let instruction = Instruction::new_with_bytes( memo, msg, @@ -128,7 +157,18 @@ impl BenchHelper { .map(|keypair| AccountMeta::new_readonly(keypair.pubkey(), true)) .collect_vec(), ); - let message = Message::new(&[instruction], Some(&payer.pubkey())); + + let instructions = if cu_price_micro_lamports > 0 { + let cu_budget_ix: Instruction = + compute_budget::ComputeBudgetInstruction::set_compute_unit_price( + 
cu_price_micro_lamports, + ); + vec![cu_budget_ix, instruction] + } else { + vec![instruction] + }; + + let message = Message::new(&instructions, Some(&payer.pubkey())); let mut signers = vec![payer]; signers.extend(accounts.iter()); @@ -136,3 +176,33 @@ impl BenchHelper { Transaction::new(&signers, message, blockhash) } } + +#[test] +fn transaction_size_small() { + let blockhash = Hash::default(); + let payer_keypair = Keypair::from_base58_string( + "rKiJ7H5UUp3JR18kNyTF1XPuwPKHEM7gMLWHZPWP5djrW1vSjfwjhvJrevxF9MPmUmN9gJMLHZdLMgc9ao78eKr", + ); + + let seed = 42; + let random_strings = BenchHelper::generate_random_strings(1, Some(seed), 10); + let rand_string = random_strings.first().unwrap(); + let tx = BenchHelper::create_memo_tx_small(rand_string, &payer_keypair, blockhash, 300); + + assert_eq!(bincode::serialized_size(&tx).unwrap(), 231); +} + +#[test] +fn transaction_size_large() { + let blockhash = Hash::default(); + let payer_keypair = Keypair::from_base58_string( + "rKiJ7H5UUp3JR18kNyTF1XPuwPKHEM7gMLWHZPWP5djrW1vSjfwjhvJrevxF9MPmUmN9gJMLHZdLMgc9ao78eKr", + ); + + let seed = 42; + let random_strings = BenchHelper::generate_random_strings(1, Some(seed), 232); + let rand_string = random_strings.first().unwrap(); + let tx = BenchHelper::create_memo_tx_large(rand_string, &payer_keypair, blockhash, 300); + + assert_eq!(bincode::serialized_size(&tx).unwrap(), 1222); +} diff --git a/bench/src/lib.rs b/bench/src/lib.rs index 3ccea0dd..83587d7c 100644 --- a/bench/src/lib.rs +++ b/bench/src/lib.rs @@ -22,9 +22,11 @@ use std::{str::FromStr, time::Duration}; use tokio::time::Instant; use tx_size::TxSize; +pub mod bench1; pub mod benches; pub mod helpers; pub mod metrics; +pub mod service_adapter; pub mod tx_size; #[derive(Parser, Debug)] diff --git a/bench/src/main.rs b/bench/src/main.rs index 17e3fb9a..8597968a 100644 --- a/bench/src/main.rs +++ b/bench/src/main.rs @@ -1,26 +1,21 @@ use bench::{ + bench1, helpers::BenchHelper, metrics::{AvgMetric, Metric, TxMetricData}, Args, }; use clap::Parser; -use dashmap::DashMap; + use futures::future::join_all; -use log::{error, info, warn}; +use log::{error, info}; use solana_rpc_client::nonblocking::rpc_client::RpcClient; -use solana_sdk::signature::Signature; + +use bench::bench1::TransactionSize; use solana_sdk::{ commitment_config::CommitmentConfig, hash::Hash, signature::Keypair, signer::Signer, - slot_history::Slot, -}; -use std::sync::{ - atomic::{AtomicU64, Ordering}, - Arc, -}; -use tokio::{ - sync::{mpsc::UnboundedSender, RwLock}, - time::{Duration, Instant}, }; +use std::sync::{atomic::AtomicU64, Arc}; +use tokio::{sync::RwLock, time::Duration}; #[tokio::main(flavor = "multi_thread", worker_threads = 16)] async fn main() { @@ -36,6 +31,8 @@ async fn main() { large_transactions, } = Args::parse(); + let cu_price_micro_lamports = 300; + let mut run_interval_ms = tokio::time::interval(Duration::from_millis(run_interval_ms)); let transaction_size = if large_transactions { @@ -103,7 +100,7 @@ async fn main() { for seed in 0..runs { let funded_payer = Keypair::from_bytes(funded_payer.to_bytes().as_slice()).unwrap(); - tasks.push(tokio::spawn(bench( + tasks.push(tokio::spawn(bench1::bench( rpc_client.clone(), tx_count, funded_payer, @@ -113,6 +110,7 @@ async fn main() { tx_log_sx.clone(), log_transactions, transaction_size, + cu_price_micro_lamports, ))); // wait for an interval run_interval_ms.tick().await; @@ -145,145 +143,3 @@ async fn main() { csv_writer.flush().unwrap(); } - -#[derive(Clone, Debug, Copy)] -struct TxSendData { - sent_duration: 
Duration, - sent_instant: Instant, - sent_slot: Slot, - transaction_bytes: u64, -} - -struct ApiCallerResult { - gross_send_time: Duration, -} - -#[allow(clippy::too_many_arguments)] -async fn bench( - rpc_client: Arc, - tx_count: usize, - funded_payer: Keypair, - seed: u64, - block_hash: Arc>, - current_slot: Arc, - tx_metric_sx: UnboundedSender, - log_txs: bool, - transaction_size: TransactionSize, -) -> Metric { - let map_of_txs: Arc> = Arc::new(DashMap::new()); - // transaction sender task - let api_caller_result = { - let map_of_txs = map_of_txs.clone(); - let rpc_client = rpc_client.clone(); - let current_slot = current_slot.clone(); - tokio::spawn(async move { - let map_of_txs = map_of_txs.clone(); - let n_chars = match transaction_size { - TransactionSize::Small => 10, - TransactionSize::Large => 240, // 565 is max but we need to lower that to not burn the CUs - }; - let rand_strings = BenchHelper::generate_random_strings(tx_count, Some(seed), n_chars); - - let bench_start_time = Instant::now(); - - for rand_string in &rand_strings { - let blockhash = { *block_hash.read().await }; - let tx = match transaction_size { - TransactionSize::Small => { - BenchHelper::create_memo_tx_small(rand_string, &funded_payer, blockhash) - } - TransactionSize::Large => { - BenchHelper::create_memo_tx_large(rand_string, &funded_payer, blockhash) - } - }; - let start_time = Instant::now(); - match rpc_client.send_transaction(&tx).await { - Ok(signature) => { - map_of_txs.insert( - signature, - TxSendData { - sent_duration: start_time.elapsed(), - sent_instant: Instant::now(), - sent_slot: current_slot.load(std::sync::atomic::Ordering::Relaxed), - transaction_bytes: bincode::serialized_size(&tx).unwrap(), - }, - ); - } - Err(e) => { - warn!("tx send failed with error {}", e); - } - } - } - ApiCallerResult { - gross_send_time: bench_start_time.elapsed(), - } - }) - }; - - let mut metric = Metric::default(); - let confirmation_time = Instant::now(); - let mut confirmed_count = 0; - while confirmation_time.elapsed() < Duration::from_secs(60) - && !(map_of_txs.is_empty() && confirmed_count == tx_count) - { - let signatures = map_of_txs.iter().map(|x| *x.key()).collect::>(); - if signatures.is_empty() { - tokio::time::sleep(Duration::from_millis(1)).await; - continue; - } - - if let Ok(res) = rpc_client.get_signature_statuses(&signatures).await { - for (i, signature) in signatures.iter().enumerate() { - let tx_status = &res.value[i]; - if tx_status.is_some() { - let tx_data = map_of_txs.get(signature).unwrap(); - let time_to_confirm = tx_data.sent_instant.elapsed(); - let transaction_bytes = tx_data.transaction_bytes; - metric.add_successful_transaction( - tx_data.sent_duration, - time_to_confirm, - transaction_bytes, - ); - - if log_txs { - let _ = tx_metric_sx.send(TxMetricData { - signature: signature.to_string(), - sent_slot: tx_data.sent_slot, - confirmed_slot: current_slot.load(Ordering::Relaxed), - time_to_send_in_millis: tx_data.sent_duration.as_millis() as u64, - time_to_confirm_in_millis: time_to_confirm.as_millis() as u64, - }); - } - drop(tx_data); - map_of_txs.remove(signature); - confirmed_count += 1; - } - } - } - } - - for tx in map_of_txs.iter() { - metric.add_unsuccessful_transaction(tx.sent_duration, tx.transaction_bytes); - } - - let api_caller_result = api_caller_result - .await - .expect("api caller task must succeed"); - - metric - .set_total_gross_send_time(api_caller_result.gross_send_time.as_micros() as f64 / 1_000.0); - - metric.finalize(); - metric -} - -// see 
https://spl.solana.com/memo for sizing of transactions -// As of v1.5.1, an unsigned instruction can support single-byte UTF-8 of up to 566 bytes. -// An instruction with a simple memo of 32 bytes can support up to 12 signers. -#[derive(Debug, Clone, Copy)] -enum TransactionSize { - // 179 bytes, 5237 CUs - Small, - // 1186 bytes, 193175 CUs - Large, -} diff --git a/bench/src/service_adapter.rs b/bench/src/service_adapter.rs new file mode 100644 index 00000000..a1f310bd --- /dev/null +++ b/bench/src/service_adapter.rs @@ -0,0 +1,102 @@ +// adapter code for all from benchrunner-service + +use crate::bench1; +use crate::bench1::TransactionSize; +use crate::metrics::{Metric, TxMetricData}; +use crate::tx_size::TxSize; +use log::debug; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::commitment_config::CommitmentConfig; +use solana_sdk::hash::Hash; +use solana_sdk::signature::Keypair; +use solana_sdk::signer::Signer; +use std::fmt::Display; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::RwLock; +use tokio::time::Instant; + +#[derive(Debug, Clone)] +pub struct BenchConfig { + pub tx_count: usize, + pub cu_price_micro_lamports: u64, +} + +impl Display for BenchConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + +pub async fn bench_servicerunner( + bench_config: &BenchConfig, + rpc_addr: String, + funded_payer: Keypair, + size_tx: TxSize, +) -> Metric { + let started_at = Instant::now(); + + let transaction_size = match size_tx { + TxSize::Small => TransactionSize::Small, + TxSize::Large => TransactionSize::Large, + }; + + debug!("Payer: {}", funded_payer.pubkey()); + + let rpc_client = Arc::new(RpcClient::new_with_commitment( + rpc_addr.clone(), + CommitmentConfig::confirmed(), + )); + let bh = rpc_client.get_latest_blockhash().await.unwrap(); + let slot = rpc_client.get_slot().await.unwrap(); + let block_hash: Arc> = Arc::new(RwLock::new(bh)); + let current_slot = Arc::new(AtomicU64::new(slot)); + { + // block hash updater task + let block_hash = block_hash.clone(); + let rpc_client = rpc_client.clone(); + let current_slot = current_slot.clone(); + tokio::spawn(async move { + loop { + let bh = rpc_client.get_latest_blockhash().await; + match bh { + Ok(bh) => { + let mut lock = block_hash.write().await; + *lock = bh; + } + Err(e) => println!("blockhash update error {}", e), + } + + let slot = rpc_client.get_slot().await; + match slot { + Ok(slot) => { + current_slot.store(slot, std::sync::atomic::Ordering::Relaxed); + } + Err(e) => println!("slot {}", e), + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + }; + + { + // TODO what todo + // not used unless log_txs is set to true + let (tx_log_sx_null, _tx_log_rx) = tokio::sync::mpsc::unbounded_channel::(); + + bench1::bench( + rpc_client.clone(), + bench_config.tx_count, + funded_payer, + started_at.elapsed().as_micros() as u64, + block_hash.clone(), + current_slot.clone(), + tx_log_sx_null, + false, // log_transactions + transaction_size, + bench_config.cu_price_micro_lamports, + ) + .await + } +} diff --git a/bench/src/tx_size.rs b/bench/src/tx_size.rs index f6bc899d..f1a159cb 100644 --- a/bench/src/tx_size.rs +++ b/bench/src/tx_size.rs @@ -1,4 +1,5 @@ use serde::Deserialize; +use std::fmt::Display; // see https://spl.solana.com/memo for sizing of transactions // As of v1.5.1, an unsigned instruction can support single-byte UTF-8 of up to 566 bytes. 
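The new `service_adapter` module above is the seam between the long-running benchrunner service and the original `bench1::bench` loop. A minimal sketch of a one-off invocation is shown below; the RPC URL and the freshly generated payer are placeholders (in practice a funded keypair is required).

```rust
use bench::service_adapter::{bench_servicerunner, BenchConfig};
use bench::tx_size::TxSize;
use solana_sdk::signature::Keypair;

#[tokio::main]
async fn main() {
    let config = BenchConfig {
        tx_count: 10,
        cu_price_micro_lamports: 300,
    };
    // Placeholders: point at a real RPC endpoint and use a funded keypair.
    let payer = Keypair::new();
    let metric = bench_servicerunner(
        &config,
        "http://localhost:8899".to_string(),
        payer,
        TxSize::Small,
    )
    .await;
    println!("confirmed {}/{} txs", metric.txs_confirmed, metric.txs_sent);
}
```

The benchrunner service's main loop does essentially this once per tenant per interval, cycling through one `BenchConfig` per configured `--prio-fees` value.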
@@ -11,6 +12,15 @@ pub enum TxSize { Large, } +impl Display for TxSize { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + TxSize::Small => write!(f, "small"), + TxSize::Large => write!(f, "large"), + } + } +} + impl TxSize { pub fn size(&self) -> usize { match self { diff --git a/bench/transactions.csv b/bench/transactions.csv new file mode 100644 index 00000000..e69de29b diff --git a/benchrunner-service/Cargo.toml b/benchrunner-service/Cargo.toml new file mode 100644 index 00000000..6949df82 --- /dev/null +++ b/benchrunner-service/Cargo.toml @@ -0,0 +1,39 @@ + +[package] +name = "solana-lite-rpc-benchrunner-service" +version = "0.2.4" +edition = "2021" +description = "Service for running recurring benchmarks" +rust-version = "1.73.0" +repository = "https://github.com/blockworks-foundation/lite-rpc" +license = "AGPL" + +[dependencies] +solana-lite-rpc-util = { workspace = true } +bench = { workspace = true } + +solana-sdk = { workspace = true } +solana-rpc-client = { workspace = true } +solana-transaction-status = { workspace = true } +solana-rpc-client-api = { workspace = true } + +serde = { workspace = true } +serde_json = { workspace = true } +futures = { workspace = true } +futures-util = { workspace = true } +anyhow = { workspace = true } +log = { workspace = true } +clap = { workspace = true } +tracing-subscriber = { workspace = true } +prometheus = { workspace = true } +lazy_static = { workspace = true } +async-trait = { workspace = true } +tokio = { version = "1.28.2", features = ["full", "fs"]} +tokio-util = "0.7" +chrono = { workspace = true } +itertools = { workspace = true } +native-tls = { workspace = true } +postgres-native-tls = { workspace = true } +postgres-types = { version = "0.2.6", features = ["derive", "with-serde_json-1"] } +tokio-postgres = { version = "0.7.8", features = ["with-chrono-0_4"] } + diff --git a/benchrunner-service/README.md b/benchrunner-service/README.md new file mode 100644 index 00000000..c43b9786 --- /dev/null +++ b/benchrunner-service/README.md @@ -0,0 +1,35 @@ + + +# Setup +### Hardware +Hardware: recommend 1024MB RAM, 2 vCPUs, small disk + + +### Environment Variables +| Environment Variable | Purpose | Required? | Default Value | +|----------------------|-------------------------------------------------------|---------------|---------------| +| `PG_ENABLED` | Enable writing to PostgreSQL | No | false | +| `PG_CONFIG` | PostgreSQL connection string | if PG_ENABLED | | +| `TENANT1_ID` | Technical ID for the tenant | Yes | | +| `TENANT1_RPC_ADDR` | RPC address for the target RPC node | Yes | | +| `TENANT2_.. | more tenants can be added using TENANT2, TENANT3, ... | | | + +### Command-line Arguments +``` +Options: + -b, --bench-interval + interval in milliseconds to run the benchmark [default: 60000] + -n, --tx-count + [default: 10] + -s, --size-tx + [default: small] [possible values: small, large] + -p, --prio-fees + [default: 0] +``` + +```bash +solana-lite-rpc-benchrunner-service \ + --bench-interval 600000 \ + --tx-count 100 \ + --prio-fees 0 --prio-fees 1000 --prio-fees 100000 +``` diff --git a/benchrunner-service/src/args.rs b/benchrunner-service/src/args.rs new file mode 100644 index 00000000..5e4a25ea --- /dev/null +++ b/benchrunner-service/src/args.rs @@ -0,0 +1,80 @@ +use itertools::Itertools; +use solana_sdk::signature::Keypair; + +#[derive(Debug, Clone)] +pub struct TenantConfig { + // technical identifier for the tenant, e.g. 
"solana-rpc" + pub tenant_id: String, + pub rpc_addr: String, +} + +// recommend to use one payer keypair for all targets and fund that keypair with enough SOL +pub fn get_funded_payer_from_env() -> Keypair { + let keypair58_string: String = std::env::var("FUNDED_PAYER_KEYPAIR58") + .expect("need funded payer keypair on env (variable FUNDED_PAYER_KEYPAIR58)"); + Keypair::from_base58_string(&keypair58_string) +} + +pub fn read_tenant_configs(env_vars: Vec<(String, String)>) -> Vec { + let map = env_vars + .iter() + .filter(|(k, _)| k.starts_with("TENANT")) + .into_group_map_by(|(k, _v)| { + let tenant_counter = k + .split('_') + .next() + .expect("tenant prefix must be split by underscore (e.g. TENANT99_SOMETHING") + .replace("TENANT", ""); + tenant_counter + .parse::() + .expect("tenant counter must be a number (e.g. TENANT99)") + }); + + let values = map + .iter() + .sorted() + .map(|(tc, v)| TenantConfig { + tenant_id: v + .iter() + .find(|(v, _)| *v == format!("TENANT{}_ID", tc)) + .iter() + .exactly_one() + .expect("need ID") + .1 + .to_string(), + rpc_addr: v + .iter() + .find(|(v, _)| *v == format!("TENANT{}_RPC_ADDR", tc)) + .iter() + .exactly_one() + .expect("need RPC_ADDR") + .1 + .to_string(), + }) + .collect::>(); + + values +} + +#[test] +fn test_env_vars() { + let env_vars = vec![ + (String::from("TENANT1_ID"), String::from("solana-rpc")), + ( + String::from("TENANT1_RPC_ADDR"), + String::from("http://localhost:8899"), + ), + (String::from("TENANT2_ID"), String::from("lite-rpc")), + ( + String::from("TENANT2_RPC_ADDR"), + String::from("http://localhost:8890"), + ), + ]; + let tenant_configs = read_tenant_configs(env_vars); + + assert_eq!(tenant_configs.len(), 2); + assert_eq!(tenant_configs[0].tenant_id, "solana-rpc"); + assert_eq!(tenant_configs[0].rpc_addr, "http://localhost:8899"); + assert_eq!(tenant_configs[1].tenant_id, "lite-rpc"); + assert_eq!(tenant_configs[1].rpc_addr, "http://localhost:8890"); +} diff --git a/benchrunner-service/src/cli.rs b/benchrunner-service/src/cli.rs new file mode 100644 index 00000000..af0944cb --- /dev/null +++ b/benchrunner-service/src/cli.rs @@ -0,0 +1,16 @@ +use bench::tx_size::TxSize; +use clap::Parser; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Args { + /// interval in milliseconds to run the benchmark + #[arg(short = 'b', long, default_value_t = 60_000)] + pub bench_interval: u64, + #[arg(short = 'n', long, default_value_t = 10)] + pub tx_count: usize, + #[clap(short, long, default_value_t = TxSize::Small)] + pub size_tx: TxSize, + #[clap(short, long, default_values_t = [0])] + pub prio_fees: Vec, +} diff --git a/benchrunner-service/src/main.rs b/benchrunner-service/src/main.rs new file mode 100644 index 00000000..4e45a0fa --- /dev/null +++ b/benchrunner-service/src/main.rs @@ -0,0 +1,149 @@ +mod args; +mod cli; +mod postgres; +mod prometheus; + +use crate::args::{get_funded_payer_from_env, read_tenant_configs}; +use crate::cli::Args; +use crate::postgres::metrics_dbstore::{ + save_metrics_to_postgres, upsert_benchrun_status, BenchRunStatus, +}; +use crate::postgres::postgres_session::PostgresSessionConfig; +use crate::postgres::postgres_session_cache::PostgresSessionCache; +use crate::prometheus::metrics_prometheus::publish_metrics_on_prometheus; +use crate::prometheus::prometheus_sync::PrometheusSync; +use bench::service_adapter::BenchConfig; +use clap::Parser; +use futures_util::future::join_all; +use itertools::Itertools; +use log::{debug, error, info}; +use std::net::SocketAddr; 
+use std::str::FromStr; +use std::time::{Duration, SystemTime}; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::init(); + + let Args { + bench_interval, + tx_count, + size_tx, + prio_fees, + } = Args::parse(); + + let postgres_config = PostgresSessionConfig::new_from_env().unwrap(); + + let bench_interval = Duration::from_millis(bench_interval); + + let funded_payer = get_funded_payer_from_env(); + + let tenant_configs = read_tenant_configs(std::env::vars().collect::>()); + + info!("Use postgres config: {:?}", postgres_config.is_some()); + info!("Use prio fees: [{}]", prio_fees.iter().join(",")); + info!("Start running benchmarks every {:?}", bench_interval); + info!( + "Found tenants: {}", + tenant_configs.iter().map(|tc| &tc.tenant_id).join(", ") + ); + + if tenant_configs.is_empty() { + error!("No tenants found (missing env vars) - exit"); + return; + } + + let _prometheus_task = PrometheusSync::sync(SocketAddr::from_str("[::]:9091").unwrap()); + + let mut jh_tenant_task = Vec::new(); + // let postgres_session = Arc::new(PostgresSession::new(postgres_config.unwrap()).await); + + let postgres_session = match postgres_config { + None => None, + Some(x) => { + let session_cache = PostgresSessionCache::new(x) + .await + .expect("PostgreSQL session cache"); + Some(session_cache) + } + }; + + let bench_configs = prio_fees + .iter() + .map(|prio_fees| BenchConfig { + tx_count, + cu_price_micro_lamports: *prio_fees, + }) + .collect_vec(); + + for tenant_config in &tenant_configs { + let funded_payer = funded_payer.insecure_clone(); + let tenant_id = tenant_config.tenant_id.clone(); + let postgres_session = postgres_session.clone(); + let tenant_config = tenant_config.clone(); + let bench_configs = bench_configs.clone(); + let jh_runner = tokio::spawn(async move { + let mut interval = tokio::time::interval(bench_interval); + for run_count in 1.. 
{ + let bench_config = bench_configs[run_count % bench_configs.len()].clone(); + debug!( + "Invoke bench execution (#{}) on tenant <{}> using {}", + run_count, tenant_id, bench_config + ); + let benchrun_at = SystemTime::now(); + + if let Some(postgres_session) = postgres_session.as_ref() { + let _dbstatus = upsert_benchrun_status( + postgres_session, + &tenant_config, + &bench_config, + benchrun_at, + BenchRunStatus::STARTED, + ) + .await; + } + + let metric = bench::service_adapter::bench_servicerunner( + &bench_config, + tenant_config.rpc_addr.clone(), + funded_payer.insecure_clone(), + size_tx, + ) + .await; + + if let Some(postgres_session) = postgres_session.as_ref() { + let _dbstatus = save_metrics_to_postgres( + postgres_session, + &tenant_config, + &bench_config, + &metric, + benchrun_at, + ) + .await; + } + + publish_metrics_on_prometheus(&tenant_config, &bench_config, &metric).await; + + if let Some(postgres_session) = postgres_session.as_ref() { + let _dbstatus = upsert_benchrun_status( + postgres_session, + &tenant_config, + &bench_config, + benchrun_at, + BenchRunStatus::FINISHED, + ) + .await; + } + debug!( + "Bench execution (#{}) done in {:?}", + run_count, + benchrun_at.elapsed().unwrap() + ); + interval.tick().await; + } + }); + jh_tenant_task.push(jh_runner); + } // -- END tenant loop + + join_all(jh_tenant_task).await; +} diff --git a/benchrunner-service/src/postgres/metrics_dbstore.rs b/benchrunner-service/src/postgres/metrics_dbstore.rs new file mode 100644 index 00000000..a5cb6cb3 --- /dev/null +++ b/benchrunner-service/src/postgres/metrics_dbstore.rs @@ -0,0 +1,104 @@ +use crate::args::TenantConfig; +use crate::postgres::postgres_session_cache::PostgresSessionCache; +use bench::metrics::Metric; +use bench::service_adapter::BenchConfig; +use log::warn; +use postgres_types::ToSql; +use std::time::SystemTime; + +#[allow(clippy::upper_case_acronyms)] +pub enum BenchRunStatus { + STARTED, + FINISHED, +} + +impl BenchRunStatus { + pub fn to_db_string(&self) -> &str { + match self { + BenchRunStatus::STARTED => "STARTED", + BenchRunStatus::FINISHED => "FINISHED", + } + } +} + +pub async fn upsert_benchrun_status( + postgres_session: &PostgresSessionCache, + tenant_config: &TenantConfig, + _bench_config: &BenchConfig, + benchrun_at: SystemTime, + status: BenchRunStatus, +) -> anyhow::Result<()> { + let values: &[&(dyn ToSql + Sync)] = &[ + &tenant_config.tenant_id, + &benchrun_at, + &status.to_db_string(), + ]; + let write_result = postgres_session + .get_session() + .await? + .execute( + r#" + INSERT INTO benchrunner.bench_runs ( + tenant, + ts, + status + ) + VALUES ($1, $2, $3) + ON CONFLICT (tenant, ts) DO UPDATE SET status = $3 + "#, + values, + ) + .await; + + if let Err(err) = write_result { + warn!("Failed to upsert status (err {:?}) - continue", err); + } + + Ok(()) +} + +pub async fn save_metrics_to_postgres( + postgres_session: &PostgresSessionCache, + tenant_config: &TenantConfig, + bench_config: &BenchConfig, + metric: &Metric, + benchrun_at: SystemTime, +) -> anyhow::Result<()> { + let metricjson = serde_json::to_value(metric).unwrap(); + let values: &[&(dyn ToSql + Sync)] = &[ + &tenant_config.tenant_id, + &benchrun_at, + &(bench_config.cu_price_micro_lamports as i64), + &(metric.txs_sent as i64), + &(metric.txs_confirmed as i64), + &(metric.txs_un_confirmed as i64), + &(metric.average_confirmation_time_ms as f32), + &metricjson, + ]; + let write_result = postgres_session + .get_session() + .await? 
+ .execute( + r#" + INSERT INTO + benchrunner.bench_metrics ( + tenant, + ts, + prio_fees, + txs_sent, + txs_confirmed, txs_un_confirmed, + average_confirmation_time_ms, + metric_json + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + "#, + values, + ) + .await; + + if let Err(err) = write_result { + warn!("Failed to insert metrics (err {:?}) - continue", err); + } + + Ok(()) +} diff --git a/benchrunner-service/src/postgres/mod.rs b/benchrunner-service/src/postgres/mod.rs new file mode 100644 index 00000000..3131825c --- /dev/null +++ b/benchrunner-service/src/postgres/mod.rs @@ -0,0 +1,3 @@ +pub mod metrics_dbstore; +pub mod postgres_session; +pub mod postgres_session_cache; diff --git a/benchrunner-service/src/postgres/postgres_session.rs b/benchrunner-service/src/postgres/postgres_session.rs new file mode 100644 index 00000000..5951ecf2 --- /dev/null +++ b/benchrunner-service/src/postgres/postgres_session.rs @@ -0,0 +1,214 @@ +#![allow(dead_code)] + +use std::env; +use std::sync::Arc; + +use anyhow::Context; +use native_tls::{Certificate, Identity, TlsConnector}; +use postgres_native_tls::MakeTlsConnector; +use solana_lite_rpc_util::encoding::BinaryEncoding; +use tokio_postgres::{ + config::SslMode, tls::MakeTlsConnect, types::ToSql, Client, Error, NoTls, Row, Socket, +}; + +#[derive(serde::Deserialize, Debug, Clone)] +pub struct PostgresSessionConfig { + pub pg_config: String, + pub ssl: Option, +} + +#[derive(serde::Deserialize, Debug, Clone)] +pub struct PostgresSessionSslConfig { + pub ca_pem_b64: String, + pub client_pks_b64: String, + pub client_pks_pass: String, +} + +impl PostgresSessionConfig { + pub fn new_from_env() -> anyhow::Result> { + // pg not enabled + if env::var("PG_ENABLED").is_err() { + return Ok(None); + } + + let enable_pg = env::var("PG_ENABLED").context("PG_ENABLED")?; + if enable_pg != *"true" { + return Ok(None); + } + + let env_pg_config = env::var("PG_CONFIG").context("PG_CONFIG not found")?; + + let ssl_config = if env_pg_config + .parse::()? + .get_ssl_mode() + .eq(&SslMode::Disable) + { + None + } else { + let env_ca_pem_b64 = env::var("CA_PEM_B64").context("CA_PEM_B64 not found")?; + let env_client_pks_b64 = + env::var("CLIENT_PKS_B64").context("CLIENT_PKS_B64 not found")?; + let env_client_pks_pass = + env::var("CLIENT_PKS_PASS").context("CLIENT_PKS_PASS not found")?; + + Some(PostgresSessionSslConfig { + ca_pem_b64: env_ca_pem_b64, + client_pks_b64: env_client_pks_b64, + client_pks_pass: env_client_pks_pass, + }) + }; + + Ok(Some(Self { + pg_config: env_pg_config, + ssl: ssl_config, + })) + } +} + +#[derive(Clone)] +pub struct PostgresSession { + client: Arc, +} + +impl PostgresSession { + pub async fn new_from_env() -> anyhow::Result { + let pg_session_config = PostgresSessionConfig::new_from_env() + .expect("failed to start Postgres Client") + .expect("Postgres not enabled (use PG_ENABLED)"); + PostgresSession::new(pg_session_config).await + } + + pub async fn new( + PostgresSessionConfig { pg_config, ssl }: PostgresSessionConfig, + ) -> anyhow::Result { + let pg_config = pg_config.parse::()?; + + let client = if let SslMode::Disable = pg_config.get_ssl_mode() { + Self::spawn_connection(pg_config, NoTls).await? 
+ } else { + let PostgresSessionSslConfig { + ca_pem_b64, + client_pks_b64, + client_pks_pass, + } = ssl.as_ref().unwrap(); + + let ca_pem = BinaryEncoding::Base64 + .decode(ca_pem_b64) + .context("ca pem decode")?; + let client_pks = BinaryEncoding::Base64 + .decode(client_pks_b64) + .context("client pks decode")?; + + let connector = TlsConnector::builder() + .add_root_certificate(Certificate::from_pem(&ca_pem)?) + .identity(Identity::from_pkcs12(&client_pks, client_pks_pass).context("Identity")?) + .danger_accept_invalid_hostnames(true) + .danger_accept_invalid_certs(true) + .build()?; + + Self::spawn_connection(pg_config, MakeTlsConnector::new(connector)).await? + }; + + Ok(Self { + client: Arc::new(client), + }) + } + + async fn spawn_connection( + pg_config: tokio_postgres::Config, + connector: T, + ) -> anyhow::Result + where + T: MakeTlsConnect + Send + 'static, + >::Stream: Send, + { + let (client, connection) = pg_config + .connect(connector) + .await + .context("Connecting to Postgres failed")?; + + tokio::spawn(async move { + log::info!("Connecting to Postgres"); + + if let Err(err) = connection.await { + log::error!("Connection to Postgres broke: {err:?}"); + return; + } + log::debug!("Postgres thread shutting down"); + }); + + Ok(client) + } + + pub fn is_closed(&self) -> bool { + self.client.is_closed() + } + + pub async fn execute( + &self, + statement: &str, + params: &[&(dyn ToSql + Sync)], + ) -> Result { + self.client.execute(statement, params).await + } + + // execute statements seperated by semicolon + pub async fn execute_multiple(&self, statement: &str) -> Result<(), Error> { + self.client.batch_execute(statement).await + } + + pub async fn execute_prepared_batch( + &self, + statement: &str, + params: &Vec>, + ) -> Result { + let prepared_stmt = self.client.prepare(statement).await?; + let mut total_inserted = 0; + for row in params { + let result = self.client.execute(&prepared_stmt, row).await; + total_inserted += result?; + } + Ok(total_inserted) + } + + pub async fn execute_prepared( + &self, + statement: &str, + params: &[&(dyn ToSql + Sync)], + ) -> Result { + let prepared_stmt = self.client.prepare(statement).await?; + self.client.execute(&prepared_stmt, params).await + } + + pub async fn execute_and_return( + &self, + statement: &str, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> { + self.client.query_opt(statement, params).await + } + + pub async fn query_opt( + &self, + statement: &str, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> { + self.client.query_opt(statement, params).await + } + + pub async fn query_one( + &self, + statement: &str, + params: &[&(dyn ToSql + Sync)], + ) -> Result { + self.client.query_one(statement, params).await + } + + pub async fn query_list( + &self, + statement: &str, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> { + self.client.query(statement, params).await + } +} diff --git a/benchrunner-service/src/postgres/postgres_session_cache.rs b/benchrunner-service/src/postgres/postgres_session_cache.rs new file mode 100644 index 00000000..a6fb0ce2 --- /dev/null +++ b/benchrunner-service/src/postgres/postgres_session_cache.rs @@ -0,0 +1,33 @@ +use crate::postgres::postgres_session::{PostgresSession, PostgresSessionConfig}; +use log::info; +use std::sync::Arc; +use tokio::sync::RwLock; + +#[derive(Clone)] +pub struct PostgresSessionCache { + session: Arc>, + config: Arc, +} + +impl PostgresSessionCache { + pub async fn new(config: PostgresSessionConfig) -> anyhow::Result { + let session = 
PostgresSession::new(config.clone()).await?; + Ok(Self { + session: Arc::new(RwLock::new(session)), + config: Arc::new(config), + }) + } + + pub async fn get_session(&self) -> anyhow::Result { + let session = self.session.read().await; + if session.is_closed() { + info!("PostgreSQL session closed - reconnecting"); + drop(session); + let session = PostgresSession::new(self.config.as_ref().clone()).await?; + *self.session.write().await = session.clone(); + Ok(session) + } else { + Ok(session.clone()) + } + } +} diff --git a/benchrunner-service/src/prometheus/metrics_prometheus.rs b/benchrunner-service/src/prometheus/metrics_prometheus.rs new file mode 100644 index 00000000..8d250659 --- /dev/null +++ b/benchrunner-service/src/prometheus/metrics_prometheus.rs @@ -0,0 +1,36 @@ +use bench::metrics::Metric; + +use crate::args::TenantConfig; +use bench::service_adapter::BenchConfig; +use prometheus::{opts, register_gauge_vec, register_int_gauge_vec, GaugeVec, IntGaugeVec}; + +// https://github.com/blockworks-foundation/lite-rpc/blob/production/bench/src/metrics.rs +lazy_static::lazy_static! { + static ref PROM_TXS_SENT: IntGaugeVec = register_int_gauge_vec!(opts!("literpc_benchrunner_txs_sent", "Total number of transactions sent"), &["tenant"]).unwrap(); + static ref PROM_TXS_CONFIRMED: IntGaugeVec = register_int_gauge_vec!(opts!("literpc_benchrunner_txs_confirmed", "Number of transactions confirmed"), &["tenant"]).unwrap(); + static ref PROM_TXS_UN_CONFIRMED: IntGaugeVec = register_int_gauge_vec!(opts!("literpc_benchrunner_txs_un_confirmed", "Number of transactions not confirmed"), &["tenant"]).unwrap(); + static ref PROM_AVG_CONFIRM: GaugeVec = register_gauge_vec!(opts!("literpc_benchrunner_avg_confirmation_time", "Confirmation time(ms)"), &["tenant"]).unwrap(); + // static ref RPC_RESPONDING: Gauge = register_gauge!(opts!("literpc_benchrunner_send_tps", "Transactions")).unwrap(); + // TODO add more +} + +pub async fn publish_metrics_on_prometheus( + tenant_config: &TenantConfig, + _bench_config: &BenchConfig, + metric: &Metric, +) { + let dimensions: &[&str] = &[&tenant_config.tenant_id]; + + PROM_TXS_SENT + .with_label_values(dimensions) + .set(metric.txs_sent as i64); + PROM_TXS_CONFIRMED + .with_label_values(dimensions) + .set(metric.txs_confirmed as i64); + PROM_TXS_UN_CONFIRMED + .with_label_values(dimensions) + .set(metric.txs_un_confirmed as i64); + PROM_AVG_CONFIRM + .with_label_values(dimensions) + .set(metric.average_confirmation_time_ms); +} diff --git a/benchrunner-service/src/prometheus/mod.rs b/benchrunner-service/src/prometheus/mod.rs new file mode 100644 index 00000000..906f95af --- /dev/null +++ b/benchrunner-service/src/prometheus/mod.rs @@ -0,0 +1,2 @@ +pub mod metrics_prometheus; +pub mod prometheus_sync; diff --git a/benchrunner-service/src/prometheus/prometheus_sync.rs b/benchrunner-service/src/prometheus/prometheus_sync.rs new file mode 100644 index 00000000..ac0b47f1 --- /dev/null +++ b/benchrunner-service/src/prometheus/prometheus_sync.rs @@ -0,0 +1,58 @@ +use std::time::Duration; + +use log::error; +use prometheus::{Encoder, TextEncoder}; +use tokio::{ + io::AsyncWriteExt, + net::{TcpListener, TcpStream, ToSocketAddrs}, +}; + +pub struct PrometheusSync; + +impl PrometheusSync { + fn create_response(payload: &str) -> String { + format!( + "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}", + payload.len(), + payload + ) + } + + async fn handle_stream(stream: &mut TcpStream) -> anyhow::Result<()> { + let mut metrics_buffer = Vec::new(); + let encoder = 
TextEncoder::new(); + + let metric_families = prometheus::gather(); + encoder + .encode(&metric_families, &mut metrics_buffer) + .unwrap(); + + let metrics_buffer = String::from_utf8(metrics_buffer).unwrap(); + let response = Self::create_response(&metrics_buffer); + + stream.writable().await?; + stream.write_all(response.as_bytes()).await?; + + stream.flush().await?; + + Ok(()) + } + + pub fn sync( + addr: impl ToSocketAddrs + Send + 'static, + ) -> tokio::task::JoinHandle> { + tokio::spawn(async move { + let listener = TcpListener::bind(addr).await?; + + loop { + let Ok((mut stream, _addr)) = listener.accept().await else { + error!("Error accepting prometheus stream"); + tokio::time::sleep(Duration::from_millis(1)).await; + continue; + }; + + let _ = Self::handle_stream(&mut stream).await; + } + }) + } +} diff --git a/blockstore/tests/blockstore_integration_tests.rs b/blockstore/tests/blockstore_integration_tests.rs index dadde559..9da84a70 100644 --- a/blockstore/tests/blockstore_integration_tests.rs +++ b/blockstore/tests/blockstore_integration_tests.rs @@ -70,7 +70,7 @@ async fn storage_test() { let (slot_notifier, _jh_multiplex_slotstream) = create_grpc_multiplex_processed_slots_subscription(grpc_sources.clone()); - let (blocks_notifier, _jh_multiplex_blockstream) = + let (blocks_notifier, _blockmeta_output_stream, _jh_multiplex_blockstream) = create_grpc_multiplex_blocks_subscription(grpc_sources); let (epoch_cache, _) = EpochCache::bootstrap_epoch(&rpc_client).await.unwrap(); diff --git a/cd/lite-rpc-accounts.toml b/cd/lite-rpc-accounts.toml new file mode 100644 index 00000000..f0cc2a53 --- /dev/null +++ b/cd/lite-rpc-accounts.toml @@ -0,0 +1,22 @@ +app = "solana-lite-rpc-accounts" +kill_signal = "SIGINT" +kill_timeout = 5 + +[build] + dockerfile = "../Dockerfile" + +[env] + PORT_HTTP = "8890" + PORT_WS = "8891" + RUST_LOG = "info" + ENABLE_ADDRESS_LOOKUP_TABLES = "true" + ACCOUNT_FILTERS = 
"[{\"accounts\":[],\"programId\":\"FsJ3A3u2vn5cTVofAjvy6y5kwABJAqYWpe4975bi2epH\",\"filters\":null},{\"accounts\":[],\"programId\":\"3BUZXy9mPcsSCoxJQiBu2xxpMP6HEvFMZbaL5CAWwLUf\",\"filters\":null},{\"accounts\":[],\"programId\":\"4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg\",\"filters\":null},{\"accounts\":[],\"programId\":\"SW1TCH7qEPTdLsDHRgPuMQjbQxKdH2aBStViMFnt64f\",\"filters\":null},{\"accounts\":[],\"programId\":\"SBondMDrcV3K4kxZR1HNVT7osZxAHVHgYXL5Ze1oMUv\",\"filters\":null},{\"accounts\":[\"9BoFW2JxdCDodsa2zfxAZpyT9yiTgSYEcHdNSuA7s5Sf\",\"CYGfrBJB9HgLf9iZyN4aH5HvUAi2htQ4MjPxeXMf4Egn\",\"7UYk5yhrQtFbZV2bLX1gtqN7QdU9xpBMyAk7tFgoTatk\",\"2PRxDHabumHHv6fgcrcyvHsV8ENkWdEph27vhpbSMLn3\",\"CtJ8EkqLmeYyGB8s4jevpeNsvmD4dxVR2krfsDLcvV8Y\",\"3pxTFXBJbTAtHLCgSWjeasngGCi4ohV16F4fDnd4Xh81\",\"GVXRSBjFk6e6J3NbVPXohDJetcTjaeeuykUpbQF8UoMU\",\"2dAsTriwLdgmGt7N6Dkq1iUV6pGhSUUwqqePp4qorzor\",\"FnVC5oSSdnCHfN5W7xbu74HbxXF3Kmy63gUKWdaaZwD7\",\"6ABgrEZk8urs6kJ1JNdC1sspH5zKXRqxy8sg3ZG2cQps\",\"7moA1i5vQUpfDwSpK6Pw9s56ahB7WFGidtbL2ujWrVvm\",\"Bt1hEbY62aMriY1SyQqbeZbm8VmSbQVGBFzSzMuVNWzN\",\"4ivThkX8uRxBpHsdWSqyXYihzKF3zpRGAUCqyuagnLoV\",\"D8UUgr8a3aR3yUeHLu7v8FWK7E8Y5sSU7qrYBXUJXBQ5\",\"7fMKXU6AnatycNu1CAMndLkKmDPtjZaPNZSJSfXR92Ez\",\"JBu1AL4obBcCMqKBBxhpWCNUt136ijcuMZLFvTP7iWdB\",\"AnLf8tVYCM816gmBjiy8n53eXKKEDydT5piYjjQDPgTB\",\"4dusJxxxiYrMTLGYS6cCAyu3gPn2xXLBjS7orMToZHi1\",\"hnkVVuJTRZvX2SawUsecZz2eHJP2oGMdnhdDJa33KSY\",\"AFrYBhb5wKQtxRS9UA9YRS4V3dwFm7SqmS6DHKq6YVgo\",\"Ag7RdWj5t3U9avU4XKAY7rBbGDCNz456ckNmcpW1aHoE\",\"AwpALBTXcaz2t6BayXvQQu7eZ6h7u2UNRCQNmD9ShY7Z\",\"8ihFLu5FimgTQ1Unh4dVyEHUGodJ5gJQCrQf4KUVB9bN\",\"AV67ufGVkHrPKXdeupXE2MXdw3puq7xnkPNrTxGP3suU\",\"3uZCMHY3vnNJspSVk6TvE9qmb4iYVbrEWFQ71uCE5hFR\",\"5wRjzrwWZG3af3FE26ZrRj3s8A3BVNyeJ9Pt9Uf2ogdf\",\"2qHkYmAn7HNtAGw45hQQkRthDDNiyVyVfDJDaw6iSoRm\",\"2FGoL9PNhNGpduRKLsTa4teRaX3vfarXAc1an2KyXxQm\",\"4BA3RcS4zE32WWgp49vvvre2t6nXY1W1kMyKZxeeuUey\",\"Bfz5q3cDywSSjnWb9oXeQZqYzHwqFGp75mm34eYCPNEA\",\"H6ARHf6YXhGYeQfUzQNGk6rDNnLBQKrenN712K4AQJEG\",\"79wm3jjcPr6RaNQ4DGvP5KxG1mNd3gEBsg6FsNVFezK4\",\"Gnt27xtC473ZT2Mw5u8wZ68Z3gULkSTb5DuxJy7eJotD\",\"g6eRCbboSwK4tSWngn773RCMexr1APQr4uA9bGZBYfo\",\"91Sfpm86H7ZgngdGfAiVJTNbg42CXBPiurruf29kinMh\",\"nrYkQQQur7z8rYTST3G9GqATviK5SxTDkrqd21MW6Ue\",\"E4v1BBgoso9s64TQvmyownAVJbhbEPGyzA3qn4n46qj9\",\"EzBoEHzYSx37RULrQCh756kNcA7iLrmGesxqpzSwo4v3\",\"3vxLXJqLqF3JG5TCbYycbKWRBbCJQLxQmBGCkyqEEefL\",\"7yyaeuJ1GGtVBLT2z2xub5ZWYKaNhF28mj1RdV4VDFVk\",\"BeAZ81UvesnJR7VVGNzRQGKFHrnxm77x5ozesC1pTjrY\",\"H5hokc8gcKezGcwbqFbss99QrpA3WxsRfqGYCm6F1EBy\",\"ELrhqYY3WjLRnLwWt3u7sMykNc87EScEAsyCyrDDSAXv\",\"FYghp2wYzq36yqXYd8D3Lu6jpMWETHTtxYDZPXdpppyc\",\"2sn1w3krTLhW4EDbLL5etf8NjwSHMhmK6CJmWYpkMoXL\",\"Fi8vncGpNKbq62gPo56G4toCehWNy77GgqGkTaAF5Lkk\",\"9puoc5B1ioxkKMMx1rs1M5kLWhCDrPawkCnTkk44jjCs\",\"7KbMt281Rjx3E3MU7GSEEBfByhNBxfU1dZbTimGaKCfV\",\"7tV5jsyNUg9j1AARv56b7AirdpLBecibRXLEJtycEgpP\",\"ARjaHVxGCQfTvvKjLd7U7srvk6orthZSE6uqWchCczZc\",\"E5AmUKMFgxjEihVwEQNrNfnri5EexYHSBC4HkicVtfxG\",\"GoXhYTpRF4vs4gx48S7XhbaukVbJXVycXimhGfzWNGLF\",\"BWt1ABFexE3gEKRFGZmMo2DFLgAU8SUJc4NZi1t3jvUP\",\"5BocsfuYVNzu5huEbx3jreueTPzsKaGQMj9Xwb5enMP\",\"7n7TafVxb1j3Zwh2sX3s8k3eXg7UHp5VZnb9uhhCLvy9\",\"EA1eJqandDNrw627mSA1Rrp2xMUvWoJBz2WwQxZYP9YX\",\"EN41nj1uHaTHmJJLPTerwVu2P9r5G8pMiNvfNX5V2PtP\",\"DvfMMNzFTvPofaEzdt3ez4U6ziRJcMCm7LGGKskznThz\",\"HDwpKCNpB9JvcGrZv6TWcXjFvzxxfzq7ci6kQ1Kv8FMY\",\"GgpDLzVuUFLyNzNn6oVVEVxbmkdp2xkjX8BrzaFFt3yA\",\"GFJjJmm7jTDb7WEM4TkYdA9eAEeJGK1t73tcdDNeZLGT\",\"CeQ7wj43PJ28EXU1QVNMPxmwrg955KejYD68bMYWTvAp\",\"27Nuo9hEVSVrrU
yQ83NPnuDR4ZSE8KBb3xiU1XeaBVwL\",\"CcyCcZyvfEkBUngS7KytFPWKWSrpDfmHx81aqAtgS9oC\",\"2JAg3Rm6TmQ3gSYgUCCyZ9bCQKThD9jxHCN6U2ByTPMb\",\"Dj1qPXjnWWMkuPiq4Y51JNvSkCBqU38P541Te2ugqYpD\",\"Ed3EJ3jxXWGgDAkWLXfGojStQ7g1SbYNQGhetMSm6PKb\",\"8JK7S6h5FktYXq4wC75WeyNFWZpa1jMNXNtTNuy26ixy\",\"2mBnnBywAuMwH5FhH27UUFyDGk7J77m5LcKK4VtmwJQi\",\"Fb5BfdB7zk2zfWfqgpRtRQbYSYERASsBjz213FaT461F\",\"2oxZZ3YXaVhbZmtzagGooewBAofyVbBTzayAD9UR1eBh\",\"3BAKsQd3RuhZKES2DGysMhjBdwjZYKYmxRqnSMtZ4KSN\",\"BWMbNAMVkz197EsnhZ3rHCJAB1BYgaQxHaDzjeVvgXdk\",\"Gi8KdURhXWvsRvDFHpqy1gNfnnYuTsWexQvDpP9711id\",\"EkKZwBeKWPvhraYERfUNr2fdh1eazrbTrQXYkRZs24XB\",\"9UPokT1qLN2PVMxGNSYnRnYNgRRJTwr9dzLctFFZQFa2\",\"8CvwxZ9Db6XbLD46NZwwmVDZZRDy7eydFcAGkXKh9axa\",\"5x2sfymw7CcrWx6WvZ5UV7Bg1iZTXbwh1XUhxKqhJqni\",\"DkbVbMhFxswS32xnn1K2UY4aoBugXooBTxdzkWWDWRkH\",\"CmHpRnmd8h6kH8ogwLuihKnGYJvaS2PdXNjBt2JhFT5w\",\"FALKx6CxTcwzpTdihdR7ZuK3Wd2H2aEvCNBS4K8UfPxe\",\"AYhLYoDr6QCtVb5n1M5hsWLG74oB8VEz378brxGTnjjn\",\"8PhnCfgqpgFM7ZJvttGdBVMXHuU4Q23ACxCvWkbs1M71\",\"JCKa72xFYGWBEVJZ7AKZ2ofugWPBfrrouQviaGaohi3R\",\"FsruqicZDGnCnm7dRthjL5eFrTmaNRkkomxhPJQP2kdu\",\"FGZCgVhVGqzfWnmJFP9Hx4BvGvnFApEp1dM2whzXvg1Z\",\"YFzPfYrMTWPZEhhvq5QyHEu5otDrQYVtfxzRbkTHQvd\",\"4hgruY5SXYRHSrJFBjX4DFf39ef2wgYGGasjrUtwS9S1\",\"2KajVpMkF3Z53MgTgo7dDh23av6xWKgKssbtjogdY7Vu\",\"72h8rWaWwfPUL36PAFqyQZU8RT1V3FKG7Nc45aK89xTs\",\"6QNusiQ1g7fKierMQhNeAJxfLXomfcAX3tGRMwxfESsw\",\"74fKpZ1NFfusLacyVzQdMXXawe9Dr1Kz8Yw1cw12QQ3y\",\"BEhRuJZiKwTdVTsGYjbHRh9RmGbKBtT6xo7yPqxLiSSY\",\"3NnxQvDcZXputNMxaxsGvqiKpqgPfSYXpNigZNFcknmD\",\"8QCdRwLp5CX2XYVaKX3GFxsbc8n7M2xEtMXyAa8tL7r3\",\"H6Wvvx5dpt8yGdwsqAsz9WDkT43eQUHwAiafDvbcTQoQ\",\"28dSAygC8Vqzbm5r7f3mPnQ6vKVqXkjzoXD9SVpi75jV\",\"5jWUncPNBMZJ3sTHKmMLszypVkoRK6bfEQMQUHweeQnh\",\"DFizHnakzudcEfz4YKrsxQsRADgUMc6ifaMs3wU9pBSV\",\"4RNVNS8EZWwkYgNeRV4A75oScC3afD7cWV6VLywRcKik\",\"AmFXLH3jbcQNqgJjVuMZCeiaU2HmrW1UwMTWR5wU4ijd\",\"DG5EXd99EfnMFXqVXjciWi5HuXHUKdwkQ2WCncEsCeKW\",\"9zH66LpNcwBausdXLT765dgyLZSiTGUno22orC6Q3AFT\",\"9LezACAkFsv78P7nBJEzi6QeF9h1QF8hGx2LRN7u9Vww\",\"6nh2KwhGF8Tott22smj2E3G1R15iXhBrL7Lx6vKgdPFK\",\"4E17F3BxtNVqzVsirxguuqkpYLtFgCR6NfTpccPh82WE\",\"3FFGnQWo7LH5qHK96yXFxRzGL7wB3BZqJpW25rk6xZkP\",\"B2na8Awyd7cpC59iEU43FagJAPLigr3AP3s38KM982bu\",\"9Lyhks5bQQxb9EyyX55NtgKQzpM4WK7JCmeaWuQ5MoXD\",\"CXMRrGEseppLPmzYJsx5vYwTkaDEag4A9LJvgrAeNpF\",\"G8KnvNg5puzLmxQVeWT2cRHCm1XmurbWGG9B8Bze6mav\",\"HKm7iBQw488qHyXYh5wpqKnMpvbu3TaH4wVWf52i4d8\",\"8xdpZNtxfWY96sKH6LBmDbRYDhMqmujuXRnvr9xDF9mt\",\"4WeAXG1V8QTtt3T9ao6LkQa8m1AuwRcY8YLvVcabiuby\",\"H87FfmHABiZLRGrDsXRZtqq25YpARzaokCzL1vMYGiep\",\"AuqKXU1Nb5XvRxr5A4vRBLnnSJrdujNJV7HWsfj4KBWS\",\"DcM7ufYEveXMfB1HQru1jHh7td6DPxgDDJRL8LR79gMb\",\"27BrDDYtv9NDQCALCNnDqe3BqjYkgiaQwKBbyqCA8p8B\",\"8BnEgHoWFysVcuFFX7QztDmzuH8r5ZFvyP3sYwn1XTh6\",\"6wD9zcNZi2VpvUB8dnEsF242Gf1tn6qNhLF2UZ3w9MwD\",\"Do9Xu1dvgZukExvRLHsnH8cHzjMrhrGxY81ukEudm4XX\",\"9FjM1wHvGg2ZZaB3XyRsYELoQE7iD6uwHXizQUDKRYff\",\"BqApFW7DwXThCDZAbK13nbHksEsv6YJMCdj58sJmRLdy\",\"3wueUtibiTVWJZoL1WhphgDw48r9LLzUriT1a4CnZfGG\",\"51Tk5PRCwvz5L3Z8EGw3HpW9YMLMGwyqRiDbNV4T1GXi\",\"6yBKgj4PZK2sShV2e8rKDc8EArsvScmhLm328iKasBvh\",\"Dq2eptabWaGfFm6QezfX9BzfTuWA58jXhwXMHUnfuUe8\",\"CsB4XjwH4uZRjRXEXRBFJ3hi3mb9jujzRCW7NXg7TRtX\",\"HKEJCi2gVDWGfUB7YufNtJjyZiBgM9KZk9tgRE1r3RsX\",\"3QDSfdXSbUhGFe6K4EZbasfpyUTndAzkkLdX9HGpRbB9\",\"HCaq6dL3DJzKyWEeSCCDy9NdKFRL56Ad7G4bSVVMRchr\",\"CgPg6FRyerP6esjvadjdAJCKboLJzbu2ihWgjquHdAvn\",\"6z1KcPcBnrzebgYjHwVTzu6VgcxY6niLfo3dxF1n2xSx\",\"BbJgE7HZMaDp5NTYvRh5jZSkQPVDTU8ubPFtpogUkEj4\",\"4Ymax5Tmk7LACxDtPPFHB4w89557era7wyxodUBqd4fW
\",\"8nA9AqeGsviExkDZChaviW4mGwmdw6GYQAcUYCrszj42\",\"Hs97TCZeuYiJxooo3U73qEHXg3dKpRL4uYKYRryEK9CF\",\"Dgt13dmzN6cZFgGE3hjfm4VVFE7pxCL2GeTmxbz4fmfA\",\"2BtDHBTCTUxvdur498ZEcMgimasaFrY5GzLv8wS8XgCb\",\"HvRdVui29hRwToAPMEfQJY795EC1XyW2ZoWvuSmgqq7v\",\"5EKbLBd12TVUUSh1WH4jE884S1XaikSga7pjMNA8jnxD\",\"Cop9Lvri1yYEmV8fWuG6i7EAJ2RdPynU66ZrhM8kGH9V\",\"iAHbTAWfvM269DnnzzQgMocMZLV8PnUoNF7g33i95dR\",\"DZjbn4XC8qoHKikZqzmhemykVzmossoayV9ffbsUqxVj\",\"H5uQWZrwRm3EABCzYRmDmti5UnMTUP5ifxsPHrJHpZdC\",\"Cn6Fu2MfaE6sahWwa8HGZQuKuLD6Fd8GJ784Tv7UtdRu\",\"7S2fEFvce5n9hGpjp9jd8JRfuBngcDJfykygeqqzEwmq\",\"5U2Vzi7Lwvyzw7hoHKpAZUD2SPfmsYfydRZrejmquso2\",\"H6rrYK3SUHF2eguZCyJxnSBMJqjXhUtuaki6PHiutvum\",\"EaXdHx7x3mdGA38j5RSmKYSXMzAFzzUXCLNBEDXDn1d5\",\"BBRvF4etMRitpSwFdXSMzPg547Lxnr3G95aQtBhMiWhB\",\"BAPLDQS9wa5BwzEuWeA1CMKAxjASieFzgSWW9nEJdMCc\",\"3PoEWQNTCaoDsBH32Rmp9yqbgFtg4Z4ik13kkRyN2CQf\",\"6NfwhGjfrE4dp7XFut2kYcUQsH4P4AJVSoe9cLVM2z43\",\"FbwncFP5bZjdx8J6yfDDTrCmmMkwieuape1enCvwLG33\",\"CK1X54onkDCqVnqY7hnvhcT7EosnjiLTwPBXAMLxkA2A\",\"kB3BTG6Pz3W7JzKQLHyrAZaFk1LVmeeXRP4ZiCKUaA3\",\"63XwffQkMcNqEacDNhixmBxnydkRE3uigV7VoLNfqh9k\",\"CFxJ8jyFQinvFSL6CNQCLWyPnbeYcCpCWmfPFC43qMkp\",\"21QRUCjxuXC9GAyTGvfYyumWYTqCtewM3ZcCfFDTYi5L\",\"ASUyMMNBpFzpW3zDSPYdDVggKajq1DMKFFPK1JS9hoSR\",\"FyyGTHKJBf1nGHHL44HE91EcGRFg2Y7XazA3SjQpcU3i\",\"CVwFcGuWYH1chdbR988K7ppbwVLErqfdreFCPsg9ttnq\",\"CDm1Uaos4vWPXezgEobUarGJ6ddKCywvFp8XLcNSqzU9\",\"4P4pgJC7omZWRL4qmngUMW3ETTr2yLXhgaoAPpSp8TWG\",\"3rQH87K3UfrDjbjSktHy7EwQHvX4BoRu3Py52D25gKSS\",\"CCepXEQxo8eTqCGtRHXrSnZdhCEQjQeEW3M85AH9skMJ\",\"GqEejRjBRnZTZg417SUmSwpJ4nhTK5E8Ey8JeVPd3cY5\",\"2m7ZLEKtxWF29727DSb5D91erpXPUY1bqhRWRC3wQX7u\",\"G4BZUKuUy1qkjtG3xZN8x4tXoJrjPhuxtU5t2HHgMaZe\",\"CC9VYJprbxacpiS94tPJ1GyBhfvrLQbUiUSVMWvFohNW\",\"6XsUQYAkKSy4mSQfMxYqpF4U7X3JsPDbG4vRQQEvCPb6\",\"6YSbESJWKwtC4x7rBdHbcYSVEejCWo2sZkapKQE8Y289\",\"NiWvcoCvqmUKSf1Avey5KWBXa5oAm6h7LWv4sqyCbBn\",\"AZUaEDdGXiryUbAi56MMrn1Em8t2dSy5mvq5SxX6S1np\",\"AgCBUZ6UMWqPLftTxeAqpQxtrfiCyL2HgRfmmM6QTfCj\",\"7AdBcVGejZy3981JfJ8mDywzMqAL31vBpgnD3PpnTpms\",\"Av8JgExs2LDubedW47Kvw8J47hko45P8TYqiHmtD2Ey6\",\"5aNxjc8upaPiGvcC9YJ48geiRSQLzGEdeL1zJakLn7t3\",\"9h4bddiPyfTyTztuyo7mEcpQz3QrR6cSPt1id6UmWj5H\"],\"programId\":null,\"filters\":null},{\"accounts\":[],\"programId\":\"srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX\",\"filters\":[{\"datasize\":3228},{\"memcmp\":{\"offset\":0,\"data\":{\"bytes\":[115,101,114,117,109,5,0,0,0,0,0,0,0]}}},{\"memcmp\":{\"offset\":45,\"data\":{\"bytes\":[91,23,199,200,106,110,115,159,175,23,81,129,131,99,233,79,144,139,243,112,4,206,109,63,188,241,151,189,210,245,31,28]}}}]}]" + ENABLE_ACCOUNT_ON_DEMAND = "true" + ENABLE_SMART_ACCOUNTS_WARMUP = "true" + PG_ENABLED = "false" + USE_GRPC = "true" + DISABLE_GSO = "true" + + [metrics] + path = "/metrics" + port = 9091 diff --git a/cd/solana-lite-rpc-benchrunner.toml b/cd/solana-lite-rpc-benchrunner.toml new file mode 100644 index 00000000..65adc1e2 --- /dev/null +++ b/cd/solana-lite-rpc-benchrunner.toml @@ -0,0 +1,14 @@ +app = "solana-lite-rpc-benchrunner" +kill_signal = "SIGINT" +kill_timeout = 5 +primary_region = "ams" + +[build] + dockerfile = "../Dockerfile-benchrunner" + +[env] + RUST_LOG = "info" + +[metrics] + path = "/metrics" + port = 9091 diff --git a/cluster-endpoints/Cargo.toml b/cluster-endpoints/Cargo.toml index d07c810e..333b5096 100644 --- a/cluster-endpoints/Cargo.toml +++ b/cluster-endpoints/Cargo.toml @@ -9,7 +9,7 @@ license = "AGPL" [dependencies] #geyser-grpc-connector = { path = "../../geyser-grpc-connector" } 
-geyser-grpc-connector = { git = "https://github.com/blockworks-foundation/geyser-grpc-connector.git", branch = "v1.13.0+solana.1.17.25" } +geyser-grpc-connector = { tag = "v0.10.3+yellowstone.1.12+solana.1.17.15-hacked-windowsize3", git = "https://github.com/blockworks-foundation/geyser-grpc-connector.git" } solana-sdk = { workspace = true } solana-rpc-client-api = { workspace = true } @@ -46,4 +46,5 @@ yellowstone-grpc-client = { workspace = true } yellowstone-grpc-proto = { workspace = true } itertools = {workspace = true} prometheus = { workspace = true } -lazy_static = { workspace = true } \ No newline at end of file +lazy_static = { workspace = true } +tonic-health = { workspace = true } diff --git a/cluster-endpoints/src/endpoint_stremers.rs b/cluster-endpoints/src/endpoint_stremers.rs index aeab980c..97a65a44 100644 --- a/cluster-endpoints/src/endpoint_stremers.rs +++ b/cluster-endpoints/src/endpoint_stremers.rs @@ -1,11 +1,12 @@ use solana_lite_rpc_core::{ structures::account_data::AccountStream, - types::{BlockStream, ClusterInfoStream, SlotStream, VoteAccountStream}, + types::{BlockInfoStream, BlockStream, ClusterInfoStream, SlotStream, VoteAccountStream}, }; /// subscribers to broadcast channels should assume that channels are not getting closed unless the system is shutting down pub struct EndpointStreaming { pub blocks_notifier: BlockStream, + pub blockinfo_notifier: BlockInfoStream, pub slot_notifier: SlotStream, pub vote_account_notifier: VoteAccountStream, pub cluster_info_notifier: ClusterInfoStream, diff --git a/cluster-endpoints/src/grpc/gprc_accounts_streaming.rs b/cluster-endpoints/src/grpc/grpc_accounts_streaming.rs similarity index 84% rename from cluster-endpoints/src/grpc/gprc_accounts_streaming.rs rename to cluster-endpoints/src/grpc/grpc_accounts_streaming.rs index 445d8be5..a52778cf 100644 --- a/cluster-endpoints/src/grpc/gprc_accounts_streaming.rs +++ b/cluster-endpoints/src/grpc/grpc_accounts_streaming.rs @@ -11,13 +11,13 @@ use itertools::Itertools; use solana_lite_rpc_core::{ commitment_utils::Commitment, structures::{ - account_data::{AccountData, AccountNotificationMessage, AccountStream}, + account_data::{AccountData, AccountNotificationMessage}, account_filter::{AccountFilterType, AccountFilters, MemcmpFilterData}, }, AnyhowJoinHandle, }; use solana_sdk::{account::Account, pubkey::Pubkey}; -use tokio::sync::broadcast; +use tokio::sync::Notify; use yellowstone_grpc_proto::geyser::{ subscribe_request_filter_accounts_filter::Filter, subscribe_request_filter_accounts_filter_memcmp::Data, subscribe_update::UpdateOneof, @@ -25,10 +25,13 @@ use yellowstone_grpc_proto::geyser::{ SubscribeRequestFilterAccountsFilterMemcmp, }; +use crate::grpc::grpc_utils::connect_with_timeout_hacked; + pub fn start_account_streaming_tasks( grpc_config: GrpcSourceConfig, accounts_filters: AccountFilters, - account_stream_sx: tokio::sync::mpsc::UnboundedSender, + account_stream_sx: tokio::sync::broadcast::Sender, + has_started: Arc, ) -> AnyhowJoinHandle { tokio::spawn(async move { 'main_loop: loop { @@ -108,12 +111,11 @@ pub fn start_account_streaming_tasks( ping: None, }; - let mut client = yellowstone_grpc_client::GeyserGrpcClient::connect( + let mut client = connect_with_timeout_hacked( grpc_config.grpc_addr.clone(), grpc_config.grpc_x_token.clone(), - None, ) - .unwrap(); + .await?; let account_stream = client.subscribe_once2(program_subscription).await.unwrap(); // each account subscription batch will require individual stream @@ -134,12 +136,11 @@ pub fn 
start_account_streaming_tasks( filters: vec![], }, ); - let mut client = yellowstone_grpc_client::GeyserGrpcClient::connect( + let mut client = connect_with_timeout_hacked( grpc_config.grpc_addr.clone(), grpc_config.grpc_x_token.clone(), - None, ) - .unwrap(); + .await?; let account_request = SubscribeRequest { accounts: accounts_subscription, @@ -159,11 +160,17 @@ pub fn start_account_streaming_tasks( let mut merged_stream = subscriptions.merge(); while let Some(message) = merged_stream.next().await { - let message = message.unwrap(); + let Ok(message) = message else { + // channel broken resubscribe + break; + }; + let Some(update) = message.update_oneof else { continue; }; + has_started.notify_one(); + match update { UpdateOneof::Account(account) => { if let Some(account_data) = account.account { @@ -215,46 +222,50 @@ pub fn start_account_streaming_tasks( pub fn create_grpc_account_streaming( grpc_sources: Vec, accounts_filters: AccountFilters, -) -> (AnyhowJoinHandle, AccountStream) { - let (account_sender, accounts_stream) = broadcast::channel::(1024); - + account_stream_sx: tokio::sync::broadcast::Sender, + notify_abort: Arc, +) -> AnyhowJoinHandle { let jh: AnyhowJoinHandle = tokio::spawn(async move { loop { - let (accounts_sx, mut accounts_rx) = tokio::sync::mpsc::unbounded_channel(); let jhs = grpc_sources .iter() .map(|grpc_config| { start_account_streaming_tasks( grpc_config.clone(), accounts_filters.clone(), - accounts_sx.clone(), + account_stream_sx.clone(), + Arc::new(Notify::new()), ) }) .collect_vec(); - drop(accounts_sx); + let mut rx = account_stream_sx.subscribe(); loop { - match tokio::time::timeout(Duration::from_secs(60), accounts_rx.recv()).await { - Ok(Some(data)) => { - let _ = account_sender.send(data); - } - Ok(None) => { - log::error!("All grpc accounts channels close; restarting subscription"); - break; - } - Err(_elapsed) => { - log::error!("No accounts data for a minute; restarting subscription"); + tokio::select! { + data = tokio::time::timeout(Duration::from_secs(60), rx.recv()) => { + match data{ + Ok(Ok(_)) => { + // do nothing / notification channel is working fine + } + Ok(Err(e)) => { + log::error!("Grpc stream failed by error : {e:?}"); + break; + } + Err(_elapsed) => { + log::error!("No accounts data for a minute; restarting subscription"); + break; + } + } + }, + _ = notify_abort.notified() => { + log::debug!("Account stream aborted"); break; } } } - - for jh in jhs { - // abort previous handles - jh.abort(); - } + jhs.iter().for_each(|x| x.abort()); } }); - (jh, accounts_stream) + jh } diff --git a/cluster-endpoints/src/grpc/grpc_utils.rs b/cluster-endpoints/src/grpc/grpc_utils.rs new file mode 100644 index 00000000..547ae181 --- /dev/null +++ b/cluster-endpoints/src/grpc/grpc_utils.rs @@ -0,0 +1,38 @@ +use bytes::Bytes; +use std::time::Duration; +use tonic::metadata::{errors::InvalidMetadataValue, AsciiMetadataValue}; +use tonic::service::Interceptor; +use tonic::transport::ClientTlsConfig; +use tonic_health::pb::health_client::HealthClient; +use yellowstone_grpc_client::{GeyserGrpcClient, InterceptorXToken}; +use yellowstone_grpc_proto::geyser::geyser_client::GeyserClient; +use yellowstone_grpc_proto::tonic; + +pub async fn connect_with_timeout_hacked( + endpoint: E, + x_token: Option, +) -> anyhow::Result> +where + E: Into, + T: TryInto, +{ + let endpoint = tonic::transport::Endpoint::from_shared(endpoint)? 
+ .buffer_size(Some(65536)) + .initial_connection_window_size(4194304) + .initial_stream_window_size(4194304) + .connect_timeout(Duration::from_secs(10)) + .timeout(Duration::from_secs(10)) + // .http2_adaptive_window() + .tls_config(ClientTlsConfig::new())?; + + let x_token: Option = x_token.map(|v| v.try_into()).transpose()?; + let interceptor = InterceptorXToken { x_token }; + + let channel = endpoint.connect_lazy(); + let client = GeyserGrpcClient::new( + HealthClient::with_interceptor(channel.clone(), interceptor.clone()), + GeyserClient::with_interceptor(channel, interceptor) + .max_decoding_message_size(GeyserGrpcClient::max_decoding_message_size()), + ); + Ok(client) +} diff --git a/cluster-endpoints/src/grpc/mod.rs b/cluster-endpoints/src/grpc/mod.rs index 5809b7e9..448512c8 100644 --- a/cluster-endpoints/src/grpc/mod.rs +++ b/cluster-endpoints/src/grpc/mod.rs @@ -1 +1,2 @@ -pub mod gprc_accounts_streaming; +pub mod grpc_accounts_streaming; +pub mod grpc_utils; diff --git a/cluster-endpoints/src/grpc_multiplex.rs b/cluster-endpoints/src/grpc_multiplex.rs index b64444f7..f72a6ea9 100644 --- a/cluster-endpoints/src/grpc_multiplex.rs +++ b/cluster-endpoints/src/grpc_multiplex.rs @@ -1,6 +1,5 @@ use anyhow::{bail, Context}; use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc; -use geyser_grpc_connector::grpcmultiplex_fastestwins::FromYellowstoneExtractor; use geyser_grpc_connector::{GeyserFilter, GrpcSourceConfig, Message}; use log::{debug, info, trace, warn}; use solana_lite_rpc_core::structures::produced_block::ProducedBlock; @@ -10,6 +9,7 @@ use solana_sdk::clock::Slot; use solana_sdk::commitment_config::CommitmentConfig; use solana_lite_rpc_core::solana_utils::hash_from_str; +use solana_lite_rpc_core::structures::block_info::BlockInfo; use std::collections::{BTreeSet, HashMap, HashSet}; use std::time::Duration; use tokio::sync::broadcast::Receiver; @@ -109,9 +109,9 @@ fn create_grpc_multiplex_processed_block_task( } // backpressure: the mpsc sender will block grpc stream until capacity is available -fn create_grpc_multiplex_block_meta_task( +fn create_grpc_multiplex_block_info_task( grpc_sources: &Vec, - block_meta_sender: tokio::sync::mpsc::Sender, + block_info_sender: tokio::sync::mpsc::Sender, commitment_config: CommitmentConfig, ) -> Vec { let (autoconnect_tx, mut blocks_rx) = tokio::sync::mpsc::channel(10); @@ -134,14 +134,24 @@ fn create_grpc_multiplex_block_meta_task( let proposed_slot = block_meta.slot; if proposed_slot > tip { tip = proposed_slot; - let block_meta = BlockMeta { + let block_meta = BlockInfo { slot: proposed_slot, + block_height: block_meta + .block_height + .expect("block_height from geyser block meta") + .block_height, blockhash: hash_from_str(&block_meta.blockhash) .expect("valid blockhash"), + commitment_config, + block_time: block_meta + .block_time + .expect("block_time from geyser block meta") + .timestamp + as u64, }; let send_started_at = Instant::now(); - let send_result = block_meta_sender + let send_result = block_info_sender .send(block_meta) .await .context("Send block to channel"); @@ -188,7 +198,11 @@ fn create_grpc_multiplex_block_meta_task( /// the channel must never be closed pub fn create_grpc_multiplex_blocks_subscription( grpc_sources: Vec, -) -> (Receiver, AnyhowJoinHandle) { +) -> ( + Receiver, + Receiver, + AnyhowJoinHandle, +) { info!("Setup grpc multiplexed blocks connection..."); if grpc_sources.is_empty() { info!("- no grpc connection configured"); @@ -198,9 +212,13 @@ 
pub fn create_grpc_multiplex_blocks_subscription( } // return value is the broadcast receiver - // must NEVER be closed form inside this method + // must NEVER be closed from inside this method let (producedblock_sender, blocks_output_stream) = tokio::sync::broadcast::channel::(32); + // provide information about finalized blocks as quickly as possible + // note that produced block stream might most probably lag behind + let (blockinfo_sender, blockinfo_output_stream) = + tokio::sync::broadcast::channel::(32); let mut reconnect_attempts = 0; @@ -210,10 +228,12 @@ pub fn create_grpc_multiplex_blocks_subscription( // channels must NEVER GET CLOSED (unless full restart of multiplexer) let (processed_block_sender, mut processed_block_reciever) = tokio::sync::mpsc::channel::(10); // experiemental - let (block_meta_sender_confirmed, mut block_meta_reciever_confirmed) = - tokio::sync::mpsc::channel::(500); - let (block_meta_sender_finalized, mut block_meta_reciever_finalized) = - tokio::sync::mpsc::channel::(500); + let (block_info_sender_processed, mut block_info_reciever_processed) = + tokio::sync::mpsc::channel::(500); + let (block_info_sender_confirmed, mut block_info_reciever_confirmed) = + tokio::sync::mpsc::channel::(500); + let (block_info_sender_finalized, mut block_info_reciever_finalized) = + tokio::sync::mpsc::channel::(500); let processed_block_sender = processed_block_sender.clone(); reconnect_attempts += 1; @@ -234,15 +254,22 @@ pub fn create_grpc_multiplex_blocks_subscription( task_list.extend(processed_blocks_tasks); // TODO apply same pattern as in create_grpc_multiplex_processed_block_task - let jh_meta_task_confirmed = create_grpc_multiplex_block_meta_task( + + let jh_meta_task_processed = create_grpc_multiplex_block_info_task( + &grpc_sources, + block_info_sender_processed.clone(), + CommitmentConfig::processed(), + ); + task_list.extend(jh_meta_task_processed); + let jh_meta_task_confirmed = create_grpc_multiplex_block_info_task( &grpc_sources, - block_meta_sender_confirmed.clone(), + block_info_sender_confirmed.clone(), CommitmentConfig::confirmed(), ); task_list.extend(jh_meta_task_confirmed); - let jh_meta_task_finalized = create_grpc_multiplex_block_meta_task( + let jh_meta_task_finalized = create_grpc_multiplex_block_info_task( &grpc_sources, - block_meta_sender_finalized.clone(), + block_info_sender_finalized.clone(), CommitmentConfig::finalized(), ); task_list.extend(jh_meta_task_finalized); @@ -259,15 +286,16 @@ pub fn create_grpc_multiplex_blocks_subscription( let mut cleanup_without_confirmed_recv_blocks_meta: u8 = 0; let mut cleanup_without_finalized_recv_blocks_meta: u8 = 0; let mut confirmed_block_not_yet_processed = HashSet::::new(); + let mut finalized_block_not_yet_processed = HashSet::::new(); // start logging errors when we recieve first finalized block let mut startup_completed = false; const MAX_ALLOWED_CLEANUP_WITHOUT_RECV: u8 = 12; // 12*5 = 60s without recving data 'recv_loop: loop { - debug!("processed_block_sender: {}, block_meta_sender_confirmed: {}, block_meta_sender_finalized: {}", + debug!("channel capacities: processed_block_sender={}, block_info_sender_confirmed={}, block_info_sender_finalized={}", processed_block_sender.capacity(), - block_meta_sender_confirmed.capacity(), - block_meta_sender_finalized.capacity() + block_info_sender_confirmed.capacity(), + block_info_sender_finalized.capacity() ); tokio::select! 
{ processed_block = processed_block_reciever.recv() => { @@ -276,6 +304,11 @@ pub fn create_grpc_multiplex_blocks_subscription( let processed_block = processed_block.expect("processed block from stream"); trace!("got processed block {} with blockhash {}", processed_block.slot, processed_block.blockhash.clone()); + + if processed_block.commitment_config.is_finalized() { + last_finalized_slot = last_finalized_slot.max(processed_block.slot); + } + if let Err(e) = producedblock_sender.send(processed_block.clone()) { warn!("produced block channel has no receivers {e:?}"); } @@ -284,15 +317,36 @@ pub fn create_grpc_multiplex_blocks_subscription( warn!("produced block channel has no receivers while trying to send confirmed block {e:?}"); } } + if finalized_block_not_yet_processed.remove(&processed_block.blockhash) { + if let Err(e) = producedblock_sender.send(processed_block.to_finalized_block()) { + warn!("produced block channel has no receivers while trying to send confirmed block {e:?}"); + } + } recent_processed_blocks.insert(processed_block.blockhash, processed_block); + + }, + blockinfo_processed = block_info_reciever_processed.recv() => { + let blockinfo_processed = blockinfo_processed.expect("processed block info from stream"); + let blockhash = blockinfo_processed.blockhash; + trace!("got processed blockinfo {} with blockhash {}", + blockinfo_processed.slot, blockhash); + if let Err(e) = blockinfo_sender.send(blockinfo_processed) { + warn!("Processed blockinfo channel has no receivers {e:?}"); + } }, - meta_confirmed = block_meta_reciever_confirmed.recv() => { + blockinfo_confirmed = block_info_reciever_confirmed.recv() => { cleanup_without_confirmed_recv_blocks_meta = 0; - let meta_confirmed = meta_confirmed.expect("confirmed block meta from stream"); - let blockhash = meta_confirmed.blockhash; + let blockinfo_confirmed = blockinfo_confirmed.expect("confirmed block info from stream"); + let blockhash = blockinfo_confirmed.blockhash; + trace!("got confirmed blockinfo {} with blockhash {}", + blockinfo_confirmed.slot, blockhash); + if let Err(e) = blockinfo_sender.send(blockinfo_confirmed) { + warn!("Confirmed blockinfo channel has no receivers {e:?}"); + } + if let Some(cached_processed_block) = recent_processed_blocks.get(&blockhash) { let confirmed_block = cached_processed_block.to_confirmed_block(); - debug!("got confirmed blockmeta {} with blockhash {}", + debug!("got confirmed blockinfo {} with blockhash {}", confirmed_block.slot, confirmed_block.blockhash.clone()); if let Err(e) = producedblock_sender.send(confirmed_block) { warn!("confirmed block channel has no receivers {e:?}"); @@ -303,23 +357,30 @@ pub fn create_grpc_multiplex_blocks_subscription( confirmed_block_not_yet_processed.len(), recent_processed_blocks.len()); } }, - meta_finalized = block_meta_reciever_finalized.recv() => { + blockinfo_finalized = block_info_reciever_finalized.recv() => { cleanup_without_finalized_recv_blocks_meta = 0; - let meta_finalized = meta_finalized.expect("finalized block meta from stream"); - // let _span = debug_span!("sequence_block_meta_finalized", ?meta_finalized.slot).entered(); - let blockhash = meta_finalized.blockhash; + let blockinfo_finalized = blockinfo_finalized.expect("finalized block info from stream"); + last_finalized_slot = last_finalized_slot.max(blockinfo_finalized.slot); + + let blockhash = blockinfo_finalized.blockhash; + trace!("got finalized blockinfo {} with blockhash {}", + blockinfo_finalized.slot, blockhash); + if let Err(e) = 
blockinfo_sender.send(blockinfo_finalized) { + warn!("Finalized blockinfo channel has no receivers {e:?}"); + } + if let Some(cached_processed_block) = recent_processed_blocks.remove(&blockhash) { let finalized_block = cached_processed_block.to_finalized_block(); - last_finalized_slot = finalized_block.slot; startup_completed = true; - debug!("got finalized blockmeta {} with blockhash {}", + debug!("got finalized blockinfo {} with blockhash {}", finalized_block.slot, finalized_block.blockhash.clone()); if let Err(e) = producedblock_sender.send(finalized_block) { warn!("Finalized block channel has no receivers {e:?}"); } } else if startup_completed { // this warning is ok for first few blocks when we start lrpc - log::warn!("finalized block meta received for blockhash {} which was never seen or already emitted", blockhash); + log::warn!("finalized blockinfo received for blockhash {} which was never seen or already emitted", blockhash); + finalized_block_not_yet_processed.insert(blockhash); } }, _ = cleanup_tick.tick() => { @@ -327,10 +388,10 @@ pub fn create_grpc_multiplex_blocks_subscription( if cleanup_without_recv_full_blocks > MAX_ALLOWED_CLEANUP_WITHOUT_RECV || cleanup_without_confirmed_recv_blocks_meta > MAX_ALLOWED_CLEANUP_WITHOUT_RECV || cleanup_without_finalized_recv_blocks_meta > MAX_ALLOWED_CLEANUP_WITHOUT_RECV { - log::error!("block or block meta geyser stream stopped - restarting multiplexer ({}-{}-{})", + log::error!("block or block info geyser stream stopped - restarting multiplexer ({}-{}-{})", cleanup_without_recv_full_blocks, cleanup_without_confirmed_recv_blocks_meta, cleanup_without_finalized_recv_blocks_meta,); // throttle a bit - sleep(Duration::from_millis(1500)).await; + sleep(Duration::from_millis(200)).await; break 'recv_loop; } cleanup_without_recv_full_blocks += 1; @@ -351,7 +412,11 @@ pub fn create_grpc_multiplex_blocks_subscription( } // -- END reconnect loop }); - (blocks_output_stream, jh_block_emitter_task) + ( + blocks_output_stream, + blockinfo_output_stream, + jh_block_emitter_task, + ) } pub fn create_grpc_multiplex_processed_slots_subscription( @@ -436,30 +501,6 @@ pub fn create_grpc_multiplex_processed_slots_subscription( (multiplexed_messages_rx, jh_multiplex_task) } -#[allow(dead_code)] -struct BlockMeta { - pub slot: Slot, - pub blockhash: solana_sdk::hash::Hash, -} - -struct BlockMetaExtractor(CommitmentConfig); - -impl FromYellowstoneExtractor for BlockMetaExtractor { - type Target = BlockMeta; - fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(u64, BlockMeta)> { - match update.update_oneof { - Some(UpdateOneof::BlockMeta(block_meta)) => Some(( - block_meta.slot, - BlockMeta { - slot: block_meta.slot, - blockhash: hash_from_str(&block_meta.blockhash).unwrap(), - }, - )), - _ => None, - } - } -} - fn map_slot_from_yellowstone_update(update: SubscribeUpdate) -> Option { match update.update_oneof { Some(UpdateOneof::Slot(update_slot_message)) => Some(update_slot_message.slot), diff --git a/cluster-endpoints/src/grpc_subscription.rs b/cluster-endpoints/src/grpc_subscription.rs index 20c852f1..c1a5f50d 100644 --- a/cluster-endpoints/src/grpc_subscription.rs +++ b/cluster-endpoints/src/grpc_subscription.rs @@ -1,12 +1,16 @@ use crate::endpoint_stremers::EndpointStreaming; -use crate::grpc::gprc_accounts_streaming::create_grpc_account_streaming; +use crate::grpc::grpc_accounts_streaming::create_grpc_account_streaming; +use crate::grpc::grpc_utils::connect_with_timeout_hacked; use crate::grpc_multiplex::{ 
create_grpc_multiplex_blocks_subscription, create_grpc_multiplex_processed_slots_subscription, }; +use anyhow::Context; +use futures::StreamExt; use geyser_grpc_connector::GrpcSourceConfig; use itertools::Itertools; use log::trace; use solana_client::nonblocking::rpc_client::RpcClient; +use solana_lite_rpc_core::structures::account_data::AccountNotificationMessage; use solana_lite_rpc_core::structures::account_filter::AccountFilters; use solana_lite_rpc_core::{ structures::produced_block::{ProducedBlock, TransactionInfo}, @@ -30,8 +34,15 @@ use solana_sdk::{ }; use solana_transaction_status::{Reward, RewardType}; use std::cell::OnceCell; +use std::collections::HashMap; use std::sync::Arc; +use tokio::sync::Notify; use tracing::trace_span; +use yellowstone_grpc_client::GeyserGrpcClient; +use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; +use yellowstone_grpc_proto::geyser::{ + CommitmentLevel, SubscribeRequestFilterBlocks, SubscribeRequestFilterSlots, SubscribeUpdateSlot, +}; use crate::rpc_polling::vote_accounts_and_cluster_info_polling::{ poll_cluster_info, poll_vote_accounts, @@ -259,6 +270,138 @@ fn map_compute_budget_instructions(message: &VersionedMessage) -> (Option, (cu_requested, prioritization_fees) } +// not called +pub fn create_block_processing_task( + grpc_addr: String, + grpc_x_token: Option, + block_sx: async_channel::Sender, + commitment_level: CommitmentLevel, +) -> AnyhowJoinHandle { + tokio::spawn(async move { + loop { + let mut blocks_subs = HashMap::new(); + blocks_subs.insert( + "block_client".to_string(), + SubscribeRequestFilterBlocks { + account_include: Default::default(), + include_transactions: Some(true), + include_accounts: Some(false), + include_entries: Some(false), + }, + ); + + // connect to grpc + let mut client = + connect_with_timeout_hacked(grpc_addr.clone(), grpc_x_token.clone()).await?; + let mut stream = client + .subscribe_once( + HashMap::new(), + Default::default(), + HashMap::new(), + Default::default(), + blocks_subs, + Default::default(), + Some(commitment_level), + Default::default(), + None, + ) + .await?; + + while let Some(message) = stream.next().await { + let message = message?; + + let Some(update) = message.update_oneof else { + continue; + }; + + match update { + UpdateOneof::Block(block) => { + log::trace!( + "received block, hash: {} slot: {}", + block.blockhash, + block.slot + ); + block_sx + .send(block) + .await + .context("Problem sending on block channel")?; + } + UpdateOneof::Ping(_) => { + log::trace!("GRPC Ping"); + } + _ => { + log::trace!("unknown GRPC notification"); + } + }; + } + log::error!("Grpc block subscription broken (resubscribing)"); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + }) +} + +// not used +pub fn create_slot_stream_task( + grpc_addr: String, + grpc_x_token: Option, + slot_sx: async_channel::Sender, + commitment_level: CommitmentLevel, +) -> AnyhowJoinHandle { + tokio::spawn(async move { + loop { + let mut slots = HashMap::new(); + slots.insert( + "client_slot".to_string(), + SubscribeRequestFilterSlots { + filter_by_commitment: Some(true), + }, + ); + + // connect to grpc + let mut client = + GeyserGrpcClient::connect(grpc_addr.clone(), grpc_x_token.clone(), None)?; + let mut stream = client + .subscribe_once( + slots, + Default::default(), + HashMap::new(), + Default::default(), + HashMap::new(), + Default::default(), + Some(commitment_level), + Default::default(), + None, + ) + .await?; + + while let Some(message) = stream.next().await { + let message = 
message?; + + let Some(update) = message.update_oneof else { + continue; + }; + + match update { + UpdateOneof::Slot(slot) => { + slot_sx + .send(slot) + .await + .context("Problem sending on block channel")?; + } + UpdateOneof::Ping(_) => { + log::trace!("GRPC Ping"); + } + _ => { + log::trace!("unknown GRPC notification"); + } + }; + } + log::error!("Grpc block subscription broken (resubscribing)"); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + }) +} + pub fn create_grpc_subscription( rpc_client: Arc, grpc_sources: Vec, @@ -271,22 +414,28 @@ pub fn create_grpc_subscription( let (slot_multiplex_channel, jh_multiplex_slotstream) = create_grpc_multiplex_processed_slots_subscription(grpc_sources.clone()); - let (block_multiplex_channel, jh_multiplex_blockstream) = + let (block_multiplex_channel, blockmeta_channel, jh_multiplex_blockstream) = create_grpc_multiplex_blocks_subscription(grpc_sources.clone()); let cluster_info_polling = poll_cluster_info(rpc_client.clone(), cluster_info_sx); let vote_accounts_polling = poll_vote_accounts(rpc_client.clone(), va_sx); - // accounts if !accounts_filter.is_empty() { - let (account_jh, processed_account_stream) = - create_grpc_account_streaming(grpc_sources, accounts_filter); + let (account_sender, accounts_stream) = + tokio::sync::broadcast::channel::(1024); + let account_jh = create_grpc_account_streaming( + grpc_sources, + accounts_filter, + account_sender, + Arc::new(Notify::new()), + ); let streamers = EndpointStreaming { blocks_notifier: block_multiplex_channel, + blockinfo_notifier: blockmeta_channel, slot_notifier: slot_multiplex_channel, cluster_info_notifier, vote_account_notifier, - processed_account_stream: Some(processed_account_stream), + processed_account_stream: Some(accounts_stream), }; let endpoint_tasks = vec![ @@ -300,6 +449,7 @@ pub fn create_grpc_subscription( } else { let streamers = EndpointStreaming { blocks_notifier: block_multiplex_channel, + blockinfo_notifier: blockmeta_channel, slot_notifier: slot_multiplex_channel, cluster_info_notifier, vote_account_notifier, diff --git a/cluster-endpoints/src/json_rpc_subscription.rs b/cluster-endpoints/src/json_rpc_subscription.rs index ba32c475..8c0a4eef 100644 --- a/cluster-endpoints/src/json_rpc_subscription.rs +++ b/cluster-endpoints/src/json_rpc_subscription.rs @@ -16,6 +16,7 @@ pub fn create_json_rpc_polling_subscription( ) -> anyhow::Result<(EndpointStreaming, Vec)> { let (slot_sx, slot_notifier) = tokio::sync::broadcast::channel(16); let (block_sx, blocks_notifier) = tokio::sync::broadcast::channel(16); + let (blockinfo_sx, blockinfo_notifier) = tokio::sync::broadcast::channel(16); let (cluster_info_sx, cluster_info_notifier) = tokio::sync::broadcast::channel(16); let (va_sx, vote_account_notifier) = tokio::sync::broadcast::channel(16); // does not support accounts support with rpc polling @@ -26,6 +27,7 @@ pub fn create_json_rpc_polling_subscription( let mut block_polling_tasks = poll_block( rpc_client.clone(), block_sx, + blockinfo_sx, slot_notifier.resubscribe(), num_parallel_tasks, ); @@ -39,6 +41,7 @@ pub fn create_json_rpc_polling_subscription( let streamers = EndpointStreaming { blocks_notifier, + blockinfo_notifier, slot_notifier, cluster_info_notifier, vote_account_notifier, diff --git a/cluster-endpoints/src/rpc_polling/poll_blocks.rs b/cluster-endpoints/src/rpc_polling/poll_blocks.rs index 65979e47..e0238226 100644 --- a/cluster-endpoints/src/rpc_polling/poll_blocks.rs +++ b/cluster-endpoints/src/rpc_polling/poll_blocks.rs @@ -1,6 +1,7 @@ use 
anyhow::{bail, Context}; use solana_client::nonblocking::rpc_client::RpcClient; use solana_lite_rpc_core::solana_utils::hash_from_str; +use solana_lite_rpc_core::structures::block_info::BlockInfo; use solana_lite_rpc_core::structures::produced_block::{ProducedBlockInner, TransactionInfo}; use solana_lite_rpc_core::{ structures::{ @@ -54,6 +55,7 @@ pub async fn process_block( pub fn poll_block( rpc_client: Arc, block_notification_sender: Sender, + blockinfo_notification_sender: Sender, slot_notification: Receiver, num_parallel_tasks: usize, ) -> Vec { @@ -66,6 +68,7 @@ pub fn poll_block( for _i in 0..num_parallel_tasks { let block_notification_sender = block_notification_sender.clone(); + let blockinfo_notification_sender = blockinfo_notification_sender.clone(); let rpc_client = rpc_client.clone(); let block_schedule_queue_rx = block_schedule_queue_rx.clone(); let slot_retry_queue_sx = slot_retry_queue_sx.clone(); @@ -79,9 +82,13 @@ pub fn poll_block( process_block(rpc_client.as_ref(), slot, commitment_config).await; match processed_block { Some(processed_block) => { + let block_info = map_block_info(&processed_block); block_notification_sender .send(processed_block) .context("Processed block should be sent")?; + blockinfo_notification_sender + .send(block_info) + .context("Processed block info should be sent")?; // schedule to get finalized commitment if commitment_config.commitment != CommitmentLevel::Finalized { let retry_at = tokio::time::Instant::now() @@ -332,6 +339,16 @@ pub fn from_ui_block( ProducedBlock::new(inner, commitment_config) } +fn map_block_info(produced_block: &ProducedBlock) -> BlockInfo { + BlockInfo { + slot: produced_block.slot, + block_height: produced_block.block_height, + blockhash: produced_block.blockhash, + commitment_config: produced_block.commitment_config, + block_time: produced_block.block_time, + } +} + #[inline] fn calc_prioritization_fees(units: u32, additional_fee: u32) -> u64 { (units as u64 * 1000) / additional_fee as u64 diff --git a/core/src/encoding.rs b/core/src/encoding.rs index c4138ec9..60dbd6b3 100644 --- a/core/src/encoding.rs +++ b/core/src/encoding.rs @@ -1,6 +1,8 @@ use base64::Engine; use serde::{Deserialize, Serialize}; +// TODO moved to util + #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum BinaryEncoding { diff --git a/core/src/solana_utils.rs b/core/src/solana_utils.rs index d54c7c71..9653f30b 100644 --- a/core/src/solana_utils.rs +++ b/core/src/solana_utils.rs @@ -40,7 +40,7 @@ pub async fn get_current_confirmed_slot(data_cache: &DataCache) -> u64 { let commitment = CommitmentConfig::confirmed(); let BlockInformation { slot, .. 
} = data_cache .block_information_store - .get_latest_block(commitment) + .get_latest_block_information(commitment) .await; slot } diff --git a/core/src/stores/block_information_store.rs b/core/src/stores/block_information_store.rs index 67c9c3cb..5ab7c547 100644 --- a/core/src/stores/block_information_store.rs +++ b/core/src/stores/block_information_store.rs @@ -7,6 +7,7 @@ use std::sync::atomic::AtomicU64; use std::sync::Arc; use tokio::sync::RwLock; +use crate::structures::block_info::BlockInfo; use crate::structures::produced_block::ProducedBlock; use solana_sdk::hash::Hash; @@ -33,6 +34,17 @@ impl BlockInformation { block_time: block.block_time, } } + pub fn from_block_info(block_info: &BlockInfo) -> Self { + BlockInformation { + slot: block_info.slot, + block_height: block_info.block_height, + last_valid_blockheight: block_info.block_height + MAX_RECENT_BLOCKHASHES as u64, + cleanup_slot: block_info.block_height + 1000, + blockhash: block_info.blockhash, + commitment_config: block_info.commitment_config, + block_time: block_info.block_time, + } + } } /// - Block Information Store @@ -89,7 +101,7 @@ impl BlockInformationStore { .blockhash } - pub async fn get_latest_block_info( + pub async fn get_latest_block_information( &self, commitment_config: CommitmentConfig, ) -> BlockInformation { @@ -99,13 +111,6 @@ impl BlockInformationStore { .clone() } - pub async fn get_latest_block(&self, commitment_config: CommitmentConfig) -> BlockInformation { - self.get_latest_block_arc(commitment_config) - .read() - .await - .clone() - } - pub async fn add_block(&self, block_info: BlockInformation) -> bool { // save slot copy to avoid borrow issues let slot = block_info.slot; @@ -121,10 +126,18 @@ impl BlockInformationStore { std::sync::atomic::Ordering::Relaxed, ); } - // check if the block has already been added with higher commitment level - match self.blocks.get_mut(&block_info.blockhash) { - Some(mut prev_block_info) => { - let should_update = match prev_block_info.commitment_config.commitment { + + // update latest block + { + let latest_block = self.get_latest_block_arc(commitment_config); + if slot > latest_block.read().await.slot { + *latest_block.write().await = block_info.clone(); + } + } + + match self.blocks.entry(block_info.blockhash) { + dashmap::mapref::entry::Entry::Occupied(entry) => { + let should_update = match entry.get().commitment_config.commitment { CommitmentLevel::Finalized => false, // should never update blocks of finalized commitment CommitmentLevel::Confirmed => { commitment_config == CommitmentConfig::finalized() @@ -134,27 +147,21 @@ impl BlockInformationStore { || commitment_config == CommitmentConfig::finalized() } }; - if !should_update { - return false; + if should_update { + entry.replace_entry(block_info); } - *prev_block_info = block_info.clone(); + should_update } - None => { - self.blocks.insert(block_info.blockhash, block_info.clone()); + dashmap::mapref::entry::Entry::Vacant(entry) => { + entry.insert(block_info); + true } } - - // update latest block - let latest_block = self.get_latest_block_arc(commitment_config); - if slot > latest_block.read().await.slot { - *latest_block.write().await = block_info; - } - true } pub async fn clean(&self) { let finalized_block_information = self - .get_latest_block_info(CommitmentConfig::finalized()) + .get_latest_block_information(CommitmentConfig::finalized()) .await; let before_length = self.blocks.len(); self.blocks @@ -175,7 +182,7 @@ impl BlockInformationStore { blockhash: &Hash, commitment_config: 
CommitmentConfig, ) -> (bool, Slot) { - let latest_block = self.get_latest_block(commitment_config).await; + let latest_block = self.get_latest_block_information(commitment_config).await; match self.blocks.get(blockhash) { Some(block_information) => ( latest_block.block_height <= block_information.last_valid_blockheight, diff --git a/core/src/stores/data_cache.rs b/core/src/stores/data_cache.rs index 0e24f6ed..da6a76f5 100644 --- a/core/src/stores/data_cache.rs +++ b/core/src/stores/data_cache.rs @@ -45,7 +45,7 @@ impl DataCache { pub async fn clean(&self, ttl_duration: std::time::Duration) { let block_info = self .block_information_store - .get_latest_block_info(CommitmentConfig::finalized()) + .get_latest_block_information(CommitmentConfig::finalized()) .await; self.block_information_store.clean().await; self.txs.clean(block_info.block_height); @@ -67,7 +67,7 @@ impl DataCache { pub async fn get_current_epoch(&self, commitment: CommitmentConfig) -> Epoch { let BlockInformation { slot, .. } = self .block_information_store - .get_latest_block(commitment) + .get_latest_block_information(commitment) .await; self.epoch_data.get_epoch_at_slot(slot) } diff --git a/core/src/structures/block_info.rs b/core/src/structures/block_info.rs new file mode 100644 index 00000000..8f112d87 --- /dev/null +++ b/core/src/structures/block_info.rs @@ -0,0 +1,11 @@ +use solana_sdk::commitment_config::CommitmentConfig; +use solana_sdk::hash::Hash; + +#[derive(Clone, Debug)] +pub struct BlockInfo { + pub slot: u64, + pub block_height: u64, + pub blockhash: Hash, + pub commitment_config: CommitmentConfig, + pub block_time: u64, +} diff --git a/core/src/structures/leaderschedule.rs b/core/src/structures/leaderschedule.rs index 88aef66a..96ad5c8a 100644 --- a/core/src/structures/leaderschedule.rs +++ b/core/src/structures/leaderschedule.rs @@ -60,7 +60,7 @@ impl CalculatedSchedule { None => { let BlockInformation { slot, .. 
} = data_cache .block_information_store - .get_latest_block(commitment) + .get_latest_block_information(commitment) .await; slot } diff --git a/core/src/structures/mod.rs b/core/src/structures/mod.rs index 41c1b30d..889d7c62 100644 --- a/core/src/structures/mod.rs +++ b/core/src/structures/mod.rs @@ -2,6 +2,7 @@ pub mod account_data; pub mod account_filter; +pub mod block_info; pub mod epoch; pub mod identity_stakes; pub mod leader_data; diff --git a/core/src/structures/prioritization_fee_heap.rs b/core/src/structures/prioritization_fee_heap.rs index 22a510c8..6bd26a8a 100644 --- a/core/src/structures/prioritization_fee_heap.rs +++ b/core/src/structures/prioritization_fee_heap.rs @@ -112,6 +112,14 @@ impl PrioritizationFeesHeap { pub async fn size(&self) -> usize { self.map.lock().await.signatures.len() } + + pub async fn clear(&self) -> usize { + let mut lk = self.map.lock().await; + lk.map.clear(); + let size = lk.signatures.len(); + lk.signatures.clear(); + size + } } #[cfg(test)] @@ -189,8 +197,8 @@ mod tests { let mut height = 0; while instant.elapsed() < Duration::from_secs(45) { - let burst_count = rand::random::() % 1024 + 1; - for _ in 0..burst_count { + let burst_count = rand::random::() % 128 + 1; + for _c in 0..burst_count { let prioritization_fee = rand::random::() % 100000; let info = SentTransactionInfo { signature: Signature::new_unique(), diff --git a/core/src/types.rs b/core/src/types.rs index 96ba3cee..733925d1 100644 --- a/core/src/types.rs +++ b/core/src/types.rs @@ -3,13 +3,22 @@ use std::sync::Arc; use solana_rpc_client_api::response::{RpcContactInfo, RpcVoteAccountStatus}; use tokio::sync::broadcast::Receiver; +use crate::structures::block_info::BlockInfo; use crate::{ structures::{produced_block::ProducedBlock, slot_notification::SlotNotification}, traits::subscription_sink::SubscriptionSink, }; +// full blocks, commitment level: processed, confirmed, finalized +// note: there is no guarantee about the order +// note: there is no guarantee about the order wrt commitment level +// note: there is no guarantee about the order wrt block vs block meta pub type BlockStream = Receiver; +// block info (slot, blockhash, etc), commitment level: processed, confirmed, finalized +// note: there is no guarantee about the order wrt commitment level +pub type BlockInfoStream = Receiver; pub type SlotStream = Receiver; + pub type VoteAccountStream = Receiver; pub type ClusterInfoStream = Receiver>; pub type SubscptionHanderSink = Arc; diff --git a/lite-rpc/Cargo.toml b/lite-rpc/Cargo.toml index 637473d3..0ad19bb2 100644 --- a/lite-rpc/Cargo.toml +++ b/lite-rpc/Cargo.toml @@ -50,6 +50,7 @@ cap = { version = "0.1.2", features = ["stats"] } tower = "0.4.13" hyper = { version = "0.14", features = ["server", "http1", "http2"] } tower-http = { version = "0.4.0", features = ["full"] } +jemallocator = { workspace = true } solana-lite-rpc-core = { workspace = true } solana-lite-rpc-util = { workspace = true } diff --git a/lite-rpc/src/bridge.rs b/lite-rpc/src/bridge.rs index d1f2bfb0..c77543c9 100644 --- a/lite-rpc/src/bridge.rs +++ b/lite-rpc/src/bridge.rs @@ -149,7 +149,7 @@ impl LiteRpcServer for LiteBridge { let BlockInformation { slot, .. 
} = self .data_cache .block_information_store - .get_latest_block(commitment_config) + .get_latest_block_information(commitment_config) .await; Ok(slot) } @@ -161,7 +161,7 @@ impl LiteRpcServer for LiteBridge { let block_info = self .data_cache .block_information_store - .get_latest_block(commitment_config) + .get_latest_block_information(commitment_config) .await; Ok(block_info.block_height) } @@ -189,7 +189,7 @@ impl LiteRpcServer for LiteBridge { RPC_GET_LATEST_BLOCKHASH.inc(); let commitment_config = config - .map(|config| config.commitment.unwrap_or_default()) + .map(|config| config.commitment.unwrap_or(CommitmentConfig::confirmed())) .unwrap_or_default(); let BlockInformation { @@ -200,7 +200,7 @@ impl LiteRpcServer for LiteBridge { } = self .data_cache .block_information_store - .get_latest_block(commitment_config) + .get_latest_block_information(commitment_config) .await; log::trace!("glb {blockhash} {slot} {block_height}"); @@ -252,7 +252,7 @@ impl LiteRpcServer for LiteBridge { let block_info = self .data_cache .block_information_store - .get_latest_block_info(commitment_config) + .get_latest_block_information(commitment_config) .await; //TODO manage transaction_count of epoch info. Currently None. @@ -294,7 +294,7 @@ impl LiteRpcServer for LiteBridge { slot: self .data_cache .block_information_store - .get_latest_block_info(CommitmentConfig::finalized()) + .get_latest_block_information(CommitmentConfig::finalized()) .await .slot, api_version: None, @@ -424,6 +424,7 @@ impl LiteRpcServer for LiteBridge { .await; Ok(schedule) } + async fn get_slot_leaders(&self, start_slot: u64, limit: u64) -> RpcResult> { let epock_schedule = self.data_cache.epoch_data.get_epoch_schedule(); @@ -518,10 +519,19 @@ impl LiteRpcServer for LiteBridge { return Err(jsonrpsee::types::error::ErrorCode::InvalidParams.into()); }; if let Some(account_service) = &self.accounts_service { + let commitment = config + .as_ref() + .and_then(|x| x.commitment) + .unwrap_or_default(); + let current_block_info = self + .data_cache + .block_information_store + .get_latest_block_information(commitment) + .await; match account_service.get_account(pubkey, config).await { - Ok((slot, ui_account)) => Ok(RpcResponse { + Ok((_, ui_account)) => Ok(RpcResponse { context: RpcResponseContext { - slot, + slot: current_block_info.slot, api_version: None, }, value: ui_account, @@ -555,16 +565,12 @@ impl LiteRpcServer for LiteBridge { if let Some(account_service) = &self.accounts_service { let mut ui_accounts = vec![]; - let mut max_slot = 0; for pubkey in pubkeys { match account_service .get_account(pubkey.unwrap(), config.clone()) .await { - Ok((slot, ui_account)) => { - if slot > max_slot { - max_slot = slot; - } + Ok((_, ui_account)) => { ui_accounts.push(ui_account); } Err(_) => { @@ -572,10 +578,19 @@ impl LiteRpcServer for LiteBridge { } } } + let commitment = config + .as_ref() + .and_then(|x| x.commitment) + .unwrap_or_default(); + let current_block_info = self + .data_cache + .block_information_store + .get_latest_block_information(commitment) + .await; assert_eq!(ui_accounts.len(), pubkey_strs.len()); Ok(RpcResponse { context: RpcResponseContext { - slot: max_slot, + slot: current_block_info.slot, api_version: None, }, value: ui_accounts, @@ -599,16 +614,26 @@ impl LiteRpcServer for LiteBridge { .map(|value| value.with_context.unwrap_or_default()) .unwrap_or_default(); + let commitment: CommitmentConfig = config + .as_ref() + .and_then(|x| x.account_config.commitment) + .unwrap_or_default(); + let current_block_info = self 
+ .data_cache + .block_information_store + .get_latest_block_information(commitment) + .await; + if let Some(account_service) = &self.accounts_service { match account_service .get_program_accounts(program_id, config) .await { - Ok((slot, ui_account)) => { + Ok((_, ui_account)) => { if with_context { Ok(OptionalContext::Context(RpcResponse { context: RpcResponseContext { - slot, + slot: current_block_info.slot, api_version: None, }, value: ui_account, @@ -645,11 +670,22 @@ impl LiteRpcServer for LiteBridge { commitment: x.commitment, min_context_slot: x.min_context_slot, }); + + let commitment = config + .as_ref() + .and_then(|x| x.commitment) + .unwrap_or_default(); + let current_block_info = self + .data_cache + .block_information_store + .get_latest_block_information(commitment) + .await; + if let Some(account_service) = &self.accounts_service { match account_service.get_account(pubkey, config).await { - Ok((slot, ui_account)) => Ok(RpcResponse { + Ok((_, ui_account)) => Ok(RpcResponse { context: RpcResponseContext { - slot, + slot: current_block_info.slot, api_version: None, }, value: ui_account.map(|x| x.lamports).unwrap_or_default(), diff --git a/lite-rpc/src/cli.rs b/lite-rpc/src/cli.rs index 82aba777..b8c334ca 100644 --- a/lite-rpc/src/cli.rs +++ b/lite-rpc/src/cli.rs @@ -389,10 +389,12 @@ fn quic_params_from_environment() -> Option { .map(|millis| millis.parse().unwrap()) .unwrap_or(quic_connection_parameters.number_of_transactions_per_unistream); - quic_connection_parameters.percentage_of_connection_limit_to_create_new = + quic_connection_parameters.unistreams_to_create_new_connection_in_percentage = env::var("QUIC_PERCENTAGE_TO_CREATE_NEW_CONNECTION") .map(|millis| millis.parse().unwrap()) - .unwrap_or(quic_connection_parameters.percentage_of_connection_limit_to_create_new); + .unwrap_or( + quic_connection_parameters.unistreams_to_create_new_connection_in_percentage, + ); Some(quic_connection_parameters) } diff --git a/lite-rpc/src/main.rs b/lite-rpc/src/main.rs index 11f11055..5266fe62 100644 --- a/lite-rpc/src/main.rs +++ b/lite-rpc/src/main.rs @@ -19,6 +19,7 @@ use solana_lite_rpc_accounts_on_demand::accounts_on_demand::AccountsOnDemand; use solana_lite_rpc_address_lookup_tables::address_lookup_table_store::AddressLookupTableStore; use solana_lite_rpc_blockstore::history::History; use solana_lite_rpc_cluster_endpoints::endpoint_stremers::EndpointStreaming; + use solana_lite_rpc_cluster_endpoints::geyser_grpc_connector::{ GrpcConnectionTimeouts, GrpcSourceConfig, }; @@ -41,10 +42,9 @@ use solana_lite_rpc_core::structures::account_filter::AccountFilters; use solana_lite_rpc_core::structures::leaderschedule::CalculatedSchedule; use solana_lite_rpc_core::structures::{ epoch::EpochCache, identity_stakes::IdentityStakes, notifications::NotificationSender, - produced_block::ProducedBlock, }; use solana_lite_rpc_core::traits::address_lookup_table_interface::AddressLookupTableInterface; -use solana_lite_rpc_core::types::BlockStream; +use solana_lite_rpc_core::types::{BlockInfoStream, BlockStream}; use solana_lite_rpc_core::AnyhowJoinHandle; use solana_lite_rpc_prioritization_fees::account_prio_service::AccountPrioService; use solana_lite_rpc_services::data_caching_service::DataCachingService; @@ -54,6 +54,7 @@ use solana_lite_rpc_services::transaction_replayer::TransactionReplayer; use solana_lite_rpc_services::tx_sender::TxSender; use lite_rpc::postgres_logger; +use solana_lite_rpc_core::structures::block_info::BlockInfo; use 
solana_lite_rpc_prioritization_fees::start_block_priofees_task; use solana_lite_rpc_util::obfuscate_rpcurl; use solana_rpc_client::nonblocking::rpc_client::RpcClient; @@ -70,27 +71,32 @@ use tokio::time::{timeout, Instant}; use tracing_subscriber::fmt::format::FmtSpan; use tracing_subscriber::EnvFilter; -async fn get_latest_block( - mut block_stream: BlockStream, +// jemalloc seems to be better at keeping the memory footprint reasonable over +// longer periods of time +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + +async fn get_latest_block_info( + mut blockinfo_stream: BlockInfoStream, commitment_config: CommitmentConfig, -) -> ProducedBlock { +) -> BlockInfo { let started = Instant::now(); loop { - match timeout(Duration::from_millis(500), block_stream.recv()).await { - Ok(Ok(block)) => { - if block.commitment_config == commitment_config { - return block; + match timeout(Duration::from_millis(500), blockinfo_stream.recv()).await { + Ok(Ok(block_info)) => { + if block_info.commitment_config == commitment_config { + return block_info; } } Err(_elapsed) => { debug!( - "waiting for latest block ({}) ... {:.02}ms", + "waiting for latest block info ({}) ... {:.02}ms", commitment_config.commitment, started.elapsed().as_secs_f32() * 1000.0 ); } Ok(Err(_error)) => { - panic!("Did not recv blocks"); + panic!("Did not recv block info"); } } } @@ -196,6 +202,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc) -> anyhow: let EndpointStreaming { // note: blocks_notifier will be dropped at some point blocks_notifier, + blockinfo_notifier, cluster_info_notifier, slot_notifier, vote_account_notifier, @@ -230,8 +237,10 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc) -> anyhow: let account_service = AccountService::new(account_storage, account_notification_sender); - account_service - .process_account_stream(account_stream.resubscribe(), blocks_notifier.resubscribe()); + account_service.process_account_stream( + account_stream.resubscribe(), + blockinfo_notifier.resubscribe(), + ); account_service .populate_from_rpc( @@ -245,21 +254,24 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc) -> anyhow: None }; - info!("Waiting for first finalized block..."); - let finalized_block = - get_latest_block(blocks_notifier.resubscribe(), CommitmentConfig::finalized()).await; - info!("Got finalized block: {:?}", finalized_block.slot); + info!("Waiting for first finalized block info..."); + let finalized_block_info = get_latest_block_info( + blockinfo_notifier.resubscribe(), + CommitmentConfig::finalized(), + ) + .await; + info!("Got finalized block info: {:?}", finalized_block_info.slot); let (epoch_data, _current_epoch_info) = EpochCache::bootstrap_epoch(&rpc_client).await?; let block_information_store = - BlockInformationStore::new(BlockInformation::from_block(&finalized_block)); + BlockInformationStore::new(BlockInformation::from_block_info(&finalized_block_info)); let data_cache = DataCache { block_information_store, cluster_info: ClusterInfo::default(), identity_stakes: IdentityStakes::new(validator_identity.pubkey()), - slot_cache: SlotCache::new(finalized_block.slot), + slot_cache: SlotCache::new(finalized_block_info.slot), tx_subs: SubscriptionStore::default(), txs: TxStore { store: Arc::new(DashMap::new()), @@ -276,6 +288,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc) -> anyhow: // to avoid laggin we resubscribe to block notification let data_caching_service = data_cache_service.listen( blocks_notifier.resubscribe(), + 
blockinfo_notifier.resubscribe(), slot_notifier.resubscribe(), cluster_info_notifier, vote_account_notifier, @@ -378,6 +391,7 @@ pub async fn start_lite_rpc(args: Config, rpc_client: Arc) -> anyhow: pubsub_service, lite_rpc_ws_addr, lite_rpc_http_addr, + None, )); drop(slot_notifier); @@ -427,7 +441,7 @@ fn setup_grpc_stream_debugging(blocks_notifier: &BlockStream) { debugtask_blockstream_confirmation_sequence(blocks_notifier.resubscribe()); } -#[tokio::main(flavor = "multi_thread", worker_threads = 16)] +#[tokio::main()] pub async fn main() -> anyhow::Result<()> { setup_tracing_subscriber(); diff --git a/lite-rpc/src/postgres_logger/postgres_session.rs b/lite-rpc/src/postgres_logger/postgres_session.rs index bd4781b7..d716798e 100644 --- a/lite-rpc/src/postgres_logger/postgres_session.rs +++ b/lite-rpc/src/postgres_logger/postgres_session.rs @@ -65,7 +65,7 @@ impl PostgresSession { .context("Connecting to Postgres failed")?; tokio::spawn(async move { - log::info!("Connecting to Postgres"); + log::debug!("Connecting to Postgres"); if let Err(err) = connection.await { log::error!("Connection to Postgres broke {err:?}"); diff --git a/lite-rpc/src/service_spawner.rs b/lite-rpc/src/service_spawner.rs index 69669509..0be16a1d 100644 --- a/lite-rpc/src/service_spawner.rs +++ b/lite-rpc/src/service_spawner.rs @@ -1,3 +1,4 @@ +use solana_lite_rpc_core::types::BlockInfoStream; use solana_lite_rpc_core::{ stores::data_cache::DataCache, structures::notifications::NotificationSender, @@ -14,6 +15,7 @@ use solana_lite_rpc_services::{ tx_sender::TxSender, }; use std::time::Duration; + pub struct ServiceSpawner { pub prometheus_addr: String, pub data_cache: DataCache, @@ -38,9 +40,11 @@ impl ServiceSpawner { } } - pub async fn spawn_data_caching_service( + // TODO remove + pub async fn _spawn_data_caching_service( &self, block_notifier: BlockStream, + blockinfo_notifier: BlockInfoStream, slot_notification: SlotStream, cluster_info_notification: ClusterInfoStream, va_notification: VoteAccountStream, @@ -52,6 +56,7 @@ impl ServiceSpawner { data_service.listen( block_notifier, + blockinfo_notifier, slot_notification, cluster_info_notification, va_notification, diff --git a/lite-rpc/src/start_server.rs b/lite-rpc/src/start_server.rs index 8d9bcaa4..89d27e31 100644 --- a/lite-rpc/src/start_server.rs +++ b/lite-rpc/src/start_server.rs @@ -9,14 +9,35 @@ use solana_lite_rpc_core::AnyhowJoinHandle; use std::time::Duration; use tower_http::cors::{Any, CorsLayer}; +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct ServerConfiguration { + pub max_request_body_size: u32, + + pub max_response_body_size: u32, + + pub max_connection: u32, +} + +impl Default for ServerConfiguration { + fn default() -> Self { + Self { + max_request_body_size: 50 * (1 << 10), // 50kb + max_response_body_size: 500_000 * (1 << 10), // 500MB response size + max_connection: 1000000, + } + } +} + pub async fn start_servers( rpc: LiteBridge, pubsub: LitePubSubBridge, ws_addr: String, http_addr: String, + server_configuration: Option, ) -> anyhow::Result<()> { let rpc = rpc.into_rpc(); let pubsub = pubsub.into_rpc(); + let server_configuration = server_configuration.unwrap_or_default(); let ws_server_handle = ServerBuilder::default() .ws_only() @@ -36,6 +57,9 @@ pub async fn start_servers( let http_server_handle = ServerBuilder::default() .set_middleware(middleware) + .max_connections(server_configuration.max_connection) + .max_request_body_size(server_configuration.max_response_body_size) + 
.max_response_body_size(server_configuration.max_response_body_size) .http_only() .build(http_addr.clone()) .await? diff --git a/migrations/create_benchrunner.sql b/migrations/create_benchrunner.sql new file mode 100644 index 00000000..77d32450 --- /dev/null +++ b/migrations/create_benchrunner.sql @@ -0,0 +1,26 @@ + +CREATE SCHEMA benchrunner; + +CREATE TABLE benchrunner.bench_metrics ( + tenant text NOT NULL, + ts timestamp NOT NULL, + prio_fees int8 NOT NULL, + txs_sent int8 NOT NULL, + txs_confirmed int8 NOT NULL, + txs_un_confirmed int8 NOT NULL, + average_confirmation_time_ms real NOT NULL, + metric_json jsonb NOT NULL, + PRIMARY KEY (tenant, ts) +); + +CREATE TABLE benchrunner.bench_runs ( + tenant text NOT NULL, + ts timestamp NOT NULL, + status text NOT NULL, + PRIMARY KEY (tenant, ts) +); + +GRANT USAGE ON SCHEMA benchrunner TO r_benchrunner; +GRANT SELECT, INSERT, UPDATE ON ALL TABLES IN SCHEMA benchrunner TO r_benchrunner; +GRANT USAGE ON SCHEMA benchrunner TO ro_benchrunner; +GRANT SELECT ON ALL TABLES IN SCHEMA benchrunner TO ro_benchrunner; diff --git a/migrations/permissions.sql b/migrations/permissions.sql index 09bdaff9..72fc2832 100644 --- a/migrations/permissions.sql +++ b/migrations/permissions.sql @@ -21,3 +21,11 @@ CREATE ROLE ro_literpc; GRANT ro_literpc TO literpc_app; GRANT CONNECT ON DATABASE literpc_integrationtest TO ro_literpc; -- TODO adjust database name + +-- required for benchrunner-service +CREATE ROLE r_benchrunner; +CREATE ROLE ro_benchrunner; +GRANT ro_benchrunner TO r_benchrunner; + +GRANT r_benchrunner TO literpc_app; +GRANT ro_benchrunner TO literpc_app; diff --git a/openssl-legacy.cnf b/openssl-legacy.cnf new file mode 100644 index 00000000..38438a50 --- /dev/null +++ b/openssl-legacy.cnf @@ -0,0 +1,19 @@ +# Extend the default debian openssl config +# see https://gist.github.com/tothi/392dbb008ae0b60d25cfa4447bc21121 +# fixes "Global default library context, Algorithm (RC2-40-CBC : 0)" +.include = /etc/ssl/openssl.cnf + +openssl_conf = openssl_init + +[openssl_init] +providers = provider_sect + +[provider_sect] +default = default_sect +legacy = legacy_sect + +[default_sect] +activate = 1 + +[legacy_sect] +activate = 1 diff --git a/quic-forward-proxy-integration-test/tests/quic_proxy_tpu_integrationtest.rs b/quic-forward-proxy-integration-test/tests/quic_proxy_tpu_integrationtest.rs index eaceadb1..2033dd95 100644 --- a/quic-forward-proxy-integration-test/tests/quic_proxy_tpu_integrationtest.rs +++ b/quic-forward-proxy-integration-test/tests/quic_proxy_tpu_integrationtest.rs @@ -60,7 +60,7 @@ const QUIC_CONNECTION_PARAMS: QuicConnectionParameters = QuicConnectionParameter unistream_timeout: Duration::from_secs(2), write_timeout: Duration::from_secs(2), number_of_transactions_per_unistream: 10, - percentage_of_connection_limit_to_create_new: 10, + unistreams_to_create_new_connection_in_percentage: 10, }; #[test] diff --git a/services/src/data_caching_service.rs b/services/src/data_caching_service.rs index a97beec4..826ddef5 100644 --- a/services/src/data_caching_service.rs +++ b/services/src/data_caching_service.rs @@ -7,12 +7,14 @@ use prometheus::{opts, register_int_counter, register_int_gauge, IntCounter}; use solana_lite_rpc_core::stores::{ block_information_store::BlockInformation, data_cache::DataCache, }; +use solana_lite_rpc_core::structures::block_info::BlockInfo; use solana_lite_rpc_core::types::{BlockStream, ClusterInfoStream, SlotStream, VoteAccountStream}; use solana_lite_rpc_core::AnyhowJoinHandle; use 
solana_sdk::clock::MAX_RECENT_BLOCKHASHES; use solana_sdk::commitment_config::CommitmentLevel; use solana_transaction_status::{TransactionConfirmationStatus, TransactionStatus}; use tokio::sync::broadcast::error::RecvError; +use tokio::sync::broadcast::Receiver; lazy_static::lazy_static! { static ref NB_CLUSTER_NODES: GenericGauge = @@ -43,13 +45,15 @@ impl DataCachingService { pub fn listen( self, block_notifier: BlockStream, + blockinfo_notifier: Receiver, slot_notification: SlotStream, cluster_info_notification: ClusterInfoStream, va_notification: VoteAccountStream, ) -> Vec { - // clone the ledger to move into the processor task let data_cache = self.data_cache.clone(); - // process all the data into the ledger + let block_information_store_block = data_cache.block_information_store.clone(); + let block_information_store_block_info = data_cache.block_information_store.clone(); + let block_cache_jh = tokio::spawn(async move { let mut block_notifier = block_notifier; loop { @@ -64,8 +68,8 @@ impl DataCachingService { } }; - data_cache - .block_information_store + // note: most likely the block has been added from blockinfo_notifier stream already + block_information_store_block .add_block(BlockInformation::from_block(&block)) .await; @@ -76,9 +80,8 @@ impl DataCachingService { }; for tx in &block.transactions { - let block_info = data_cache - .block_information_store - .get_block_info(&tx.recent_blockhash); + let block_info = + block_information_store_block.get_block_info(&tx.recent_blockhash); let last_valid_blockheight = if let Some(block_info) = block_info { block_info.last_valid_blockheight } else { @@ -118,6 +121,26 @@ impl DataCachingService { } }); + let blockinfo_cache_jh = tokio::spawn(async move { + let mut blockinfo_notifier = blockinfo_notifier; + loop { + let block_info = match blockinfo_notifier.recv().await { + Ok(block_info) => block_info, + Err(RecvError::Lagged(blockinfo_lagged)) => { + warn!("Lagged {} block info - continue", blockinfo_lagged); + continue; + } + Err(RecvError::Closed) => { + bail!("BlockInfo stream has been closed - abort"); + } + }; + + block_information_store_block_info + .add_block(BlockInformation::from_block_info(&block_info)) + .await; + } + }); + let data_cache = self.data_cache.clone(); let slot_cache_jh = tokio::spawn(async move { let mut slot_notification = slot_notification; @@ -174,6 +197,7 @@ impl DataCachingService { vec![ slot_cache_jh, block_cache_jh, + blockinfo_cache_jh, cluster_info_jh, identity_stakes_jh, cleaning_service, diff --git a/services/src/quic_connection.rs b/services/src/quic_connection.rs index 502908dd..82e68202 100644 --- a/services/src/quic_connection.rs +++ b/services/src/quic_connection.rs @@ -110,8 +110,14 @@ impl QuicConnection { } None => { NB_QUIC_CONNECTION_REQUESTED.inc(); + // so that only one instance is connecting + let mut lk = self.connection.write().await; + if lk.is_some() { + // connection has recently been established/ just use it + return (*lk).clone(); + } let connection = self.connect(false).await; - *self.connection.write().await = connection.clone(); + *lk = connection.clone(); self.has_connected_once.store(true, Ordering::Relaxed); connection } @@ -211,7 +217,7 @@ pub struct QuicConnectionPool { // counting semaphore is ideal way to manage backpressure on the connection // because a connection can create only N unistream connections transactions_in_sending_semaphore: Vec>, - permit_threshold: usize, + threshold_to_create_new_connection: usize, } pub struct PooledConnection { @@ -250,9 +256,9 @@ 
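The locking change in QuicConnection::get_connection above closes a small race: two tasks could both observe None and then connect twice, with one connection wasted. Taking the write lock before re-checking serializes the connect. A reduced, stand-alone sketch of the same double-checked pattern, with a String standing in for the quinn connection:

    use std::sync::Arc;
    use tokio::sync::RwLock;

    // Double-checked initialization: re-check under the write lock so that
    // only one task performs the slow connect while later callers reuse its result.
    async fn get_or_connect(slot: &Arc<RwLock<Option<String>>>) -> Option<String> {
        if let Some(existing) = (*slot.read().await).clone() {
            return Some(existing);
        }
        let mut lk = slot.write().await;
        if lk.is_some() {
            // another task connected while we waited for the write lock
            return (*lk).clone();
        }
        let connection = Some("connected".to_string()); // stand-in for connect(..).await
        *lk = connection.clone();
        connection
    }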
impl QuicConnectionPool { }); v }, - permit_threshold: max_number_of_unistream_connection - .saturating_mul(std::cmp::max( - connection_parameters.percentage_of_connection_limit_to_create_new, + threshold_to_create_new_connection: max_number_of_unistream_connection + .saturating_mul(std::cmp::min( + connection_parameters.unistreams_to_create_new_connection_in_percentage, 100, ) as usize) .saturating_div(100), @@ -266,7 +272,7 @@ impl QuicConnectionPool { if !connection.has_connected_atleast_once() || (connection.is_connected().await - && sem.available_permits() > self.permit_threshold) + && sem.available_permits() > self.threshold_to_create_new_connection) { // if it is connection is not yet connected even once or connection is still open if let Ok(permit) = sem.clone().try_acquire_owned() { @@ -289,9 +295,6 @@ impl QuicConnectionPool { let (permit, index) = self.get_permit_and_index().await?; // establish a connection if the connection has not yet been used let connection = self.connections[index].clone(); - if !connection.has_connected_atleast_once() { - connection.get_connection().await; - } Ok(PooledConnection { connection, permit }) } diff --git a/services/src/quic_connection_utils.rs b/services/src/quic_connection_utils.rs index bc3749d4..dc864610 100644 --- a/services/src/quic_connection_utils.rs +++ b/services/src/quic_connection_utils.rs @@ -1,5 +1,7 @@ use log::trace; -use prometheus::{core::GenericGauge, opts, register_int_gauge}; +use prometheus::{ + core::GenericGauge, histogram_opts, opts, register_histogram, register_int_gauge, Histogram, +}; use quinn::{ ClientConfig, Connection, ConnectionError, Endpoint, EndpointConfig, IdleTimeout, SendStream, TokioRuntime, TransportConfig, VarInt, @@ -45,6 +47,26 @@ lazy_static::lazy_static! { register_int_gauge!(opts!("literpc_quic_finish_timedout", "Number of times finish timedout")).unwrap(); static ref NB_QUIC_FINISH_ERRORED: GenericGauge = register_int_gauge!(opts!("literpc_quic_finish_errored", "Number of times finish errored")).unwrap(); + + static ref NB_QUIC_CONNECTIONS: GenericGauge = + register_int_gauge!(opts!("literpc_nb_active_quic_connections", "Number of quic connections open")).unwrap(); + + static ref TIME_OF_CONNECT: Histogram = register_histogram!(histogram_opts!( + "literpc_quic_connection_timer_histogram", + "Time to connect to the TPU port", + )) + .unwrap(); + static ref TIME_TO_WRITE: Histogram = register_histogram!(histogram_opts!( + "literpc_quic_write_timer_histogram", + "Time to write on the TPU port", + )) + .unwrap(); + + static ref TIME_TO_FINISH: Histogram = register_histogram!(histogram_opts!( + "literpc_quic_finish_timer_histogram", + "Time to finish on the TPU port", +)) +.unwrap(); } const ALPN_TPU_PROTOCOL_ID: &[u8] = b"solana-tpu"; @@ -63,20 +85,20 @@ pub struct QuicConnectionParameters { pub connection_retry_count: usize, pub max_number_of_connections: usize, pub number_of_transactions_per_unistream: usize, - pub percentage_of_connection_limit_to_create_new: u8, + pub unistreams_to_create_new_connection_in_percentage: u8, } impl Default for QuicConnectionParameters { fn default() -> Self { Self { - connection_timeout: Duration::from_millis(5000), - unistream_timeout: Duration::from_millis(5000), - write_timeout: Duration::from_millis(5000), - finalize_timeout: Duration::from_millis(5000), + connection_timeout: Duration::from_millis(10000), + unistream_timeout: Duration::from_millis(10000), + write_timeout: Duration::from_millis(10000), + finalize_timeout: Duration::from_millis(10000), 
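Note the switch from std::cmp::max to std::cmp::min in the threshold computation above: with max, any configured percentage below 100 was rounded up to 100 before the division, so the rename also fixes the clamping direction. A stand-alone restatement of the resulting formula, with a couple of worked values assuming a unistream limit of 128:

    // Restatement of the threshold arithmetic: take the configured percentage
    // (capped at 100) of the unistream limit, rounding down.
    fn threshold_to_create_new_connection(max_unistreams: usize, percentage: u8) -> usize {
        max_unistreams
            .saturating_mul(percentage.min(100) as usize)
            .saturating_div(100)
    }

    #[test]
    fn threshold_examples() {
        assert_eq!(threshold_to_create_new_connection(128, 10), 12);
        assert_eq!(threshold_to_create_new_connection(128, 250), 128); // capped at 100 percent
    }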
connection_retry_count: 20, max_number_of_connections: 8, number_of_transactions_per_unistream: 1, - percentage_of_connection_limit_to_create_new: 50, + unistreams_to_create_new_connection_in_percentage: 10, } } } @@ -137,10 +159,12 @@ impl QuicConnectionUtils { addr: SocketAddr, connection_timeout: Duration, ) -> anyhow::Result { + let timer = TIME_OF_CONNECT.start_timer(); let connecting = endpoint.connect(addr, "connect")?; match timeout(connection_timeout, connecting).await { Ok(res) => match res { Ok(connection) => { + timer.observe_duration(); NB_QUIC_CONN_SUCCESSFUL.inc(); Ok(connection) } @@ -210,6 +234,7 @@ impl QuicConnectionUtils { }; match conn { Ok(conn) => { + NB_QUIC_CONNECTIONS.inc(); return Some(conn); } Err(e) => { @@ -229,6 +254,7 @@ impl QuicConnectionUtils { identity: Pubkey, connection_params: QuicConnectionParameters, ) -> Result<(), QuicConnectionError> { + let timer = TIME_TO_WRITE.start_timer(); let write_timeout_res = timeout( connection_params.write_timeout, send_stream.write_all(tx.as_slice()), @@ -244,6 +270,8 @@ impl QuicConnectionUtils { ); NB_QUIC_WRITEALL_ERRORED.inc(); return Err(QuicConnectionError::ConnectionError { retry: true }); + } else { + timer.observe_duration(); } } Err(_) => { @@ -253,6 +281,7 @@ impl QuicConnectionUtils { } } + let timer: prometheus::HistogramTimer = TIME_TO_FINISH.start_timer(); let finish_timeout_res = timeout(connection_params.finalize_timeout, send_stream.finish()).await; match finish_timeout_res { @@ -265,6 +294,8 @@ impl QuicConnectionUtils { ); NB_QUIC_FINISH_ERRORED.inc(); return Err(QuicConnectionError::ConnectionError { retry: false }); + } else { + timer.observe_duration(); } } Err(_) => { diff --git a/services/src/tpu_utils/tpu_connection_manager.rs b/services/src/tpu_utils/tpu_connection_manager.rs index e87ad46d..9200c60b 100644 --- a/services/src/tpu_utils/tpu_connection_manager.rs +++ b/services/src/tpu_utils/tpu_connection_manager.rs @@ -33,8 +33,6 @@ use crate::{ }; lazy_static::lazy_static! { - static ref NB_QUIC_CONNECTIONS: GenericGauge = - register_int_gauge!(opts!("literpc_nb_active_quic_connections", "Number of quic connections open")).unwrap(); static ref NB_QUIC_ACTIVE_CONNECTIONS: GenericGauge = register_int_gauge!(opts!("literpc_nb_active_connections", "Number quic tasks that are running")).unwrap(); static ref NB_CONNECTIONS_TO_KEEP: GenericGauge = @@ -46,6 +44,9 @@ lazy_static::lazy_static! 
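The three histograms added above all follow the usual prometheus pattern: start_timer() hands back a HistogramTimer and observe_duration() records the elapsed seconds (a timer that is simply dropped records as well, unless stop_and_discard is used). A minimal stand-alone example of that pattern, unrelated to the TPU code:

    use prometheus::{histogram_opts, register_histogram, Histogram};

    lazy_static::lazy_static! {
        static ref DEMO_SECTION_TIME: Histogram = register_histogram!(histogram_opts!(
            "demo_section_duration_seconds",
            "Time spent in the demo section",
        ))
        .unwrap();
    }

    async fn timed_section() {
        let timer = DEMO_SECTION_TIME.start_timer();
        tokio::time::sleep(std::time::Duration::from_millis(5)).await;
        timer.observe_duration(); // records the elapsed seconds into the histogram
    }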
{ "Time to send transaction batch", )) .unwrap(); + + static ref TRANSACTIONS_IN_HEAP: GenericGauge = + register_int_gauge!(opts!("literpc_transactions_in_priority_heap", "Number of transactions in priority heap")).unwrap(); } #[derive(Clone)] @@ -84,19 +85,41 @@ impl ActiveConnection { addr: SocketAddr, identity_stakes: IdentityStakesData, ) { - let priorization_heap = PrioritizationFeesHeap::new(2048); let fill_notify = Arc::new(Notify::new()); let identity = self.identity; + NB_QUIC_ACTIVE_CONNECTIONS.inc(); + + let max_number_of_connections = self.connection_parameters.max_number_of_connections; + + let max_uni_stream_connections = compute_max_allowed_uni_streams( + identity_stakes.peer_type, + identity_stakes.stakes, + identity_stakes.total_stakes, + ); + let exit_signal = self.exit_signal.clone(); + let connection_pool = QuicConnectionPool::new( + identity, + self.endpoints.clone(), + addr, + self.connection_parameters, + exit_signal.clone(), + max_number_of_connections, + max_uni_stream_connections, + ); + + let priorization_heap = PrioritizationFeesHeap::new(2 * max_uni_stream_connections); + let heap_filler_task = { let priorization_heap = priorization_heap.clone(); let data_cache = self.data_cache.clone(); let fill_notify = fill_notify.clone(); + let exit_signal = exit_signal.clone(); tokio::spawn(async move { let mut current_blockheight = data_cache.block_information_store.get_last_blockheight(); - loop { + while !exit_signal.load(Ordering::Relaxed) { let tx = transaction_reciever.recv().await; match tx { Ok(transaction_sent_info) => { @@ -108,6 +131,8 @@ impl ActiveConnection { } priorization_heap.insert(transaction_sent_info).await; + TRANSACTIONS_IN_HEAP.inc(); + fill_notify.notify_one(); // give little more priority to read the transaction sender with this wait let last_blockheight = @@ -134,25 +159,15 @@ impl ActiveConnection { }) }; - NB_QUIC_ACTIVE_CONNECTIONS.inc(); - - let max_number_of_connections = self.connection_parameters.max_number_of_connections; - - let max_uni_stream_connections = compute_max_allowed_uni_streams( - identity_stakes.peer_type, - identity_stakes.stakes, - identity_stakes.total_stakes, - ); - let exit_signal = self.exit_signal.clone(); - let connection_pool = QuicConnectionPool::new( - identity, - self.endpoints.clone(), - addr, - self.connection_parameters, - exit_signal.clone(), - max_number_of_connections, - max_uni_stream_connections, - ); + // create atleast one connection before waiting from transactions + if let Ok(PooledConnection { connection, permit }) = + connection_pool.get_pooled_connection().await + { + tokio::task::spawn(async move { + let _permit = permit; + connection.get_connection().await; + }); + } 'main_loop: loop { // exit signal set @@ -173,6 +188,7 @@ impl ActiveConnection { // wait to get notification from fill event break; }; + TRANSACTIONS_IN_HEAP.dec(); // check if transaction is already confirmed if self.data_cache.txs.is_transaction_confirmed(&tx.signature) { @@ -193,28 +209,32 @@ impl ActiveConnection { tokio::spawn(async move { // permit will be used to send all the transaction and then destroyed let _permit = permit; + let timer = TT_SENT_TIMER.start_timer(); + NB_QUIC_TASKS.inc(); + connection.send_transaction(tx.transaction).await; + timer.observe_duration(); NB_QUIC_TASKS.dec(); }); } }, _ = exit_notifier.notified() => { - // notified to exit - break; + break 'main_loop; } } } heap_filler_task.abort(); - NB_QUIC_CONNECTIONS.dec(); + let elements_removed = priorization_heap.clear().await; + 
TRANSACTIONS_IN_HEAP.sub(elements_removed as i64); NB_QUIC_ACTIVE_CONNECTIONS.dec(); } pub fn start_listening( &self, transaction_reciever: Receiver, - exit_notifier: Arc, + exit_notifier: Arc, identity_stakes: IdentityStakesData, ) { let addr = self.tpu_address; @@ -226,14 +246,14 @@ impl ActiveConnection { } } -struct ActiveConnectionWithExitChannel { +struct ActiveConnectionWithExitNotifier { pub active_connection: ActiveConnection, - pub exit_notifier: Arc, + pub exit_notifier: Arc, } pub struct TpuConnectionManager { endpoints: RotatingQueue, - identity_to_active_connection: Arc>>, + identity_to_active_connection: Arc>>, } impl TpuConnectionManager { @@ -271,7 +291,7 @@ impl TpuConnectionManager { connection_parameters, ); // using mpsc as a oneshot channel/ because with one shot channel we cannot reuse the reciever - let exit_notifier = Arc::new(tokio::sync::Notify::new()); + let exit_notifier = Arc::new(Notify::new()); let broadcast_receiver = broadcast_sender.subscribe(); active_connection.start_listening( @@ -281,7 +301,7 @@ impl TpuConnectionManager { ); self.identity_to_active_connection.insert( *identity, - Arc::new(ActiveConnectionWithExitChannel { + Arc::new(ActiveConnectionWithExitNotifier { active_connection, exit_notifier, }), @@ -290,22 +310,19 @@ impl TpuConnectionManager { } // remove connections which are no longer needed - let collect_current_active_connections = self - .identity_to_active_connection - .iter() - .map(|x| (*x.key(), x.value().clone())) - .collect::>(); - for (identity, value) in collect_current_active_connections.iter() { - if !connections_to_keep.contains_key(identity) { - trace!("removing a connection for {}", identity); + self.identity_to_active_connection.retain(|key, value| { + if !connections_to_keep.contains_key(key) { + trace!("removing a connection for {}", key.to_string()); // ignore error for exit channel value .active_connection .exit_signal .store(true, Ordering::Relaxed); value.exit_notifier.notify_one(); - self.identity_to_active_connection.remove(identity); + false + } else { + true } - } + }); } } diff --git a/services/src/transaction_service.rs b/services/src/transaction_service.rs index dfc1f87d..2c00efd6 100644 --- a/services/src/transaction_service.rs +++ b/services/src/transaction_service.rs @@ -9,6 +9,7 @@ use crate::{ tx_sender::TxSender, }; use anyhow::bail; +use prometheus::{histogram_opts, register_histogram, Histogram}; use solana_lite_rpc_core::{ solana_utils::SerializableTransaction, structures::transaction_sent_info::SentTransactionInfo, types::SlotStream, @@ -28,6 +29,14 @@ use tokio::{ time::Instant, }; +lazy_static::lazy_static! 
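Switching the connection cleanup to DashMap::retain above drops the intermediate Vec snapshot and removes stale entries in place, while still signalling each of them to exit first. The same pattern on plain types, purely for illustration:

    use std::{collections::HashSet, sync::Arc};
    use dashmap::DashMap;

    // Keep only identities that should stay active; signal the rest before dropping them.
    fn prune(active: &DashMap<String, Arc<tokio::sync::Notify>>, keep: &HashSet<String>) {
        active.retain(|identity, exit_notifier| {
            if keep.contains(identity) {
                true
            } else {
                exit_notifier.notify_one(); // best-effort shutdown signal
                false
            }
        });
    }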
{ + static ref PRIORITY_FEES_HISTOGRAM: Histogram = register_histogram!(histogram_opts!( + "literpc_txs_priority_fee", + "Priority fees of transactions sent by lite-rpc", + )) + .unwrap(); +} + #[derive(Clone)] pub struct TransactionServiceBuilder { tx_sender: TxSender, @@ -157,6 +166,8 @@ impl TransactionService { prioritization_fee }; + PRIORITY_FEES_HISTOGRAM.observe(prioritization_fee as f64); + let max_replay = max_retries.map_or(self.max_retries, |x| x as usize); let transaction_info = SentTransactionInfo { signature, @@ -192,3 +203,5 @@ impl TransactionService { Ok(signature.to_string()) } } + +mod test {} diff --git a/util/src/encoding.rs b/util/src/encoding.rs new file mode 100644 index 00000000..c4138ec9 --- /dev/null +++ b/util/src/encoding.rs @@ -0,0 +1,52 @@ +use base64::Engine; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum BinaryEncoding { + #[default] + Base58, + Base64, +} + +#[derive(thiserror::Error, Debug)] +pub enum BinaryCodecError { + #[error("Base58DecodeError {0}")] + Base58DecodeError(#[from] bs58::decode::Error), + #[error("Base58EncodeError {0}")] + Base58EncodeError(#[from] bs58::encode::Error), + #[error("Base64DecodeError {0}")] + Base64DecodeError(#[from] base64::DecodeError), +} + +impl BinaryEncoding { + pub fn decode>(&self, to_decode: D) -> Result, BinaryCodecError> { + match self { + Self::Base58 => Ok(bs58::decode(to_decode).into_vec()?), + Self::Base64 => Ok(base64::engine::general_purpose::STANDARD.decode(to_decode)?), + } + } + + pub fn encode>(&self, to_encode: E) -> String { + match self { + Self::Base58 => bs58::encode(to_encode).into_string(), + Self::Base64 => base64::engine::general_purpose::STANDARD.encode(to_encode), + } + } + + pub fn serialize(&self, to_serialize: &E) -> anyhow::Result { + let bytes = bincode::serialize(to_serialize)?; + Ok(self.encode(bytes)) + } + + pub fn deserialize Deserialize<'a>>( + &self, + to_deserialize: &String, + ) -> anyhow::Result { + let bytes = self.decode(to_deserialize)?; + Ok(bincode::deserialize(&bytes)?) 
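The new BinaryEncoding helper pairs bincode with a textual encoding, so a round trip shows the intended contract: serialize to bytes, encode to a string, and back. A hypothetical usage (the Ping type is made up for the example):

    // Hypothetical round trip: struct -> bincode -> base64 string -> struct.
    #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)]
    struct Ping {
        slot: u64,
    }

    fn roundtrip() -> anyhow::Result<()> {
        let encoded = BinaryEncoding::Base64.serialize(&Ping { slot: 42 })?;
        let decoded: Ping = BinaryEncoding::Base64.deserialize(&encoded)?;
        assert_eq!(decoded, Ping { slot: 42 });
        Ok(())
    }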
+ } +} + +pub const BASE64: BinaryEncoding = BinaryEncoding::Base64; +pub const BASE58: BinaryEncoding = BinaryEncoding::Base58; diff --git a/util/src/lib.rs b/util/src/lib.rs index 030118fd..5c4bafd8 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,3 +1,7 @@ +pub mod encoding; +pub mod secrets; +pub mod statistics; + // http://mango.rpcpool.com/c232ab232ba2323 pub fn obfuscate_rpcurl(rpc_addr: &str) -> String { if rpc_addr.contains("rpcpool.com") { diff --git a/util/src/secrets.rs b/util/src/secrets.rs new file mode 100644 index 00000000..294b52fb --- /dev/null +++ b/util/src/secrets.rs @@ -0,0 +1,20 @@ +#![allow(dead_code)] + +pub fn obfuscate_rpcurl(rpc_addr: &str) -> String { + if rpc_addr.contains("rpcpool.com") { + return rpc_addr.replacen(char::is_numeric, "X", 99); + } + rpc_addr.to_string() +} + +pub fn obfuscate_token(token: &Option) -> String { + match token { + None => "n/a".to_string(), + Some(token) => { + let mut token = token.clone(); + token.truncate(5); + token += "..."; + token + } + } +} diff --git a/util/src/statistics.rs b/util/src/statistics.rs new file mode 100644 index 00000000..8fd5cb14 --- /dev/null +++ b/util/src/statistics.rs @@ -0,0 +1,40 @@ +pub fn mean(data: &[f32]) -> Option { + let sum = data.iter().sum::(); + let count = data.len(); + + match count { + positive if positive > 0 => Some(sum / count as f32), + _ => None, + } +} + +pub fn std_deviation(data: &[f32]) -> Option { + match (mean(data), data.len()) { + (Some(data_mean), count) if count > 0 => { + let variance = data + .iter() + .map(|value| { + let diff = data_mean - *value; + + diff * diff + }) + .sum::() + / count as f32; + + Some(variance.sqrt()) + } + _ => None, + } +} + +#[test] +fn test_mean() { + let data = [1.0, 2.0, 3.0, 4.0, 5.0]; + assert_eq!(mean(&data), Some(3.0)); +} + +#[test] +fn test_std_deviation() { + let data = [1.0, 3.0, 5.0]; + assert_eq!(std_deviation(&data), Some(1.6329932)); +}
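One caveat on the statistics helpers: std_deviation divides the variance by n, so it is the population standard deviation rather than the n-1 sample estimator, which matters when comparing results against tools that use the latter. A small usage sketch combining both helpers (the summarize wrapper is hypothetical):

    // Summarize a series of measurements as (mean, population standard deviation).
    fn summarize(values: &[f32]) -> Option<(f32, f32)> {
        Some((mean(values)?, std_deviation(values)?))
    }

    #[test]
    fn test_summarize() {
        let data = [1.0, 3.0, 5.0];
        assert_eq!(summarize(&data), Some((3.0, 1.6329932)));
    }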