From cdc9638ac1256f8a5305adb2f50a188de8874a0f Mon Sep 17 00:00:00 2001 From: Piotr Galar Date: Fri, 13 Sep 2024 19:18:30 +0200 Subject: [PATCH 01/50] chore: parameterise s3 build cache setup (#5586) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we're setting up a new cache bucket, we'd like to be able to control its' configuration via GitHub vars/secrets fully. FYI, the secrets are not set up yet. --------- Co-authored-by: João Oliveira Co-authored-by: Guillaume Michel --- .github/workflows/docker-image.yml | 8 +------- .github/workflows/interop-test.yml | 17 +++++++++-------- scripts/build-interop-image.sh | 4 ++-- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 21863d0ed39..5cbfc20d69d 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -6,7 +6,6 @@ on: - 'master' tags: - 'libp2p-server-**' - pull_request: jobs: server: @@ -34,11 +33,6 @@ jobs: with: context: . file: ./misc/server/Dockerfile - push: ${{ ! github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} # Only push image if we have the required permissions, i.e. not running from a fork - cache-from: ${{ ! github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=rust-libp2p-server }} - cache-to: ${{ ! 
github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=rust-libp2p-server }} + push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - env: - AWS_ACCESS_KEY_ID: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index f3950897089..1d70ca2eaee 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -24,8 +24,9 @@ jobs: - name: Build ${{ matrix.flavour }} image run: ./scripts/build-interop-image.sh env: - AWS_ACCESS_KEY_ID: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + AWS_BUCKET_NAME: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} + AWS_ACCESS_KEY_ID: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} FLAVOUR: ${{ matrix.flavour }} - name: Run ${{ matrix.flavour }} tests @@ -33,9 +34,9 @@ jobs: with: test-filter: ${{ matrix.flavour }}-rust-libp2p-head extra-versions: ${{ github.workspace }}/interop-tests/${{ matrix.flavour }}-ping-version.json - s3-cache-bucket: libp2p-by-tf-aws-bootstrap - s3-access-key-id: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} - s3-secret-access-key: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} + s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} + s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} worker-count: 16 run-holepunching-interop: name: Run hole-punch interoperability tests @@ -50,7 +51,7 @@ jobs: with: test-filter: rust-libp2p-head extra-versions: ${{ github.workspace }}/hole-punching-tests/version.json - s3-cache-bucket: libp2p-by-tf-aws-bootstrap - 
s3-access-key-id: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} - s3-secret-access-key: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} + s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} + s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} worker-count: 16 diff --git a/scripts/build-interop-image.sh b/scripts/build-interop-image.sh index 28a8db9188d..ad6ef78b153 100755 --- a/scripts/build-interop-image.sh +++ b/scripts/build-interop-image.sh @@ -6,13 +6,13 @@ CACHE_TO="" # If we have credentials, write to cache if [[ -n "${AWS_SECRET_ACCESS_KEY}" ]]; then - CACHE_TO="--cache-to type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" + CACHE_TO="--cache-to type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" fi docker buildx build \ --load \ $CACHE_TO \ - --cache-from type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ + --cache-from type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ -t ${FLAVOUR}-rust-libp2p-head \ . \ -f interop-tests/Dockerfile.${FLAVOUR} From a2a281609a0a64b211f7917aa856924983b63200 Mon Sep 17 00:00:00 2001 From: Stefan Date: Sat, 14 Sep 2024 00:33:14 +0200 Subject: [PATCH 02/50] fix(autonat): reject inbound dial request from peer if its not connected (#5597) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description As discovered and described in the issue below, there are situations where an incoming AutoNAT dial can come from a non-connected peer. However `resolve_inbound_request` expects that this situation cannot occur. This PR adds a check upfront and refuses the incoming dial when no connected peer is found. 
Fixes https://github.com/libp2p/rust-libp2p/issues/5570. ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates Co-authored-by: João Oliveira --- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/autonat/CHANGELOG.md | 3 +++ protocols/autonat/Cargo.toml | 2 +- protocols/autonat/src/v1/behaviour/as_server.rs | 15 +++++++++++++++ 5 files changed, 21 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c12e6fb984..b3d1cd0d76d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2687,7 +2687,7 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.13.0" +version = "0.13.1" dependencies = [ "async-std", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index da8d32e1a4a..c9fe928096d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,7 +77,7 @@ futures-bounded = { version = "0.2.4" } futures-rustls = { version = "0.26.0", default-features = false } libp2p = { version = "0.54.1", path = "libp2p" } libp2p-allow-block-list = { version = "0.4.1", path = "misc/allow-block-list" } -libp2p-autonat = { version = "0.13.0", path = "protocols/autonat" } +libp2p-autonat = { version = "0.13.1", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.4.0", path = "misc/connection-limits" } libp2p-core = { version = "0.42.0", path = "core" } libp2p-dcutr = { version = "0.12.0", path = "protocols/dcutr" } diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index e171412aa58..f1aeda6ac18 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,3 +1,6 @@ +## 0.13.1 +- Verify that an incoming AutoNAT dial comes from a connected peer. See [PR 5597](https://github.com/libp2p/rust-libp2p/pull/5597). 
+ ## 0.13.0 - Due to the refactor of `Transport` it's no longer required to create a seperate transport for diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 2c01d18dceb..0c0e757641d 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-autonat" edition = "2021" rust-version = { workspace = true } description = "NAT and firewall detection for libp2p" -version = "0.13.0" +version = "0.13.1" authors = ["David Craven ", "Elena Frank ", "Hannes Furmans "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs index 3ecdd3ac26e..1289bd53d24 100644 --- a/protocols/autonat/src/v1/behaviour/as_server.rs +++ b/protocols/autonat/src/v1/behaviour/as_server.rs @@ -107,6 +107,21 @@ impl<'a> HandleInnerEvent for AsServer<'a> { }, } => { let probe_id = self.probe_id.next(); + if !self.connected.contains_key(&peer) { + tracing::debug!( + %peer, + "Reject inbound dial request from peer since it is not connected" + ); + + return VecDeque::from([ToSwarm::GenerateEvent(Event::InboundProbe( + InboundProbeEvent::Error { + probe_id, + peer, + error: InboundProbeError::Response(ResponseError::DialRefused), + }, + ))]); + } + match self.resolve_inbound_request(peer, request) { Ok(addrs) => { tracing::debug!( From fd4e1e1e89189af938460a182ad3c2374654c7e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 20 Sep 2024 22:37:32 +0100 Subject: [PATCH 03/50] chore(ci): only run interop tests on commits to master (#5604) ## Description This is done as temporary measure to unblock PR merging as the CI is currently broken Co-authored-by: Guillaume Michel --- .github/workflows/interop-test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 1d70ca2eaee..558adcda66c 100644 --- 
a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -1,6 +1,5 @@ name: Interoperability Testing on: - pull_request: push: branches: - "master" From c6cf7fec6913aa590622aeea16709fce6e9c99a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 24 Sep 2024 16:00:02 +0100 Subject: [PATCH 04/50] fix(ci): address cargo-deny advisories (#5596) ## Description by updating: - `bytes` to 1.7.1, `1.6.0` was [yanked](https://crates.io/crates/bytes/1.6.0) - `quinn-proto` to 0.11.8 to address [RUSTSEC-2024-0373](https://rustsec.org/advisories/RUSTSEC-2024-0373.html) - thirtyfour-macros to 0.1.11 to remove `proc-macro-error` dependency and address [RUSTSEC-2024-0370](https://rustsec.org/advisories/RUSTSEC-2024-0370.html) --- Cargo.lock | 73 ++++++++++++------------------- examples/autonatv2/Dockerfile | 2 +- hole-punching-tests/Dockerfile | 2 +- interop-tests/Dockerfile.chromium | 2 +- interop-tests/Dockerfile.native | 2 +- misc/server/Dockerfile | 2 +- protocols/perf/Dockerfile | 2 +- 7 files changed, 33 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3d1cd0d76d..4c3498a0635 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -833,9 +833,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" dependencies = [ "serde", ] @@ -4584,30 +4584,6 @@ dependencies = [ "elliptic-curve", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - 
-[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro2" version = "1.0.85" @@ -4724,7 +4700,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash", + "rustc-hash 1.1.0", "rustls 0.23.11", "thiserror", "tokio", @@ -4733,14 +4709,14 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.2" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e974563a4b1c2206bbc61191ca4da9c22e4308b4c455e8906751cc7828393f08" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand 0.8.5", "ring 0.17.8", - "rustc-hash", + "rustc-hash 2.0.0", "rustls 0.23.11", "slab", "thiserror", @@ -5206,6 +5182,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.4.0" @@ -5471,18 +5453,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum 
= "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -5513,9 +5495,9 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", @@ -6002,30 +5984,29 @@ dependencies = [ [[package]] name = "thirtyfour-macros" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cae91d1c7c61ec65817f1064954640ee350a50ae6548ff9a1bdd2489d6ffbb0" +checksum = "b72d056365e368fc57a56d0cec9e41b02fb4a3474a61c8735262b1cfebe67425" dependencies = [ - "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.66", ] [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", @@ -6576,9 +6557,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna 0.5.0", diff --git 
a/examples/autonatv2/Dockerfile b/examples/autonatv2/Dockerfile index 5a523649d80..6bc92e4d11b 100644 --- a/examples/autonatv2/Dockerfile +++ b/examples/autonatv2/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.75-alpine as builder +FROM rust:1.81-alpine as builder RUN apk add musl-dev diff --git a/hole-punching-tests/Dockerfile b/hole-punching-tests/Dockerfile index af00ef2272f..403cc301fc6 100644 --- a/hole-punching-tests/Dockerfile +++ b/hole-punching-tests/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.75.0 as builder +FROM rust:1.81.0 as builder # Run with access to the target cache to speed up builds WORKDIR /workspace diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium index a6b0fc89e82..86edbc5b9d2 100644 --- a/interop-tests/Dockerfile.chromium +++ b/interop-tests/Dockerfile.chromium @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.75.0 as chef +FROM rust:1.81 as chef RUN rustup target add wasm32-unknown-unknown RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack" RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt" diff --git a/interop-tests/Dockerfile.native b/interop-tests/Dockerfile.native index b122ac72991..499c73437fc 100644 --- a/interop-tests/Dockerfile.native +++ b/interop-tests/Dockerfile.native @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM lukemathwalker/cargo-chef:0.1.62-rust-1.75.0 as chef +FROM lukemathwalker/cargo-chef:0.1.67-rust-bullseye as chef WORKDIR /app FROM chef AS planner diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile index 1583fba6bef..24ae2b9fd99 100644 --- a/misc/server/Dockerfile +++ b/misc/server/Dockerfile @@ 
-1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.75.0 as chef +FROM rust:1.81.0 as chef RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin RUN cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4 WORKDIR /app diff --git a/protocols/perf/Dockerfile b/protocols/perf/Dockerfile index 1bd846cc228..f68ea6ef211 100644 --- a/protocols/perf/Dockerfile +++ b/protocols/perf/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.5-labs -FROM rust:1.75.0 as builder +FROM rust:1.81.0 as builder # Run with access to the target cache to speed up builds WORKDIR /workspace From f3e0e554821ca9233f202e80d1bae9e27cfb3ab7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 25 Sep 2024 10:36:01 +0100 Subject: [PATCH 05/50] chore(ci): address beta clippy lints (#5606) Co-authored-by: Darius Clark --- misc/allow-block-list/src/lib.rs | 2 ++ misc/connection-limits/src/lib.rs | 4 ++++ misc/memory-connection-limits/src/lib.rs | 2 ++ misc/memory-connection-limits/tests/util/mod.rs | 2 ++ misc/quick-protobuf-codec/src/lib.rs | 1 + .../autonat/src/v2/client/handler/dial_back.rs | 2 ++ .../autonat/src/v2/client/handler/dial_request.rs | 2 ++ protocols/autonat/src/v2/server/behaviour.rs | 2 ++ .../autonat/src/v2/server/handler/dial_request.rs | 2 ++ protocols/dcutr/src/behaviour.rs | 2 ++ protocols/dcutr/src/handler/relayed.rs | 8 ++++++++ protocols/gossipsub/src/handler.rs | 4 ++++ protocols/gossipsub/src/transform.rs | 1 + protocols/kad/src/handler.rs | 2 ++ protocols/perf/src/client/handler.rs | 4 ++++ protocols/perf/src/server/handler.rs | 8 ++++++++ protocols/ping/src/handler.rs | 2 ++ protocols/relay/src/behaviour.rs | 2 ++ protocols/relay/src/behaviour/handler.rs | 2 ++ protocols/relay/src/priv_client.rs | 2 ++ protocols/relay/src/priv_client/handler.rs | 6 ++++++ protocols/request-response/src/cbor.rs | 2 ++ 
protocols/request-response/src/handler.rs | 6 ++++++ protocols/stream/src/handler.rs | 4 ++++ swarm/src/behaviour/toggle.rs | 4 ++++ swarm/src/connection.rs | 10 ++++++++++ swarm/src/connection/pool/task.rs | 4 ++++ swarm/src/dummy.rs | 14 ++++++++++++++ swarm/src/handler/pending.rs | 8 ++++++++ swarm/src/upgrade.rs | 1 + swarm/tests/swarm_derive.rs | 2 ++ transports/quic/src/hole_punching.rs | 2 ++ 32 files changed, 119 insertions(+) diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index 7646638a651..56de29d1985 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -271,6 +271,8 @@ where _: ConnectionId, event: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index b02e52f25a1..05a9b639f26 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -355,6 +355,8 @@ impl NetworkBehaviour for Behaviour { _: ConnectionId, event: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } @@ -586,6 +588,8 @@ mod tests { _connection_id: ConnectionId, event: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index 7b5803a61aa..757ff770487 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -190,6 +190,8 @@ impl NetworkBehaviour for Behaviour { _: ConnectionId, event: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs index d18aa78fd22..01e8cd9f655 100644 --- 
a/misc/memory-connection-limits/tests/util/mod.rs +++ b/misc/memory-connection-limits/tests/util/mod.rs @@ -116,6 +116,8 @@ impl NetworkBehaviour _: ConnectionId, event: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index 166ee82ff08..32cee8eccac 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -12,6 +12,7 @@ mod generated; pub use generated::test as proto; /// [`Codec`] implements [`Encoder`] and [`Decoder`], uses [`unsigned_varint`] +/// /// to prefix messages with their length and uses [`quick_protobuf`] and a provided /// `struct` implementing [`MessageRead`] and [`MessageWrite`] to do the encoding. pub struct Codec { diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs index b94580e69ba..98a41a82504 100644 --- a/protocols/autonat/src/v2/client/handler/dial_back.rs +++ b/protocols/autonat/src/v2/client/handler/dial_back.rs @@ -83,6 +83,8 @@ impl ConnectionHandler for Handler { tracing::warn!("Dial back request dropped, too many requests in flight"); } } + // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. 
}) => { void::unreachable(error); } diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs index 9d2df8ee6b4..85ad176ec30 100644 --- a/protocols/autonat/src/v2/client/handler/dial_request.rs +++ b/protocols/autonat/src/v2/client/handler/dial_request.rs @@ -216,6 +216,8 @@ async fn start_stream_handle( .map_err(|e| match e { StreamUpgradeError::NegotiationFailed => Error::UnsupportedProtocol, StreamUpgradeError::Timeout => Error::Io(io::ErrorKind::TimedOut.into()), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(v) => void::unreachable(v), StreamUpgradeError::Io(e) => Error::Io(e), })?; diff --git a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs index 5f7b21d165b..9264c728fe4 100644 --- a/protocols/autonat/src/v2/server/behaviour.rs +++ b/protocols/autonat/src/v2/server/behaviour.rs @@ -112,6 +112,8 @@ where Either::Left(Either::Left(Err(e))) => { tracing::debug!("dial back error: {e:?}"); } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Left(Either::Right(v)) => void::unreachable(v), Either::Right(Either::Left(cmd)) => { let addr = cmd.addr.clone(); diff --git a/protocols/autonat/src/v2/server/handler/dial_request.rs b/protocols/autonat/src/v2/server/handler/dial_request.rs index 9a3729d4ccf..14ddb153416 100644 --- a/protocols/autonat/src/v2/server/handler/dial_request.rs +++ b/protocols/autonat/src/v2/server/handler/dial_request.rs @@ -143,6 +143,8 @@ where ); } } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. 
}) => { tracing::debug!("inbound request failed: {:?}", error); } diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 574c96205fa..babd56bd28e 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -314,6 +314,8 @@ impl NetworkBehaviour for Behaviour { .or_default() += 1; self.queued_events.push_back(ToSwarm::Dial { opts }); } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Right(never) => void::unreachable(never), }; } diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index eba58f89313..72af9fec264 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -115,6 +115,8 @@ impl Handler { self.attempts += 1; } // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] future::Either::Right(output) => void::unreachable(output), } } @@ -153,6 +155,8 @@ impl Handler { ::InboundProtocol, >, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(error.into_inner()); } @@ -164,6 +168,8 @@ impl Handler { >, ) { let error = match error { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(v) => void::unreachable(v), StreamUpgradeError::NegotiationFailed => outbound::Error::Unsupported, StreamUpgradeError::Io(e) => outbound::Error::Io(e), @@ -298,6 +304,8 @@ impl ConnectionHandler for Handler { ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { self.on_fully_negotiated_outbound(fully_negotiated_outbound) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } diff --git a/protocols/gossipsub/src/handler.rs 
b/protocols/gossipsub/src/handler.rs index 88def13a521..8e3b3a8b022 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -493,6 +493,8 @@ impl ConnectionHandler for Handler { .. }) => match protocol { Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Right(v) => void::unreachable(v), }, ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { @@ -504,6 +506,8 @@ impl ConnectionHandler for Handler { }) => { tracing::debug!("Dial upgrade error: Protocol negotiation timeout"); } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::Apply(e), .. diff --git a/protocols/gossipsub/src/transform.rs b/protocols/gossipsub/src/transform.rs index 6f57d9fc46b..4831f9781b0 100644 --- a/protocols/gossipsub/src/transform.rs +++ b/protocols/gossipsub/src/transform.rs @@ -28,6 +28,7 @@ use crate::{Message, RawMessage, TopicHash}; /// A general trait of transforming a [`RawMessage`] into a [`Message`]. The +/// /// [`RawMessage`] is obtained from the wire and the [`Message`] is used to /// calculate the [`crate::MessageId`] of the message and is what is sent to the application. /// diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 5e7c2e21b8b..17c483da709 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -501,6 +501,8 @@ impl Handler { // is a `Void`. 
let protocol = match protocol { future::Either::Left(p) => p, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] future::Either::Right(p) => void::unreachable(p), }; diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 2a2c5499fc2..55fafad7fcc 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -112,6 +112,8 @@ impl ConnectionHandler for Handler { >, ) { match event { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. }) => void::unreachable(protocol), @@ -144,6 +146,8 @@ impl ConnectionHandler for Handler { result: Err(error.into()), })); } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { void::unreachable(error) } diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index ddfe8f881e5..4cb535a452c 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -73,6 +73,8 @@ impl ConnectionHandler for Handler { } fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(v) } @@ -98,16 +100,22 @@ impl ConnectionHandler for Handler { tracing::warn!("Dropping inbound stream because we are at capacity"); } } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { info, .. }) => { void::unreachable(info) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::DialUpgradeError(DialUpgradeError { info, .. 
}) => { void::unreachable(info) } ConnectionEvent::AddressChange(_) | ConnectionEvent::LocalProtocolsChange(_) | ConnectionEvent::RemoteProtocolsChange(_) => {} + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { void::unreachable(error) } diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 2816cdc4048..7b36b2d4b3d 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -210,6 +210,8 @@ impl Handler { "ping protocol negotiation timed out", )), }, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(e) => void::unreachable(e), StreamUpgradeError::Io(e) => Failure::Other { error: Box::new(e) }, }; diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 463febf9f2f..46419ae64e3 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -366,6 +366,8 @@ impl NetworkBehaviour for Behaviour { ) { let event = match event { Either::Left(e) => e, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Right(v) => void::unreachable(v), }; diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 92557287099..23e90f4b3f8 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -449,6 +449,8 @@ impl Handler { StreamUpgradeError::Timeout => outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), StreamUpgradeError::NegotiationFailed => outbound_stop::Error::Unsupported, StreamUpgradeError::Io(e) => outbound_stop::Error::Io(e), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(v) => void::unreachable(v), }; diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index f8d1d9c9eb2..8bbc813ec4c 100644 --- 
a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -236,6 +236,8 @@ impl NetworkBehaviour for Behaviour { ) { let handler_event = match handler_event { Either::Left(e) => e, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Right(v) => void::unreachable(v), }; diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 662d63cc742..05fdd5673ae 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -445,6 +445,8 @@ impl ConnectionHandler for Handler { let _ = next.send(Ok(ev.protocol)); } } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ev) => void::unreachable(ev.error), ConnectionEvent::DialUpgradeError(ev) => { if let Some(next) = self.pending_streams.pop_front() { @@ -583,6 +585,8 @@ fn into_reserve_error(e: StreamUpgradeError) -> outbound_hop::ReserveError StreamUpgradeError::Timeout => { outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(never) => void::unreachable(never), StreamUpgradeError::NegotiationFailed => outbound_hop::ReserveError::Unsupported, StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), @@ -594,6 +598,8 @@ fn into_connect_error(e: StreamUpgradeError) -> outbound_hop::ConnectError StreamUpgradeError::Timeout => { outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(never) => void::unreachable(never), StreamUpgradeError::NegotiationFailed => outbound_hop::ConnectError::Unsupported, StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs index 44d82be2630..a27d069e758 100644 --- 
a/protocols/request-response/src/cbor.rs +++ b/protocols/request-response/src/cbor.rs @@ -143,6 +143,8 @@ mod codec { fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Error { match err { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { io::Error::new(io::ErrorKind::Other, e) } diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index f0467593f85..0591b37dc30 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -240,6 +240,8 @@ where self.pending_events .push_back(Event::OutboundUnsupportedProtocols(message.request_id)); } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Apply(e) => void::unreachable(e), StreamUpgradeError::Io(e) => { self.pending_events.push_back(Event::OutboundStreamFailed { @@ -256,6 +258,8 @@ where ::InboundProtocol, >, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(error) } } @@ -484,6 +488,8 @@ where ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs index f63b93c1761..bf80e30c3c6 100644 --- a/protocols/stream/src/handler.rs +++ b/protocols/stream/src/handler.rs @@ -96,6 +96,8 @@ impl ConnectionHandler for Handler { } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } @@ -143,6 +145,8 @@ impl ConnectionHandler for Handler { swarm::StreamUpgradeError::Timeout => { 
OpenStreamError::Io(io::Error::from(io::ErrorKind::TimedOut)) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] swarm::StreamUpgradeError::Apply(v) => void::unreachable(v), swarm::StreamUpgradeError::NegotiationFailed => { OpenStreamError::UnsupportedProtocol(p) diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 398c919ae86..5d72534c91e 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -210,6 +210,8 @@ where ) { let out = match out { future::Either::Left(out) => out, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] future::Either::Right(v) => void::unreachable(v), }; @@ -251,6 +253,8 @@ where let err = match err { Either::Left(e) => e, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Right(v) => void::unreachable(v), }; diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 603a5b0d7c4..2f9afc38418 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -1189,10 +1189,14 @@ mod tests { >, ) { match event { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. }) => void::unreachable(protocol), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, .. @@ -1200,6 +1204,8 @@ mod tests { ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. 
}) => { self.error = Some(error) } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) | ConnectionEvent::LocalProtocolsChange(_) @@ -1208,6 +1214,8 @@ mod tests { } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } @@ -1283,6 +1291,8 @@ mod tests { } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index 08674fd2ee5..13977a17b85 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -105,6 +105,8 @@ pub(crate) async fn new_for_pending_outgoing_connection( }) .await; } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Left((Ok(v), _)) => void::unreachable(v), Either::Right((Ok((address, output, errors)), _)) => { let _ = events @@ -143,6 +145,8 @@ pub(crate) async fn new_for_pending_incoming_connection( }) .await; } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Left((Ok(v), _)) => void::unreachable(v), Either::Right((Ok(output), _)) => { let _ = events diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index 6e1b4d56eb9..0bd8c06862d 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -49,6 +49,8 @@ impl NetworkBehaviour for Behaviour { _: ConnectionId, event: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } @@ -76,6 +78,8 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(event) } @@ -98,19 +102,29 @@ impl 
crate::handler::ConnectionHandler for ConnectionHandler { >, ) { match event { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. }) => void::unreachable(protocol), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, .. }) => void::unreachable(protocol), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::DialUpgradeError(DialUpgradeError { info: _, error }) => match error { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] StreamUpgradeError::Timeout => unreachable!(), StreamUpgradeError::Apply(e) => void::unreachable(e), StreamUpgradeError::NegotiationFailed | StreamUpgradeError::Io(_) => { unreachable!("Denied upgrade does not support any protocols") } }, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) | ConnectionEvent::LocalProtocolsChange(_) diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index 23b9adcfd90..9601f5cf78b 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -52,6 +52,8 @@ impl ConnectionHandler for PendingConnectionHandler { } fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(v) } @@ -74,9 +76,13 @@ impl ConnectionHandler for PendingConnectionHandler { >, ) { match event { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. 
}) => void::unreachable(protocol), + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, info: _info, @@ -87,6 +93,8 @@ impl ConnectionHandler for PendingConnectionHandler { void::unreachable(_info); } } + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] ConnectionEvent::AddressChange(_) | ConnectionEvent::DialUpgradeError(_) | ConnectionEvent::ListenUpgradeError(_) diff --git a/swarm/src/upgrade.rs b/swarm/src/upgrade.rs index 53b627458c9..f6c6648a373 100644 --- a/swarm/src/upgrade.rs +++ b/swarm/src/upgrade.rs @@ -121,6 +121,7 @@ where } /// Wraps around a type that implements [`OutboundUpgradeSend`], [`InboundUpgradeSend`], or +/// /// both, and implements [`OutboundUpgrade`](upgrade::OutboundUpgrade) and/or /// [`InboundUpgrade`](upgrade::InboundUpgrade). /// diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index 919ed0cab7f..667f68408cf 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -577,6 +577,8 @@ fn custom_out_event_no_type_parameters() { _connection: ConnectionId, message: THandlerOutEvent, ) { + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] void::unreachable(message); } diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index 605799af5e1..a38d123a6a4 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -20,6 +20,8 @@ pub(crate) async fn hole_puncher( match futures::future::select(P::sleep(timeout_duration), punch_holes_future).await { Either::Left(_) => Error::HandshakeTimedOut, Either::Right((Err(hole_punch_err), _)) => hole_punch_err, + // TODO: remove when Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Either::Right((Ok(never), _)) => match never {}, } } From 8ceadaac5aec4b462463ef4082d6af577a3158b1 Mon Sep 17 00:00:00 2001 From: stormshield-frb 
<144998884+stormshield-frb@users.noreply.github.com> Date: Thu, 26 Sep 2024 16:17:47 +0200 Subject: [PATCH 06/50] fix(swarm): don't report `NewExternalAddrCandidate` if already confirmed (#5582) ## Description Currently, `NewExternalAddrCandidate` events are emitted for every connections. However, we continue to get this event even when `autonat` has already confirmed that this address is external. So we should not continue to advertise the "candidate" event. ## Notes & open questions We have made the changes in the `swarm` instead of `identify` because it does not make it necessary to duplicate the `ConfirmedExternalAddr` vector in the `identify` Behaviour. Moreover, if any future Behaviour emit `NewExternalAddrCandidate`, the same rule will also be applied. I had to edit the `autonat_v2` tests which were always expecting a `NewExternalAddrCandidate` but the address was already confirmed. ## Change checklist - [x] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --------- Co-authored-by: Darius Clark Co-authored-by: Guillaume Michel --- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/autonat/tests/autonatv2.rs | 38 +++++++++++++--------------- swarm/CHANGELOG.md | 5 ++++ swarm/Cargo.toml | 2 +- swarm/src/lib.rs | 14 +++++----- 6 files changed, 34 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c3498a0635..79eb2eb4ad5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3329,7 +3329,7 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.45.1" +version = "0.45.2" dependencies = [ "async-std", "criterion", diff --git a/Cargo.toml b/Cargo.toml index c9fe928096d..32f72c5e252 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,7 @@ libp2p-rendezvous = { version = "0.15.0", path = "protocols/rendezvous" } libp2p-request-response = { 
version = "0.27.0", path = "protocols/request-response" } libp2p-server = { version = "0.12.7", path = "misc/server" } libp2p-stream = { version = "0.2.0-alpha", path = "protocols/stream" } -libp2p-swarm = { version = "0.45.1", path = "swarm" } +libp2p-swarm = { version = "0.45.2", path = "swarm" } libp2p-swarm-derive = { version = "=0.35.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. libp2p-swarm-test = { version = "0.4.0", path = "swarm-test" } libp2p-tcp = { version = "0.42.0", path = "transports/tcp" } diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs index abd0c4bd8eb..3d792172445 100644 --- a/protocols/autonat/tests/autonatv2.rs +++ b/protocols/autonat/tests/autonatv2.rs @@ -1,5 +1,6 @@ use libp2p_autonat::v2::client::{self, Config}; use libp2p_autonat::v2::server; +use libp2p_core::multiaddr::Protocol; use libp2p_core::transport::TransportError; use libp2p_core::Multiaddr; use libp2p_swarm::{ @@ -21,17 +22,10 @@ async fn confirm_successful() { let cor_server_peer = *alice.local_peer_id(); let cor_client_peer = *bob.local_peer_id(); - let bob_external_addrs = Arc::new(bob.external_addresses().cloned().collect::>()); - let alice_bob_external_addrs = bob_external_addrs.clone(); + let bob_tcp_listeners = Arc::new(tcp_listeners(&bob)); + let alice_bob_tcp_listeners = bob_tcp_listeners.clone(); let alice_task = async { - let _ = alice - .wait(|event| match event { - SwarmEvent::NewExternalAddrCandidate { .. 
} => Some(()), - _ => None, - }) - .await; - let (dialed_peer_id, dialed_connection_id) = alice .wait(|event| match event { SwarmEvent::Dialing { @@ -76,10 +70,10 @@ async fn confirm_successful() { }) .await; - assert_eq!(tested_addr, bob_external_addrs.first().cloned().unwrap()); + assert_eq!(tested_addr, bob_tcp_listeners.first().cloned().unwrap()); assert_eq!(data_amount, 0); assert_eq!(client, cor_client_peer); - assert_eq!(&all_addrs[..], &bob_external_addrs[..]); + assert_eq!(&all_addrs[..], &bob_tcp_listeners[..]); assert!(result.is_ok(), "Result: {result:?}"); }; @@ -122,7 +116,7 @@ async fn confirm_successful() { .await; assert_eq!( tested_addr, - alice_bob_external_addrs.first().cloned().unwrap() + alice_bob_tcp_listeners.first().cloned().unwrap() ); assert_eq!(bytes_sent, 0); assert_eq!(server, cor_server_peer); @@ -446,7 +440,7 @@ async fn new_client() -> Swarm { identity.public().clone(), )), }); - node.listen().with_tcp_addr_external().await; + node.listen().await; node } @@ -490,13 +484,6 @@ async fn bootstrap() -> (Swarm, Swarm) { let cor_client_peer = *bob.local_peer_id(); let alice_task = async { - let _ = alice - .wait(|event| match event { - SwarmEvent::NewExternalAddrCandidate { .. } => Some(()), - _ => None, - }) - .await; - let (dialed_peer_id, dialed_connection_id) = alice .wait(|event| match event { SwarmEvent::Dialing { @@ -566,3 +553,14 @@ async fn bootstrap() -> (Swarm, Swarm) { tokio::join!(alice_task, bob_task); (alice, bob) } + +fn tcp_listeners(swarm: &Swarm) -> Vec { + swarm + .listeners() + .filter(|addr| { + addr.iter() + .any(|protocol| matches!(protocol, Protocol::Tcp(_))) + }) + .cloned() + .collect::>() +} diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index e7931a60de2..c5d10872d40 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.45.2 + +- Don't report `NewExternalAddrCandidate` for confirmed external addresses. + See [PR 5582](https://github.com/libp2p/rust-libp2p/pull/5582). 
+ ## 0.45.1 - Update `libp2p-swarm-derive` to version `0.35.0`, see [PR 5545] diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 3d0b1a84eee..cdee67f3fb3 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm" edition = "2021" rust-version = { workspace = true } description = "The libp2p swarm" -version = "0.45.1" +version = "0.45.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 81b1ca1a68d..12280e99f07 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -1140,12 +1140,14 @@ where self.pending_handler_event = Some((peer_id, handler, event)); } ToSwarm::NewExternalAddrCandidate(addr) => { - self.behaviour - .on_swarm_event(FromSwarm::NewExternalAddrCandidate( - NewExternalAddrCandidate { addr: &addr }, - )); - self.pending_swarm_events - .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); + if !self.confirmed_external_addr.contains(&addr) { + self.behaviour + .on_swarm_event(FromSwarm::NewExternalAddrCandidate( + NewExternalAddrCandidate { addr: &addr }, + )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); + } } ToSwarm::ExternalAddrConfirmed(addr) => { self.add_external_address(addr.clone()); From 89d78dda296fc3747d3760f73d4b6c558e34c8d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 4 Oct 2024 14:35:08 +0100 Subject: [PATCH 07/50] chore(autonat-v2): fix dial_back_to_non_libp2p test (#5621) ## Description By avoiding dialing an external address Co-authored-by: Guillaume Michel --- protocols/autonat/tests/autonatv2.rs | 147 +++++++++++++-------------- 1 file changed, 72 insertions(+), 75 deletions(-) diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs index 3d792172445..f22a2e51470 100644 --- a/protocols/autonat/tests/autonatv2.rs +++ b/protocols/autonat/tests/autonatv2.rs @@ -232,87 
+232,84 @@ async fn dial_back_to_non_libp2p() { let (mut alice, mut bob) = bootstrap().await; let alice_peer_id = *alice.local_peer_id(); - for addr_str in ["/ip4/169.150.247.38/tcp/32", "/ip6/::1/tcp/1000"] { - let addr: Multiaddr = addr_str.parse().unwrap(); - let bob_addr = addr.clone(); - bob.behaviour_mut() - .autonat - .on_swarm_event(FromSwarm::NewExternalAddrCandidate( - NewExternalAddrCandidate { addr: &addr }, - )); - - let alice_task = async { - let (alice_dialing_peer, alice_conn_id) = alice - .wait(|event| match event { - SwarmEvent::Dialing { - peer_id, - connection_id, - } => peer_id.map(|p| (p, connection_id)), - _ => None, - }) - .await; - let mut outgoing_conn_error = alice - .wait(|event| match event { - SwarmEvent::OutgoingConnectionError { - connection_id, - peer_id: Some(peer_id), - error: DialError::Transport(peers), - } if connection_id == alice_conn_id && peer_id == alice_dialing_peer => { - Some(peers) - } - _ => None, - }) - .await; - - if let Some((multiaddr, TransportError::Other(o))) = outgoing_conn_error.pop() { - assert_eq!( - multiaddr, - addr.clone().with_p2p(alice_dialing_peer).unwrap() - ); - let error_string = o.to_string(); - assert!( - error_string.contains("Connection refused"), - "Correct error string: {error_string} for {addr_str}" - ); - } else { - panic!("No outgoing connection errors"); - } + let addr_str = "/ip6/::1/tcp/1000"; + let addr: Multiaddr = addr_str.parse().unwrap(); + let bob_addr = addr.clone(); + bob.behaviour_mut() + .autonat + .on_swarm_event(FromSwarm::NewExternalAddrCandidate( + NewExternalAddrCandidate { addr: &addr }, + )); - alice - .wait(|event| match event { - SwarmEvent::Behaviour(CombinedServerEvent::Autonat(server::Event { - all_addrs, - tested_addr, - client, - data_amount, - result: Ok(()), - })) if all_addrs == vec![addr.clone()] - && tested_addr == addr - && alice_dialing_peer == client => - { - Some(data_amount) - } - _ => None, - }) - .await - }; - let bob_task = async { - bob.wait(|event| 
match event { - SwarmEvent::Behaviour(CombinedClientEvent::Autonat(client::Event { + let alice_task = async { + let (alice_dialing_peer, alice_conn_id) = alice + .wait(|event| match event { + SwarmEvent::Dialing { + peer_id, + connection_id, + } => peer_id.map(|p| (p, connection_id)), + _ => None, + }) + .await; + let mut outgoing_conn_error = alice + .wait(|event| match event { + SwarmEvent::OutgoingConnectionError { + connection_id, + peer_id: Some(peer_id), + error: DialError::Transport(peers), + } if connection_id == alice_conn_id && peer_id == alice_dialing_peer => Some(peers), + _ => None, + }) + .await; + + if let Some((multiaddr, TransportError::Other(o))) = outgoing_conn_error.pop() { + assert_eq!( + multiaddr, + addr.clone().with_p2p(alice_dialing_peer).unwrap() + ); + let error_string = o.to_string(); + assert!( + error_string.contains("Connection refused"), + "Correct error string: {error_string} for {addr_str}" + ); + } else { + panic!("No outgoing connection errors"); + } + + alice + .wait(|event| match event { + SwarmEvent::Behaviour(CombinedServerEvent::Autonat(server::Event { + all_addrs, tested_addr, - bytes_sent, - server, - result: Err(_), - })) if tested_addr == bob_addr && server == alice_peer_id => Some(bytes_sent), + client, + data_amount, + result: Ok(()), + })) if all_addrs == vec![addr.clone()] + && tested_addr == addr + && alice_dialing_peer == client => + { + Some(data_amount) + } _ => None, }) .await - }; + }; + let bob_task = async { + bob.wait(|event| match event { + SwarmEvent::Behaviour(CombinedClientEvent::Autonat(client::Event { + tested_addr, + bytes_sent, + server, + result: Err(_), + })) if tested_addr == bob_addr && server == alice_peer_id => Some(bytes_sent), + _ => None, + }) + .await + }; - let (alice_bytes_sent, bob_bytes_sent) = tokio::join!(alice_task, bob_task); - assert_eq!(alice_bytes_sent, bob_bytes_sent); - bob.behaviour_mut().autonat.validate_addr(&addr); - } + let (alice_bytes_sent, bob_bytes_sent) = 
tokio::join!(alice_task, bob_task); + assert_eq!(alice_bytes_sent, bob_bytes_sent); + bob.behaviour_mut().autonat.validate_addr(&addr); } #[tokio::test] From b83dd95c384fefc0aca489f5ffefa74c72db0d9d Mon Sep 17 00:00:00 2001 From: Guillaume Michel Date: Fri, 4 Oct 2024 15:50:57 +0200 Subject: [PATCH 08/50] chore: update interop test run condition (#5611) ## Description Follow up to https://github.com/libp2p/rust-libp2p/pull/5604. Interop tests only work on the main `rust-libp2p` repo, and not on forks, because of the S3 cache (introduced in https://github.com/libp2p/rust-libp2p/pull/5586). The interop tests currently don't run in the PRs, but they run after the PRs are merged to `master`. This PR is trying to run interop tests in PR that are branches of the main repo (not forks). --- .github/workflows/interop-test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 558adcda66c..57d0f1a692d 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -11,6 +11,7 @@ concurrency: jobs: run-transport-interop: name: Run transport interoperability tests + if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} strategy: matrix: @@ -39,6 +40,7 @@ jobs: worker-count: 16 run-holepunching-interop: name: Run hole-punch interoperability tests + if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} steps: - uses: actions/checkout@v4 From 7669b1696bf62fa82e2a1da959cf537dc98959ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 4 Oct 2024 15:45:16 +0100 Subject: [PATCH 09/50] deps: 
update metrics example dependencies (#5617) ## Description and address cargo-deny [RUSTSEC-2024-0376](https://rustsec.org/advisories/RUSTSEC-2024-0376.html) --- Cargo.lock | 196 +++++++++++++---------------------- examples/metrics/Cargo.toml | 8 +- examples/metrics/src/main.rs | 8 +- 3 files changed, 80 insertions(+), 132 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79eb2eb4ad5..38c3436237a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -545,34 +545,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "axum" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" -dependencies = [ - "async-trait", - "axum-core 0.3.4", - "bitflags 1.3.2", - "bytes", - "futures-util", - "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.27", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper 0.1.2", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "axum" version = "0.7.5" @@ -580,13 +552,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", - "axum-core 0.4.3", + "axum-core", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.1.0", + "hyper 1.4.1", "hyper-util", "itoa", "matchit", @@ -607,23 +579,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "axum-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 0.2.9", - "http-body 0.4.5", - "mime", - "rustversion", - "tower-layer", - "tower-service", -] - [[package]] name = "axum-core" version = "0.4.3" @@ -633,7 +588,7 @@ dependencies = [ 
"async-trait", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "mime", @@ -779,7 +734,7 @@ name = "browser-webrtc-example" version = "0.1.0" dependencies = [ "anyhow", - "axum 0.7.5", + "axum", "futures", "js-sys", "libp2p", @@ -1940,7 +1895,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 1.0.0", + "http 1.1.0", "indexmap 2.2.1", "slab", "tokio", @@ -2131,9 +2086,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -2158,7 +2113,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] @@ -2169,7 +2124,7 @@ checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" dependencies = [ "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "pin-project-lite", ] @@ -2224,20 +2179,21 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", "h2 0.4.4", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "httpdate", "itoa", "pin-project-lite", + "smallvec", "tokio", "want", ] @@ -2249,8 +2205,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", - "http 1.0.0", - "hyper 
1.1.0", + "http 1.1.0", + "hyper 1.4.1", "hyper-util", "rustls 0.22.4", "rustls-pki-types", @@ -2261,14 +2217,15 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 0.14.27", + "hyper 1.4.1", + "hyper-util", "pin-project-lite", "tokio", - "tokio-io-timeout", + "tower-service", ] [[package]] @@ -2279,7 +2236,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.1.0", + "hyper 1.4.1", "hyper-util", "native-tls", "tokio", @@ -2289,20 +2246,19 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.4.1", "pin-project-lite", "socket2 0.5.7", "tokio", - "tower", "tower-service", "tracing", ] @@ -2456,7 +2412,7 @@ name = "interop-tests" version = "0.1.0" dependencies = [ "anyhow", - "axum 0.7.5", + "axum", "console_error_panic_hook", "either", "futures", @@ -3295,7 +3251,7 @@ dependencies = [ name = "libp2p-server" version = "0.12.7" dependencies = [ - "axum 0.7.5", + "axum", "base64 0.22.1", "clap", "futures", @@ -3774,16 +3730,16 @@ dependencies = [ name = "metrics-example" version = "0.1.0" dependencies = [ - "axum 0.7.5", + "axum", "futures", "libp2p", - "opentelemetry 0.23.0", + "opentelemetry 0.25.0", "opentelemetry-otlp", - "opentelemetry_sdk 0.23.0", + "opentelemetry_sdk 0.25.0", "prometheus-client", 
"tokio", "tracing", - "tracing-opentelemetry 0.24.0", + "tracing-opentelemetry 0.26.0", "tracing-subscriber", ] @@ -4198,9 +4154,9 @@ dependencies = [ [[package]] name = "opentelemetry" -version = "0.23.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b69a91d4893e713e06f724597ad630f1fa76057a5e1026c0ca67054a9032a76" +checksum = "803801d3d3b71cd026851a53f974ea03df3d179cb758b260136a6c9e22e196af" dependencies = [ "futures-core", "futures-sink", @@ -4228,16 +4184,16 @@ dependencies = [ [[package]] name = "opentelemetry-otlp" -version = "0.16.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a94c69209c05319cdf7460c6d4c055ed102be242a0a6245835d7bc42c6ec7f54" +checksum = "596b1719b3cab83addb20bcbffdf21575279d9436d9ccccfe651a3bf0ab5ab06" dependencies = [ "async-trait", "futures-core", - "http 0.2.9", - "opentelemetry 0.23.0", + "http 1.1.0", + "opentelemetry 0.25.0", "opentelemetry-proto", - "opentelemetry_sdk 0.23.0", + "opentelemetry_sdk 0.25.0", "prost", "thiserror", "tokio", @@ -4246,12 +4202,12 @@ dependencies = [ [[package]] name = "opentelemetry-proto" -version = "0.6.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "984806e6cf27f2b49282e2a05e288f30594f3dbc74eb7a6e99422bc48ed78162" +checksum = "2c43620e8f93359eb7e627a3b16ee92d8585774986f24f2ab010817426c5ce61" dependencies = [ - "opentelemetry 0.23.0", - "opentelemetry_sdk 0.23.0", + "opentelemetry 0.25.0", + "opentelemetry_sdk 0.25.0", "prost", "tonic", ] @@ -4289,21 +4245,20 @@ dependencies = [ [[package]] name = "opentelemetry_sdk" -version = "0.23.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae312d58eaa90a82d2e627fd86e075cf5230b3f11794e2ed74199ebbe572d4fd" +checksum = "e0da0d6b47a3dbc6e9c9e36a0520e25cf943e046843818faaa3f87365a548c82" dependencies = [ "async-trait", "futures-channel", "futures-executor", 
"futures-util", "glob", - "lazy_static", "once_cell", - "opentelemetry 0.23.0", - "ordered-float 4.2.0", + "opentelemetry 0.25.0", "percent-encoding", "rand 0.8.5", + "serde_json", "thiserror", "tokio", "tokio-stream", @@ -4618,9 +4573,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.3" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", "prost-derive", @@ -4628,9 +4583,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools", @@ -4978,10 +4933,10 @@ dependencies = [ "futures-core", "futures-util", "h2 0.4.4", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.1.0", + "hyper 1.4.1", "hyper-rustls", "hyper-tls", "hyper-util", @@ -5965,7 +5920,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "futures", - "http 1.0.0", + "http 1.1.0", "indexmap 2.2.1", "parking_lot", "paste", @@ -6120,16 +6075,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.3.0" @@ -6164,9 +6109,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = 
"4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -6223,23 +6168,26 @@ dependencies = [ [[package]] name = "tonic" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", - "axum 0.6.20", - "base64 0.21.7", + "axum", + "base64 0.22.1", "bytes", - "h2 0.3.26", - "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.27", + "h2 0.4.4", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.1", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "prost", + "socket2 0.5.7", "tokio", "tokio-stream", "tower", @@ -6277,7 +6225,7 @@ dependencies = [ "bitflags 2.4.1", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "http-range-header", @@ -6369,14 +6317,14 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68803492bf28ab40aeccaecc7021096bd256baf7ca77c3d425d89b35a7be4e4" +checksum = "5eabc56d23707ad55ba2a0750fc24767125d5a0f51993ba41ad2c441cc7b8dea" dependencies = [ "js-sys", "once_cell", - "opentelemetry 0.23.0", - "opentelemetry_sdk 0.23.0", + "opentelemetry 0.25.0", + "opentelemetry_sdk 0.25.0", "smallvec", "tracing", "tracing-core", diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index 2b82668f52a..129b1abb1f3 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -12,13 +12,13 @@ release = false futures = { workspace = true } axum = "0.7" libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -opentelemetry = { version = "0.23.0", features 
= ["metrics"] } -opentelemetry-otlp = { version = "0.16.0", features = ["metrics"] } -opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio", "metrics"] } +opentelemetry = { version = "0.25.0", features = ["metrics"] } +opentelemetry-otlp = { version = "0.25.0", features = ["metrics"] } +opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio", "metrics"] } prometheus-client = { workspace = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } -tracing-opentelemetry = "0.24.0" +tracing-opentelemetry = "0.26.0" tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 99a9ca66aaf..1755c769053 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -25,7 +25,7 @@ use libp2p::core::Multiaddr; use libp2p::metrics::{Metrics, Recorder}; use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{identify, identity, noise, ping, tcp, yamux}; -use opentelemetry::KeyValue; +use opentelemetry::{trace::TracerProvider, KeyValue}; use prometheus_client::registry::Registry; use std::error::Error; use std::time::Duration; @@ -90,7 +90,7 @@ async fn main() -> Result<(), Box> { } fn setup_tracing() -> Result<(), Box> { - let tracer = opentelemetry_otlp::new_pipeline() + let provider = opentelemetry_otlp::new_pipeline() .tracing() .with_exporter(opentelemetry_otlp::new_exporter().tonic()) .with_trace_config(opentelemetry_sdk::trace::Config::default().with_resource( @@ -102,10 +102,10 @@ fn setup_tracing() -> Result<(), Box> { .with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) .with( tracing_opentelemetry::layer() - .with_tracer(tracer) + .with_tracer(provider.tracer("libp2p-subscriber")) .with_filter(EnvFilter::from_default_env()), ) - .try_init()?; + .init(); Ok(()) } From fcff3f80401895362060f8e448ec992b6db7fb9b Mon Sep 17 00:00:00 2001 From: P1R0 Date: Fri, 4 Oct 2024 
16:40:47 -0600 Subject: [PATCH 10/50] refactor(examples): use tokio instead of async-std in relay-server (#5600) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description Following on issue #4449 refactor: use tokio instead of async-std in the relay-server example and remove unnecessary dependencies ## Notes & open questions Fails on testing with the [hole punch tutorial](https://docs.rs/libp2p/0.54.1/libp2p/tutorials/hole_punching/index.html) possibly because of my networking topology. connection established event registered. I will publish the logs and testing information as a comment ## Change checklist * Removed unnecessary dependencies on examples/relay-server/Cargo.toml * Updated tokio version to "1.37.0" - [ ] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] A changelog entry has been made in the appropriate crates --------- Co-authored-by: David E. Perez Negron R. 
Co-authored-by: Guillaume Michel Co-authored-by: João Oliveira --- Cargo.lock | 3 +-- examples/relay-server/Cargo.toml | 5 ++-- examples/relay-server/src/main.rs | 42 +++++++++++++++---------------- 3 files changed, 23 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38c3436237a..91767968898 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4901,11 +4901,10 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server-example" version = "0.1.0" dependencies = [ - "async-std", - "async-trait", "clap", "futures", "libp2p", + "tokio", "tracing", "tracing-subscriber", ] diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 12d3e2467ce..7385cf6c033 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -10,10 +10,9 @@ release = false [dependencies] clap = { version = "4.5.6", features = ["derive"] } -async-std = { version = "1.12", features = ["attributes"] } -async-trait = "0.1" +tokio = { version = "1.37.0", features = ["full"] } futures = { workspace = true } -libp2p = { path = "../../libp2p", features = [ "async-std", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } +libp2p = { path = "../../libp2p", features = ["tokio", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index bf5817454f8..46a122d0717 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -22,8 +22,7 @@ #![doc = include_str!("../README.md")] use clap::Parser; -use futures::executor::block_on; -use futures::stream::StreamExt; +use futures::StreamExt; use libp2p::{ core::multiaddr::Protocol, core::Multiaddr, @@ -35,7 +34,8 @@ use std::error::Error; use std::net::{Ipv4Addr, Ipv6Addr}; use 
tracing_subscriber::EnvFilter; -fn main() -> Result<(), Box> { +#[tokio::main] +async fn main() -> Result<(), Box> { let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) .try_init(); @@ -46,7 +46,7 @@ fn main() -> Result<(), Box> { let local_key: identity::Keypair = generate_ed25519(opt.secret_key_seed); let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_key) - .with_async_std() + .with_tokio() .with_tcp( tcp::Config::default(), noise::Config::new, @@ -81,27 +81,25 @@ fn main() -> Result<(), Box> { .with(Protocol::QuicV1); swarm.listen_on(listen_addr_quic)?; - block_on(async { - loop { - match swarm.next().await.expect("Infinite Stream.") { - SwarmEvent::Behaviour(event) => { - if let BehaviourEvent::Identify(identify::Event::Received { - info: identify::Info { observed_addr, .. }, - .. - }) = &event - { - swarm.add_external_address(observed_addr.clone()); - } - - println!("{event:?}") + loop { + match swarm.next().await.expect("Infinite Stream.") { + SwarmEvent::Behaviour(event) => { + if let BehaviourEvent::Identify(identify::Event::Received { + info: identify::Info { observed_addr, .. }, + .. + }) = &event + { + swarm.add_external_address(observed_addr.clone()); } - SwarmEvent::NewListenAddr { address, .. } => { - println!("Listening on {address:?}"); - } - _ => {} + + println!("{event:?}") + } + SwarmEvent::NewListenAddr { address, .. 
} => { + println!("Listening on {address:?}"); } + _ => {} } - }) + } } #[derive(NetworkBehaviour)] From 93ad28c4cce236b1657241ba623ea30077565afd Mon Sep 17 00:00:00 2001 From: Guillaume Michel Date: Sat, 5 Oct 2024 01:07:49 +0200 Subject: [PATCH 11/50] fix(server): removing dependency on libp2p-lookup (#5610) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description Remove dependency on [`libp2p-lookup`](https://github.com/mxinden/libp2p-lookup) which is no longer maintained, and [makes checks fail](https://github.com/libp2p/rust-libp2p/actions/runs/11016492372/job/30628121728). ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --------- Co-authored-by: João Oliveira --- Cargo.lock | 2 +- Cargo.toml | 8 ++++++-- misc/server/CHANGELOG.md | 30 ++++++++++++++++++++++++++++++ misc/server/Cargo.toml | 20 ++++++++++++++++++-- misc/server/Dockerfile | 2 -- misc/server/README.md | 7 ------- 6 files changed, 55 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 91767968898..28107c00ee9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3249,7 +3249,7 @@ dependencies = [ [[package]] name = "libp2p-server" -version = "0.12.7" +version = "0.12.8" dependencies = [ "axum", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 32f72c5e252..cc677013fa9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,7 +100,7 @@ libp2p-quic = { version = "0.11.1", path = "transports/quic" } libp2p-relay = { version = "0.18.0", path = "protocols/relay" } libp2p-rendezvous = { version = "0.15.0", path = "protocols/rendezvous" } libp2p-request-response = { version = "0.27.0", path = "protocols/request-response" } -libp2p-server = { version = "0.12.7", path = "misc/server" } +libp2p-server = { version = 
"0.12.8", path = "misc/server" } libp2p-stream = { version = "0.2.0-alpha", path = "protocols/stream" } libp2p-swarm = { version = "0.45.2", path = "swarm" } libp2p-swarm-derive = { version = "=0.35.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. @@ -151,4 +151,8 @@ clippy.manual_let_else = "warn" clippy.dbg_macro = "warn" [workspace.metadata.release] -pre-release-hook = ["/bin/sh", '-c', '/bin/sh $WORKSPACE_ROOT/scripts/add-changelog-header.sh'] # Nested use of shell to expand variables. +pre-release-hook = [ + "/bin/sh", + '-c', + '/bin/sh $WORKSPACE_ROOT/scripts/add-changelog-header.sh', +] # Nested use of shell to expand variables. diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md index 5369163460c..fe48de0f553 100644 --- a/misc/server/CHANGELOG.md +++ b/misc/server/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.12.8 + +### Changed + +- Remove deprecated [`libp2p-lookup`](https://github.com/mxinden/libp2p-lookup) from Dockerfile. + See [PR 5610](https://github.com/libp2p/rust-libp2p/pull/5610). + ## 0.12.7 ### Changed @@ -31,6 +38,7 @@ ## 0.12.3 ### Changed + - Add libp2p-lookup to Dockerfile to enable healthchecks. ### Fixed @@ -42,14 +50,18 @@ [PR 4467]: https://github.com/libp2p/rust-libp2p/pull/4467 ## 0.12.2 + ### Fixed + - Adhere to `--metrics-path` flag and listen on `0.0.0.0:8888` (default IPFS metrics port). [PR 4392] [PR 4392]: https://github.com/libp2p/rust-libp2p/pull/4392 ## 0.12.1 + ### Changed + - Move to tokio and hyper. See [PR 4311]. - Move to distroless Docker base image. 
@@ -58,39 +70,57 @@ [PR 4311]: https://github.com/libp2p/rust-libp2p/pull/4311 ## 0.8.0 + ### Changed + - Remove mplex support. ## 0.7.0 + ### Changed + - Update to libp2p v0.47.0. ## 0.6.0 - 2022-05-05 + ### Changed + - Update to libp2p v0.44.0. ## 0.5.4 - 2022-01-11 + ### Changed + - Pull latest autonat changes. ## 0.5.3 - 2021-12-25 + ### Changed + - Update dependencies. - Pull in autonat fixes. ## 0.5.2 - 2021-12-20 + ### Added + - Add support for libp2p autonat protocol via `--enable-autonat`. ## 0.5.1 - 2021-12-20 + ### Fixed + - Update dependencies. - Fix typo in command line flag `--enable-kademlia`. ## 0.5.0 - 2021-11-18 + ### Changed + - Disable Kademlia protocol by default. ## 0.4.0 - 2021-11-18 + ### Fixed + - Update dependencies. diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index 798ecfa07a9..0954e2f38d8 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-server" -version = "0.12.7" +version = "0.12.8" authors = ["Max Inden "] edition = "2021" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,7 +16,23 @@ clap = { version = "4.5.6", features = ["derive"] } futures = { workspace = true } futures-timer = "3" axum = "0.7" -libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic", "websocket"] } +libp2p = { workspace = true, features = [ + "autonat", + "dns", + "tokio", + "noise", + "tcp", + "yamux", + "identify", + "kad", + "ping", + "relay", + "metrics", + "rsa", + "macros", + "quic", + "websocket", +] } prometheus-client = { workspace = true } serde = "1.0.203" serde_derive = "1.0.125" diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile index 24ae2b9fd99..12a8982eb3f 100644 --- a/misc/server/Dockerfile +++ b/misc/server/Dockerfile @@ -1,7 +1,6 @@ # syntax=docker/dockerfile:1.5-labs FROM rust:1.81.0 as chef RUN wget -q -O- 
https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin -RUN cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4 WORKDIR /app FROM chef AS planner @@ -17,5 +16,4 @@ COPY . . RUN cargo build --release --package libp2p-server FROM gcr.io/distroless/cc -COPY --from=builder /usr/local/bin/libp2p-server /usr/local/bin/libp2p-lookup /usr/local/bin/ CMD ["libp2p-server"] diff --git a/misc/server/README.md b/misc/server/README.md index 0da1bd8abd9..f9a5d65124a 100644 --- a/misc/server/README.md +++ b/misc/server/README.md @@ -25,7 +25,6 @@ Options: -h, --help Print help ``` - ``` cargo run -- --config ~/.ipfs/config @@ -33,9 +32,3 @@ Local peer id: PeerId("12D3KooWSa1YEeQVSwvoqAMhwjKQ6kqZQckhWPb3RWEGV3sZGU6Z") Listening on "/ip4/127.0.0.1/udp/4001/quic" [...] ``` - -The Docker container includes [libp2-lookup](https://github.com/mxinden/libp2p-lookup/) to enable adding a proper healthcheck for container startup, e.g. - -``` shell -docker run --health-cmd 'libp2p-lookup direct --address /ip4/127.0.0.1/tcp/4001/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa' /home/ipfs/.ipfs:/ipfs ghcr.io/libp2p/rust-libp2p-server --config /ipfs/config -``` From 9a45db3f82b760c93099e66ec77a7a772d1f6cd3 Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Tue, 8 Oct 2024 10:16:59 -0400 Subject: [PATCH 12/50] chore: update igd-next to 0.15.1 (#5625) ## Description Resolves #5506. 
## Notes & open questions ## Change checklist - [ ] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --- Cargo.lock | 102 +++++++++--------------------------- Cargo.toml | 2 +- protocols/upnp/CHANGELOG.md | 4 ++ protocols/upnp/Cargo.toml | 4 +- 4 files changed, 32 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 28107c00ee9..3d77ed104dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -556,9 +556,9 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-util", "itoa", "matchit", @@ -589,7 +589,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -1865,25 +1865,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.9", - "indexmap 2.2.1", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "h2" version = "0.4.4" @@ -2095,17 +2076,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http 0.2.9", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.0" @@ -2125,7 +2095,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body", "pin-project-lite", ] @@ -2153,30 +2123,6 @@ version = "2.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "hyper" -version = "0.14.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.9", - "http-body 0.4.5", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.4.9", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.4.1" @@ -2186,9 +2132,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.4", + "h2", "http 1.1.0", - "http-body 1.0.0", + "http-body", "httparse", "httpdate", "itoa", @@ -2206,7 +2152,7 @@ checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper", "hyper-util", "rustls 0.22.4", "rustls-pki-types", @@ -2221,7 +2167,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper", "hyper-util", "pin-project-lite", "tokio", @@ -2236,7 +2182,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-util", "native-tls", "tokio", @@ -2254,8 +2200,8 @@ dependencies = [ "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.4.1", + "http-body", + "hyper", "pin-project-lite", "socket2 0.5.7", "tokio", @@ -2326,16 +2272,18 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.14.3" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" 
+checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 0.2.9", - "hyper 0.14.27", + "http 1.1.0", + "http-body-util", + "hyper", + "hyper-util", "log", "rand 0.8.5", "tokio", @@ -3399,7 +3347,7 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.3.0" +version = "0.3.1" dependencies = [ "futures", "futures-timer", @@ -4931,11 +4879,11 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.4.4", + "h2", "http 1.1.0", - "http-body 1.0.0", + "http-body", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-rustls", "hyper-tls", "hyper-util", @@ -6176,11 +6124,11 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.4", + "h2", "http 1.1.0", - "http-body 1.0.0", + "http-body", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6225,7 +6173,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body", "http-body-util", "http-range-header", "httpdate", diff --git a/Cargo.toml b/Cargo.toml index cc677013fa9..9e18af5f706 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,7 +108,7 @@ libp2p-swarm-test = { version = "0.4.0", path = "swarm-test" } libp2p-tcp = { version = "0.42.0", path = "transports/tcp" } libp2p-tls = { version = "0.5.0", path = "transports/tls" } libp2p-uds = { version = "0.41.0", path = "transports/uds" } -libp2p-upnp = { version = "0.3.0", path = "protocols/upnp" } +libp2p-upnp = { version = "0.3.1", path = "protocols/upnp" } libp2p-webrtc = { version = "0.8.0-alpha", path = "transports/webrtc" } libp2p-webrtc-utils = { version = "0.3.0", path = "misc/webrtc-utils" } libp2p-webrtc-websys = { version = "0.4.0-alpha.2", path = "transports/webrtc-websys" } diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md index 21e90f9534b..d9c24f8efcc 100644 --- a/protocols/upnp/CHANGELOG.md +++ b/protocols/upnp/CHANGELOG.md @@ -1,3 
+1,7 @@ +## 0.3.1 +- update igd-next to 0.15.1. + See [PR 5625](https://github.com/libp2p/rust-libp2p/pull/5625). + ## 0.3.0 diff --git a/protocols/upnp/Cargo.toml b/protocols/upnp/Cargo.toml index e9c7414236d..209733f53e6 100644 --- a/protocols/upnp/Cargo.toml +++ b/protocols/upnp/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-upnp" edition = "2021" rust-version = "1.60.0" description = "UPnP support for libp2p transports" -version = "0.3.0" +version = "0.3.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -13,7 +13,7 @@ publish = true [dependencies] futures = { workspace = true } futures-timer = "3.0.3" -igd-next = "0.14.3" +igd-next = "0.15.1" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } tokio = { workspace = true, default-features = false, features = ["rt"], optional = true } From d3228adf64cf68f419eeb0b18a5c0b762d29abbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 09:00:56 +0000 Subject: [PATCH 13/50] deps: bump Swatinem/rust-cache from 2.7.3 to 2.7.5 (#5633) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.3 to 2.7.5.
Release notes

Sourced from Swatinem/rust-cache's releases.

v2.7.5

What's Changed

New Contributors

Full Changelog: https://github.com/Swatinem/rust-cache/compare/v2.7.3...v2.7.5

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=Swatinem/rust-cache&package-manager=github_actions&previous-version=2.7.3&new-version=2.7.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cache-factory.yml | 2 +- .github/workflows/ci.yml | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index 8c49b335f1b..7623b56f450 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -22,7 +22,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: shared-key: stable-cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2c1dfc8aaef..daee569d047 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,7 +40,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: shared-key: stable-cache save-if: false @@ -149,7 +149,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -174,7 +174,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -195,7 +195,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: 
Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: key: ${{ matrix.features }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -212,7 +212,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -238,7 +238,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -254,7 +254,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -273,7 +273,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: shared-key: stable-cache save-if: false @@ -365,7 +365,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - run: cargo install --version 0.10.0 pb-rs --locked @@ -391,7 +391,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - uses: 
Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - run: cargo metadata --locked --format-version=1 > /dev/null cargo-deny: From 812a7cdf5d229610bb8eda1f106eee5ac3f699d2 Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Wed, 16 Oct 2024 01:32:02 +0800 Subject: [PATCH 14/50] feat: make runtime features optional in swarm-test (#5551) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description Sometimes a test uses custom swarm building logic and doesn't need `fn new_ephemeral`, and sometimes a test uses `tokio` runtime other than `async-std`. This PR adds the `tokio` runtime support and makes both `async-std` and `tokio` runtimes optional behind features to make it more flexible. ## Notes & open questions ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --------- Co-authored-by: João Oliveira --- Cargo.lock | 2 +- Cargo.toml | 2 +- swarm-test/CHANGELOG.md | 7 +++++ swarm-test/Cargo.toml | 12 +++++-- swarm-test/src/lib.rs | 69 ++++++++++++++++++++++++++++++++--------- 5 files changed, 72 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d77ed104dc..40ff97009de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3278,7 +3278,7 @@ dependencies = [ [[package]] name = "libp2p-swarm-test" -version = "0.4.0" +version = "0.4.1" dependencies = [ "async-trait", "futures", diff --git a/Cargo.toml b/Cargo.toml index 9e18af5f706..7b52e4a7b42 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ libp2p-server = { version = "0.12.8", path = "misc/server" } libp2p-stream = { version = "0.2.0-alpha", path = "protocols/stream" } libp2p-swarm = { version = "0.45.2", path = "swarm" } libp2p-swarm-derive = { version = "=0.35.0", path = "swarm-derive" } # `libp2p-swarm-derive` 
may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. -libp2p-swarm-test = { version = "0.4.0", path = "swarm-test" } +libp2p-swarm-test = { version = "0.4.1", path = "swarm-test" } libp2p-tcp = { version = "0.42.0", path = "transports/tcp" } libp2p-tls = { version = "0.5.0", path = "transports/tls" } libp2p-uds = { version = "0.41.0", path = "transports/uds" } diff --git a/swarm-test/CHANGELOG.md b/swarm-test/CHANGELOG.md index 98027fcbea2..33eebb2412c 100644 --- a/swarm-test/CHANGELOG.md +++ b/swarm-test/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.4.1 + +- Add `tokio` runtime support and make `tokio` and `async-std` runtimes optional behind features. + See [PR 5551]. + +[PR 5551]: https://github.com/libp2p/rust-libp2p/pull/5551 + ## 0.4.0 diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index b285da34f87..fa51454dd58 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-swarm-test" -version = "0.4.0" +version = "0.4.1" edition = "2021" rust-version = { workspace = true } license = "MIT" @@ -16,13 +16,19 @@ async-trait = "0.1.80" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } libp2p-plaintext = { workspace = true } -libp2p-swarm = { workspace = true, features = ["async-std"] } -libp2p-tcp = { workspace = true, features = ["async-io"] } +libp2p-swarm = { workspace = true } +libp2p-tcp = { workspace = true } libp2p-yamux = { workspace = true } futures = { workspace = true } rand = "0.8.5" tracing = { workspace = true } futures-timer = "3.0.3" +[features] +default = ["async-std"] + +async-std = ["libp2p-swarm/async-std", "libp2p-tcp/async-io"] +tokio = 
["libp2p-swarm/tokio", "libp2p-tcp/tokio"] + [lints] workspace = true diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index 48f5bcbf4ef..bcab6e5b700 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -21,14 +21,10 @@ use async_trait::async_trait; use futures::future::{BoxFuture, Either}; use futures::{FutureExt, StreamExt}; -use libp2p_core::{ - multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, -}; -use libp2p_identity::{Keypair, PeerId}; -use libp2p_plaintext as plaintext; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_identity::PeerId; use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{self as swarm, dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; -use libp2p_yamux as yamux; +use libp2p_swarm::{dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; use std::fmt::Debug; use std::future::IntoFuture; use std::time::Duration; @@ -38,12 +34,23 @@ use std::time::Duration; pub trait SwarmExt { type NB: NetworkBehaviour; - /// Create a new [`Swarm`] with an ephemeral identity. + /// Create a new [`Swarm`] with an ephemeral identity and the `async-std` runtime. /// - /// The swarm will use a [`MemoryTransport`] together with a [`plaintext::Config`] authentication layer and - /// [`yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and + /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test /// and may change at any time. - fn new_ephemeral(behaviour_fn: impl FnOnce(Keypair) -> Self::NB) -> Self + #[cfg(feature = "async-std")] + fn new_ephemeral(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self + where + Self: Sized; + + /// Create a new [`Swarm`] with an ephemeral identity and the `tokio` runtime. 
+ /// + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and + /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test + /// and may change at any time. + #[cfg(feature = "tokio")] + fn new_ephemeral_tokio(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where Self: Sized; @@ -200,18 +207,50 @@ where { type NB = B; - fn new_ephemeral(behaviour_fn: impl FnOnce(Keypair) -> Self::NB) -> Self + #[cfg(feature = "async-std")] + fn new_ephemeral(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where Self: Sized, { + use libp2p_core::{transport::MemoryTransport, upgrade::Version, Transport as _}; + use libp2p_identity::Keypair; + let identity = Keypair::generate_ed25519(); let peer_id = PeerId::from(identity.public()); let transport = MemoryTransport::default() .or_transport(libp2p_tcp::async_io::Transport::default()) .upgrade(Version::V1) - .authenticate(plaintext::Config::new(&identity)) - .multiplex(yamux::Config::default()) + .authenticate(libp2p_plaintext::Config::new(&identity)) + .multiplex(libp2p_yamux::Config::default()) + .timeout(Duration::from_secs(20)) + .boxed(); + + Swarm::new( + transport, + behaviour_fn(identity), + peer_id, + libp2p_swarm::Config::with_async_std_executor() + .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + ) + } + + #[cfg(feature = "tokio")] + fn new_ephemeral_tokio(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self + where + Self: Sized, + { + use libp2p_core::{transport::MemoryTransport, upgrade::Version, Transport as _}; + use libp2p_identity::Keypair; + + let identity = Keypair::generate_ed25519(); + let peer_id = PeerId::from(identity.public()); + + let transport = MemoryTransport::default() + 
.or_transport(libp2p_tcp::tokio::Transport::default()) + .upgrade(Version::V1) + .authenticate(libp2p_plaintext::Config::new(&identity)) + .multiplex(libp2p_yamux::Config::default()) .timeout(Duration::from_secs(20)) .boxed(); @@ -219,7 +258,7 @@ where transport, behaviour_fn(identity), peer_id, - swarm::Config::with_async_std_executor() + libp2p_swarm::Config::with_tokio_executor() .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., ) } From 524d7f1560d084b14b0ec53563f13ec8c4b7bcf3 Mon Sep 17 00:00:00 2001 From: yanziseeker <153156292+AdventureSeeker987@users.noreply.github.com> Date: Wed, 23 Oct 2024 07:52:20 +0800 Subject: [PATCH 15/50] chore: fix typo in comment (#5643) ## Description ## Notes & open questions ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] A changelog entry has been made in the appropriate crates --- protocols/kad/src/query/peers/fixed.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index b34f7516801..2d0b312454d 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -25,7 +25,7 @@ use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; /// A peer iterator for a fixed set of peers. pub(crate) struct FixedPeersIter { - /// Ther permitted parallelism, i.e. number of pending results. + /// The permitted parallelism, i.e. number of pending results. parallelism: NonZeroUsize, /// The state of peers emitted by the iterator. 
From 9ed181b3acc77cac1f081a98e4c7762411812e12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 24 Oct 2024 16:13:15 +0100 Subject: [PATCH 16/50] deps(ci): update cargo-semver-checks (#5647) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index daee569d047..aad5b39aec7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -308,7 +308,7 @@ jobs: RUSTFLAGS: '' steps: - uses: actions/checkout@v4 - - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.33.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin + - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.36.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin shell: bash - uses: obi1kenobi/cargo-semver-checks-action@7272cc2caa468d3e009a2b0a9cc366839348237b # v2.6 From 6cb116e8fc49c0b54765a0178aaa4a057d290e87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 24 Oct 2024 17:05:30 +0100 Subject: [PATCH 17/50] fix(swarm-test): set proper version (#5648) ## Description To unblock CI Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 2 +- Cargo.toml | 2 +- swarm-test/CHANGELOG.md | 2 +- swarm-test/Cargo.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40ff97009de..54a0f8657a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3278,7 +3278,7 @@ dependencies = [ [[package]] name = "libp2p-swarm-test" -version = "0.4.1" +version = "0.5.0" dependencies = [ "async-trait", "futures", diff --git a/Cargo.toml b/Cargo.toml index 7b52e4a7b42..af7e47f8359 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ libp2p-server = { version = "0.12.8", path = "misc/server" } libp2p-stream = { version = "0.2.0-alpha", path = "protocols/stream" } 
libp2p-swarm = { version = "0.45.2", path = "swarm" } libp2p-swarm-derive = { version = "=0.35.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. -libp2p-swarm-test = { version = "0.4.1", path = "swarm-test" } +libp2p-swarm-test = { version = "0.5.0", path = "swarm-test" } libp2p-tcp = { version = "0.42.0", path = "transports/tcp" } libp2p-tls = { version = "0.5.0", path = "transports/tls" } libp2p-uds = { version = "0.41.0", path = "transports/uds" } diff --git a/swarm-test/CHANGELOG.md b/swarm-test/CHANGELOG.md index 33eebb2412c..5700460b3a6 100644 --- a/swarm-test/CHANGELOG.md +++ b/swarm-test/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.4.1 +## 0.5.0 - Add `tokio` runtime support and make `tokio` and `async-std` runtimes optional behind features. See [PR 5551]. diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index fa51454dd58..7ac7c900deb 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-swarm-test" -version = "0.4.1" +version = "0.5.0" edition = "2021" rust-version = { workspace = true } license = "MIT" From 84c617fd16048828dfe4cc3279e893282221bc19 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 25 Oct 2024 02:15:00 +0300 Subject: [PATCH 18/50] feat(kad): add `Behavior::find_closest_local_peers()` (#5645) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description Fixes https://github.com/libp2p/rust-libp2p/issues/5626 ## Notes & open questions This is the nicest way I came up with, I decided to leave `get_closest_local_peers` as is since it does return all peers, not just `replication_factor` peers. 
Looking at https://github.com/libp2p/rust-libp2p/pull/2436 it is not clear if @folex really needed all peers returned or it just happened that way. I'm also happy to change proposed API to return all peers if that is preferred by others. It is very unfortunate that `&mut self` is needed for this, I created https://github.com/libp2p/rust-libp2p/issues/5644 that if resolved will allow to have `&self` instead. ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates Co-authored-by: João Oliveira --- protocols/kad/CHANGELOG.md | 5 ++-- protocols/kad/src/behaviour.rs | 48 ++++++++++++++++++++-------------- protocols/kad/src/lib.rs | 2 +- 3 files changed, 32 insertions(+), 23 deletions(-) diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index d0ab7986aad..55d269bf98f 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,10 +1,11 @@ ## 0.47.0 -- Expose a kad query facility allowing specify num_results dynamicly. +- Expose a kad query facility allowing specify num_results dynamicaly. See [PR 5555](https://github.com/libp2p/rust-libp2p/pull/5555). - Add `mode` getter on `Behaviour`. See [PR 5573](https://github.com/libp2p/rust-libp2p/pull/5573). - +- Add `Behavior::find_closest_local_peers()`. + See [PR 5645](https://github.com/libp2p/rust-libp2p/pull/5645). ## 0.46.2 diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 0b15e507ba4..84133d31acb 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -771,7 +771,8 @@ where self.queries.add_iter_closest(target, peer_keys, info) } - /// Returns closest peers to the given key; takes peers from local routing table only. 
+ /// Returns all peers ordered by distance to the given key; takes peers from local routing table + /// only. pub fn get_closest_local_peers<'a, K: Clone>( &'a mut self, key: &'a kbucket::Key, @@ -779,6 +780,23 @@ where self.kbuckets.closest_keys(key) } + /// Finds the closest peers to a `key` in the context of a request by the `source` peer, such + /// that the `source` peer is never included in the result. + /// + /// Takes peers from local routing table only. Only returns number of peers equal to configured + /// replication factor. + pub fn find_closest_local_peers<'a, K: Clone>( + &'a mut self, + key: &'a kbucket::Key, + source: &'a PeerId, + ) -> impl Iterator + 'a { + self.kbuckets + .closest(key) + .filter(move |e| e.node.key.preimage() != source) + .take(self.queries.config().replication_factor.get()) + .map(KadPeer::from) + } + /// Performs a lookup for a record in the DHT. /// /// The result of this operation is delivered in a @@ -1212,22 +1230,6 @@ where } } - /// Finds the closest peers to a `target` in the context of a request by - /// the `source` peer, such that the `source` peer is never included in the - /// result. - fn find_closest( - &mut self, - target: &kbucket::Key, - source: &PeerId, - ) -> Vec { - self.kbuckets - .closest(target) - .filter(|e| e.node.key.preimage() != source) - .take(self.queries.config().replication_factor.get()) - .map(KadPeer::from) - .collect() - } - /// Collects all peers who are known to be providers of the value for a given `Multihash`. 
fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec { let kbuckets = &mut self.kbuckets; @@ -2300,7 +2302,9 @@ where } HandlerEvent::FindNodeReq { key, request_id } => { - let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); + let closer_peers = self + .find_closest_local_peers(&kbucket::Key::new(key), &source) + .collect::>(); self.queued_events .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { @@ -2328,7 +2332,9 @@ where HandlerEvent::GetProvidersReq { key, request_id } => { let provider_peers = self.provider_peers(&key, &source); - let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); + let closer_peers = self + .find_closest_local_peers(&kbucket::Key::new(key), &source) + .collect::>(); self.queued_events .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { @@ -2422,7 +2428,9 @@ where None => None, }; - let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); + let closer_peers = self + .find_closest_local_peers(&kbucket::Key::new(key), &source) + .collect::>(); self.queued_events .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index 681d135f79b..060bfc518e4 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -69,7 +69,7 @@ pub use behaviour::{ pub use kbucket::{ Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, }; -pub use protocol::ConnectionType; +pub use protocol::{ConnectionType, KadPeer}; pub use query::QueryId; pub use record::{store, Key as RecordKey, ProviderRecord, Record}; From c7e8129726c584e1fe41ffe3aadf686863906f9f Mon Sep 17 00:00:00 2001 From: Dzmitry Kalabuk Date: Fri, 25 Oct 2024 15:30:32 +0300 Subject: [PATCH 19/50] feat(gossipsub): apply `max_transmit_size` to the published message (#5642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description When trying to publish a message using 
gossipsub's `publish` method, it should be possible to predict whether it will fit in the limit defined by the `max_transmit_size` config option. If this limit applies to the final protobuf payload, it's not possible to know that in advance because the size of the added fields is not fixed. This change makes the limit apply to the passed message size instead of the final wire size. ## Notes & open questions This is a minor version change because it changes the meaning of the existing config option. However, for the existing clients the limit will only become more permissive, so it shouldn't break anything. ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --------- Co-authored-by: Darius Clark Co-authored-by: João Oliveira --- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/gossipsub/CHANGELOG.md | 5 +++++ protocols/gossipsub/Cargo.toml | 2 +- protocols/gossipsub/src/behaviour.rs | 10 +++++----- protocols/gossipsub/src/config.rs | 3 ++- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54a0f8657a1..3b983a80d00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2741,7 +2741,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.47.1" +version = "0.48.0" dependencies = [ "async-std", "asynchronous-codec", diff --git a/Cargo.toml b/Cargo.toml index af7e47f8359..8869505921d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ libp2p-core = { version = "0.42.0", path = "core" } libp2p-dcutr = { version = "0.12.0", path = "protocols/dcutr" } libp2p-dns = { version = "0.42.0", path = "transports/dns" } libp2p-floodsub = { version = "0.45.0", path = "protocols/floodsub" } -libp2p-gossipsub = { version = "0.47.1", path = "protocols/gossipsub" } +libp2p-gossipsub = { version = "0.48.0", path 
= "protocols/gossipsub" } libp2p-identify = { version = "0.45.1", path = "protocols/identify" } libp2p-identity = { version = "0.2.9" } libp2p-kad = { version = "0.47.0", path = "protocols/kad" } diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index c47a9f40f66..cdd170c0d4b 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.48.0 + +- Apply `max_transmit_size` to the inner message instead of the final payload. + See [PR 5642](https://github.com/libp2p/rust-libp2p/pull/5642). + ## 0.47.1 - Attempt to publish to at least mesh_n peers when flood publish is disabled. diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 665f757fcb3..734ac36a231 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-gossipsub" edition = "2021" rust-version = { workspace = true } description = "Gossipsub protocol for libp2p" -version = "0.47.1" +version = "0.48.0" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 16adb555a44..6ddb25316e5 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -583,6 +583,11 @@ where .data_transform .outbound_transform(&topic, data.clone())?; + // check that the size doesn't exceed the max transmission size. 
+ if transformed_data.len() > self.config.max_transmit_size() { + return Err(PublishError::MessageTooLarge); + } + let raw_message = self.build_raw_message(topic, transformed_data)?; // calculate the message id from the un-transformed data @@ -593,11 +598,6 @@ where topic: raw_message.topic.clone(), }); - // check that the size doesn't exceed the max transmission size - if raw_message.raw_protobuf_len() > self.config.max_transmit_size() { - return Err(PublishError::MessageTooLarge); - } - // Check the if the message has been published before if self.duplicate_cache.contains(&msg_id) { // This message has already been seen. We don't re-publish messages that have already diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index febe2514a30..1ee2e940661 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -174,7 +174,8 @@ impl Config { /// The maximum byte size for each gossipsub RPC (default is 65536 bytes). /// - /// This represents the maximum size of the entire protobuf payload. It must be at least + /// This represents the maximum size of the published message. It is additionally wrapped + /// in a protobuf struct, so the actual wire size may be a bit larger. It must be at least /// large enough to support basic control messages. If Peer eXchange is enabled, this /// must be large enough to transmit the desired peer information on pruning. It must be at /// least 100 bytes. Default is 65536 bytes. 
From 4e7ff3ebf27336f2f014067f33437c428c81f5e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sat, 26 Oct 2024 20:10:25 +0100 Subject: [PATCH 20/50] chore(ci): address clippy beta lints (#5649) Co-authored-by: Darius Clark --- Cargo.lock | 1 + identity/src/peer_id.rs | 2 +- identity/src/rsa.rs | 4 ++-- misc/quick-protobuf-codec/src/lib.rs | 2 +- muxers/test-harness/src/lib.rs | 2 +- .../autonat/src/v1/behaviour/as_client.rs | 4 ++-- .../autonat/src/v1/behaviour/as_server.rs | 4 ++-- protocols/gossipsub/src/backoff.rs | 3 +-- protocols/kad/src/behaviour.rs | 4 ++-- protocols/ping/src/protocol.rs | 1 - swarm/src/connection.rs | 2 +- swarm/src/handler.rs | 6 ++--- transports/noise/src/io/handshake.rs | 1 - transports/websocket-websys/src/lib.rs | 4 ++-- transports/websocket/src/framed.rs | 2 +- transports/webtransport-websys/Cargo.toml | 1 + transports/webtransport-websys/src/utils.rs | 22 +++++++------------ 17 files changed, 29 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b983a80d00..f0c36291839 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3484,6 +3484,7 @@ dependencies = [ "multiaddr", "multibase", "multihash", + "once_cell", "send_wrapper 0.6.0", "thiserror", "tracing", diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 7b3f799f612..8ae6d99ae32 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -191,7 +191,7 @@ impl<'de> Deserialize<'de> for PeerId { struct PeerIdVisitor; - impl<'de> Visitor<'de> for PeerIdVisitor { + impl Visitor<'_> for PeerIdVisitor { type Value = PeerId; fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index cbfe3c1b919..5eb78a4af75 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -149,7 +149,7 @@ struct Asn1RawOid<'a> { object: DerObject<'a>, } -impl<'a> Asn1RawOid<'a> { +impl Asn1RawOid<'_> { /// The underlying OID as byte literal. 
pub(crate) fn oid(&self) -> &[u8] { self.object.value() @@ -169,7 +169,7 @@ impl<'a> DerTypeView<'a> for Asn1RawOid<'a> { } } -impl<'a> DerEncodable for Asn1RawOid<'a> { +impl DerEncodable for Asn1RawOid<'_> { fn encode(&self, sink: &mut S) -> Result<(), Asn1DerError> { self.object.encode(sink) } diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index 32cee8eccac..c57b7da7db8 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -120,7 +120,7 @@ impl<'a> BytesMutWriterBackend<'a> { } } -impl<'a> WriterBackend for BytesMutWriterBackend<'a> { +impl WriterBackend for BytesMutWriterBackend<'_> { fn pb_write_u8(&mut self, x: u8) -> quick_protobuf::Result<()> { self.dst.put_u8(x); diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index 16c71f414f0..d03bdbdfed7 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -206,7 +206,7 @@ enum Event { ProtocolComplete, } -impl<'m, M> Stream for Harness<'m, M> +impl Stream for Harness<'_, M> where M: StreamMuxer + Unpin, { diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs index 8960163ccb3..385dee50ee1 100644 --- a/protocols/autonat/src/v1/behaviour/as_client.rs +++ b/protocols/autonat/src/v1/behaviour/as_client.rs @@ -98,7 +98,7 @@ pub(crate) struct AsClient<'a> { pub(crate) other_candidates: &'a HashSet, } -impl<'a> HandleInnerEvent for AsClient<'a> { +impl HandleInnerEvent for AsClient<'_> { fn handle_event( &mut self, event: request_response::Event, @@ -179,7 +179,7 @@ impl<'a> HandleInnerEvent for AsClient<'a> { } } -impl<'a> AsClient<'a> { +impl AsClient<'_> { pub(crate) fn poll_auto_probe(&mut self, cx: &mut Context<'_>) -> Poll { match self.schedule_probe.poll_unpin(cx) { Poll::Ready(()) => { diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs index 
1289bd53d24..01148add6e8 100644 --- a/protocols/autonat/src/v1/behaviour/as_server.rs +++ b/protocols/autonat/src/v1/behaviour/as_server.rs @@ -91,7 +91,7 @@ pub(crate) struct AsServer<'a> { >, } -impl<'a> HandleInnerEvent for AsServer<'a> { +impl HandleInnerEvent for AsServer<'_> { fn handle_event( &mut self, event: request_response::Event, @@ -208,7 +208,7 @@ impl<'a> HandleInnerEvent for AsServer<'a> { } } -impl<'a> AsServer<'a> { +impl AsServer<'_> { pub(crate) fn on_outbound_connection( &mut self, peer: &PeerId, diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index b24da318582..4414ffb00e6 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -48,8 +48,7 @@ pub(crate) struct BackoffStorage { impl BackoffStorage { fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize { - ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos()) - as usize + d.as_nanos().div_ceil(heartbeat_interval.as_nanos()) as usize } pub(crate) fn new( diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 84133d31acb..f577971167f 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -3361,7 +3361,7 @@ pub struct QueryMut<'a> { query: &'a mut Query, } -impl<'a> QueryMut<'a> { +impl QueryMut<'_> { pub fn id(&self) -> QueryId { self.query.id() } @@ -3391,7 +3391,7 @@ pub struct QueryRef<'a> { query: &'a Query, } -impl<'a> QueryRef<'a> { +impl QueryRef<'_> { pub fn id(&self) -> QueryId { self.query.id() } diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 566e5e258e2..6e3f06d0498 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -44,7 +44,6 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0" /// > Nagle's algorithm, delayed acks and similar configuration options /// > which can affect latencies especially on otherwise 
low-volume /// > connections. - const PING_SIZE: usize = 32; /// Sends a ping and waits for the pong. diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 2f9afc38418..859d138b83a 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -516,7 +516,7 @@ pub(crate) struct IncomingInfo<'a> { pub(crate) send_back_addr: &'a Multiaddr, } -impl<'a> IncomingInfo<'a> { +impl IncomingInfo<'_> { /// Builds the [`ConnectedPoint`] corresponding to the incoming connection. pub(crate) fn create_connected_point(&self) -> ConnectedPoint { ConnectedPoint::Listener { diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 610b95b8cf1..9e31592d68d 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -226,7 +226,7 @@ pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IO RemoteProtocolsChange(ProtocolsChange<'a>), } -impl<'a, IP, OP, IOI, OOI> fmt::Debug for ConnectionEvent<'a, IP, OP, IOI, OOI> +impl fmt::Debug for ConnectionEvent<'_, IP, OP, IOI, OOI> where IP: InboundUpgradeSend + fmt::Debug, IP::Output: fmt::Debug, @@ -262,8 +262,8 @@ where } } -impl<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> - ConnectionEvent<'a, IP, OP, IOI, OOI> +impl + ConnectionEvent<'_, IP, OP, IOI, OOI> { /// Whether the event concerns an outbound stream. pub fn is_outbound(&self) -> bool { diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index 5c1fa806b6d..8993a5795b6 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -73,7 +73,6 @@ where /// will be sent and received on the given I/O resource and using the /// provided session for cryptographic operations according to the chosen /// Noise handshake pattern. 
- pub(crate) fn new( io: T, session: snow::HandshakeState, diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index f353d92b204..3467e802bc5 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -96,8 +96,8 @@ impl libp2p_core::Transport for Transport { return Err(TransportError::MultiaddrNotSupported(addr)); } - let url = extract_websocket_url(&addr) - .ok_or_else(|| TransportError::MultiaddrNotSupported(addr))?; + let url = + extract_websocket_url(&addr).ok_or(TransportError::MultiaddrNotSupported(addr))?; Ok(async move { let socket = match WebSocket::new(&url) { diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index a547aea21ef..198443508d9 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -442,7 +442,7 @@ pub(crate) enum WsListenProto<'a> { TlsWs(Cow<'a, str>), } -impl<'a> WsListenProto<'a> { +impl WsListenProto<'_> { pub(crate) fn append_on_addr(&self, addr: &mut Multiaddr) { match self { WsListenProto::Ws(path) => { diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 9541c49b737..eeb474d4a63 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -21,6 +21,7 @@ libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } multiaddr = { workspace = true } multihash = { workspace = true } +once_cell = "1.19.0" send_wrapper = { version = "0.6.0", features = ["futures"] } thiserror = "1.0.61" tracing = { workspace = true } diff --git a/transports/webtransport-websys/src/utils.rs b/transports/webtransport-websys/src/utils.rs index 55bad08e00c..0b3550e5b5b 100644 --- a/transports/webtransport-websys/src/utils.rs +++ b/transports/webtransport-websys/src/utils.rs @@ -1,10 +1,17 @@ use js_sys::{Promise, Reflect}; +use once_cell::sync::Lazy; use send_wrapper::SendWrapper; use std::io; use 
wasm_bindgen::{JsCast, JsValue}; use crate::Error; +type Closure = wasm_bindgen::closure::Closure; +static DO_NOTHING: Lazy> = Lazy::new(|| { + let cb = Closure::new(|_| {}); + SendWrapper::new(cb) +}); + /// Properly detach a promise. /// /// A promise always runs in the background, however if you don't await it, @@ -13,22 +20,9 @@ use crate::Error; // // Ref: https://github.com/typescript-eslint/typescript-eslint/blob/391a6702c0a9b5b3874a7a27047f2a721f090fb6/packages/eslint-plugin/docs/rules/no-floating-promises.md pub(crate) fn detach_promise(promise: Promise) { - type Closure = wasm_bindgen::closure::Closure; - static mut DO_NOTHING: Option> = None; - - // Allocate Closure only once and reuse it - let do_nothing = unsafe { - if DO_NOTHING.is_none() { - let cb = Closure::new(|_| {}); - DO_NOTHING = Some(SendWrapper::new(cb)); - } - - DO_NOTHING.as_deref().unwrap() - }; - // Avoid having "floating" promise and ignore any errors. // After `catch` promise is allowed to be dropped. - let _ = promise.catch(do_nothing); + let _ = promise.catch(&DO_NOTHING); } /// Typecasts a JavaScript type. 
From 9586071e54a090a2526c8179c508b93d3cfdf4ac Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:48:41 +0530 Subject: [PATCH 21/50] feat: refactor distributed-key-value-store example (#5652) ## Description ref #4449 Refactored distributed-key-value-store example to use `tokio` instead of `async-std` ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [x] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --- Cargo.lock | 2 +- .../distributed-key-value-store/Cargo.toml | 4 ++-- .../distributed-key-value-store/src/main.rs | 21 ++++++++++++------- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0c36291839..d3e2fc9fa47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1361,10 +1361,10 @@ dependencies = [ name = "distributed-key-value-store-example" version = "0.1.0" dependencies = [ - "async-std", "async-trait", "futures", "libp2p", + "tokio", "tracing", "tracing-subscriber", ] diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index 9c2e2bce5c9..3846e54c8d3 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -9,10 +9,10 @@ license = "MIT" release = false [dependencies] -async-std = { version = "1.12", features = ["attributes"] } +tokio = { workspace = true, features = ["full"] } async-trait = "0.1" futures = { workspace = true } -libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } +libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git 
a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 404333f3d20..6b7947b7eb3 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -20,8 +20,7 @@ #![doc = include_str!("../README.md")] -use async_std::io; -use futures::{prelude::*, select}; +use futures::stream::StreamExt; use libp2p::kad; use libp2p::kad::store::MemoryStore; use libp2p::kad::Mode; @@ -32,9 +31,13 @@ use libp2p::{ }; use std::error::Error; use std::time::Duration; +use tokio::{ + io::{self, AsyncBufReadExt}, + select, +}; use tracing_subscriber::EnvFilter; -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) @@ -44,11 +47,11 @@ async fn main() -> Result<(), Box> { #[derive(NetworkBehaviour)] struct Behaviour { kademlia: kad::Behaviour, - mdns: mdns::async_io::Behaviour, + mdns: mdns::tokio::Behaviour, } let mut swarm = libp2p::SwarmBuilder::with_new_identity() - .with_async_std() + .with_tokio() .with_tcp( tcp::Config::default(), noise::Config::new, @@ -60,7 +63,7 @@ async fn main() -> Result<(), Box> { key.public().to_peer_id(), MemoryStore::new(key.public().to_peer_id()), ), - mdns: mdns::async_io::Behaviour::new( + mdns: mdns::tokio::Behaviour::new( mdns::Config::default(), key.public().to_peer_id(), )?, @@ -72,7 +75,7 @@ async fn main() -> Result<(), Box> { swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server)); // Read full lines from stdin - let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); + let mut stdin = io::BufReader::new(io::stdin()).lines(); // Listen on all interfaces and whatever port the OS assigns. swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; @@ -80,7 +83,9 @@ async fn main() -> Result<(), Box> { // Kick it off. loop { select! 
{ - line = stdin.select_next_some() => handle_input_line(&mut swarm.behaviour_mut().kademlia, line.expect("Stdin not to close")), + Ok(Some(line)) = stdin.next_line() => { + handle_input_line(&mut swarm.behaviour_mut().kademlia, line); + } event = swarm.select_next_some() => match event { SwarmEvent::NewListenAddr { address, .. } => { println!("Listening in {address:?}"); From 8387749048f9c922dc8a30e18a709eb82ec76503 Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Wed, 30 Oct 2024 20:17:50 +0530 Subject: [PATCH 22/50] chore: refactor ping tests (#5655) ## Description ref #4449 Refactored ping tests to use `tokio` instead of `async-std`. ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [x] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --- Cargo.lock | 2 +- protocols/ping/Cargo.toml | 2 +- protocols/ping/src/protocol.rs | 34 ++++++++++++++++------------------ protocols/ping/tests/ping.rs | 16 ++++++++-------- 4 files changed, 26 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3e2fc9fa47..c0ada652504 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3023,7 +3023,6 @@ dependencies = [ name = "libp2p-ping" version = "0.45.0" dependencies = [ - "async-std", "either", "futures", "futures-timer", @@ -3033,6 +3032,7 @@ dependencies = [ "libp2p-swarm-test", "quickcheck-ext", "rand 0.8.5", + "tokio", "tracing", "tracing-subscriber", "void", diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 66775d3ba8d..794ab54ba42 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -23,11 +23,11 @@ tracing = { workspace = true } void = "1.0" [dev-dependencies] -async-std = "1.6.2" libp2p-swarm = { workspace = true, features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { 
workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } +tokio = {workspace = true, features = ["rt", "macros"]} # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 6e3f06d0498..101c219aac4 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -89,8 +89,8 @@ mod tests { Endpoint, }; - #[test] - fn ping_pong() { + #[tokio::test] + async fn ping_pong() { let mem_addr = multiaddr![Memory(thread_rng().gen::())]; let mut transport = MemoryTransport::new().boxed(); transport.listen_on(ListenerId::next(), mem_addr).unwrap(); @@ -101,27 +101,25 @@ mod tests { .and_then(|ev| ev.into_new_address()) .expect("MemoryTransport not listening on an address!"); - async_std::task::spawn(async move { + tokio::spawn(async move { let transport_event = transport.next().await.unwrap(); let (listener_upgrade, _) = transport_event.into_incoming().unwrap(); let conn = listener_upgrade.await.unwrap(); recv_ping(conn).await.unwrap(); }); - async_std::task::block_on(async move { - let c = MemoryTransport::new() - .dial( - listener_addr, - DialOpts { - role: Endpoint::Dialer, - port_use: PortUse::Reuse, - }, - ) - .unwrap() - .await - .unwrap(); - let (_, rtt) = send_ping(c).await.unwrap(); - assert!(rtt > Duration::from_secs(0)); - }); + let c = MemoryTransport::new() + .dial( + listener_addr, + DialOpts { + role: Endpoint::Dialer, + port_use: PortUse::Reuse, + }, + ) + .unwrap() + .await + .unwrap(); + let (_, rtt) = send_ping(c).await.unwrap(); + assert!(rtt > Duration::from_secs(0)); } } diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 3ca469f16a8..0752b1fced9 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -27,15 +27,15 @@ use libp2p_swarm_test::SwarmExt; use quickcheck::*; use 
std::{num::NonZeroU8, time::Duration}; -#[test] -fn ping_pong() { +#[tokio::test] +async fn ping_pong() { fn prop(count: NonZeroU8) { let cfg = ping::Config::new().with_interval(Duration::from_millis(10)); let mut swarm1 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); - async_std::task::block_on(async { + tokio::spawn(async move { swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; @@ -61,16 +61,16 @@ fn assert_ping_rtt_less_than_50ms(e: ping::Event) { assert!(rtt < Duration::from_millis(50)) } -#[test] -fn unsupported_doesnt_fail() { +#[tokio::test] +async fn unsupported_doesnt_fail() { let mut swarm1 = Swarm::new_ephemeral(|_| dummy::Behaviour); let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(ping::Config::new())); - let result = async_std::task::block_on(async { + let result = { swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let swarm1_peer_id = *swarm1.local_peer_id(); - async_std::task::spawn(swarm1.loop_on_next()); + tokio::spawn(swarm1.loop_on_next()); loop { match swarm2.next_swarm_event().await { @@ -89,7 +89,7 @@ fn unsupported_doesnt_fail() { _ => {} } } - }); + }; result.expect("node with ping should not fail connection due to unsupported protocol"); } From d021ce29c7f52833fefba845d3c11dafc0c06bba Mon Sep 17 00:00:00 2001 From: Hamza Date: Tue, 5 Nov 2024 20:34:30 +0300 Subject: [PATCH 23/50] fix(websocket): don't dial `/dnsaddr` addresses (#5613) ## Description Returns `Error::InvalidMultiaddr` when `parse_ws_dial_addr` is called with `/dnsaddr`. 
As per its specification, `/dnsaddr` domains are not meant to be directly dialed, instead it should be appended with `_dnsaddr.` and used for DNS lookups afterwards Related: #5529 Fixes: #5601 ## Notes & open questions * Is it okay to return an error, or should I perform a DNS lookup and resolve that DNS afterwards if address has `/dnsaddr`? * If so, how should I handle that case where DNS lookup returns multiple multiaddrs? ## Change checklist - [x] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [x] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --------- Co-authored-by: Darius Clark --- Cargo.lock | 4 ++-- Cargo.toml | 4 ++-- transports/websocket-websys/CHANGELOG.md | 5 +++++ transports/websocket-websys/Cargo.toml | 2 +- transports/websocket-websys/src/lib.rs | 9 +++++++-- transports/websocket/CHANGELOG.md | 5 +++++ transports/websocket/Cargo.toml | 2 +- transports/websocket/src/framed.rs | 9 +++++++-- 8 files changed, 30 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0ada652504..8a6278717f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3432,7 +3432,7 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.44.0" +version = "0.44.1" dependencies = [ "async-std", "either", @@ -3455,7 +3455,7 @@ dependencies = [ [[package]] name = "libp2p-websocket-websys" -version = "0.4.0" +version = "0.4.1" dependencies = [ "bytes", "futures", diff --git a/Cargo.toml b/Cargo.toml index 8869505921d..780d26240db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,8 +112,8 @@ libp2p-upnp = { version = "0.3.1", path = "protocols/upnp" } libp2p-webrtc = { version = "0.8.0-alpha", path = "transports/webrtc" } libp2p-webrtc-utils = { version = "0.3.0", path = "misc/webrtc-utils" } libp2p-webrtc-websys = { version = "0.4.0-alpha.2", path = "transports/webrtc-websys" } -libp2p-websocket = { version 
= "0.44.0", path = "transports/websocket" } -libp2p-websocket-websys = { version = "0.4.0", path = "transports/websocket-websys" } +libp2p-websocket = { version = "0.44.1", path = "transports/websocket" } +libp2p-websocket-websys = { version = "0.4.1", path = "transports/websocket-websys" } libp2p-webtransport-websys = { version = "0.4.0", path = "transports/webtransport-websys" } libp2p-yamux = { version = "0.46.0", path = "muxers/yamux" } multiaddr = "0.18.1" diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md index d0aeb509823..9d0cb7d7726 100644 --- a/transports/websocket-websys/CHANGELOG.md +++ b/transports/websocket-websys/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.4.1 + +- fix: Return `None` when extracting a `/dnsaddr` address + See [PR 5613](https://github.com/libp2p/rust-libp2p/pull/5613) + ## 0.4.0 - Implement refactored `Transport`. diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 32483f28c57..1687d3c0fb5 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket-websys" edition = "2021" rust-version = "1.60.0" description = "WebSocket for libp2p under WASM environment" -version = "0.4.0" +version = "0.4.1" authors = ["Vince Vasta "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index 3467e802bc5..17b07c71c0a 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -130,8 +130,7 @@ fn extract_websocket_url(addr: &Multiaddr) -> Option { } (Some(Protocol::Dns(h)), Some(Protocol::Tcp(port))) | (Some(Protocol::Dns4(h)), Some(Protocol::Tcp(port))) - | (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) - | (Some(Protocol::Dnsaddr(h)), Some(Protocol::Tcp(port))) => { + | (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) => { 
format!("{}:{}", &h, port) } _ => return None, @@ -549,6 +548,12 @@ mod tests { .unwrap(); assert!(extract_websocket_url(&addr).is_none()); + // Check `/dnsaddr` + let addr = "/dnsaddr/example.com/tcp/2222/ws" + .parse::() + .unwrap(); + assert!(extract_websocket_url(&addr).is_none()); + // Check non-ws address let addr = "/ip4/127.0.0.1/tcp/2222".parse::().unwrap(); assert!(extract_websocket_url(&addr).is_none()); diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index cd079cfdd5a..1a8e9569c06 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.44.1 + +- fix: Return `Error::InvalidMultiaddr` when dialed to a `/dnsaddr` address + See [PR 5613](https://github.com/libp2p/rust-libp2p/pull/5613) + ## 0.44.0 - Implement refactored `Transport`. diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index e08346da5ca..07f84901eda 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket" edition = "2021" rust-version = { workspace = true } description = "WebSocket transport for libp2p" -version = "0.44.0" +version = "0.44.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 198443508d9..259be6a68f8 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -510,8 +510,7 @@ fn parse_ws_dial_addr(addr: Multiaddr) -> Result> { } (Some(Protocol::Dns(h)), Some(Protocol::Tcp(port))) | (Some(Protocol::Dns4(h)), Some(Protocol::Tcp(port))) - | (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) - | (Some(Protocol::Dnsaddr(h)), Some(Protocol::Tcp(port))) => { + | (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) => { break (format!("{h}:{port}"), tls::dns_name_ref(&h)?) 
} (Some(_), Some(p)) => { @@ -993,6 +992,12 @@ mod tests { assert_eq!(info.server_name, "::1".try_into().unwrap()); assert_eq!(info.tcp_addr, "/ip6/::1/tcp/2222".parse().unwrap()); + // Check `/dnsaddr` + let addr = "/dnsaddr/example.com/tcp/2222/ws" + .parse::() + .unwrap(); + parse_ws_dial_addr::(addr).unwrap_err(); + // Check non-ws address let addr = "/ip4/127.0.0.1/tcp/2222".parse::().unwrap(); parse_ws_dial_addr::(addr).unwrap_err(); From 5179d78e0ab602d03f672c3e96160181cf8f7188 Mon Sep 17 00:00:00 2001 From: yanziseeker <153156292+AdventureSeeker987@users.noreply.github.com> Date: Wed, 6 Nov 2024 07:10:30 +0800 Subject: [PATCH 24/50] chore: fix some comments (#5661) ## Description ## Notes & open questions ## Change checklist - [x] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] A changelog entry has been made in the appropriate crates --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- protocols/gossipsub/src/behaviour.rs | 2 +- protocols/gossipsub/src/peer_score/params.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 6ddb25316e5..bf94a5b7920 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -947,7 +947,7 @@ where && !self.backoffs.is_backoff_with_slack(topic_hash, p) }); - // Add up to mesh_n of them them to the mesh + // Add up to mesh_n of them to the mesh // NOTE: These aren't randomly added, currently FIFO let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); tracing::debug!( diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs index 35bea0e4353..8c7fdb9bd35 100644 --- a/protocols/gossipsub/src/peer_score/params.rs +++ 
b/protocols/gossipsub/src/peer_score/params.rs @@ -229,7 +229,7 @@ pub struct TopicScoreParams { /// P1: time in the mesh /// This is the time the peer has been grafted in the mesh. - /// The value of of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap` + /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap` /// The weight of the parameter must be positive (or zero to disable). pub time_in_mesh_weight: f64, pub time_in_mesh_quantum: Duration, From 858a4cd954bf52be108ebc63bbb00b52964770dd Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:52:32 +0530 Subject: [PATCH 25/50] chore: identify::Config fields private (#5663) ## Description Closes #5660 ## Change checklist - [x] I have performed a self-review of my own code - [x] I have made corresponding changes to the documentation - [x] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/identify/CHANGELOG.md | 5 +++ protocols/identify/Cargo.toml | 2 +- protocols/identify/src/behaviour.rs | 49 ++++++++++++++++++++++++----- 5 files changed, 50 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a6278717f1..e5e41d3bdf8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2778,7 +2778,7 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.45.1" +version = "0.46.0" dependencies = [ "async-std", "asynchronous-codec", diff --git a/Cargo.toml b/Cargo.toml index 780d26240db..aab7f0d71d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,7 +84,7 @@ libp2p-dcutr = { version = "0.12.0", path = "protocols/dcutr" } libp2p-dns = { version = "0.42.0", path = "transports/dns" } libp2p-floodsub = { version = "0.45.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.48.0", path = "protocols/gossipsub" } -libp2p-identify = { 
version = "0.45.1", path = "protocols/identify" } +libp2p-identify = { version = "0.46.0", path = "protocols/identify" } libp2p-identity = { version = "0.2.9" } libp2p-kad = { version = "0.47.0", path = "protocols/kad" } libp2p-mdns = { version = "0.46.0", path = "protocols/mdns" } diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index c5778ff92ee..9051c331bbc 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.46.0 + +- Make `identify::Config` fields private and add getter functions. + See [PR 5663](https://github.com/libp2p/rust-libp2p/pull/5663). + ## 0.45.1 - Add `hide_listen_addrs` option to prevent leaking (local) listen addresses. diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index c3fb585c99c..13c43b6a71f 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identify" edition = "2021" rust-version = { workspace = true } description = "Nodes identification protocol for libp2p" -version = "0.45.1" +version = "0.46.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index c8672674c1a..1f82cd154e3 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -117,20 +117,20 @@ pub struct Behaviour { pub struct Config { /// Application-specific version of the protocol family used by the peer, /// e.g. `ipfs/1.0.0` or `polkadot/1.0.0`. - pub protocol_version: String, + protocol_version: String, /// The public key of the local node. To report on the wire. - pub local_public_key: PublicKey, + local_public_key: PublicKey, /// Name and version of the local peer implementation, similar to the /// `User-Agent` header in the HTTP protocol. /// /// Defaults to `rust-libp2p/`. 
- pub agent_version: String, + agent_version: String, /// The interval at which identification requests are sent to /// the remote on established connections after the first request, /// i.e. the delay between identification requests. /// /// Defaults to 5 minutes. - pub interval: Duration, + interval: Duration, /// Whether new or expired listen addresses of the local node should /// trigger an active push of an identify message to all connected peers. @@ -140,19 +140,19 @@ pub struct Config { /// i.e. before the next periodic identify request with each peer. /// /// Disabled by default. - pub push_listen_addr_updates: bool, + push_listen_addr_updates: bool, /// How many entries of discovered peers to keep before we discard /// the least-recently used one. /// /// Disabled by default. - pub cache_size: usize, + cache_size: usize, /// Whether to include our listen addresses in our responses. If enabled, /// we will effectively only share our external addresses. /// /// Disabled by default. - pub hide_listen_addrs: bool, + hide_listen_addrs: bool, } impl Config { @@ -202,6 +202,41 @@ impl Config { self.hide_listen_addrs = b; self } + + /// Get the protocol version of the Config. + pub fn protocol_version(&self) -> &str { + &self.protocol_version + } + + /// Get the local public key of the Config. + pub fn local_public_key(&self) -> &PublicKey { + &self.local_public_key + } + + /// Get the agent version of the Config. + pub fn agent_version(&self) -> &str { + &self.agent_version + } + + /// Get the interval of the Config. + pub fn interval(&self) -> Duration { + self.interval + } + + /// Get the push listen address updates boolean value of the Config. + pub fn push_listen_addr_updates(&self) -> bool { + self.push_listen_addr_updates + } + + /// Get the cache size of the Config. + pub fn cache_size(&self) -> usize { + self.cache_size + } + + /// Get the hide listen address boolean value of the Config. 
+ pub fn hide_listen_addrs(&self) -> bool { + self.hide_listen_addrs + } } impl Behaviour { From a9b67995883d2b10ec8fe3b1e6644f44257de90f Mon Sep 17 00:00:00 2001 From: wangjingcun Date: Fri, 8 Nov 2024 12:23:13 +0800 Subject: [PATCH 26/50] chore(protocols): fix some typos in comment (#5665) ## Description fix some typos in comment ## Notes & open questions ## Change checklist - [ ] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] A changelog entry has been made in the appropriate crates Signed-off-by: wangjingcun --- protocols/autonat/src/v2.rs | 2 +- protocols/identify/src/behaviour.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/protocols/autonat/src/v2.rs b/protocols/autonat/src/v2.rs index cdc807ea303..48e9f25f890 100644 --- a/protocols/autonat/src/v2.rs +++ b/protocols/autonat/src/v2.rs @@ -10,7 +10,7 @@ //! server then the dial back puts on the client, thus making the protocol unatractive for an //! attacker. //! -//! The protocol is seperated into two parts: +//! The protocol is separated into two parts: //! - The client part, which is implemented in the `client` module. (The client is the party that //! wants to check if it is reachable from the outside.) //! - The server part, which is implemented in the `server` module. 
(The server is the party diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index 1f82cd154e3..b69f2014d81 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -360,7 +360,7 @@ impl Behaviour { } // outgoing connection dialed with port reuse - // incomming connection + // incoming connection self.events .push_back(ToSwarm::NewExternalAddrCandidate(observed.clone())); } From 4192fc3daed761a127e6986a75e016d483846555 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 8 Nov 2024 19:21:23 +0000 Subject: [PATCH 27/50] chore(ci): fix interop tests region, and run them again on each PR (#5666) --- .github/workflows/interop-test.yml | 1 + scripts/build-interop-image.sh | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 57d0f1a692d..e9446d013d7 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -1,5 +1,6 @@ name: Interoperability Testing on: + pull_request: push: branches: - "master" diff --git a/scripts/build-interop-image.sh b/scripts/build-interop-image.sh index ad6ef78b153..4b96e353f9a 100755 --- a/scripts/build-interop-image.sh +++ b/scripts/build-interop-image.sh @@ -6,13 +6,13 @@ CACHE_TO="" # If we have credentials, write to cache if [[ -n "${AWS_SECRET_ACCESS_KEY}" ]]; then - CACHE_TO="--cache-to type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" + CACHE_TO="--cache-to type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=ap-southeast-2,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" fi docker buildx build \ --load \ $CACHE_TO \ - --cache-from type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ + --cache-from 
type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=ap-southeast-2,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ -t ${FLAVOUR}-rust-libp2p-head \ . \ -f interop-tests/Dockerfile.${FLAVOUR} From 0c34d9fdd45e81892824ba5faf3875ab35f14f1b Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Fri, 15 Nov 2024 02:09:15 +0700 Subject: [PATCH 28/50] chore: deprecate `void` crate (#5676) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description The `void` crate provides a `Void` type that is conceptually equivalent to the [`never` type(!)](https://doc.rust-lang.org/std/primitive.never.html). This PR tries to remove `void` crate from the dependency tree by replacing `void::Void` with [`std::convert::Infallible`](https://doc.rust-lang.org/std/convert/enum.Infallible.html) that will eventually become an alias of the `never` type(!) > This enum has the same role as [the ! “never” type](https://doc.rust-lang.org/std/primitive.never.html), which is unstable in this version of Rust. When ! 
is stabilized, we plan to make Infallible a type alias to it: ## Notes & open questions ## Change checklist - [x] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] A changelog entry has been made in the appropriate crates --- Cargo.lock | 26 ---------------- core/Cargo.toml | 1 - core/src/lib.rs | 10 +++++++ core/src/upgrade/denied.rs | 10 +++---- core/src/upgrade/pending.rs | 10 +++---- core/src/upgrade/ready.rs | 6 ++-- examples/file-sharing/Cargo.toml | 1 - identity/Cargo.toml | 3 +- identity/src/ecdsa.rs | 7 +++-- misc/allow-block-list/Cargo.toml | 1 - misc/allow-block-list/src/lib.rs | 6 ++-- misc/connection-limits/Cargo.toml | 1 - misc/connection-limits/src/lib.rs | 10 +++---- misc/memory-connection-limits/Cargo.toml | 1 - misc/memory-connection-limits/src/lib.rs | 6 ++-- .../tests/util/mod.rs | 6 ++-- protocols/autonat/Cargo.toml | 3 +- .../src/v2/client/handler/dial_back.rs | 6 ++-- .../src/v2/client/handler/dial_request.rs | 5 ++-- protocols/autonat/src/v2/server/behaviour.rs | 2 +- .../src/v2/server/handler/dial_request.rs | 3 +- protocols/dcutr/Cargo.toml | 1 - protocols/dcutr/src/behaviour.rs | 6 ++-- protocols/dcutr/src/handler/relayed.rs | 6 ++-- protocols/gossipsub/Cargo.toml | 1 - protocols/gossipsub/src/handler.rs | 4 +-- protocols/gossipsub/src/protocol.rs | 6 ++-- protocols/identify/Cargo.toml | 1 - protocols/identify/src/handler.rs | 2 +- protocols/kad/Cargo.toml | 1 - protocols/kad/src/handler.rs | 4 +-- protocols/mdns/Cargo.toml | 1 - protocols/mdns/src/behaviour.rs | 2 +- protocols/perf/Cargo.toml | 1 - protocols/perf/src/client.rs | 4 +-- protocols/perf/src/client/handler.rs | 4 +-- protocols/perf/src/server/handler.rs | 14 ++++----- protocols/ping/Cargo.toml | 1 - protocols/ping/src/handler.rs | 8 ++--- protocols/relay/Cargo.toml | 1 - protocols/relay/src/behaviour.rs | 2 +- 
protocols/relay/src/behaviour/handler.rs | 2 +- protocols/relay/src/priv_client.rs | 6 ++-- protocols/relay/src/priv_client/handler.rs | 14 ++++----- protocols/rendezvous/Cargo.toml | 1 - protocols/request-response/Cargo.toml | 1 - protocols/request-response/src/handler.rs | 4 +-- .../request-response/src/handler/protocol.rs | 6 ++-- protocols/stream/Cargo.toml | 1 - protocols/stream/src/behaviour.rs | 2 +- protocols/stream/src/handler.rs | 9 +++--- protocols/stream/src/upgrade.rs | 9 ++++-- protocols/upnp/Cargo.toml | 1 - protocols/upnp/src/behaviour.rs | 2 +- swarm/Cargo.toml | 2 -- swarm/src/behaviour/toggle.rs | 4 +-- swarm/src/connection.rs | 30 +++++++++---------- swarm/src/connection/pool.rs | 4 +-- swarm/src/connection/pool/task.rs | 10 +++---- swarm/src/dummy.rs | 20 ++++++------- swarm/src/handler/one_shot.rs | 4 +-- swarm/src/handler/pending.rs | 16 +++++----- swarm/tests/connection_close.rs | 6 ++-- swarm/tests/listener.rs | 3 +- swarm/tests/swarm_derive.rs | 10 +++---- 65 files changed, 168 insertions(+), 193 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5e41d3bdf8..5ef3a17a4a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1570,7 +1570,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", ] [[package]] @@ -2586,7 +2585,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-derive", "libp2p-swarm-test", - "void", ] [[package]] @@ -2615,7 +2613,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -2633,7 +2630,6 @@ dependencies = [ "libp2p-swarm-test", "quickcheck-ext", "rand 0.8.5", - "void", ] [[package]] @@ -2663,7 +2659,6 @@ dependencies = [ "thiserror", "tracing", "unsigned-varint 0.8.0", - "void", "web-time 1.1.0", ] @@ -2697,7 +2692,6 @@ dependencies = [ "thiserror", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -2772,7 +2766,6 @@ dependencies = [ "smallvec", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -2797,7 +2790,6 @@ dependencies = 
[ "thiserror", "tracing", "tracing-subscriber", - "void", ] [[package]] @@ -2825,7 +2817,6 @@ dependencies = [ "sha2 0.10.8", "thiserror", "tracing", - "void", "zeroize", ] @@ -2860,7 +2851,6 @@ dependencies = [ "tracing", "tracing-subscriber", "uint", - "void", "web-time 1.1.0", ] @@ -2887,7 +2877,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", ] [[package]] @@ -2905,7 +2894,6 @@ dependencies = [ "rand 0.8.5", "sysinfo", "tracing", - "void", ] [[package]] @@ -3015,7 +3003,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -3035,7 +3022,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -3132,7 +3118,6 @@ dependencies = [ "thiserror", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -3162,7 +3147,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -3191,7 +3175,6 @@ dependencies = [ "smallvec", "tracing", "tracing-subscriber", - "void", "web-time 1.1.0", ] @@ -3228,7 +3211,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "void", ] [[package]] @@ -3261,7 +3243,6 @@ dependencies = [ "tracing", "tracing-subscriber", "trybuild", - "void", "wasm-bindgen-futures", "web-time 1.1.0", ] @@ -3356,7 +3337,6 @@ dependencies = [ "libp2p-swarm", "tokio", "tracing", - "void", ] [[package]] @@ -6507,12 +6487,6 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" - [[package]] name = "waitgroup" version = "0.1.2" diff --git a/core/Cargo.toml b/core/Cargo.toml index 8a083276e7f..d8260e14d1f 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -31,7 +31,6 @@ smallvec = "1.13.2" thiserror = 
"1.0" tracing = { workspace = true } unsigned-varint = { workspace = true } -void = "1" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } diff --git a/core/src/lib.rs b/core/src/lib.rs index a42f56773df..ab5afbedae4 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -66,3 +66,13 @@ pub use upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; #[derive(Debug, thiserror::Error)] #[error(transparent)] pub struct DecodeError(quick_protobuf::Error); + +pub mod util { + use std::convert::Infallible; + + /// A safe version of [`std::intrinsics::unreachable`]. + #[inline(always)] + pub fn unreachable(x: Infallible) -> ! { + match x {} + } +} diff --git a/core/src/upgrade/denied.rs b/core/src/upgrade/denied.rs index 353a184822d..568bbfb056d 100644 --- a/core/src/upgrade/denied.rs +++ b/core/src/upgrade/denied.rs @@ -20,8 +20,8 @@ use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use futures::future; +use std::convert::Infallible; use std::iter; -use void::Void; /// Dummy implementation of `UpgradeInfo`/`InboundUpgrade`/`OutboundUpgrade` that doesn't support /// any protocol. 
@@ -38,8 +38,8 @@ impl UpgradeInfo for DeniedUpgrade { } impl InboundUpgrade for DeniedUpgrade { - type Output = Void; - type Error = Void; + type Output = Infallible; + type Error = Infallible; type Future = future::Pending>; fn upgrade_inbound(self, _: C, _: Self::Info) -> Self::Future { @@ -48,8 +48,8 @@ impl InboundUpgrade for DeniedUpgrade { } impl OutboundUpgrade for DeniedUpgrade { - type Output = Void; - type Error = Void; + type Output = Infallible; + type Error = Infallible; type Future = future::Pending>; fn upgrade_outbound(self, _: C, _: Self::Info) -> Self::Future { diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs index 6931e20bfdc..5e3c65422f1 100644 --- a/core/src/upgrade/pending.rs +++ b/core/src/upgrade/pending.rs @@ -21,8 +21,8 @@ use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use futures::future; +use std::convert::Infallible; use std::iter; -use void::Void; /// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always /// returns a pending upgrade. @@ -53,8 +53,8 @@ impl InboundUpgrade for PendingUpgrade

where P: AsRef + Clone, { - type Output = Void; - type Error = Void; + type Output = Infallible; + type Error = Infallible; type Future = future::Pending>; fn upgrade_inbound(self, _: C, _: Self::Info) -> Self::Future { @@ -66,8 +66,8 @@ impl OutboundUpgrade for PendingUpgrade

where P: AsRef + Clone, { - type Output = Void; - type Error = Void; + type Output = Infallible; + type Error = Infallible; type Future = future::Pending>; fn upgrade_outbound(self, _: C, _: Self::Info) -> Self::Future { diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs index 7e235902651..13270aa8b6d 100644 --- a/core/src/upgrade/ready.rs +++ b/core/src/upgrade/ready.rs @@ -21,8 +21,8 @@ use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use futures::future; +use std::convert::Infallible; use std::iter; -use void::Void; /// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream. #[derive(Debug, Copy, Clone)] @@ -53,7 +53,7 @@ where P: AsRef + Clone, { type Output = C; - type Error = Void; + type Error = Infallible; type Future = future::Ready>; fn upgrade_inbound(self, stream: C, _: Self::Info) -> Self::Future { @@ -66,7 +66,7 @@ where P: AsRef + Clone, { type Output = C; - type Error = Void; + type Error = Infallible; type Future = future::Ready>; fn upgrade_outbound(self, stream: C, _: Self::Info) -> Self::Future { diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index 7cbb96cc7ed..d098ce44317 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -16,7 +16,6 @@ futures = { workspace = true } libp2p = { path = "../../libp2p", features = [ "tokio", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } -void = "1.0.2" [lints] workspace = true diff --git a/identity/Cargo.toml b/identity/Cargo.toml index cb0b8cb000e..370533eed58 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -26,7 +26,6 @@ sec1 = { version = "0.7", default-features = false, optional = true } serde = { version = "1", optional = true, features = ["derive"] } sha2 = { version = "0.10.8", optional = 
true } thiserror = { version = "1.0", optional = true } -void = { version = "1.0", optional = true } zeroize = { version = "1.8", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] @@ -34,7 +33,7 @@ ring = { workspace = true, features = ["alloc", "std"], optional = true } [features] secp256k1 = ["dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize"] -ecdsa = ["dep:p256", "dep:void", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf"] +ecdsa = ["dep:p256", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf"] rsa = ["dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize"] ed25519 = ["dep:ed25519-dalek", "dep:zeroize", "dep:sha2", "dep:hkdf"] peerid = ["dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2", "dep:hkdf"] diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 65cbe885b86..922675097df 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -32,7 +32,7 @@ use p256::{ EncodedPoint, }; use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey}; -use void::Void; +use std::convert::Infallible; use zeroize::Zeroize; /// An ECDSA keypair generated using `secp256r1` curve. @@ -182,7 +182,10 @@ impl PublicKey { /// Try to decode a public key from a DER encoded byte buffer as defined by SEC1 standard. 
pub fn try_decode_der(k: &[u8]) -> Result { let buf = Self::del_asn1_header(k).ok_or_else(|| { - DecodingError::failed_to_parse::("ASN.1-encoded ecdsa p256 public key", None) + DecodingError::failed_to_parse::( + "ASN.1-encoded ecdsa p256 public key", + None, + ) })?; Self::try_from_bytes(buf) } diff --git a/misc/allow-block-list/Cargo.toml b/misc/allow-block-list/Cargo.toml index 1ff0ccff906..c169be87056 100644 --- a/misc/allow-block-list/Cargo.toml +++ b/misc/allow-block-list/Cargo.toml @@ -13,7 +13,6 @@ categories = ["network-programming", "asynchronous"] libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } -void = "1" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index 56de29d1985..f93cf4ffefa 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -69,9 +69,9 @@ use libp2p_swarm::{ THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{HashSet, VecDeque}; +use std::convert::Infallible; use std::fmt; use std::task::{Context, Poll, Waker}; -use void::Void; /// A [`NetworkBehaviour`] that can act as an allow or block list. 
#[derive(Default, Debug)] @@ -222,7 +222,7 @@ where S: Enforce, { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = Void; + type ToSwarm = Infallible; fn handle_established_inbound_connection( &mut self, @@ -273,7 +273,7 @@ where ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll( diff --git a/misc/connection-limits/Cargo.toml b/misc/connection-limits/Cargo.toml index 56fe97f984b..0d17cb74862 100644 --- a/misc/connection-limits/Cargo.toml +++ b/misc/connection-limits/Cargo.toml @@ -13,7 +13,6 @@ categories = ["network-programming", "asynchronous"] libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } -void = "1" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index 05a9b639f26..016a7f2cfd4 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -26,9 +26,9 @@ use libp2p_swarm::{ THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{HashMap, HashSet}; +use std::convert::Infallible; use std::fmt; use std::task::{Context, Poll}; -use void::Void; /// A [`NetworkBehaviour`] that enforces a set of [`ConnectionLimits`]. 
/// @@ -203,7 +203,7 @@ impl ConnectionLimits { impl NetworkBehaviour for Behaviour { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = Void; + type ToSwarm = Infallible; fn handle_pending_inbound_connection( &mut self, @@ -357,7 +357,7 @@ impl NetworkBehaviour for Behaviour { ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { @@ -551,7 +551,7 @@ mod tests { impl NetworkBehaviour for ConnectionDenier { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = Void; + type ToSwarm = Infallible; fn handle_established_inbound_connection( &mut self, @@ -590,7 +590,7 @@ mod tests { ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll( diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml index f56ed33d5ad..19ae256e853 100644 --- a/misc/memory-connection-limits/Cargo.toml +++ b/misc/memory-connection-limits/Cargo.toml @@ -16,7 +16,6 @@ libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } sysinfo = "0.30" tracing = { workspace = true } -void = "1" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index 757ff770487..e2a89977991 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -24,7 +24,7 @@ use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use void::Void; +use std::convert::Infallible; use std::{ fmt, @@ -139,7 +139,7 @@ impl Behaviour { impl NetworkBehaviour for Behaviour { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = Void; 
+ type ToSwarm = Infallible; fn handle_pending_inbound_connection( &mut self, @@ -192,7 +192,7 @@ impl NetworkBehaviour for Behaviour { ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs index 01e8cd9f655..333b0ee135f 100644 --- a/misc/memory-connection-limits/tests/util/mod.rs +++ b/misc/memory-connection-limits/tests/util/mod.rs @@ -26,7 +26,7 @@ use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use void::Void; +use std::convert::Infallible; #[derive(libp2p_swarm_derive::NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] @@ -62,7 +62,7 @@ impl NetworkBehaviour for ConsumeMemoryBehaviour { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = Void; + type ToSwarm = Infallible; fn handle_pending_inbound_connection( &mut self, @@ -118,7 +118,7 @@ impl NetworkBehaviour ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 0c0e757641d..169006e7508 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -30,7 +30,6 @@ quick-protobuf-codec = { workspace = true } rand = "0.8" rand_core = { version = "0.6", optional = true } thiserror = { version = "1.0.52", optional = true } -void = { version = "1", optional = true } [dev-dependencies] tokio = { version = "1", features = ["macros", "rt", "sync"]} @@ -43,7 +42,7 @@ libp2p-swarm = { workspace = true, features = ["macros"]} [features] default = ["v1", "v2"] v1 = ["dep:libp2p-request-response", 
"dep:web-time", "dep:async-trait"] -v2 = ["dep:bytes", "dep:either", "dep:futures-bounded", "dep:thiserror", "dep:void", "dep:rand_core"] +v2 = ["dep:bytes", "dep:either", "dep:futures-bounded", "dep:thiserror", "dep:rand_core"] # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs index 98a41a82504..b3b3a59c02d 100644 --- a/protocols/autonat/src/v2/client/handler/dial_back.rs +++ b/protocols/autonat/src/v2/client/handler/dial_back.rs @@ -11,7 +11,7 @@ use libp2p_swarm::{ handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError}, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use void::Void; +use std::convert::Infallible; use crate::v2::{protocol, Nonce, DIAL_BACK_PROTOCOL}; @@ -28,7 +28,7 @@ impl Handler { } impl ConnectionHandler for Handler { - type FromBehaviour = Void; + type FromBehaviour = Infallible; type ToBehaviour = IncomingNonce; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = DeniedUpgrade; @@ -86,7 +86,7 @@ impl ConnectionHandler for Handler { // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. 
}) => { - void::unreachable(error); + libp2p_core::util::unreachable(error); } _ => {} } diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs index 85ad176ec30..0f303167523 100644 --- a/protocols/autonat/src/v2/client/handler/dial_request.rs +++ b/protocols/autonat/src/v2/client/handler/dial_request.rs @@ -15,6 +15,7 @@ use libp2p_swarm::{ }; use std::{ collections::VecDeque, + convert::Infallible, io, iter::{once, repeat}, task::{Context, Poll}, @@ -208,7 +209,7 @@ impl ConnectionHandler for Handler { async fn start_stream_handle( req: DialRequest, - stream_recv: oneshot::Receiver>>, + stream_recv: oneshot::Receiver>>, ) -> Result<(Multiaddr, usize), Error> { let stream = stream_recv .await @@ -218,7 +219,7 @@ async fn start_stream_handle( StreamUpgradeError::Timeout => Error::Io(io::ErrorKind::TimedOut.into()), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::Apply(v) => libp2p_core::util::unreachable(v), StreamUpgradeError::Io(e) => Error::Io(e), })?; diff --git a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs index 9264c728fe4..027cfff7c13 100644 --- a/protocols/autonat/src/v2/server/behaviour.rs +++ b/protocols/autonat/src/v2/server/behaviour.rs @@ -114,7 +114,7 @@ where } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Left(Either::Right(v)) => void::unreachable(v), + Either::Left(Either::Right(v)) => libp2p_core::util::unreachable(v), Either::Right(Either::Left(cmd)) => { let addr = cmd.addr.clone(); let opts = DialOpts::peer_id(peer_id) diff --git a/protocols/autonat/src/v2/server/handler/dial_request.rs b/protocols/autonat/src/v2/server/handler/dial_request.rs index 14ddb153416..5058e0f3f42 100644 --- a/protocols/autonat/src/v2/server/handler/dial_request.rs +++ 
b/protocols/autonat/src/v2/server/handler/dial_request.rs @@ -1,4 +1,5 @@ use std::{ + convert::Infallible, io, task::{Context, Poll}, time::Duration, @@ -73,7 +74,7 @@ impl ConnectionHandler for Handler where R: RngCore + Send + Clone + 'static, { - type FromBehaviour = void::Void; + type FromBehaviour = Infallible; type ToBehaviour = Either; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = DeniedUpgrade; diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 6b1d04f82f5..c470291af0d 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -23,7 +23,6 @@ quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } thiserror = "1.0" tracing = { workspace = true } -void = "1" lru = "0.12.3" futures-bounded = { workspace = true } diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index babd56bd28e..7d0366c98bc 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -36,10 +36,10 @@ use libp2p_swarm::{ use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; use lru::LruCache; use std::collections::{HashMap, HashSet, VecDeque}; +use std::convert::Infallible; use std::num::NonZeroUsize; use std::task::{Context, Poll}; use thiserror::Error; -use void::Void; pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; @@ -68,7 +68,7 @@ enum InnerError { pub struct Behaviour { /// Queue of actions to return when polled. - queued_events: VecDeque>>, + queued_events: VecDeque>>, /// All direct (non-relayed) connections. 
direct_connections: HashMap>, @@ -316,7 +316,7 @@ impl NetworkBehaviour for Behaviour { } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Right(never) => void::unreachable(never), + Either::Right(never) => libp2p_core::util::unreachable(never), }; } diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 72af9fec264..ad12a196cb9 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -117,7 +117,7 @@ impl Handler { // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - future::Either::Right(output) => void::unreachable(output), + future::Either::Right(output) => libp2p_core::util::unreachable(output), } } @@ -157,7 +157,7 @@ impl Handler { ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(error.into_inner()); + libp2p_core::util::unreachable(error.into_inner()); } fn on_dial_upgrade_error( @@ -170,7 +170,7 @@ impl Handler { let error = match error { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::Apply(v) => libp2p_core::util::unreachable(v), StreamUpgradeError::NegotiationFailed => outbound::Error::Unsupported, StreamUpgradeError::Io(e) => outbound::Error::Io(e), StreamUpgradeError::Timeout => outbound::Error::Io(io::ErrorKind::TimedOut.into()), diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 734ac36a231..1416cdb8de3 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -36,7 +36,6 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" smallvec = "1.13.2" tracing = { workspace = true } -void = "1.0.2" # Metrics dependencies prometheus-client = { workspace = true } diff --git 
a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 8e3b3a8b022..0ccea667268 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -495,7 +495,7 @@ impl ConnectionHandler for Handler { Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Right(v) => void::unreachable(v), + Either::Right(v) => libp2p_core::util::unreachable(v), }, ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { handler.on_fully_negotiated_outbound(fully_negotiated_outbound) @@ -511,7 +511,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::Apply(e), .. - }) => void::unreachable(e), + }) => libp2p_core::util::unreachable(e), ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::NegotiationFailed, .. diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index c13caae58b6..13edecd5846 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -34,8 +34,8 @@ use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; use quick_protobuf::Writer; +use std::convert::Infallible; use std::pin::Pin; -use void::Void; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; @@ -102,7 +102,7 @@ where TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = (Framed, PeerKind); - type Error = Void; + type Error = Infallible; type Future = Pin> + Send>>; fn upgrade_inbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future { @@ -121,7 +121,7 @@ where TSocket: AsyncWrite + AsyncRead + Unpin + Send + 'static, { type Output = (Framed, PeerKind); - type Error = Void; + type Error = Infallible; type Future = Pin> + Send>>; fn upgrade_outbound(self, socket: 
TSocket, protocol_id: Self::Info) -> Self::Future { diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 13c43b6a71f..87b3ed63774 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -24,7 +24,6 @@ quick-protobuf = "0.8" smallvec = "1.13.2" thiserror = "1.0" tracing = { workspace = true } -void = "1.0" either = "1.12.0" [dev-dependencies] diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index f9b77e0b63a..dd073d50ed6 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -398,7 +398,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. }) => { self.events.push(ConnectionHandlerEvent::NotifyBehaviour( Event::IdentificationError( - error.map_upgrade_err(|e| void::unreachable(e.into_inner())), + error.map_upgrade_err(|e| libp2p_core::util::unreachable(e.into_inner())), ), )); self.trigger_next_identify.reset(self.interval); diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 5b95b8ac17d..11df81afbf8 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -27,7 +27,6 @@ rand = "0.8" sha2 = "0.10.8" smallvec = "1.13.2" uint = "0.9" -void = "1.0" futures-timer = "3.0.3" web-time = { workspace = true } serde = { version = "1.0", optional = true, features = ["derive"] } diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 17c483da709..384ebc3f2b1 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -498,12 +498,12 @@ impl Handler { >, ) { // If `self.allow_listening` is false, then we produced a `DeniedUpgrade` and `protocol` - // is a `Void`. + // is a `Infallible`. 
let protocol = match protocol { future::Either::Left(p) => p, // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - future::Either::Right(p) => void::unreachable(p), + future::Either::Right(p) => libp2p_core::util::unreachable(p), }; if self.protocol_status.is_none() { diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 19ae5ce9f36..338501aa896 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -25,7 +25,6 @@ socket2 = { version = "0.5.7", features = ["all"] } tokio = { workspace = true, default-features = false, features = ["net", "time"], optional = true} tracing = { workspace = true } hickory-proto = { version = "0.24.1", default-features = false, features = ["mdns"] } -void = "1.0.2" [features] tokio = ["dep:tokio", "if-watch/tokio"] diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 6355fbf4943..cecd27bf78b 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -275,7 +275,7 @@ where _: ConnectionId, ev: THandlerOutEvent, ) { - void::unreachable(ev) + libp2p_core::util::unreachable(ev) } fn on_swarm_event(&mut self, event: FromSwarm) { diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index 398bdce65ec..a1a6128c6ed 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -32,7 +32,6 @@ thiserror = "1.0" tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } -void = "1" [dev-dependencies] rand = "0.8" diff --git a/protocols/perf/src/client.rs b/protocols/perf/src/client.rs index c4614e979db..9f984a5bba1 100644 --- a/protocols/perf/src/client.rs +++ b/protocols/perf/src/client.rs @@ -25,7 +25,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; pub use behaviour::{Behaviour, Event}; use libp2p_swarm::StreamUpgradeError; -use void::Void; +use std::convert::Infallible; static 
NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1); @@ -43,7 +43,7 @@ impl RunId { #[derive(thiserror::Error, Debug)] pub enum RunError { #[error(transparent)] - Upgrade(#[from] StreamUpgradeError), + Upgrade(#[from] StreamUpgradeError), #[error("Failed to execute perf run: {0}")] Io(#[from] std::io::Error), } diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 55fafad7fcc..85e864949f8 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -116,7 +116,7 @@ impl ConnectionHandler for Handler { #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. - }) => void::unreachable(protocol), + }) => libp2p_core::util::unreachable(protocol), ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, info: (), @@ -149,7 +149,7 @@ impl ConnectionHandler for Handler { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { - void::unreachable(error) + libp2p_core::util::unreachable(error) } _ => {} } diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index 4cb535a452c..c1363ae2380 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -29,8 +29,8 @@ use libp2p_swarm::{ }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; +use std::convert::Infallible; use tracing::error; -use void::Void; use crate::Run; @@ -61,11 +61,11 @@ impl Default for Handler { } impl ConnectionHandler for Handler { - type FromBehaviour = Void; + type FromBehaviour = Infallible; type ToBehaviour = Event; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = Void; + type OutboundOpenInfo = Infallible; type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -75,7 +75,7 @@ impl 
ConnectionHandler for Handler { fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(v) + libp2p_core::util::unreachable(v) } fn on_connection_event( @@ -103,13 +103,13 @@ impl ConnectionHandler for Handler { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { info, .. }) => { - void::unreachable(info) + libp2p_core::util::unreachable(info) } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::DialUpgradeError(DialUpgradeError { info, .. }) => { - void::unreachable(info) + libp2p_core::util::unreachable(info) } ConnectionEvent::AddressChange(_) | ConnectionEvent::LocalProtocolsChange(_) @@ -117,7 +117,7 @@ impl ConnectionHandler for Handler { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { - void::unreachable(error) + libp2p_core::util::unreachable(error) } _ => {} } diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 794ab54ba42..755ebd35718 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -20,7 +20,6 @@ libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } rand = "0.8" tracing = { workspace = true } -void = "1.0" [dev-dependencies] libp2p-swarm = { workspace = true, features = ["macros"] } diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 7b36b2d4b3d..961716e934a 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -31,13 +31,13 @@ use libp2p_swarm::{ SubstreamProtocol, }; use std::collections::VecDeque; +use std::convert::Infallible; use std::{ error::Error, fmt, io, task::{Context, Poll}, time::Duration, }; -use void::Void; /// The configuration for outbound pings. 
#[derive(Debug, Clone)] @@ -212,7 +212,7 @@ impl Handler { }, // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(e) => void::unreachable(e), + StreamUpgradeError::Apply(e) => libp2p_core::util::unreachable(e), StreamUpgradeError::Io(e) => Failure::Other { error: Box::new(e) }, }; @@ -221,7 +221,7 @@ impl Handler { } impl ConnectionHandler for Handler { - type FromBehaviour = Void; + type FromBehaviour = Infallible; type ToBehaviour = Result; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = ReadyUpgrade; @@ -232,7 +232,7 @@ impl ConnectionHandler for Handler { SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) } - fn on_behaviour_event(&mut self, _: Void) {} + fn on_behaviour_event(&mut self, _: Infallible) {} #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index 084fec07efd..a3a659619b6 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -27,7 +27,6 @@ rand = "0.8.4" static_assertions = "1" thiserror = "1.0" tracing = { workspace = true } -void = "1" [dev-dependencies] libp2p-identity = { workspace = true, features = ["rand"] } diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 46419ae64e3..e854ed2a1ff 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -368,7 +368,7 @@ impl NetworkBehaviour for Behaviour { Either::Left(e) => e, // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Right(v) => void::unreachable(v), + Either::Right(v) => libp2p_core::util::unreachable(v), }; match event { diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 23e90f4b3f8..92e45720f3f 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -451,7 +451,7 @@ impl Handler { 
StreamUpgradeError::Io(e) => outbound_stop::Error::Io(e), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::Apply(v) => libp2p_core::util::unreachable(v), }; let stop_command = self diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index 8bbc813ec4c..fc9d28e66ed 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -44,11 +44,11 @@ use libp2p_swarm::{ NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, VecDeque}; +use std::convert::Infallible; use std::io::{Error, ErrorKind, IoSlice}; use std::pin::Pin; use std::task::{Context, Poll}; use transport::Transport; -use void::Void; /// The events produced by the client `Behaviour`. #[derive(Debug)] @@ -93,7 +93,7 @@ pub struct Behaviour { reservation_addresses: HashMap, /// Queue of actions to return when polled. - queued_actions: VecDeque>>, + queued_actions: VecDeque>>, pending_handler_commands: HashMap, } @@ -238,7 +238,7 @@ impl NetworkBehaviour for Behaviour { Either::Left(e) => e, // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Right(v) => void::unreachable(v), + Either::Right(v) => libp2p_core::util::unreachable(v), }; let event = match handler_event { diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 05fdd5673ae..77b7f94ae60 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -37,10 +37,10 @@ use libp2p_swarm::{ SubstreamProtocol, }; use std::collections::VecDeque; +use std::convert::Infallible; use std::task::{Context, Poll}; use std::time::Duration; use std::{fmt, io}; -use void::Void; /// The maximum number of circuits being denied concurrently. 
/// @@ -106,7 +106,7 @@ pub struct Handler { >, >, - pending_streams: VecDeque>>>, + pending_streams: VecDeque>>>, inflight_reserve_requests: futures_bounded::FuturesTupleSet< Result, @@ -447,7 +447,7 @@ impl ConnectionHandler for Handler { } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - ConnectionEvent::ListenUpgradeError(ev) => void::unreachable(ev.error), + ConnectionEvent::ListenUpgradeError(ev) => libp2p_core::util::unreachable(ev.error), ConnectionEvent::DialUpgradeError(ev) => { if let Some(next) = self.pending_streams.pop_front() { let _ = next.send(Err(ev.error)); @@ -580,27 +580,27 @@ impl Reservation { } } -fn into_reserve_error(e: StreamUpgradeError) -> outbound_hop::ReserveError { +fn into_reserve_error(e: StreamUpgradeError) -> outbound_hop::ReserveError { match e { StreamUpgradeError::Timeout => { outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::Apply(never) => libp2p_core::util::unreachable(never), StreamUpgradeError::NegotiationFailed => outbound_hop::ReserveError::Unsupported, StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), } } -fn into_connect_error(e: StreamUpgradeError) -> outbound_hop::ConnectError { +fn into_connect_error(e: StreamUpgradeError) -> outbound_hop::ConnectError { match e { StreamUpgradeError::Timeout => { outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::Apply(never) => libp2p_core::util::unreachable(never), StreamUpgradeError::NegotiationFailed => outbound_hop::ConnectError::Unsupported, StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), } diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index 
78a6a1a0a4c..5aa70688dbe 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -26,7 +26,6 @@ quick-protobuf-codec = { workspace = true } rand = "0.8" thiserror = "1" tracing = { workspace = true } -void = "1" [dev-dependencies] libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index c6b2eda348b..8376f3ce795 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -23,7 +23,6 @@ serde = { version = "1.0", optional = true} serde_json = { version = "1.0.117", optional = true } smallvec = "1.13.2" tracing = { workspace = true } -void = "1.0.2" futures-timer = "3.0.3" futures-bounded = { workspace = true } diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index 0591b37dc30..dbd7a0708ce 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -242,7 +242,7 @@ where } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - StreamUpgradeError::Apply(e) => void::unreachable(e), + StreamUpgradeError::Apply(e) => libp2p_core::util::unreachable(e), StreamUpgradeError::Io(e) => { self.pending_events.push_back(Event::OutboundStreamFailed { request_id: message.request_id, @@ -260,7 +260,7 @@ where ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(error) + libp2p_core::util::unreachable(error) } } diff --git a/protocols/request-response/src/handler/protocol.rs b/protocols/request-response/src/handler/protocol.rs index 833cacdd6ce..a55faec2f16 100644 --- a/protocols/request-response/src/handler/protocol.rs +++ b/protocols/request-response/src/handler/protocol.rs @@ -23,6 +23,8 @@ //! receives a request and sends a response, whereas the //! outbound upgrade send a request and receives a response. 
+use std::convert::Infallible; + use futures::future::{ready, Ready}; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_swarm::Stream; @@ -82,7 +84,7 @@ where P: AsRef + Clone, { type Output = (Stream, P); - type Error = void::Void; + type Error = Infallible; type Future = Ready>; fn upgrade_inbound(self, io: Stream, protocol: Self::Info) -> Self::Future { @@ -95,7 +97,7 @@ where P: AsRef + Clone, { type Output = (Stream, P); - type Error = void::Void; + type Error = Infallible; type Future = Ready>; fn upgrade_outbound(self, io: Stream, protocol: Self::Info) -> Self::Future { diff --git a/protocols/stream/Cargo.toml b/protocols/stream/Cargo.toml index 9aa9559a2d6..cd83c5978fa 100644 --- a/protocols/stream/Cargo.toml +++ b/protocols/stream/Cargo.toml @@ -15,7 +15,6 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } libp2p-swarm = { workspace = true } tracing = { workspace = true } -void = "1" rand = "0.8" [dev-dependencies] diff --git a/protocols/stream/src/behaviour.rs b/protocols/stream/src/behaviour.rs index 07549ccef54..e72af8fbfce 100644 --- a/protocols/stream/src/behaviour.rs +++ b/protocols/stream/src/behaviour.rs @@ -124,7 +124,7 @@ impl NetworkBehaviour for Behaviour { _connection_id: ConnectionId, event: THandlerOutEvent, ) { - void::unreachable(event); + libp2p_core::util::unreachable(event); } fn poll( diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs index bf80e30c3c6..b7ec516d3b1 100644 --- a/protocols/stream/src/handler.rs +++ b/protocols/stream/src/handler.rs @@ -1,4 +1,5 @@ use std::{ + convert::Infallible, io, sync::{Arc, Mutex}, task::{Context, Poll}, @@ -44,8 +45,8 @@ impl Handler { } impl ConnectionHandler for Handler { - type FromBehaviour = void::Void; - type ToBehaviour = void::Void; + type FromBehaviour = Infallible; + type ToBehaviour = Infallible; type InboundProtocol = Upgrade; type OutboundProtocol = Upgrade; type 
InboundOpenInfo = (); @@ -98,7 +99,7 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn on_connection_event( @@ -147,7 +148,7 @@ impl ConnectionHandler for Handler { } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - swarm::StreamUpgradeError::Apply(v) => void::unreachable(v), + swarm::StreamUpgradeError::Apply(v) => libp2p_core::util::unreachable(v), swarm::StreamUpgradeError::NegotiationFailed => { OpenStreamError::UnsupportedProtocol(p) } diff --git a/protocols/stream/src/upgrade.rs b/protocols/stream/src/upgrade.rs index ac9fb3ed992..bbe679f4a2c 100644 --- a/protocols/stream/src/upgrade.rs +++ b/protocols/stream/src/upgrade.rs @@ -1,4 +1,7 @@ -use std::future::{ready, Ready}; +use std::{ + convert::Infallible, + future::{ready, Ready}, +}; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_swarm::{Stream, StreamProtocol}; @@ -20,7 +23,7 @@ impl UpgradeInfo for Upgrade { impl InboundUpgrade for Upgrade { type Output = (Stream, StreamProtocol); - type Error = void::Void; + type Error = Infallible; type Future = Ready>; @@ -32,7 +35,7 @@ impl InboundUpgrade for Upgrade { impl OutboundUpgrade for Upgrade { type Output = (Stream, StreamProtocol); - type Error = void::Void; + type Error = Infallible; type Future = Ready>; diff --git a/protocols/upnp/Cargo.toml b/protocols/upnp/Cargo.toml index 209733f53e6..a069331b1ed 100644 --- a/protocols/upnp/Cargo.toml +++ b/protocols/upnp/Cargo.toml @@ -18,7 +18,6 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } tokio = { workspace = true, default-features = false, features = ["rt"], optional = true } tracing = { workspace = true } -void = "1.0.2" [features] tokio = ["igd-next/aio_tokio", "dep:tokio"] diff --git a/protocols/upnp/src/behaviour.rs 
b/protocols/upnp/src/behaviour.rs index 29a7fbf84a4..ee985042b68 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -366,7 +366,7 @@ impl NetworkBehaviour for Behaviour { _connection_id: ConnectionId, event: libp2p_swarm::THandlerOutEvent, ) { - void::unreachable(event) + libp2p_core::util::unreachable(event) } #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index cdee67f3fb3..4c3b8821ed6 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -26,7 +26,6 @@ once_cell = "1.19.0" rand = "0.8" smallvec = "1.13.2" tracing = { workspace = true } -void = "1" wasm-bindgen-futures = { version = "0.4.42", optional = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] @@ -53,7 +52,6 @@ libp2p-swarm-test = { path = "../swarm-test" } # Using `pat libp2p-yamux = { path = "../muxers/yamux" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. 
quickcheck = { workspace = true } criterion = { version = "0.5", features = ["async_tokio"] } -void = "1" once_cell = "1.19.0" trybuild = "1.0.95" tokio = { workspace = true, features = ["time", "rt", "macros", "rt-multi-thread"] } diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 5d72534c91e..3dde364bf19 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -212,7 +212,7 @@ where future::Either::Left(out) => out, // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - future::Either::Right(v) => void::unreachable(v), + future::Either::Right(v) => libp2p_core::util::unreachable(v), }; if let Either::Left(info) = info { @@ -255,7 +255,7 @@ where Either::Left(e) => e, // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Right(v) => void::unreachable(v), + Either::Right(v) => libp2p_core::util::unreachable(v), }; inner.on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 859d138b83a..78c007fd71d 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -775,10 +775,10 @@ mod tests { use libp2p_core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_core::StreamMuxer; use quickcheck::*; + use std::convert::Infallible; use std::sync::{Arc, Weak}; use std::time::Instant; use tracing_subscriber::EnvFilter; - use void::Void; #[test] fn max_negotiating_inbound_streams() { @@ -1016,7 +1016,7 @@ mod tests { impl StreamMuxer for DummyStreamMuxer { type Substream = PendingSubstream; - type Error = Void; + type Error = Infallible; fn poll_inbound( self: Pin<&mut Self>, @@ -1051,7 +1051,7 @@ mod tests { impl StreamMuxer for PendingStreamMuxer { type Substream = PendingSubstream; - type Error = Void; + type Error = Infallible; fn poll_inbound( self: Pin<&mut Self>, @@ -1113,7 +1113,7 @@ mod tests { struct MockConnectionHandler { 
outbound_requested: bool, - error: Option>, + error: Option>, upgrade_timeout: Duration, } @@ -1133,7 +1133,7 @@ mod tests { #[derive(Default)] struct ConfigurableProtocolConnectionHandler { - events: Vec>, + events: Vec>, active_protocols: HashSet, local_added: Vec>, local_removed: Vec>, @@ -1166,8 +1166,8 @@ mod tests { } impl ConnectionHandler for MockConnectionHandler { - type FromBehaviour = Void; - type ToBehaviour = Void; + type FromBehaviour = Infallible; + type ToBehaviour = Infallible; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -1194,13 +1194,13 @@ mod tests { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. - }) => void::unreachable(protocol), + }) => libp2p_core::util::unreachable(protocol), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, .. - }) => void::unreachable(protocol), + }) => libp2p_core::util::unreachable(protocol), ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. 
}) => { self.error = Some(error) } @@ -1216,7 +1216,7 @@ mod tests { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn connection_keep_alive(&self) -> bool { @@ -1246,8 +1246,8 @@ mod tests { } impl ConnectionHandler for ConfigurableProtocolConnectionHandler { - type FromBehaviour = Void; - type ToBehaviour = Void; + type FromBehaviour = Infallible; + type ToBehaviour = Infallible; type InboundProtocol = ManyProtocolsUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -1293,7 +1293,7 @@ mod tests { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn connection_keep_alive(&self) -> bool { @@ -1333,7 +1333,7 @@ mod tests { impl InboundUpgrade for ManyProtocolsUpgrade { type Output = C; - type Error = Void; + type Error = Infallible; type Future = future::Ready>; fn upgrade_inbound(self, stream: C, _: Self::Info) -> Self::Future { @@ -1343,7 +1343,7 @@ mod tests { impl OutboundUpgrade for ManyProtocolsUpgrade { type Output = C; - type Error = Void; + type Error = Infallible; type Future = future::Ready>; fn upgrade_outbound(self, stream: C, _: Self::Info) -> Self::Future { diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 07f6968dec9..b2accf745ef 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -40,6 +40,7 @@ use futures::{ use libp2p_core::connection::Endpoint; use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; use libp2p_core::transport::PortUse; +use std::convert::Infallible; use std::task::Waker; use std::{ collections::HashMap, @@ -50,7 +51,6 @@ use std::{ task::Poll, }; use tracing::Instrument; -use void::Void; use web_time::{Duration, Instant}; mod concurrent_dial; 
@@ -200,7 +200,7 @@ struct PendingConnection { peer_id: Option, endpoint: PendingPoint, /// When dropped, notifies the task which then knows to terminate. - abort_notifier: Option>, + abort_notifier: Option>, /// The moment we became aware of this possible connection, useful for timing metrics. accepted_at: Instant, } diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index 13977a17b85..3b808a30fd1 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -36,8 +36,8 @@ use futures::{ SinkExt, StreamExt, }; use libp2p_core::muxing::StreamMuxerBox; +use std::convert::Infallible; use std::pin::Pin; -use void::Void; /// Commands that can be sent to a task driving an established connection. #[derive(Debug)] @@ -93,7 +93,7 @@ pub(crate) enum EstablishedConnectionEvent { pub(crate) async fn new_for_pending_outgoing_connection( connection_id: ConnectionId, dial: ConcurrentDial, - abort_receiver: oneshot::Receiver, + abort_receiver: oneshot::Receiver, mut events: mpsc::Sender, ) { match futures::future::select(abort_receiver, Box::pin(dial)).await { @@ -107,7 +107,7 @@ pub(crate) async fn new_for_pending_outgoing_connection( } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - Either::Left((Ok(v), _)) => void::unreachable(v), + Either::Left((Ok(v), _)) => libp2p_core::util::unreachable(v), Either::Right((Ok((address, output, errors)), _)) => { let _ = events .send(PendingConnectionEvent::ConnectionEstablished { @@ -131,7 +131,7 @@ pub(crate) async fn new_for_pending_outgoing_connection( pub(crate) async fn new_for_pending_incoming_connection( connection_id: ConnectionId, future: TFut, - abort_receiver: oneshot::Receiver, + abort_receiver: oneshot::Receiver, mut events: mpsc::Sender, ) where TFut: Future> + Send + 'static, @@ -147,7 +147,7 @@ pub(crate) async fn new_for_pending_incoming_connection( } // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - 
Either::Left((Ok(v), _)) => void::unreachable(v), + Either::Left((Ok(v), _)) => libp2p_core::util::unreachable(v), Either::Right((Ok(output), _)) => { let _ = events .send(PendingConnectionEvent::ConnectionEstablished { diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index 0bd8c06862d..b87ef32c8f7 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -12,15 +12,15 @@ use libp2p_core::upgrade::DeniedUpgrade; use libp2p_core::Endpoint; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; +use std::convert::Infallible; use std::task::{Context, Poll}; -use void::Void; /// Implementation of [`NetworkBehaviour`] that doesn't do anything. pub struct Behaviour; impl NetworkBehaviour for Behaviour { type ConnectionHandler = ConnectionHandler; - type ToSwarm = Void; + type ToSwarm = Infallible; fn handle_established_inbound_connection( &mut self, @@ -51,7 +51,7 @@ impl NetworkBehaviour for Behaviour { ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { @@ -66,12 +66,12 @@ impl NetworkBehaviour for Behaviour { pub struct ConnectionHandler; impl crate::handler::ConnectionHandler for ConnectionHandler { - type FromBehaviour = Void; - type ToBehaviour = Void; + type FromBehaviour = Infallible; + type ToBehaviour = Infallible; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); - type OutboundOpenInfo = Void; + type OutboundOpenInfo = Infallible; fn listen_protocol(&self) -> SubstreamProtocol { SubstreamProtocol::new(DeniedUpgrade, ()) @@ -80,7 +80,7 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn poll( @@ -106,19 +106,19 @@ impl 
crate::handler::ConnectionHandler for ConnectionHandler { #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. - }) => void::unreachable(protocol), + }) => libp2p_core::util::unreachable(protocol), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, .. - }) => void::unreachable(protocol), + }) => libp2p_core::util::unreachable(protocol), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::DialUpgradeError(DialUpgradeError { info: _, error }) => match error { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] StreamUpgradeError::Timeout => unreachable!(), - StreamUpgradeError::Apply(e) => void::unreachable(e), + StreamUpgradeError::Apply(e) => libp2p_core::util::unreachable(e), StreamUpgradeError::NegotiationFailed | StreamUpgradeError::Io(_) => { unreachable!("Denied upgrade does not support any protocols") } diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index fc1074b31e4..7c84f4bb11a 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -217,11 +217,11 @@ mod tests { use futures::executor::block_on; use futures::future::poll_fn; use libp2p_core::upgrade::DeniedUpgrade; - use void::Void; + use std::convert::Infallible; #[test] fn do_not_keep_idle_connection_alive() { - let mut handler: OneShotHandler<_, DeniedUpgrade, Void> = OneShotHandler::new( + let mut handler: OneShotHandler<_, DeniedUpgrade, Infallible> = OneShotHandler::new( SubstreamProtocol::new(DeniedUpgrade {}, ()), Default::default(), ); diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index 9601f5cf78b..656a38849d5 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -24,8 +24,8 @@ use crate::handler::{ FullyNegotiatedOutbound, SubstreamProtocol, }; use libp2p_core::upgrade::PendingUpgrade; +use 
std::convert::Infallible; use std::task::{Context, Poll}; -use void::Void; /// Implementation of [`ConnectionHandler`] that returns a pending upgrade. #[derive(Clone, Debug)] @@ -40,11 +40,11 @@ impl PendingConnectionHandler { } impl ConnectionHandler for PendingConnectionHandler { - type FromBehaviour = Void; - type ToBehaviour = Void; + type FromBehaviour = Infallible; + type ToBehaviour = Infallible; type InboundProtocol = PendingUpgrade; type OutboundProtocol = PendingUpgrade; - type OutboundOpenInfo = Void; + type OutboundOpenInfo = Infallible; type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -54,7 +54,7 @@ impl ConnectionHandler for PendingConnectionHandler { fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(v) + libp2p_core::util::unreachable(v) } fn poll( @@ -80,17 +80,17 @@ impl ConnectionHandler for PendingConnectionHandler { #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, .. 
- }) => void::unreachable(protocol), + }) => libp2p_core::util::unreachable(protocol), // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol, info: _info, }) => { - void::unreachable(protocol); + libp2p_core::util::unreachable(protocol); #[allow(unreachable_code, clippy::used_underscore_binding)] { - void::unreachable(_info); + libp2p_core::util::unreachable(_info); } } // TODO: remove when Rust 1.82 is MSRV diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs index 4d530f47684..1d1a25eb84b 100644 --- a/swarm/tests/connection_close.rs +++ b/swarm/tests/connection_close.rs @@ -9,8 +9,8 @@ use libp2p_swarm::{ THandlerOutEvent, ToSwarm, }; use libp2p_swarm_test::SwarmExt; +use std::convert::Infallible; use std::task::{Context, Poll}; -use void::Void; #[async_std::test] async fn sends_remaining_events_to_behaviour_on_connection_close() { @@ -96,7 +96,7 @@ impl NetworkBehaviour for Behaviour { } impl ConnectionHandler for HandlerWithState { - type FromBehaviour = Void; + type FromBehaviour = Infallible; type ToBehaviour = u64; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; @@ -132,7 +132,7 @@ impl ConnectionHandler for HandlerWithState { } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { - void::unreachable(event) + libp2p_core::util::unreachable(event) } fn on_connection_event( diff --git a/swarm/tests/listener.rs b/swarm/tests/listener.rs index 160b1f5b064..74b23cf3f7f 100644 --- a/swarm/tests/listener.rs +++ b/swarm/tests/listener.rs @@ -1,5 +1,6 @@ use std::{ collections::{HashSet, VecDeque}, + convert::Infallible, task::{Context, Poll}, }; @@ -79,7 +80,7 @@ impl Behaviour { impl NetworkBehaviour for Behaviour { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = void::Void; + type ToSwarm = Infallible; fn handle_established_inbound_connection( &mut self, diff --git 
a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index 667f68408cf..334d1b9d304 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -386,7 +386,7 @@ fn with_generics_constrained() { impl NetworkBehaviour for Bar { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = void::Void; + type ToSwarm = std::convert::Infallible; fn handle_established_inbound_connection( &mut self, @@ -548,7 +548,7 @@ fn custom_out_event_no_type_parameters() { impl NetworkBehaviour for TemplatedBehaviour { type ConnectionHandler = dummy::ConnectionHandler; - type ToSwarm = void::Void; + type ToSwarm = std::convert::Infallible; fn handle_established_inbound_connection( &mut self, @@ -579,7 +579,7 @@ fn custom_out_event_no_type_parameters() { ) { // TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] - void::unreachable(message); + libp2p_core::util::unreachable(message); } fn poll( @@ -603,8 +603,8 @@ fn custom_out_event_no_type_parameters() { None, } - impl From for OutEvent { - fn from(_e: void::Void) -> Self { + impl From for OutEvent { + fn from(_e: std::convert::Infallible) -> Self { Self::None } } From 822246112aa4e982b43cecf2aacc1cb107a9ae03 Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Tue, 19 Nov 2024 00:27:09 +0530 Subject: [PATCH 29/50] chore: replace async-std with tokio in autonat tests (#5671) ## Description ref #4449 Refactored `autonat` tests to use `tokio` instead of `async-std`. 
--- Cargo.lock | 1 - protocols/autonat/Cargo.toml | 11 +++++++---- protocols/autonat/tests/test_client.rs | 18 +++++++++--------- protocols/autonat/tests/test_server.rs | 24 ++++++++++++------------ 4 files changed, 28 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ef3a17a4a1..47e8e89b570 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2591,7 +2591,6 @@ dependencies = [ name = "libp2p-autonat" version = "0.13.1" dependencies = [ - "async-std", "async-trait", "asynchronous-codec", "bytes", diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 169006e7508..ced5dbeb4e8 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -4,7 +4,11 @@ edition = "2021" rust-version = { workspace = true } description = "NAT and firewall detection for libp2p" version = "0.13.1" -authors = ["David Craven ", "Elena Frank ", "Hannes Furmans "] +authors = [ + "David Craven ", + "Elena Frank ", + "Hannes Furmans ", +] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -32,12 +36,11 @@ rand_core = { version = "0.6", optional = true } thiserror = { version = "1.0.52", optional = true } [dev-dependencies] -tokio = { version = "1", features = ["macros", "rt", "sync"]} -async-std = { version = "1.10", features = ["attributes"] } +tokio = { workspace = true, features = ["macros", "rt", "sync"] } libp2p-swarm-test = { path = "../../swarm-test" } tracing-subscriber = { version = "0.3", features = ["env-filter"] } libp2p-identify = { workspace = true } -libp2p-swarm = { workspace = true, features = ["macros"]} +libp2p-swarm = { workspace = true, features = ["macros"] } [features] default = ["v1", "v2"] diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index 7509d3ef425..f5c18e3f34e 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -18,7 +18,6 @@ // FROM, OUT OF 
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use async_std::task::JoinHandle; use libp2p_autonat::{ Behaviour, Config, Event, NatStatus, OutboundProbeError, OutboundProbeEvent, ResponseError, }; @@ -27,12 +26,13 @@ use libp2p_identity::PeerId; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tokio::task::JoinHandle; const MAX_CONFIDENCE: usize = 3; const TEST_RETRY_INTERVAL: Duration = Duration::from_secs(1); const TEST_REFRESH_INTERVAL: Duration = Duration::from_secs(2); -#[async_std::test] +#[tokio::test] async fn test_auto_probe() { let mut client = Swarm::new_ephemeral(|key| { Behaviour::new( @@ -133,7 +133,7 @@ async fn test_auto_probe() { assert!(client.behaviour().public_address().is_some()); } -#[async_std::test] +#[tokio::test] async fn test_confidence() { let mut client = Swarm::new_ephemeral(|key| { Behaviour::new( @@ -217,7 +217,7 @@ async fn test_confidence() { } } -#[async_std::test] +#[tokio::test] async fn test_throttle_server_period() { let mut client = Swarm::new_ephemeral(|key| { Behaviour::new( @@ -268,7 +268,7 @@ async fn test_throttle_server_period() { assert_eq!(client.behaviour().confidence(), 0); } -#[async_std::test] +#[tokio::test] async fn test_use_connected_as_server() { let mut client = Swarm::new_ephemeral(|key| { Behaviour::new( @@ -306,7 +306,7 @@ async fn test_use_connected_as_server() { } } -#[async_std::test] +#[tokio::test] async fn test_outbound_failure() { let mut client = Swarm::new_ephemeral(|key| { Behaviour::new( @@ -351,7 +351,7 @@ async fn test_outbound_failure() { let mut inactive_servers = Vec::new(); for (id, handle) in servers.split_off(1) { - handle.cancel().await; + handle.abort(); inactive_servers.push(id); } @@ -375,7 +375,7 @@ async fn test_outbound_failure() { } } -#[async_std::test] +#[tokio::test] async fn test_global_ips_config() { let mut client = Swarm::new_ephemeral(|key| { Behaviour::new( @@ -426,7 +426,7 @@ 
async fn new_server_swarm() -> (PeerId, Multiaddr, JoinHandle<()>) { let (_, multiaddr) = swarm.listen().await; let peer_id = *swarm.local_peer_id(); - let task = async_std::task::spawn(swarm.loop_on_next()); + let task = tokio::spawn(swarm.loop_on_next()); (peer_id, multiaddr, task) } diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index fd97b1a9132..d43d14198d4 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -28,12 +28,12 @@ use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::{num::NonZeroU32, time::Duration}; -#[async_std::test] +#[tokio::test] async fn test_dial_back() { let (mut server, server_id, server_addr) = new_server_swarm(None).await; let (mut client, client_id) = new_client_swarm(server_id, server_addr).await; let (_, client_addr) = client.listen().await; - async_std::task::spawn(client.loop_on_next()); + tokio::spawn(client.loop_on_next()); let client_port = client_addr .into_iter() @@ -128,14 +128,14 @@ async fn test_dial_back() { } } -#[async_std::test] +#[tokio::test] async fn test_dial_error() { let (mut server, server_id, server_addr) = new_server_swarm(None).await; let (mut client, client_id) = new_client_swarm(server_id, server_addr).await; client .behaviour_mut() .probe_address("/ip4/127.0.0.1/tcp/12345".parse().unwrap()); - async_std::task::spawn(client.loop_on_next()); + tokio::spawn(client.loop_on_next()); let request_probe_id = match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Request { peer, probe_id, .. 
}) => { @@ -178,7 +178,7 @@ async fn test_dial_error() { } } -#[async_std::test] +#[tokio::test] async fn test_throttle_global_max() { let (mut server, server_id, server_addr) = new_server_swarm(Some(Config { throttle_clients_global_max: 1, @@ -190,7 +190,7 @@ async fn test_throttle_global_max() { for _ in 0..2 { let (mut client, _) = new_client_swarm(server_id, server_addr.clone()).await; client.listen().await; - async_std::task::spawn(client.loop_on_next()); + tokio::spawn(client.loop_on_next()); } let (first_probe_id, first_peer_id) = match server.next_behaviour_event().await { @@ -218,7 +218,7 @@ async fn test_throttle_global_max() { } } -#[async_std::test] +#[tokio::test] async fn test_throttle_peer_max() { let (mut server, server_id, server_addr) = new_server_swarm(Some(Config { throttle_clients_peer_max: 1, @@ -230,7 +230,7 @@ async fn test_throttle_peer_max() { let (mut client, client_id) = new_client_swarm(server_id, server_addr.clone()).await; client.listen().await; - async_std::task::spawn(client.loop_on_next()); + tokio::spawn(client.loop_on_next()); let first_probe_id = match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Request { peer, probe_id, .. 
}) => { @@ -265,7 +265,7 @@ async fn test_throttle_peer_max() { }; } -#[async_std::test] +#[tokio::test] async fn test_dial_multiple_addr() { let (mut server, server_id, server_addr) = new_server_swarm(Some(Config { throttle_clients_peer_max: 1, @@ -280,7 +280,7 @@ async fn test_dial_multiple_addr() { client .behaviour_mut() .probe_address("/ip4/127.0.0.1/tcp/12345".parse().unwrap()); - async_std::task::spawn(client.loop_on_next()); + tokio::spawn(client.loop_on_next()); let dial_addresses = match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Request { @@ -327,7 +327,7 @@ async fn test_dial_multiple_addr() { } } -#[async_std::test] +#[tokio::test] async fn test_global_ips_config() { let (mut server, server_id, server_addr) = new_server_swarm(Some(Config { // Enforce that only clients outside of the local network are qualified for dial-backs. @@ -338,7 +338,7 @@ async fn test_global_ips_config() { let (mut client, _) = new_client_swarm(server_id, server_addr.clone()).await; client.listen().await; - async_std::task::spawn(client.loop_on_next()); + tokio::spawn(client.loop_on_next()); // Expect the probe to be refused as both peers run on the same machine and thus in the same local network. match server.next_behaviour_event().await { From 0e9dcdd07b121b911c3088b0cadc106e790b41f6 Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Wed, 20 Nov 2024 21:18:39 +0700 Subject: [PATCH 30/50] chore: bump crate versions and update changelogs for #5676 (#5678) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description This PR bumps crate versions and add changelog entries for crates that are changed in #5676 Question: When should a crate version bump in the current release process? Should it be right before or right after publishing? I see most of current crate versions are published while some are not (e.g. libp2p-autonat@0.13.1 libp2p-gossisub@0.48.0 and libp2p-perf@0.4.0 etc.) 
## Notes & open questions ## Change checklist - [x] I have performed a self-review of my own code - [ ] I have made corresponding changes to the documentation - [ ] I have added tests that prove my fix is effective or that my feature works - [x] A changelog entry has been made in the appropriate crates --------- Co-authored-by: João Oliveira Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 24 +++++++++++----------- Cargo.toml | 24 +++++++++++----------- core/CHANGELOG.md | 5 +++++ core/Cargo.toml | 2 +- identity/CHANGELOG.md | 5 +++++ identity/Cargo.toml | 2 +- libp2p/CHANGELOG.md | 5 +++++ libp2p/Cargo.toml | 2 +- misc/allow-block-list/CHANGELOG.md | 5 +++++ misc/allow-block-list/Cargo.toml | 2 +- misc/connection-limits/CHANGELOG.md | 5 +++++ misc/connection-limits/Cargo.toml | 2 +- misc/memory-connection-limits/CHANGELOG.md | 5 +++++ misc/memory-connection-limits/Cargo.toml | 2 +- protocols/autonat/CHANGELOG.md | 4 ++++ protocols/dcutr/CHANGELOG.md | 5 +++++ protocols/dcutr/Cargo.toml | 2 +- protocols/gossipsub/CHANGELOG.md | 3 +++ protocols/perf/CHANGELOG.md | 3 +++ protocols/ping/CHANGELOG.md | 5 +++++ protocols/ping/Cargo.toml | 2 +- protocols/relay/CHANGELOG.md | 5 +++++ protocols/relay/Cargo.toml | 2 +- protocols/request-response/CHANGELOG.md | 5 +++++ protocols/request-response/Cargo.toml | 2 +- protocols/stream/CHANGELOG.md | 5 +++++ protocols/stream/Cargo.toml | 2 +- swarm/CHANGELOG.md | 3 +++ transports/quic/CHANGELOG.md | 5 +++++ transports/quic/Cargo.toml | 2 +- 30 files changed, 109 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47e8e89b570..993d55a2952 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2525,7 +2525,7 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libp2p" -version = "0.54.1" +version = "0.54.2" dependencies = [ "async-std", "async-trait", @@ -2577,7 +2577,7 @@ dependencies = [ [[package]] name = 
"libp2p-allow-block-list" -version = "0.4.1" +version = "0.4.2" dependencies = [ "async-std", "libp2p-core", @@ -2617,7 +2617,7 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.4.0" +version = "0.4.1" dependencies = [ "async-std", "libp2p-core", @@ -2633,7 +2633,7 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.42.0" +version = "0.42.1" dependencies = [ "async-std", "either", @@ -2663,7 +2663,7 @@ dependencies = [ [[package]] name = "libp2p-dcutr" -version = "0.12.0" +version = "0.12.1" dependencies = [ "async-std", "asynchronous-codec", @@ -2793,7 +2793,7 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" dependencies = [ "asn1_der", "base64 0.22.1", @@ -2880,7 +2880,7 @@ dependencies = [ [[package]] name = "libp2p-memory-connection-limits" -version = "0.3.0" +version = "0.3.1" dependencies = [ "async-std", "libp2p-core", @@ -3007,7 +3007,7 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.45.0" +version = "0.45.1" dependencies = [ "either", "futures", @@ -3065,7 +3065,7 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.11.1" +version = "0.11.2" dependencies = [ "async-std", "bytes", @@ -3094,7 +3094,7 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.18.0" +version = "0.18.1" dependencies = [ "asynchronous-codec", "bytes", @@ -3151,7 +3151,7 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.27.0" +version = "0.27.1" dependencies = [ "anyhow", "async-std", @@ -3199,7 +3199,7 @@ dependencies = [ [[package]] name = "libp2p-stream" -version = "0.2.0-alpha" +version = "0.2.0-alpha.1" dependencies = [ "futures", "libp2p-core", diff --git a/Cargo.toml b/Cargo.toml index aab7f0d71d4..a7f944d22fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,33 +75,33 @@ rust-version = "1.75.0" asynchronous-codec = { version = "0.7.0" } futures-bounded = { version = "0.2.4" } futures-rustls = { version = "0.26.0", 
default-features = false } -libp2p = { version = "0.54.1", path = "libp2p" } -libp2p-allow-block-list = { version = "0.4.1", path = "misc/allow-block-list" } +libp2p = { version = "0.54.2", path = "libp2p" } +libp2p-allow-block-list = { version = "0.4.2", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.13.1", path = "protocols/autonat" } -libp2p-connection-limits = { version = "0.4.0", path = "misc/connection-limits" } -libp2p-core = { version = "0.42.0", path = "core" } -libp2p-dcutr = { version = "0.12.0", path = "protocols/dcutr" } +libp2p-connection-limits = { version = "0.4.1", path = "misc/connection-limits" } +libp2p-core = { version = "0.42.1", path = "core" } +libp2p-dcutr = { version = "0.12.1", path = "protocols/dcutr" } libp2p-dns = { version = "0.42.0", path = "transports/dns" } libp2p-floodsub = { version = "0.45.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.48.0", path = "protocols/gossipsub" } libp2p-identify = { version = "0.46.0", path = "protocols/identify" } -libp2p-identity = { version = "0.2.9" } +libp2p-identity = { version = "0.2.10" } libp2p-kad = { version = "0.47.0", path = "protocols/kad" } libp2p-mdns = { version = "0.46.0", path = "protocols/mdns" } -libp2p-memory-connection-limits = { version = "0.3.0", path = "misc/memory-connection-limits" } +libp2p-memory-connection-limits = { version = "0.3.1", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.15.0", path = "misc/metrics" } libp2p-mplex = { version = "0.42.0", path = "muxers/mplex" } libp2p-noise = { version = "0.45.0", path = "transports/noise" } libp2p-perf = { version = "0.4.0", path = "protocols/perf" } -libp2p-ping = { version = "0.45.0", path = "protocols/ping" } +libp2p-ping = { version = "0.45.1", path = "protocols/ping" } libp2p-plaintext = { version = "0.42.0", path = "transports/plaintext" } libp2p-pnet = { version = "0.25.0", path = "transports/pnet" } -libp2p-quic = { version = "0.11.1", path = 
"transports/quic" } -libp2p-relay = { version = "0.18.0", path = "protocols/relay" } +libp2p-quic = { version = "0.11.2", path = "transports/quic" } +libp2p-relay = { version = "0.18.1", path = "protocols/relay" } libp2p-rendezvous = { version = "0.15.0", path = "protocols/rendezvous" } -libp2p-request-response = { version = "0.27.0", path = "protocols/request-response" } +libp2p-request-response = { version = "0.27.1", path = "protocols/request-response" } libp2p-server = { version = "0.12.8", path = "misc/server" } -libp2p-stream = { version = "0.2.0-alpha", path = "protocols/stream" } +libp2p-stream = { version = "0.2.0-alpha.1", path = "protocols/stream" } libp2p-swarm = { version = "0.45.2", path = "swarm" } libp2p-swarm-derive = { version = "=0.35.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. libp2p-swarm-test = { version = "0.5.0", path = "swarm-test" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 5ed4b4e181d..dbd46a38f07 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.42.1 + +- Added `libp2p::core::util::unreachable` that is a drop-in replacement of `void::unreachable`. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). 
+ ## 0.42.0 - Update `Transport::dial` function signature with a `DialOpts` param and remove `Transport::dial_as_listener`: diff --git a/core/Cargo.toml b/core/Cargo.toml index d8260e14d1f..c257ff25ec4 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = { workspace = true } description = "Core traits and structs of libp2p" -version = "0.42.0" +version = "0.42.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md index 9670a843130..8ee12c8124a 100644 --- a/identity/CHANGELOG.md +++ b/identity/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.10 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.2.9 - Add `rand` feature gate to ecdsa methods requiring a random number generator. diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 370533eed58..d3b07c5dc87 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" edition = "2021" description = "Data structures and algorithms for identifying peers in libp2p." rust-version = "1.73.0" # MUST NOT inherit from workspace because we don't want to publish breaking changes to `libp2p-identity`. diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index 72a624786d4..e383cfd0cdc 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.54.2 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.54.1 - Update individual crates. 
diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index b1017f5958c..83ef86a4ca4 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition = "2021" rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.54.1" +version = "0.54.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/misc/allow-block-list/CHANGELOG.md b/misc/allow-block-list/CHANGELOG.md index 3cda0603ee4..b5ffd7f0495 100644 --- a/misc/allow-block-list/CHANGELOG.md +++ b/misc/allow-block-list/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.4.2 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.4.1 - Add getters & setters for the allowed/blocked peers. diff --git a/misc/allow-block-list/Cargo.toml b/misc/allow-block-list/Cargo.toml index c169be87056..66ee3ef9124 100644 --- a/misc/allow-block-list/Cargo.toml +++ b/misc/allow-block-list/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-allow-block-list" edition = "2021" rust-version = { workspace = true } description = "Allow/block list connection management for libp2p." -version = "0.4.1" +version = "0.4.2" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] diff --git a/misc/connection-limits/CHANGELOG.md b/misc/connection-limits/CHANGELOG.md index db88e99ffa7..f2722b3745a 100644 --- a/misc/connection-limits/CHANGELOG.md +++ b/misc/connection-limits/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.4.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). 
+ ## 0.4.0 diff --git a/misc/connection-limits/Cargo.toml b/misc/connection-limits/Cargo.toml index 0d17cb74862..a0ecfd9da39 100644 --- a/misc/connection-limits/Cargo.toml +++ b/misc/connection-limits/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-connection-limits" edition = "2021" rust-version = { workspace = true } description = "Connection limits for libp2p." -version = "0.4.0" +version = "0.4.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] diff --git a/misc/memory-connection-limits/CHANGELOG.md b/misc/memory-connection-limits/CHANGELOG.md index 9e580c5a1d2..bf198e27c65 100644 --- a/misc/memory-connection-limits/CHANGELOG.md +++ b/misc/memory-connection-limits/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.3.0 diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml index 19ae256e853..f18cb09d193 100644 --- a/misc/memory-connection-limits/Cargo.toml +++ b/misc/memory-connection-limits/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-memory-connection-limits" edition = "2021" rust-version = { workspace = true } description = "Memory usage based connection limits for libp2p." -version = "0.3.0" +version = "0.3.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index f1aeda6ac18..9b2bc4cb2ea 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,6 +1,10 @@ ## 0.13.1 + - Verify that an incoming AutoNAT dial comes from a connected peer. See [PR 5597](https://github.com/libp2p/rust-libp2p/pull/5597). +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). 
+ ## 0.13.0 - Due to the refactor of `Transport` it's no longer required to create a seperate transport for diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index 0ddc4aa1148..80cac37321e 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.12.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.12.0 diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index c470291af0d..7de195e7d54 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dcutr" edition = "2021" rust-version = { workspace = true } description = "Direct connection upgrade through relay" -version = "0.12.0" +version = "0.12.1" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index cdd170c0d4b..7357170ff93 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -3,6 +3,9 @@ - Apply `max_transmit_size` to the inner message instead of the final payload. See [PR 5642](https://github.com/libp2p/rust-libp2p/pull/5642). +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.47.1 - Attempt to publish to at least mesh_n peers when flood publish is disabled. diff --git a/protocols/perf/CHANGELOG.md b/protocols/perf/CHANGELOG.md index abeca9fad25..c5eda88d97d 100644 --- a/protocols/perf/CHANGELOG.md +++ b/protocols/perf/CHANGELOG.md @@ -4,6 +4,9 @@ - Add ConnectionError to FromSwarm::ConnectionClosed. See [PR 5485](https://github.com/libp2p/rust-libp2p/pull/5485). +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.3.1 - Use `web-time` instead of `instant`. See [PR 5347](https://github.com/libp2p/rust-libp2p/pull/5347). 
diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index c0a124333e9..d6e71b2c2d0 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.45.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.45.0 diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 755ebd35718..0fad9678aec 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-ping" edition = "2021" rust-version = { workspace = true } description = "Ping protocol for libp2p" -version = "0.45.0" +version = "0.45.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index fc71ccedad5..8119c24a491 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.18.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.18.0 diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index a3a659619b6..c996a014845 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-relay" edition = "2021" rust-version = { workspace = true } description = "Communications relaying for libp2p" -version = "0.18.0" +version = "0.18.1" authors = ["Parity Technologies ", "Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index db0d9126516..9ed658fc90f 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.27.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). 
+ ## 0.27.0 diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 8376f3ce795..b2e6fd0b0ac 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = { workspace = true } description = "Generic Request/Response Protocols" -version = "0.27.0" +version = "0.27.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/stream/CHANGELOG.md b/protocols/stream/CHANGELOG.md index 2532970d3c6..6034104debd 100644 --- a/protocols/stream/CHANGELOG.md +++ b/protocols/stream/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.0-alpha.1 + +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.2.0-alpha diff --git a/protocols/stream/Cargo.toml b/protocols/stream/Cargo.toml index cd83c5978fa..d9c9276cb12 100644 --- a/protocols/stream/Cargo.toml +++ b/protocols/stream/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-stream" -version = "0.2.0-alpha" +version = "0.2.0-alpha.1" edition = "2021" rust-version.workspace = true description = "Generic stream protocols for libp2p" diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index c5d10872d40..0109a33747c 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -3,6 +3,9 @@ - Don't report `NewExternalAddrCandidate` for confirmed external addresses. See [PR 5582](https://github.com/libp2p/rust-libp2p/pull/5582). +- Deprecate `void` crate. + See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.45.1 - Update `libp2p-swarm-derive` to version `0.35.0`, see [PR 5545] diff --git a/transports/quic/CHANGELOG.md b/transports/quic/CHANGELOG.md index 6fc64c5df36..238cbebe6cf 100644 --- a/transports/quic/CHANGELOG.md +++ b/transports/quic/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.11.2 + +- Deprecate `void` crate. 
+ See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + ## 0.11.1 - Update `libp2p-tls` to version `0.5.0`, see [PR 5547] diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 42cc8e54edb..a33ef4ef0b1 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-quic" -version = "0.11.1" +version = "0.11.2" authors = ["Parity Technologies "] edition = "2021" rust-version = { workspace = true } From 059742f7da0c746fb87c89032efe99803170de22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 21 Nov 2024 10:31:51 +0000 Subject: [PATCH 31/50] chore(ci): add a mergify batch queue for external PRs (#5668) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and remove the if conditions no longer required as we are running them again on every PR --------- Co-authored-by: Guillaume Michel Co-authored-by: João Oliveira --- .github/mergify.yml | 17 +++++++++++++++++ .github/workflows/interop-test.yml | 3 +++ scripts/build-interop-image.sh | 4 ++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 91cd8881237..38f025c7814 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -32,6 +32,17 @@ pull_request_rules: - base=master actions: queue: + name: default + + # Adds the Pr to the batch queue, so that we can run the interop tests. See the `external_prs` queue for more info. 
+ - name: Add to batch merge queue + conditions: + # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection + - label=send-it-batch + - base=master + actions: + queue: + name: external_prs - name: Add approved dependabot PRs to merge queue conditions: @@ -40,6 +51,7 @@ pull_request_rules: - base=master actions: queue: + name: default - name: Remove reviews on updates after PR is queued for merging conditions: @@ -74,3 +86,8 @@ pull_request_rules: queue_rules: - name: default conditions: [] + # External PR's don't have access to secrets and variables, therefore they don't run the interop tests. + # using a batch queue allows to circumvent that as mergify creates it from an internal branch. + - name: external_prs + conditions: [] + batch_size: 1 diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index e9446d013d7..c88579be68a 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -28,6 +28,7 @@ jobs: AWS_BUCKET_NAME: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} AWS_ACCESS_KEY_ID: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }} FLAVOUR: ${{ matrix.flavour }} - name: Run ${{ matrix.flavour }} tests @@ -38,6 +39,7 @@ jobs: s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }} worker-count: 16 run-holepunching-interop: name: Run hole-punch interoperability tests @@ -56,4 +58,5 @@ jobs: s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} 
+ aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }} worker-count: 16 diff --git a/scripts/build-interop-image.sh b/scripts/build-interop-image.sh index 4b96e353f9a..ff6f344976c 100755 --- a/scripts/build-interop-image.sh +++ b/scripts/build-interop-image.sh @@ -6,13 +6,13 @@ CACHE_TO="" # If we have credentials, write to cache if [[ -n "${AWS_SECRET_ACCESS_KEY}" ]]; then - CACHE_TO="--cache-to type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=ap-southeast-2,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" + CACHE_TO="--cache-to type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=${AWS_REGION},prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" fi docker buildx build \ --load \ $CACHE_TO \ - --cache-from type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=ap-southeast-2,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ + --cache-from type=s3,mode=max,bucket=${AWS_BUCKET_NAME},region=${AWS_REGION},prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ -t ${FLAVOUR}-rust-libp2p-head \ . \ -f interop-tests/Dockerfile.${FLAVOUR} From 13b9ea2ab5dde2d5448a69e74bec3ac9236c21d1 Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Thu, 21 Nov 2024 18:16:52 +0530 Subject: [PATCH 32/50] chore: refactor dcutr and gossipsub tests to use tokio instead ref #4449 Refactored dcutr and gossipsub tests to use `tokio` instead of `async-std`. Pull-Request: #5662. 
--- Cargo.lock | 4 ++-- protocols/dcutr/Cargo.toml | 2 +- protocols/dcutr/tests/lib.rs | 6 +++--- protocols/gossipsub/Cargo.toml | 2 +- protocols/gossipsub/src/behaviour/tests.rs | 2 +- protocols/gossipsub/tests/smoke.rs | 11 +++++++---- 6 files changed, 15 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 993d55a2952..57f6df3862f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2665,7 +2665,6 @@ dependencies = [ name = "libp2p-dcutr" version = "0.12.1" dependencies = [ - "async-std", "asynchronous-codec", "clap", "either", @@ -2689,6 +2688,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "thiserror", + "tokio", "tracing", "tracing-subscriber", "web-time 1.1.0", @@ -2736,7 +2736,6 @@ dependencies = [ name = "libp2p-gossipsub" version = "0.48.0" dependencies = [ - "async-std", "asynchronous-codec", "base64 0.22.1", "byteorder", @@ -2763,6 +2762,7 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", + "tokio", "tracing", "tracing-subscriber", "web-time 1.1.0", diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 7de195e7d54..69517181aab 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -27,7 +27,6 @@ lru = "0.12.3" futures-bounded = { workspace = true } [dev-dependencies] -async-std = { version = "1.12.0", features = ["attributes"] } clap = { version = "4.5.6", features = ["derive"] } libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } @@ -41,6 +40,7 @@ libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } rand = "0.8" tracing-subscriber = { workspace = true, features = ["env-filter"] } +tokio = { workspace = true, features = ["rt", "macros"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 084ee744145..36f168fb04a 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -32,7 +32,7 @@ use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; use tracing_subscriber::EnvFilter; -#[async_std::test] +#[tokio::test] async fn connect() { let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) @@ -53,7 +53,7 @@ async fn connect() { let relay_peer_id = *relay.local_peer_id(); let dst_peer_id = *dst.local_peer_id(); - async_std::task::spawn(relay.loop_on_next()); + tokio::spawn(relay.loop_on_next()); let dst_relayed_addr = relay_tcp_addr .with(Protocol::P2p(relay_peer_id)) @@ -68,7 +68,7 @@ async fn connect() { false, // No renewal. ) .await; - async_std::task::spawn(dst.loop_on_next()); + tokio::spawn(dst.loop_on_next()); src.dial_and_wait(dst_relayed_addr.clone()).await; diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 1416cdb8de3..ca6185a85e4 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -41,7 +41,6 @@ tracing = { workspace = true } prometheus-client = { workspace = true } [dev-dependencies] -async-std = { version = "1.6.3", features = ["unstable"] } hex = "0.4.2" libp2p-core = { workspace = true } libp2p-yamux = { workspace = true } @@ -49,6 +48,7 @@ libp2p-noise = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } +tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index a74566a1308..c7afe926a65 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -23,10 +23,10 @@ use super::*; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; -use async_std::net::Ipv4Addr; use byteorder::{BigEndian, ByteOrder}; use libp2p_core::ConnectedPoint; use rand::Rng; +use std::net::Ipv4Addr; use std::thread::sleep; #[derive(Default, Debug)] diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index c8876428b4e..3b6261afa54 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use async_std::prelude::FutureExt; use futures::stream::{FuturesUnordered, SelectAll}; use futures::StreamExt; use libp2p_gossipsub as gossipsub; @@ -28,7 +27,9 @@ use libp2p_swarm_test::SwarmExt as _; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; use std::{task::Poll, time::Duration}; +use tokio::{runtime::Runtime, time}; use tracing_subscriber::EnvFilter; + struct Graph { nodes: SelectAll>, } @@ -84,7 +85,7 @@ impl Graph { } }; - match condition.timeout(Duration::from_secs(10)).await { + match time::timeout(Duration::from_secs(10), condition).await { Ok(()) => true, Err(_) => false, } @@ -98,7 +99,7 @@ impl Graph { Poll::Pending => return Poll::Ready(()), } }); - fut.timeout(Duration::from_secs(10)).await.unwrap(); + time::timeout(Duration::from_secs(10), fut).await.unwrap(); } } @@ -139,7 +140,9 @@ fn multi_hop_propagation() { tracing::debug!(number_of_nodes=%num_nodes, seed=%seed); - async_std::task::block_on(async move { + let rt = Runtime::new().unwrap(); + 
+ rt.block_on(async move { let mut graph = Graph::new_connected(num_nodes as usize, seed).await; let number_nodes = graph.nodes.len(); From c3a21d13f472fedfd459fa15d2bed5941eccfe26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 21 Nov 2024 14:27:19 +0000 Subject: [PATCH 33/50] chore(gossispsub): deprecate futures-ticker to address [RUSTSEC-2024-0384 ](https://rustsec.org/advisories/RUSTSEC-2024-0384.html). Use `futures-timer` and `Delay` instead Pull-Request: #5674. --- Cargo.lock | 41 ++++++++++----------------- protocols/gossipsub/CHANGELOG.md | 2 ++ protocols/gossipsub/Cargo.toml | 4 +-- protocols/gossipsub/src/behaviour.rs | 25 ++++++++-------- protocols/gossipsub/src/peer_score.rs | 5 ++-- 5 files changed, 34 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57f6df3862f..ec986f43d57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1629,9 +1629,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1639,9 +1639,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" @@ -1657,9 +1657,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] 
name = "futures-lite" @@ -1688,9 +1688,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1710,26 +1710,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" - -[[package]] -name = "futures-ticker" -version = "0.0.3" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" -dependencies = [ - "futures", - "futures-timer", - "instant", -] +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1743,9 +1732,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2743,7 +2732,7 @@ dependencies = [ "either", "fnv", "futures", - "futures-ticker", + "futures-timer", "getrandom 0.2.15", "hex", "hex_fmt", diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 7357170ff93..7bf021c761e 100644 --- 
a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,5 +1,7 @@ ## 0.48.0 +- Deprecate `futures-ticker` and use `futures-timer` instead. + See [PR 5674](https://github.com/libp2p/rust-libp2p/pull/5674). - Apply `max_transmit_size` to the inner message instead of the final payload. See [PR 5642](https://github.com/libp2p/rust-libp2p/pull/5642). diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index ca6185a85e4..1d58fc98896 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -11,7 +11,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -wasm-bindgen = ["getrandom/js"] +wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"] [dependencies] asynchronous-codec = { workspace = true } @@ -21,7 +21,6 @@ bytes = "1.6" either = "1.11" fnv = "1.0.7" futures = { workspace = true } -futures-ticker = "0.0.3" getrandom = "0.2.15" hex_fmt = "0.3.0" web-time = { workspace = true } @@ -39,6 +38,7 @@ tracing = { workspace = true } # Metrics dependencies prometheus-client = { workspace = true } +futures-timer = "3.0.3" [dev-dependencies] hex = "0.4.2" diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index bf94a5b7920..d0fd3127f72 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -29,8 +29,8 @@ use std::{ time::Duration, }; -use futures::StreamExt; -use futures_ticker::Ticker; +use futures::FutureExt; +use futures_timer::Delay; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; @@ -283,7 +283,7 @@ pub struct Behaviour { mcache: MessageCache, /// Heartbeat interval stream. - heartbeat: Ticker, + heartbeat: Delay, /// Number of heartbeats since the beginning of time; this allows us to amortize some resource /// clean up -- eg backoff clean up. 
@@ -301,7 +301,7 @@ pub struct Behaviour { /// Stores optional peer score data together with thresholds, decay interval and gossip /// promises. - peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + peer_score: Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>, /// Counts the number of `IHAVE` received from each peer since the last heartbeat. count_received_ihave: HashMap, @@ -448,10 +448,7 @@ where config.backoff_slack(), ), mcache: MessageCache::new(config.history_gossip(), config.history_length()), - heartbeat: Ticker::new_with_next( - config.heartbeat_interval(), - config.heartbeat_initial_delay(), - ), + heartbeat: Delay::new(config.heartbeat_interval() + config.heartbeat_initial_delay()), heartbeat_ticks: 0, px_peers: HashSet::new(), outbound_peers: HashSet::new(), @@ -879,7 +876,7 @@ where return Err("Peer score set twice".into()); } - let interval = Ticker::new(params.decay_interval); + let interval = Delay::new(params.decay_interval); let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default())); Ok(()) @@ -1145,7 +1142,7 @@ where } fn score_below_threshold_from_scores( - peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>, peer_id: &PeerId, threshold: impl Fn(&PeerScoreThresholds) -> f64, ) -> (bool, f64) { @@ -3105,14 +3102,16 @@ where } // update scores - if let Some((peer_score, _, interval, _)) = &mut self.peer_score { - while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) { + if let Some((peer_score, _, delay, _)) = &mut self.peer_score { + if delay.poll_unpin(cx).is_ready() { peer_score.refresh_scores(); + delay.reset(peer_score.params.decay_interval); } } - while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) { + if self.heartbeat.poll_unpin(cx).is_ready() { 
self.heartbeat(); + self.heartbeat.reset(self.config.heartbeat_interval()); } Poll::Pending diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index ac24fc91970..4df8f162ed9 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -44,14 +44,15 @@ mod tests; const TIME_CACHE_DURATION: u64 = 120; pub(crate) struct PeerScore { - params: PeerScoreParams, /// The score parameters. + pub(crate) params: PeerScoreParams, + /// The stats per PeerId. peer_stats: HashMap, /// Tracking peers per IP. peer_ips: HashMap>, /// Message delivery tracking. This is a time-cache of [`DeliveryRecord`]s. deliveries: TimeCache, - /// callback for monitoring message delivery times + /// Callback for monitoring message delivery times. message_delivery_time_callback: Option, } From 00588a543a135f71ee7053b5315fdbb7c6e492f4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 23 Nov 2024 00:24:17 +1100 Subject: [PATCH 34/50] chore: update FUNDING.json I've added addresses to receive Drips donations. This is required for FIL-RetroPGF-2 in the coming days, we should get this into `master` ASAP. This is a dedicated address controlled by Sigma Prime (including @AgeManning and myself). Pull-Request: #5685. 
--- FUNDING.json | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/FUNDING.json b/FUNDING.json index cce3fb3fe4c..d707bcb2f8c 100644 --- a/FUNDING.json +++ b/FUNDING.json @@ -1,5 +1,13 @@ { - "opRetro": { - "projectId": "0xdf1bb03d08808e2d789f5eac8462bdc560f1bb5b0877f0cf8c66ab53a0bc2f5c" - } + "drips": { + "ethereum": { + "ownedBy": "0x79c49637182Ea32734f7e8445a3649c22ff348f2" + }, + "filecoin": { + "ownedBy": "0x79c49637182Ea32734f7e8445a3649c22ff348f2" + } + }, + "opRetro": { + "projectId": "0xdf1bb03d08808e2d789f5eac8462bdc560f1bb5b0877f0cf8c66ab53a0bc2f5c" + } } From 237192287fd9785918ee9b8ca1ad7f713884d00f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 25 Nov 2024 13:53:57 +0000 Subject: [PATCH 35/50] feat(gossipsub): introduce backpressure superseeds #4914 with some changes and improvements, namely: - introduce a `Delay` for `Forward` and `Publish` messages, messages that take more than the configured delay to be sent are discarded - introduce scoring and penalize slow peers - remove control pool - report slow peers with the number of failed messages Pull-Request: #5595. 
--- Cargo.lock | 76 +- protocols/gossipsub/CHANGELOG.md | 7 + protocols/gossipsub/Cargo.toml | 5 +- protocols/gossipsub/src/behaviour.rs | 647 +++--- protocols/gossipsub/src/behaviour/tests.rs | 1914 ++++++++++++------ protocols/gossipsub/src/config.rs | 43 + protocols/gossipsub/src/error.rs | 3 + protocols/gossipsub/src/handler.rs | 54 +- protocols/gossipsub/src/lib.rs | 3 +- protocols/gossipsub/src/metrics.rs | 76 + protocols/gossipsub/src/peer_score.rs | 27 +- protocols/gossipsub/src/peer_score/params.rs | 10 + protocols/gossipsub/src/protocol.rs | 53 +- protocols/gossipsub/src/rpc.rs | 192 ++ protocols/gossipsub/src/types.rs | 149 +- 15 files changed, 2201 insertions(+), 1058 deletions(-) create mode 100644 protocols/gossipsub/src/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index ec986f43d57..e24db6e69d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -268,6 +268,18 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy 0.5.2", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-executor" version = "1.5.1" @@ -300,7 +312,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-executor", "async-io 1.13.0", "async-lock 2.7.0", @@ -364,7 +376,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "deb2ab2aa8a746e221ab826c73f48bc6ba41be6763f0855cb249eb6d154cf1d7" dependencies = [ "event-listener 3.1.0", - "event-listener-strategy", + "event-listener-strategy 0.3.0", "pin-project-lite", ] @@ -405,7 +417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ "async-attributes", - "async-channel", + "async-channel 1.9.0", "async-global-executor", "async-io 1.13.0", "async-lock 2.7.0", @@ -720,7 +732,7 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-lock 2.7.0", "async-task", "atomic-waker", @@ -982,9 +994,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -1518,6 +1530,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + [[package]] name = "event-listener-strategy" version = "0.3.0" @@ -1528,6 +1551,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "1.9.0" @@ -1629,9 +1662,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = 
"eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1639,9 +1672,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" @@ -1657,9 +1690,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -1688,9 +1721,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", @@ -1710,15 +1743,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" @@ -1732,9 +1765,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.31" 
+version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -2725,6 +2758,7 @@ dependencies = [ name = "libp2p-gossipsub" version = "0.48.0" dependencies = [ + "async-channel 2.3.1", "asynchronous-codec", "base64 0.22.1", "byteorder", @@ -5509,7 +5543,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-executor", "async-fs", "async-io 1.13.0", diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 7bf021c761e..8d95abc01a2 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -13,6 +13,13 @@ - Attempt to publish to at least mesh_n peers when flood publish is disabled. See [PR 5578](https://github.com/libp2p/rust-libp2p/pull/5578). +- Introduce back pressure and penalize slow peers. Drop stale messages that timeout before being + delivered. + See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595). +- Change `Behaviour::unsubscribe` and `Behaviour::report_message_validation_result` + to `bool` they don't need to be a `Result`. + See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595). 
+ ## 0.47.0 diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 1d58fc98896..c09286c8aa0 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -14,6 +14,7 @@ categories = ["network-programming", "asynchronous"] wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"] [dependencies] +async-channel = "2.3.1" asynchronous-codec = { workspace = true } base64 = "0.22.1" byteorder = "1.5.0" @@ -21,6 +22,7 @@ bytes = "1.6" either = "1.11" fnv = "1.0.7" futures = { workspace = true } +futures-timer = "3.0.2" getrandom = "0.2.15" hex_fmt = "0.3.0" web-time = { workspace = true } @@ -38,7 +40,6 @@ tracing = { workspace = true } # Metrics dependencies prometheus-client = { workspace = true } -futures-timer = "3.0.3" [dev-dependencies] hex = "0.4.2" @@ -48,7 +49,7 @@ libp2p-noise = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } -tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time"] } +tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index d0fd3127f72..fae45ed452e 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -47,12 +47,6 @@ use libp2p_swarm::{ }; use web_time::{Instant, SystemTime}; -use crate::backoff::BackoffStorage; -use crate::config::{Config, ValidationMode}; -use crate::gossip_promises::GossipPromises; -use crate::handler::{Handler, HandlerEvent, HandlerIn}; -use crate::mcache::MessageCache; -use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}; use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; use crate::protocol::SIGNING_PREFIX; use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; @@ -64,6 +58,21 @@ use crate::types::{ SubscriptionAction, }; use crate::types::{PeerConnections, PeerKind, RpcOut}; +use crate::{backoff::BackoffStorage, FailedMessages}; +use crate::{ + config::{Config, ValidationMode}, + types::Graft, +}; +use crate::{gossip_promises::GossipPromises, types::Prune}; +use crate::{ + handler::{Handler, HandlerEvent, HandlerIn}, + types::IWant, +}; +use crate::{mcache::MessageCache, types::IHave}; +use crate::{ + metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}, + rpc::Sender, +}; use crate::{rpc_proto::proto, TopicScoreParams}; use crate::{PublishError, SubscriptionError, ValidationError}; use quick_protobuf::{MessageWrite, Writer}; @@ -145,6 +154,13 @@ pub enum Event { }, /// A peer that does not support gossipsub has connected. GossipsubNotSupported { peer_id: PeerId }, + /// A peer is not able to download messages in time. + SlowPeer { + /// The peer_id + peer_id: PeerId, + /// The types and amounts of failed messages that are occurring for this peer. 
+ failed_messages: FailedMessages, + }, } /// A data structure for storing configuration for publishing messages. See [`MessageAuthenticity`] @@ -245,9 +261,6 @@ pub struct Behaviour { /// Events that need to be yielded to the outside when polling. events: VecDeque>, - /// Pools non-urgent control messages between heartbeats. - control_pool: HashMap>, - /// Information used for publishing messages. publish_config: PublishConfig, @@ -309,10 +322,6 @@ pub struct Behaviour { /// Counts the number of `IWANT` that we sent the each peer since the last heartbeat. count_sent_iwant: HashMap, - /// Keeps track of IWANT messages that we are awaiting to send. - /// This is used to prevent sending duplicate IWANT messages for the same message. - pending_iwant_msgs: HashSet, - /// Short term cache for published message ids. This is used for penalizing peers sending /// our own messages back if the messages are anonymous or use a random author. published_message_ids: DuplicateCache, @@ -327,6 +336,9 @@ pub struct Behaviour { /// Keep track of a set of internal metrics relating to gossipsub. metrics: Option, + + /// Tracks the numbers of failed messages per peer-id. 
+ failed_messages: HashMap, } impl Behaviour @@ -434,7 +446,6 @@ where Ok(Behaviour { metrics: metrics.map(|(registry, cfg)| Metrics::new(registry, cfg)), events: VecDeque::new(), - control_pool: HashMap::new(), publish_config: privacy.into(), duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()), explicit_peers: HashSet::new(), @@ -455,12 +466,12 @@ where peer_score: None, count_received_ihave: HashMap::new(), count_sent_iwant: HashMap::new(), - pending_iwant_msgs: HashSet::new(), connected_peers: HashMap::new(), published_message_ids: DuplicateCache::new(config.published_message_ids_cache_time()), config, subscription_filter, data_transform, + failed_messages: Default::default(), }) } } @@ -524,10 +535,10 @@ where } // send subscription request to all peers - for peer in self.connected_peers.keys().copied().collect::>() { - tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); + for peer_id in self.connected_peers.keys().copied().collect::>() { + tracing::debug!(%peer_id, "Sending SUBSCRIBE to peer"); let event = RpcOut::Subscribe(topic_hash.clone()); - self.send_message(peer, event); + self.send_message(peer_id, event); } // call JOIN(topic) @@ -539,16 +550,15 @@ where /// Unsubscribes from a topic. /// - /// Returns [`Ok(true)`] if we were subscribed to this topic. - #[allow(clippy::unnecessary_wraps)] - pub fn unsubscribe(&mut self, topic: &Topic) -> Result { + /// Returns `true` if we were subscribed to this topic. + pub fn unsubscribe(&mut self, topic: &Topic) -> bool { tracing::debug!(%topic, "Unsubscribing from topic"); let topic_hash = topic.hash(); if !self.mesh.contains_key(&topic_hash) { tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic"); // we are not subscribed - return Ok(false); + return false; } // announce to all peers @@ -563,7 +573,7 @@ where self.leave(&topic_hash); tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); - Ok(true) + true } /// Publishes a message with multiple topics to the network. 
@@ -721,9 +731,26 @@ where } // Send to peers we know are subscribed to the topic. + let mut publish_failed = true; for peer_id in recipient_peers.iter() { tracing::trace!(peer=%peer_id, "Sending message to peer"); - self.send_message(*peer_id, RpcOut::Publish(raw_message.clone())); + if self.send_message( + *peer_id, + RpcOut::Publish { + message: raw_message.clone(), + timeout: Delay::new(self.config.publish_queue_duration()), + }, + ) { + publish_failed = false + } + } + + if recipient_peers.is_empty() { + return Err(PublishError::InsufficientPeers); + } + + if publish_failed { + return Err(PublishError::AllQueuesFull(recipient_peers.len())); } tracing::debug!(message=%msg_id, "Published message"); @@ -759,7 +786,7 @@ where msg_id: &MessageId, propagation_source: &PeerId, acceptance: MessageAcceptance, - ) -> Result { + ) -> bool { let reject_reason = match acceptance { MessageAcceptance::Accept => { let (raw_message, originating_peers) = match self.mcache.validate(msg_id) { @@ -774,7 +801,7 @@ where if let Some(metrics) = self.metrics.as_mut() { metrics.memcache_miss(); } - return Ok(false); + return false; } }; @@ -787,8 +814,8 @@ where raw_message, Some(propagation_source), originating_peers, - )?; - return Ok(true); + ); + return true; } MessageAcceptance::Reject => RejectReason::ValidationFailed, MessageAcceptance::Ignore => RejectReason::ValidationIgnored, @@ -812,10 +839,10 @@ where peer_score.reject_message(peer, msg_id, &raw_message.topic, reject_reason); } } - Ok(true) + true } else { tracing::warn!(message=%msg_id, "Rejected message not in cache"); - Ok(false) + false } } @@ -1003,12 +1030,11 @@ where if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(&peer_id, topic_hash.clone()); } - Self::control_pool_add( - &mut self.control_pool, + self.send_message( peer_id, - ControlAction::Graft { + RpcOut::Graft(Graft { topic_hash: topic_hash.clone(), - }, + }), ); // If the peer did not previously exist in any mesh, inform the handler @@ 
-1036,7 +1062,7 @@ where peer: &PeerId, do_px: bool, on_unsubscribe: bool, - ) -> ControlAction { + ) -> Prune { if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.prune(peer, topic_hash.clone()); } @@ -1047,7 +1073,7 @@ where } Some(PeerKind::Gossipsub) => { // GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway - return ControlAction::Prune { + return Prune { topic_hash: topic_hash.clone(), peers: Vec::new(), backoff: None, @@ -1083,7 +1109,7 @@ where // update backoff self.backoffs.update_backoff(topic_hash, peer, backoff); - ControlAction::Prune { + Prune { topic_hash: topic_hash.clone(), peers, backoff: Some(backoff.as_secs()), @@ -1099,17 +1125,18 @@ where if let Some(m) = self.metrics.as_mut() { m.left(topic_hash) } - for peer in peers { + for peer_id in peers { // Send a PRUNE control message - tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer"); + tracing::debug!(%peer_id, "LEAVE: Sending PRUNE to peer"); + let on_unsubscribe = true; - let control = - self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe); - Self::control_pool_add(&mut self.control_pool, peer, control); + let prune = + self.make_prune(topic_hash, &peer_id, self.config.do_px(), on_unsubscribe); + self.send_message(peer_id, RpcOut::Prune(prune)); // If the peer did not previously exist in any mesh, inform the handler peer_removed_from_mesh( - peer, + peer_id, topic_hash, &self.mesh, &mut self.events, @@ -1203,10 +1230,6 @@ where return false; } - if self.pending_iwant_msgs.contains(id) { - return false; - } - self.peer_score .as_ref() .map(|(_, _, _, promises)| !promises.contains(id)) @@ -1257,11 +1280,6 @@ where iwant_ids_vec.truncate(iask); *iasked += iask; - for message_id in &iwant_ids_vec { - // Add all messages to the pending list - self.pending_iwant_msgs.insert(message_id.clone()); - } - if let Some((_, _, _, gossip_promises)) = &mut self.peer_score { gossip_promises.add_promise( *peer_id, @@ -1275,12 +1293,11 @@ where 
iwant_ids_vec ); - Self::control_pool_add( - &mut self.control_pool, + self.send_message( *peer_id, - ControlAction::IWant { + RpcOut::IWant(IWant { message_ids: iwant_ids_vec, - }, + }), ); } tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); @@ -1317,7 +1334,13 @@ where ); } else { tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); - self.send_message(*peer_id, RpcOut::Forward(msg)); + self.send_message( + *peer_id, + RpcOut::Forward { + message: msg, + timeout: Delay::new(self.config.forward_queue_duration()), + }, + ); } } } @@ -1471,12 +1494,13 @@ where if !to_prune_topics.is_empty() { // build the prune messages to send let on_unsubscribe = false; - for action in to_prune_topics + + for prune in to_prune_topics .iter() .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) .collect::>() { - self.send_message(*peer_id, RpcOut::Control(action)); + self.send_message(*peer_id, RpcOut::Prune(prune)); } // Send the prune messages to the peer tracing::debug!( @@ -1768,17 +1792,12 @@ where // forward the message to mesh peers, if no validation is required if !self.config.validate_messages() { - if self - .forward_msg( - &msg_id, - raw_message, - Some(propagation_source), - HashSet::new(), - ) - .is_err() - { - tracing::error!("Failed to forward message. Too large"); - } + self.forward_msg( + &msg_id, + raw_message, + Some(propagation_source), + HashSet::new(), + ); tracing::debug!(message=%msg_id, "Completed message handling for message"); } } @@ -1962,12 +1981,8 @@ where // If we need to send grafts to peer, do so immediately, rather than waiting for the // heartbeat. 
- for action in topics_to_graft - .into_iter() - .map(|topic_hash| ControlAction::Graft { topic_hash }) - .collect::>() - { - self.send_message(*propagation_source, RpcOut::Control(action)) + for topic_hash in topics_to_graft.into_iter() { + self.send_message(*propagation_source, RpcOut::Graft(Graft { topic_hash })); } // Notify the application of the subscriptions @@ -1998,6 +2013,16 @@ where tracing::debug!("Starting heartbeat"); let start = Instant::now(); + // Every heartbeat we sample the send queues to add to our metrics. We do this intentionally + // before we add all the gossip from this heartbeat in order to gain a true measure of + // steady-state size of the queues. + if let Some(m) = &mut self.metrics { + for sender_queue in self.connected_peers.values().map(|v| &v.sender) { + m.observe_priority_queue_size(sender_queue.priority_queue_len()); + m.observe_non_priority_queue_size(sender_queue.non_priority_queue_len()); + } + } + self.heartbeat_ticks += 1; let mut to_graft = HashMap::new(); @@ -2367,12 +2392,20 @@ where self.send_graft_prune(to_graft, to_prune, no_px); } - // piggyback pooled control messages - self.flush_control_pool(); - // shift the memcache self.mcache.shift(); + // Report expired messages + for (peer_id, failed_messages) in self.failed_messages.drain() { + tracing::debug!("Peer couldn't consume messages: {:?}", failed_messages); + self.events + .push_back(ToSwarm::GenerateEvent(Event::SlowPeer { + peer_id, + failed_messages, + })); + } + self.failed_messages.shrink_to_fit(); + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); @@ -2384,6 +2417,7 @@ where /// and fanout peers fn emit_gossip(&mut self) { let mut rng = thread_rng(); + let mut messages = Vec::new(); for (topic_hash, peers) in self.mesh.iter().chain(self.fanout.iter()) { let mut message_ids = self.mcache.get_gossip_message_ids(topic_hash); if 
message_ids.is_empty() { @@ -2419,7 +2453,7 @@ where tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); - for peer in to_msg_peers { + for peer_id in to_msg_peers { let mut peer_message_ids = message_ids.clone(); if peer_message_ids.len() > self.config.max_ihave_length() { @@ -2431,16 +2465,18 @@ where } // send an IHAVE message - Self::control_pool_add( - &mut self.control_pool, - peer, - ControlAction::IHave { + messages.push(( + peer_id, + RpcOut::IHave(IHave { topic_hash: topic_hash.clone(), message_ids: peer_message_ids, - }, - ); + }), + )); } } + for (peer_id, message) in messages { + self.send_message(peer_id, message); + } } /// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked gossip control @@ -2452,25 +2488,27 @@ where no_px: HashSet, ) { // handle the grafts and overlapping prunes per peer - for (peer, topics) in to_graft.into_iter() { + for (peer_id, topics) in to_graft.into_iter() { for topic in &topics { // inform scoring of graft if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.graft(&peer, topic.clone()); + peer_score.graft(&peer_id, topic.clone()); } // inform the handler of the peer being added to the mesh // If the peer did not previously exist in any mesh, inform the handler peer_added_to_mesh( - peer, + peer_id, vec![topic], &self.mesh, &mut self.events, &self.connected_peers, ); } - let control_msgs = topics.iter().map(|topic_hash| ControlAction::Graft { - topic_hash: topic_hash.clone(), + let rpc_msgs = topics.iter().map(|topic_hash| { + RpcOut::Graft(Graft { + topic_hash: topic_hash.clone(), + }) }); // If there are prunes associated with the same peer add them. @@ -2479,40 +2517,41 @@ where // of its removal from another. // The following prunes are not due to unsubscribing. 
- let prunes = to_prune - .remove(&peer) + let prune_msgs = to_prune + .remove(&peer_id) .into_iter() .flatten() .map(|topic_hash| { - self.make_prune( + let prune = self.make_prune( &topic_hash, - &peer, - self.config.do_px() && !no_px.contains(&peer), + &peer_id, + self.config.do_px() && !no_px.contains(&peer_id), false, - ) + ); + RpcOut::Prune(prune) }); - // send the control messages - for msg in control_msgs.chain(prunes).collect::>() { - self.send_message(peer, RpcOut::Control(msg)); + // send the rpc messages + for msg in rpc_msgs.chain(prune_msgs).collect::>() { + self.send_message(peer_id, msg); } } // handle the remaining prunes // The following prunes are not due to unsubscribing. - for (peer, topics) in to_prune.iter() { + for (peer_id, topics) in to_prune.iter() { for topic_hash in topics { let prune = self.make_prune( topic_hash, - peer, - self.config.do_px() && !no_px.contains(peer), + peer_id, + self.config.do_px() && !no_px.contains(peer_id), false, ); - self.send_message(*peer, RpcOut::Control(prune)); + self.send_message(*peer_id, RpcOut::Prune(prune)); // inform the handler peer_removed_from_mesh( - *peer, + *peer_id, topic_hash, &self.mesh, &mut self.events, @@ -2525,14 +2564,13 @@ where /// Helper function which forwards a message to mesh\[topic\] peers. /// /// Returns true if at least one peer was messaged. 
- #[allow(clippy::unnecessary_wraps)] fn forward_msg( &mut self, msg_id: &MessageId, message: RawMessage, propagation_source: Option<&PeerId>, originating_peers: HashSet, - ) -> Result { + ) -> bool { // message is fully validated inform peer_score if let Some((peer_score, ..)) = &mut self.peer_score { if let Some(peer) = propagation_source { @@ -2543,50 +2581,51 @@ where tracing::debug!(message=%msg_id, "Forwarding message"); let mut recipient_peers = HashSet::new(); - { - // Populate the recipient peers mapping - - // Add explicit peers - for peer_id in &self.explicit_peers { - if let Some(peer) = self.connected_peers.get(peer_id) { - if Some(peer_id) != propagation_source - && !originating_peers.contains(peer_id) - && Some(peer_id) != message.source.as_ref() - && peer.topics.contains(&message.topic) - { - recipient_peers.insert(*peer_id); - } - } + // Populate the recipient peers mapping + + // Add explicit peers + for peer_id in &self.explicit_peers { + let Some(peer) = self.connected_peers.get(peer_id) else { + continue; + }; + if Some(peer_id) != propagation_source + && !originating_peers.contains(peer_id) + && Some(peer_id) != message.source.as_ref() + && peer.topics.contains(&message.topic) + { + recipient_peers.insert(*peer_id); } + } - // add mesh peers - let topic = &message.topic; - // mesh - if let Some(mesh_peers) = self.mesh.get(topic) { - for peer_id in mesh_peers { - if Some(peer_id) != propagation_source - && !originating_peers.contains(peer_id) - && Some(peer_id) != message.source.as_ref() - { - recipient_peers.insert(*peer_id); - } + // add mesh peers + let topic = &message.topic; + // mesh + if let Some(mesh_peers) = self.mesh.get(topic) { + for peer_id in mesh_peers { + if Some(peer_id) != propagation_source + && !originating_peers.contains(peer_id) + && Some(peer_id) != message.source.as_ref() + { + recipient_peers.insert(*peer_id); } } } - // forward the message to peers - if !recipient_peers.is_empty() { - let event = 
RpcOut::Forward(message.clone()); + if recipient_peers.is_empty() { + return false; + } - for peer in recipient_peers.iter() { - tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); - self.send_message(*peer, event.clone()); - } - tracing::debug!("Completed forwarding message"); - Ok(true) - } else { - Ok(false) + // forward the message to peers + for peer in recipient_peers.iter() { + let event = RpcOut::Forward { + message: message.clone(), + timeout: Delay::new(self.config.forward_queue_duration()), + }; + tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); + self.send_message(*peer, event); } + tracing::debug!("Completed forwarding message"); + true } /// Constructs a [`RawMessage`] performing message signing if required. @@ -2681,49 +2720,69 @@ where } } - // adds a control action to control_pool - fn control_pool_add( - control_pool: &mut HashMap>, - peer: PeerId, - control: ControlAction, - ) { - control_pool.entry(peer).or_default().push(control); - } - - /// Takes each control action mapping and turns it into a message - fn flush_control_pool(&mut self) { - for (peer, controls) in self.control_pool.drain().collect::>() { - for msg in controls { - self.send_message(peer, RpcOut::Control(msg)); - } - } - - // This clears all pending IWANT messages - self.pending_iwant_msgs.clear(); - } - - /// Send a [`RpcOut`] message to a peer. This will wrap the message in an arc if it - /// is not already an arc. - fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) { + /// Send a [`RpcOut`] message to a peer. + /// + /// Returns `true` if sending was successful, `false` otherwise. + /// The method will update the peer score and failed message counter if + /// sending the message failed due to the channel to the connection handler being + /// full (which indicates a slow peer). 
+ fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) -> bool { if let Some(m) = self.metrics.as_mut() { - if let RpcOut::Publish(ref message) | RpcOut::Forward(ref message) = rpc { + if let RpcOut::Publish { ref message, .. } | RpcOut::Forward { ref message, .. } = rpc { // register bytes sent on the internal metrics. m.msg_sent(&message.topic, message.raw_protobuf_len()); } } - self.events.push_back(ToSwarm::NotifyHandler { - peer_id, - event: HandlerIn::Message(rpc), - handler: NotifyHandler::Any, - }); + let Some(peer) = &mut self.connected_peers.get_mut(&peer_id) else { + tracing::error!(peer = %peer_id, + "Could not send rpc to connection handler, peer doesn't exist in connected peer list"); + return false; + }; + + // Try sending the message to the connection handler. + match peer.sender.send_message(rpc) { + Ok(()) => true, + Err(rpc) => { + // Sending failed because the channel is full. + tracing::warn!(peer=%peer_id, "Send Queue full. Could not send {:?}.", rpc); + + // Update failed message counter. + let failed_messages = self.failed_messages.entry(peer_id).or_default(); + match rpc { + RpcOut::Publish { .. } => { + failed_messages.priority += 1; + failed_messages.publish += 1; + } + RpcOut::Forward { .. } => { + failed_messages.non_priority += 1; + failed_messages.forward += 1; + } + RpcOut::IWant(_) | RpcOut::IHave(_) => { + failed_messages.non_priority += 1; + } + RpcOut::Graft(_) + | RpcOut::Prune(_) + | RpcOut::Subscribe(_) + | RpcOut::Unsubscribe(_) => { + unreachable!("Channel for highpriority contorl messages is unbounded and should always be open.") + } + } + + // Update peer score. + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.failed_message_slow_peer(&peer_id); + } + + false + } + } } fn on_connection_established( &mut self, ConnectionEstablished { peer_id, - connection_id, endpoint, other_established, .. @@ -2751,21 +2810,6 @@ where } } - // By default we assume a peer is only a floodsub peer. 
- // - // The protocol negotiation occurs once a message is sent/received. Once this happens we - // update the type of peer that this is in order to determine which kind of routing should - // occur. - self.connected_peers - .entry(peer_id) - .or_insert(PeerConnections { - kind: PeerKind::Floodsub, - connections: vec![], - topics: Default::default(), - }) - .connections - .push(connection_id); - if other_established > 0 { return; // Not our first connection to this peer, hence nothing to do. } @@ -2840,37 +2884,32 @@ where } else { // remove from mesh, topic_peers, peer_topic and the fanout tracing::debug!(peer=%peer_id, "Peer disconnected"); - { - let Some(peer) = self.connected_peers.get(&peer_id) else { - debug_assert!( - self.blacklisted_peers.contains(&peer_id), - "Disconnected node not in connected list" - ); - return; - }; - - // remove peer from all mappings - for topic in peer.topics.iter() { - // check the mesh for the topic - if let Some(mesh_peers) = self.mesh.get_mut(topic) { - // check if the peer is in the mesh and remove it - if mesh_peers.remove(&peer_id) { - if let Some(m) = self.metrics.as_mut() { - m.peers_removed(topic, Churn::Dc, 1); - m.set_mesh_peers(topic, mesh_peers.len()); - } - }; - } + let Some(connected_peer) = self.connected_peers.get(&peer_id) else { + tracing::error!(peer_id = %peer_id, "Peer non-existent when handling disconnection"); + return; + }; - if let Some(m) = self.metrics.as_mut() { - m.dec_topic_peers(topic); - } + // remove peer from all mappings + for topic in &connected_peer.topics { + // check the mesh for the topic + if let Some(mesh_peers) = self.mesh.get_mut(topic) { + // check if the peer is in the mesh and remove it + if mesh_peers.remove(&peer_id) { + if let Some(m) = self.metrics.as_mut() { + m.peers_removed(topic, Churn::Dc, 1); + m.set_mesh_peers(topic, mesh_peers.len()); + } + }; + } - // remove from fanout - self.fanout - .get_mut(topic) - .map(|peers| peers.remove(&peer_id)); + if let Some(m) = 
self.metrics.as_mut() { + m.dec_topic_peers(topic); } + + // remove from fanout + self.fanout + .get_mut(topic) + .map(|peers| peers.remove(&peer_id)); } // Forget px and outbound status for this peer @@ -2879,12 +2918,7 @@ where // If metrics are enabled, register the disconnection of a peer based on its protocol. if let Some(metrics) = self.metrics.as_mut() { - let peer_kind = &self - .connected_peers - .get(&peer_id) - .expect("Connected peer must be registered") - .kind; - metrics.peer_protocol_disconnected(peer_kind.clone()); + metrics.peer_protocol_disconnected(connected_peer.kind.clone()); } self.connected_peers.remove(&peer_id); @@ -2946,23 +2980,58 @@ where fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer_id: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.protocol_config())) + // By default we assume a peer is only a floodsub peer. + // + // The protocol negotiation occurs once a message is sent/received. Once this happens we + // update the type of peer that this is in order to determine which kind of routing should + // occur. 
+ let connected_peer = self + .connected_peers + .entry(peer_id) + .or_insert(PeerConnections { + kind: PeerKind::Floodsub, + connections: vec![], + sender: Sender::new(self.config.connection_handler_queue_len()), + topics: Default::default(), + }); + // Add the new connection + connected_peer.connections.push(connection_id); + + Ok(Handler::new( + self.config.protocol_config(), + connected_peer.sender.new_receiver(), + )) } fn handle_established_outbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer_id: PeerId, _: &Multiaddr, _: Endpoint, _: PortUse, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.protocol_config())) + let connected_peer = self + .connected_peers + .entry(peer_id) + .or_insert(PeerConnections { + kind: PeerKind::Floodsub, + connections: vec![], + sender: Sender::new(self.config.connection_handler_queue_len()), + topics: Default::default(), + }); + // Add the new connection + connected_peer.connections.push(connection_id); + + Ok(Handler::new( + self.config.protocol_config(), + connected_peer.sender.new_receiver(), + )) } fn on_connection_handler_event( @@ -3002,6 +3071,40 @@ where } } } + HandlerEvent::MessageDropped(rpc) => { + // Account for this in the scoring logic + if let Some((peer_score, _, _, _)) = &mut self.peer_score { + peer_score.failed_message_slow_peer(&propagation_source); + } + + // Keep track of expired messages for the application layer. + let failed_messages = self.failed_messages.entry(propagation_source).or_default(); + failed_messages.timeout += 1; + match rpc { + RpcOut::Publish { .. } => { + failed_messages.publish += 1; + } + RpcOut::Forward { .. } => { + failed_messages.forward += 1; + } + _ => {} + } + + // Record metrics on the failure. + if let Some(metrics) = self.metrics.as_mut() { + match rpc { + RpcOut::Publish { message, .. 
} => { + metrics.publish_msg_dropped(&message.topic); + metrics.timeout_msg_dropped(&message.topic); + } + RpcOut::Forward { message, .. } => { + metrics.forward_msg_dropped(&message.topic); + metrics.timeout_msg_dropped(&message.topic); + } + _ => {} + } + } + } HandlerEvent::Message { rpc, invalid_messages, @@ -3062,21 +3165,21 @@ where let mut prune_msgs = vec![]; for control_msg in rpc.control_msgs { match control_msg { - ControlAction::IHave { + ControlAction::IHave(IHave { topic_hash, message_ids, - } => { + }) => { ihave_msgs.push((topic_hash, message_ids)); } - ControlAction::IWant { message_ids } => { + ControlAction::IWant(IWant { message_ids }) => { self.handle_iwant(&propagation_source, message_ids) } - ControlAction::Graft { topic_hash } => graft_msgs.push(topic_hash), - ControlAction::Prune { + ControlAction::Graft(Graft { topic_hash }) => graft_msgs.push(topic_hash), + ControlAction::Prune(Prune { topic_hash, peers, backoff, - } => prune_msgs.push((topic_hash, peers, backoff)), + }) => prune_msgs.push((topic_hash, peers, backoff)), } } if !ihave_msgs.is_empty() { @@ -3142,13 +3245,15 @@ fn peer_added_to_mesh( connections: &HashMap, ) { // Ensure there is an active connection - let connection_id = { - let conn = connections.get(&peer_id).expect("To be connected to peer."); - assert!( - !conn.connections.is_empty(), - "Must have at least one connection" - ); - conn.connections[0] + let connection_id = match connections.get(&peer_id) { + Some(p) => p + .connections + .first() + .expect("There should be at least one connection to a peer."), + None => { + tracing::error!(peer_id=%peer_id, "Peer not existent when added to the mesh"); + return; + } }; if let Some(peer) = connections.get(&peer_id) { @@ -3167,7 +3272,7 @@ fn peer_added_to_mesh( events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerIn::JoinedMesh, - handler: NotifyHandler::One(connection_id), + handler: NotifyHandler::One(*connection_id), }); } @@ -3182,12 +3287,16 @@ fn 
peer_removed_from_mesh( connections: &HashMap, ) { // Ensure there is an active connection - let connection_id = connections - .get(&peer_id) - .expect("To be connected to peer.") - .connections - .first() - .expect("There should be at least one connection to a peer."); + let connection_id = match connections.get(&peer_id) { + Some(p) => p + .connections + .first() + .expect("There should be at least one connection to a peer."), + None => { + tracing::error!(peer_id=%peer_id, "Peer not existent when removed from mesh"); + return; + } + }; if let Some(peer) = connections.get(&peer_id) { for topic in &peer.topics { @@ -3289,7 +3398,6 @@ impl fmt::Debug for Behaviour RawMessage { - RawMessage { - source: Some(PeerId::random()), - data: vec![0; 100], - sequence_number: None, - topic: TopicHash::from_raw("test_topic"), - signature: None, - key: None, - validated: false, - } - } - - fn test_control() -> ControlAction { - ControlAction::IHave { - topic_hash: IdentTopic::new("TestTopic").hash(), - message_ids: vec![MessageId(vec![12u8]); 5], - } - } - - impl Arbitrary for RpcOut { - fn arbitrary(g: &mut Gen) -> Self { - match u8::arbitrary(g) % 5 { - 0 => RpcOut::Subscribe(IdentTopic::new("TestTopic").hash()), - 1 => RpcOut::Unsubscribe(IdentTopic::new("TestTopic").hash()), - 2 => RpcOut::Publish(test_message()), - 3 => RpcOut::Forward(test_message()), - 4 => RpcOut::Control(test_control()), - _ => panic!("outside range"), - } - } - } -} diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index c7afe926a65..9567150382a 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -21,11 +21,13 @@ // Collection of tests for the gossipsub network behaviour use super::*; +use crate::rpc::Receiver; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; use byteorder::{BigEndian, ByteOrder}; use 
libp2p_core::ConnectedPoint; use rand::Rng; +use std::future; use std::net::Ipv4Addr; use std::thread::sleep; @@ -53,7 +55,15 @@ where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { - pub(crate) fn create_network(self) -> (Behaviour, Vec, Vec) { + #[allow(clippy::type_complexity)] + pub(crate) fn create_network( + self, + ) -> ( + Behaviour, + Vec, + HashMap, + Vec, + ) { let keypair = libp2p_identity::Keypair::generate_ed25519(); // create a gossipsub struct let mut gs: Behaviour = Behaviour::new_with_subscription_filter_and_transform( @@ -81,10 +91,11 @@ where // build and connect peer_no random peers let mut peers = vec![]; + let mut receivers = HashMap::new(); let empty = vec![]; for i in 0..self.peer_no { - peers.push(add_peer( + let (peer, receiver) = add_peer( &mut gs, if self.to_subscribe { &topic_hashes @@ -93,10 +104,12 @@ where }, i < self.outbound, i < self.explicit, - )); + ); + peers.push(peer); + receivers.insert(peer, receiver); } - (gs, peers, topic_hashes) + (gs, peers, receivers, topic_hashes) } fn peer_no(mut self, peer_no: usize) -> Self { @@ -160,7 +173,7 @@ fn add_peer( topic_hashes: &[TopicHash], outbound: bool, explicit: bool, -) -> PeerId +) -> (PeerId, Receiver) where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, @@ -174,7 +187,7 @@ fn add_peer_with_addr( outbound: bool, explicit: bool, address: Multiaddr, -) -> PeerId +) -> (PeerId, Receiver) where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, @@ -196,7 +209,7 @@ fn add_peer_with_addr_and_kind( explicit: bool, address: Multiaddr, kind: Option, -) -> PeerId +) -> (PeerId, Receiver) where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, @@ -215,9 +228,22 @@ where } }; + let sender = 
Sender::new(gs.config.connection_handler_queue_len()); + let receiver = sender.new_receiver(); + let connection_id = ConnectionId::new_unchecked(0); + gs.connected_peers.insert( + peer, + PeerConnections { + kind: kind.clone().unwrap_or(PeerKind::Floodsub), + connections: vec![connection_id], + topics: Default::default(), + sender, + }, + ); + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: peer, - connection_id: ConnectionId::new_unchecked(0), + connection_id, endpoint: &endpoint, failed_addresses: &[], other_established: 0, // first connection @@ -245,7 +271,7 @@ where &peer, ); } - peer + (peer, receiver) } fn disconnect_peer(gs: &mut Behaviour, peer_id: &PeerId) @@ -298,33 +324,39 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc { let ihave_msgs: Vec = rpc_control .ihave .into_iter() - .map(|ihave| ControlAction::IHave { - topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), - message_ids: ihave - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), + .map(|ihave| { + ControlAction::IHave(IHave { + topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), + message_ids: ihave + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) }) .collect(); let iwant_msgs: Vec = rpc_control .iwant .into_iter() - .map(|iwant| ControlAction::IWant { - message_ids: iwant - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), + .map(|iwant| { + ControlAction::IWant(IWant { + message_ids: iwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) }) .collect(); let graft_msgs: Vec = rpc_control .graft .into_iter() - .map(|graft| ControlAction::Graft { - topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + .map(|graft| { + ControlAction::Graft(Graft { + topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + }) }) .collect(); @@ -347,11 +379,11 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc { .collect::>(); let 
topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); - prune_msgs.push(ControlAction::Prune { + prune_msgs.push(ControlAction::Prune(Prune { topic_hash, peers, backoff: prune.backoff, - }); + })); } control_msgs.extend(ihave_msgs); @@ -387,7 +419,7 @@ fn test_subscribe() { // - run JOIN(topic) let subscribe_topic = vec![String::from("test_subscribe")]; - let (gs, _, topic_hashes) = inject_nodes1() + let (gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(subscribe_topic) .to_subscribe(true) @@ -399,26 +431,24 @@ fn test_subscribe() { ); // collect all the subscriptions - let subscriptions = gs - .events - .iter() - .filter(|e| { - matches!( - e, - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Subscribe(_)), - .. + let subscriptions = receivers + .into_values() + .fold(0, |mut collected_subscriptions, c| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Subscribe(_)) = priority.try_recv() { + collected_subscriptions += 1 } - ) - }) - .count(); + } + collected_subscriptions + }); // we sent a subscribe to all known peers assert_eq!(subscriptions, 20); } -#[test] /// Test unsubscribe. 
+#[test] fn test_unsubscribe() { // Unsubscribe should: // - Remove the mesh entry for topic @@ -432,7 +462,7 @@ fn test_unsubscribe() { .collect::>(); // subscribe to topic_strings - let (mut gs, _, topic_hashes) = inject_nodes1() + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topic_strings) .to_subscribe(true) @@ -453,24 +483,25 @@ fn test_unsubscribe() { // unsubscribe from both topics assert!( - gs.unsubscribe(&topics[0]).unwrap(), + gs.unsubscribe(&topics[0]), "should be able to unsubscribe successfully from each topic", ); assert!( - gs.unsubscribe(&topics[1]).unwrap(), + gs.unsubscribe(&topics[1]), "should be able to unsubscribe successfully from each topic", ); // collect all the subscriptions - let subscriptions = gs - .events - .iter() - .fold(0, |collected_subscriptions, e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Subscribe(_)), - .. - } => collected_subscriptions + 1, - _ => collected_subscriptions, + let subscriptions = receivers + .into_values() + .fold(0, |mut collected_subscriptions, c| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Subscribe(_)) = priority.try_recv() { + collected_subscriptions += 1 + } + } + collected_subscriptions }); // we sent a unsubscribe to all known peers, for two topics @@ -485,8 +516,8 @@ fn test_unsubscribe() { } } -#[test] /// Test JOIN(topic) functionality. +#[test] fn test_join() { // The Join function should: // - Remove peers from fanout[topic] @@ -503,19 +534,22 @@ fn test_join() { .map(|t| Topic::new(t.clone())) .collect::>(); - let (mut gs, _, topic_hashes) = inject_nodes1() + let (mut gs, _, mut receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topic_strings) .to_subscribe(true) .create_network(); + // Flush previous GRAFT messages. 
+ receivers = flush_events(&mut gs, receivers); + // unsubscribe, then call join to invoke functionality assert!( - gs.unsubscribe(&topics[0]).unwrap(), + gs.unsubscribe(&topics[0]), "should be able to unsubscribe successfully" ); assert!( - gs.unsubscribe(&topics[1]).unwrap(), + gs.unsubscribe(&topics[1]), "should be able to unsubscribe successfully" ); @@ -531,24 +565,34 @@ fn test_join() { "Should have added 6 nodes to the mesh" ); - fn collect_grafts( - mut collected_grafts: Vec, - (_, controls): (&PeerId, &Vec), - ) -> Vec { - for c in controls.iter() { - if let ControlAction::Graft { topic_hash: _ } = c { - collected_grafts.push(c.clone()) + fn count_grafts(receivers: HashMap) -> (usize, HashMap) { + let mut new_receivers = HashMap::new(); + let mut acc = 0; + + for (peer_id, c) in receivers.into_iter() { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Graft(_)) = priority.try_recv() { + acc += 1; + } } + new_receivers.insert( + peer_id, + Receiver { + priority_queue_len: c.priority_queue_len, + priority: c.priority, + non_priority: c.non_priority, + }, + ); } - collected_grafts + (acc, new_receivers) } // there should be mesh_n GRAFT messages. 
- let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + let (graft_messages, mut receivers) = count_grafts(receivers); assert_eq!( - graft_messages.len(), - 6, + graft_messages, 6, "There should be 6 grafts messages sent to peers" ); @@ -557,14 +601,37 @@ fn test_join() { gs.fanout .insert(topic_hashes[1].clone(), Default::default()); let mut new_peers: Vec = vec![]; + for _ in 0..3 { let random_peer = PeerId::random(); // inform the behaviour of a new peer + let address = "/ip4/127.0.0.1".parse::().unwrap(); + gs.handle_established_inbound_connection( + ConnectionId::new_unchecked(0), + random_peer, + &address, + &address, + ) + .unwrap(); + let sender = Sender::new(gs.config.connection_handler_queue_len()); + let receiver = sender.new_receiver(); + let connection_id = ConnectionId::new_unchecked(0); + gs.connected_peers.insert( + random_peer, + PeerConnections { + kind: PeerKind::Floodsub, + connections: vec![connection_id], + topics: Default::default(), + sender, + }, + ); + receivers.insert(random_peer, receiver); + gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: random_peer, - connection_id: ConnectionId::new_unchecked(0), + connection_id, endpoint: &ConnectedPoint::Dialer { - address: "/ip4/127.0.0.1".parse::().unwrap(), + address, role_override: Endpoint::Dialer, port_use: PortUse::Reuse, }, @@ -594,12 +661,12 @@ fn test_join() { ); } - // there should now be 12 graft messages to be sent - let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + // there should now 6 graft messages to be sent + let (graft_messages, _) = count_grafts(receivers); - assert!( - graft_messages.len() == 12, - "There should be 12 grafts messages sent to peers" + assert_eq!( + graft_messages, 6, + "There should be 6 grafts messages sent to peers" ); } @@ -617,7 +684,7 @@ fn test_publish_without_flood_publishing() { .unwrap(); let publish_topic = String::from("test_publish"); - let (mut gs, _, topic_hashes) 
= inject_nodes1() + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![publish_topic.clone()]) .to_subscribe(true) @@ -644,18 +711,16 @@ fn test_publish_without_flood_publishing() { gs.publish(Topic::new(publish_topic), publish_data).unwrap(); // Collect all publish messages - let publishes = gs - .events - .into_iter() - .fold(vec![], |mut collected_publish, e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Publish(message)), - .. - } => { - collected_publish.push(message); - collected_publish + let publishes = receivers + .into_values() + .fold(vec![], |mut collected_publish, c| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + collected_publish.push(message); + } } - _ => collected_publish, + collected_publish }); // Transform the inbound message @@ -699,7 +764,7 @@ fn test_fanout() { .unwrap(); let fanout_topic = String::from("test_fanout"); - let (mut gs, _, topic_hashes) = inject_nodes1() + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![fanout_topic.clone()]) .to_subscribe(true) @@ -712,7 +777,7 @@ fn test_fanout() { ); // Unsubscribe from topic assert!( - gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), + gs.unsubscribe(&Topic::new(fanout_topic.clone())), "should be able to unsubscribe successfully from topic" ); @@ -731,18 +796,16 @@ fn test_fanout() { ); // Collect all publish messages - let publishes = gs - .events - .into_iter() - .fold(vec![], |mut collected_publish, e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Publish(message)), - .. - } => { - collected_publish.push(message); - collected_publish + let publishes = receivers + .into_values() + .fold(vec![], |mut collected_publish, c| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. 
}) = priority.try_recv() { + collected_publish.push(message); + } } - _ => collected_publish, + collected_publish }); // Transform the inbound message @@ -770,10 +833,10 @@ fn test_fanout() { ); } -#[test] /// Test the gossipsub NetworkBehaviour peer connection logic. +#[test] fn test_inject_connected() { - let (gs, peers, topic_hashes) = inject_nodes1() + let (gs, peers, receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -781,26 +844,20 @@ fn test_inject_connected() { // check that our subscriptions are sent to each of the peers // collect all the SendEvents - let subscriptions = gs - .events - .into_iter() - .filter_map(|e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Subscribe(topic)), - peer_id, - .. - } => Some((peer_id, topic)), - _ => None, - }) - .fold( - HashMap::>::new(), - |mut subs, (peer, sub)| { - let mut peer_subs = subs.remove(&peer).unwrap_or_default(); - peer_subs.push(sub.into_string()); - subs.insert(peer, peer_subs); - subs - }, - ); + let subscriptions = receivers.into_iter().fold( + HashMap::>::new(), + |mut collected_subscriptions, (peer, c)| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Subscribe(topic)) = priority.try_recv() { + let mut peer_subs = collected_subscriptions.remove(&peer).unwrap_or_default(); + peer_subs.push(topic.into_string()); + collected_subscriptions.insert(peer, peer_subs); + } + } + collected_subscriptions + }, + ); // check that there are two subscriptions sent to each peer for peer_subs in subscriptions.values() { @@ -822,8 +879,8 @@ fn test_inject_connected() { } } -#[test] /// Test subscription handling +#[test] fn test_handle_received_subscriptions() { // For every subscription: // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. 
@@ -835,7 +892,7 @@ fn test_handle_received_subscriptions() { .iter() .map(|&t| String::from(t)) .collect(); - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topics) .to_subscribe(false) @@ -914,10 +971,9 @@ fn test_handle_received_subscriptions() { &peers[0], ); - let peer = gs.connected_peers.get(&peers[0]).unwrap().clone(); - assert_eq!( - peer.topics, - topic_hashes[1..3].iter().cloned().collect::>(), + let peer = gs.connected_peers.get(&peers[0]).unwrap(); + assert!( + peer.topics == topic_hashes[1..3].iter().cloned().collect::>(), "Peer should be subscribed to two topics" ); @@ -935,8 +991,8 @@ fn test_handle_received_subscriptions() { ); } -#[test] /// Test Gossipsub.get_random_peers() function +#[test] fn test_get_random_peers() { // generate a default Config let gs_config = ConfigBuilder::default() @@ -949,25 +1005,22 @@ fn test_get_random_peers() { // create a topic and fill it with some peers let topic_hash = Topic::new("Test").hash(); let mut peers = vec![]; - for _ in 0..20 { - peers.push(PeerId::random()) - } let mut topics = BTreeSet::new(); topics.insert(topic_hash.clone()); - gs.connected_peers = peers - .iter() - .map(|p| { - ( - *p, - PeerConnections { - kind: PeerKind::Gossipsubv1_1, - connections: vec![ConnectionId::new_unchecked(0)], - topics: topics.clone(), - }, - ) - }) - .collect(); + for _ in 0..20 { + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(gs.config.connection_handler_queue_len()), + }, + ); + } let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| true); assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); @@ -997,7 +1050,7 @@ fn test_get_random_peers() { /// Tests that the correct message is sent 
when a peer asks for a message in our cache. #[test] fn test_handle_iwant_msg_cached() { - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, receivers, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1025,18 +1078,17 @@ fn test_handle_iwant_msg_cached() { gs.handle_iwant(&peers[7], vec![msg_id.clone()]); // the messages we are sending - let sent_messages = gs.events.into_iter().fold( - Vec::::new(), - |mut collected_messages, e| match e { - ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(RpcOut::Forward(message)) = event { - collected_messages.push(message); + let sent_messages = receivers + .into_values() + .fold(vec![], |mut collected_messages, c| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { message, .. }) = non_priority.try_recv() { + collected_messages.push(message) } - collected_messages } - _ => collected_messages, - }, - ); + collected_messages + }); assert!( sent_messages @@ -1050,7 +1102,7 @@ fn test_handle_iwant_msg_cached() { /// Tests that messages are sent correctly depending on the shifting of the message cache. #[test] fn test_handle_iwant_msg_cached_shifted() { - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, mut receivers, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1083,19 +1135,29 @@ fn test_handle_iwant_msg_cached_shifted() { gs.handle_iwant(&peers[7], vec![msg_id.clone()]); // is the message is being sent? - let message_exists = gs.events.iter().any(|e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Forward(message)), - .. 
- } => { - gs.config.message_id( - &gs.data_transform - .inbound_transform(message.clone()) - .unwrap(), - ) == msg_id + let mut message_exists = false; + receivers = receivers.into_iter().map(|(peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if matches!(non_priority.try_recv(), Ok(RpcOut::Forward{message, timeout: _ }) if + gs.config.message_id( + &gs.data_transform + .inbound_transform(message.clone()) + .unwrap(), + ) == msg_id) + { + message_exists = true; + } } - _ => false, - }); + ( + peer_id, + Receiver { + priority_queue_len: c.priority_queue_len, + priority: c.priority, + non_priority: c.non_priority, + }, + ) + }).collect(); // default history_length is 5, expect no messages after shift > 5 if shift < 5 { assert!( @@ -1111,10 +1173,10 @@ fn test_handle_iwant_msg_cached_shifted() { } } +/// tests that an event is not created when a peers asks for a message not in our cache #[test] -// tests that an event is not created when a peers asks for a message not in our cache fn test_handle_iwant_msg_not_cached() { - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, _, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1130,10 +1192,10 @@ fn test_handle_iwant_msg_not_cached() { ); } +/// tests that an event is created when a peer shares that it has a message we want #[test] -// tests that an event is created when a peer shares that it has a message we want fn test_handle_ihave_subscribed_and_msg_not_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1145,15 +1207,20 @@ fn test_handle_ihave_subscribed_and_msg_not_cached() { ); // check that we sent an IWANT request for `unknown id` - let iwant_exists = match gs.control_pool.get(&peers[7]) { - Some(controls) => controls.iter().any(|c| match c { - ControlAction::IWant { 
message_ids } => message_ids + let mut iwant_exists = false; + let receiver = receivers.remove(&peers[7]).unwrap(); + let non_priority = receiver.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IWant(IWant { message_ids })) = non_priority.try_recv() { + if message_ids .iter() - .any(|m| *m == MessageId::new(b"unknown id")), - _ => false, - }), - _ => false, - }; + .any(|m| *m == MessageId::new(b"unknown id")) + { + iwant_exists = true; + break; + } + } + } assert!( iwant_exists, @@ -1161,11 +1228,11 @@ fn test_handle_ihave_subscribed_and_msg_not_cached() { ); } +/// tests that an event is not created when a peer shares that it has a message that +/// we already have #[test] -// tests that an event is not created when a peer shares that it has a message that -// we already have fn test_handle_ihave_subscribed_and_msg_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1183,11 +1250,11 @@ fn test_handle_ihave_subscribed_and_msg_cached() { ) } +/// test that an event is not created when a peer shares that it has a message in +/// a topic that we are not subscribed to #[test] -// test that an event is not created when a peer shares that it has a message in -// a topic that we are not subscribed to fn test_handle_ihave_not_subscribed() { - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, _, _) = inject_nodes1() .peer_no(20) .topics(vec![]) .to_subscribe(true) @@ -1209,11 +1276,11 @@ fn test_handle_ihave_not_subscribed() { ) } +/// tests that a peer is added to our mesh when we are both subscribed +/// to the same topic #[test] -// tests that a peer is added to our mesh when we are both subscribed -// to the same topic fn test_handle_graft_is_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(20) 
.topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1227,11 +1294,11 @@ fn test_handle_graft_is_subscribed() { ); } +/// tests that a peer is not added to our mesh when they are subscribed to +/// a topic that we are not #[test] -// tests that a peer is not added to our mesh when they are subscribed to -// a topic that we are not fn test_handle_graft_is_not_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1248,15 +1315,15 @@ fn test_handle_graft_is_not_subscribed() { ); } +/// tests multiple topics in a single graft message #[test] -// tests multiple topics in a single graft message fn test_handle_graft_multiple_topics() { let topics: Vec = ["topic1", "topic2", "topic3", "topic4"] .iter() .map(|&t| String::from(t)) .collect(); - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topics) .to_subscribe(true) @@ -1283,10 +1350,10 @@ fn test_handle_graft_multiple_topics() { ); } +/// tests that a peer is removed from our mesh #[test] -// tests that a peer is removed from our mesh fn test_handle_prune_peer_in_mesh() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1313,36 +1380,68 @@ fn test_handle_prune_peer_in_mesh() { ); } -fn count_control_msgs( - gs: &Behaviour, - mut filter: impl FnMut(&PeerId, &ControlAction) -> bool, -) -> usize { - gs.control_pool - .iter() - .map(|(peer_id, actions)| actions.iter().filter(|m| filter(peer_id, m)).count()) - .sum::() - + gs.events - .iter() - .filter(|e| match e { - ToSwarm::NotifyHandler { - peer_id, - event: HandlerIn::Message(RpcOut::Control(action)), - .. 
- } => filter(peer_id, action), - _ => false, - }) - .count() +fn count_control_msgs( + receivers: HashMap, + mut filter: impl FnMut(&PeerId, &RpcOut) -> bool, +) -> (usize, HashMap) { + let mut new_receivers = HashMap::new(); + let mut collected_messages = 0; + for (peer_id, c) in receivers.into_iter() { + let priority = c.priority.get_ref(); + let non_priority = c.non_priority.get_ref(); + while !priority.is_empty() || !non_priority.is_empty() { + if let Ok(rpc) = priority.try_recv() { + if filter(&peer_id, &rpc) { + collected_messages += 1; + } + } + if let Ok(rpc) = non_priority.try_recv() { + if filter(&peer_id, &rpc) { + collected_messages += 1; + } + } + } + new_receivers.insert( + peer_id, + Receiver { + priority_queue_len: c.priority_queue_len, + priority: c.priority, + non_priority: c.non_priority, + }, + ); + } + (collected_messages, new_receivers) } -fn flush_events(gs: &mut Behaviour) { - gs.control_pool.clear(); +fn flush_events( + gs: &mut Behaviour, + receivers: HashMap, +) -> HashMap { gs.events.clear(); + let mut new_receivers = HashMap::new(); + for (peer_id, c) in receivers.into_iter() { + let priority = c.priority.get_ref(); + let non_priority = c.non_priority.get_ref(); + while !priority.is_empty() || !non_priority.is_empty() { + let _ = priority.try_recv(); + let _ = non_priority.try_recv(); + } + new_receivers.insert( + peer_id, + Receiver { + priority_queue_len: c.priority_queue_len, + priority: c.priority, + non_priority: c.non_priority, + }, + ); + } + new_receivers } +/// tests that a peer added as explicit peer gets connected to #[test] -// tests that a peer added as explicit peer gets connected to fn test_explicit_peer_gets_connected() { - let (mut gs, _, _) = inject_nodes1() + let (mut gs, _, _, _) = inject_nodes1() .peer_no(0) .topics(Vec::new()) .to_subscribe(true) @@ -1375,7 +1474,7 @@ fn test_explicit_peer_reconnects() { .check_explicit_peers_ticks(2) .build() .unwrap(); - let (mut gs, others, _) = inject_nodes1() + let (mut gs, 
others, receivers, _) = inject_nodes1() .peer_no(1) .topics(Vec::new()) .to_subscribe(true) @@ -1387,7 +1486,7 @@ fn test_explicit_peer_reconnects() { //add peer as explicit peer gs.add_explicit_peer(peer); - flush_events(&mut gs); + flush_events(&mut gs, receivers); //disconnect peer disconnect_peer(&mut gs, peer); @@ -1425,7 +1524,7 @@ fn test_explicit_peer_reconnects() { #[test] fn test_handle_graft_explicit_peer() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() .peer_no(1) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -1442,21 +1541,24 @@ fn test_handle_graft_explicit_peer() { assert!(gs.mesh[&topic_hashes[1]].is_empty()); //check prunes - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == peer && match m { - ControlAction::Prune { topic_hash, .. } => - topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1], + RpcOut::Prune(Prune { topic_hash, .. }) => { + topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1] + } _ => false, - }) - >= 2, + } + }); + assert!( + control_msgs >= 2, "Not enough prunes sent when grafting from explicit peer" ); } #[test] fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { - let (gs, peers, topic_hashes) = inject_nodes1() + let (gs, peers, receivers, topic_hashes) = inject_nodes1() .peer_no(2) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1471,25 +1573,27 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { ); //assert that graft gets created to non-explicit peer + let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) + }); assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && matches!(m, ControlAction::Graft { .. 
})) - >= 1, + control_msgs >= 1, "No graft message got created to non-explicit peer" ); //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) + }); assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && matches!(m, ControlAction::Graft { .. })), - 0, + control_msgs, 0, "A graft message got created to an explicit peer" ); } #[test] fn do_not_graft_explicit_peer() { - let (mut gs, others, topic_hashes) = inject_nodes1() + let (mut gs, others, receivers, topic_hashes) = inject_nodes1() .peer_no(1) .topics(vec![String::from("topic")]) .to_subscribe(true) @@ -1503,17 +1607,18 @@ fn do_not_graft_explicit_peer() { assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &others[0] && matches!(m, RpcOut::Graft { .. }) + }); assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] - && matches!(m, ControlAction::Graft { .. })), - 0, + control_msgs, 0, "A graft message got created to an explicit peer" ); } #[test] fn do_forward_messages_to_explicit_peers() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() .peer_no(2) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -1533,21 +1638,16 @@ fn do_forward_messages_to_explicit_peers() { validated: true, }; gs.handle_received_message(message.clone(), &local_id); - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - ToSwarm::NotifyHandler { - peer_id, - event: HandlerIn::Message(RpcOut::Forward(m)), - .. 
- } => { - peer_id == &peers[0] && m.data == message.data + receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if matches!(non_priority.try_recv(), Ok(RpcOut::Forward{message: m, timeout: _}) if peer_id == peers[0] && m.data == message.data) { + fwds +=1; + } } - _ => false, - }) - .count(), + fwds + }), 1, "The message did not get forwarded to the explicit peer" ); @@ -1555,7 +1655,7 @@ fn do_forward_messages_to_explicit_peers() { #[test] fn explicit_peers_not_added_to_mesh_on_subscribe() { - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, receivers, _) = inject_nodes1() .peer_no(2) .topics(Vec::new()) .to_subscribe(true) @@ -1583,25 +1683,27 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); //assert that graft gets created to non-explicit peer + let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) + }); assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && matches!(m, ControlAction::Graft { .. })) - > 0, + control_msgs > 0, "No graft message got created to non-explicit peer" ); //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) + }); assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && matches!(m, ControlAction::Graft { .. 
})), - 0, + control_msgs, 0, "A graft message got created to an explicit peer" ); } #[test] fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, receivers, _) = inject_nodes1() .peer_no(2) .topics(Vec::new()) .to_subscribe(true) @@ -1632,25 +1734,27 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); //assert that graft gets created to non-explicit peer + let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) + }); assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && matches!(m, ControlAction::Graft { .. })) - >= 1, + control_msgs >= 1, "No graft message got created to non-explicit peer" ); //assert that no graft gets created to explicit peer + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) + }); assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && matches!(m, ControlAction::Graft { .. })), - 0, + control_msgs, 0, "A graft message got created to an explicit peer" ); } #[test] fn no_gossip_gets_sent_to_explicit_peers() { - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() .peer_no(2) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -1679,25 +1783,24 @@ fn no_gossip_gets_sent_to_explicit_peers() { } //assert that no gossip gets sent to explicit peer - assert_eq!( - gs.control_pool - .get(&peers[0]) - .unwrap_or(&Vec::new()) - .iter() - .filter(|m| matches!(m, ControlAction::IHave { .. 
})) - .count(), - 0, - "Gossip got emitted to explicit peer" - ); + let receiver = receivers.remove(&peers[0]).unwrap(); + let mut gossips = 0; + let non_priority = receiver.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IHave(_)) = non_priority.try_recv() { + gossips += 1; + } + } + assert_eq!(gossips, 0, "Gossip got emitted to explicit peer"); } -// Tests the mesh maintenance addition +/// Tests the mesh maintenance addition #[test] fn test_mesh_addition() { let config: Config = Config::default(); // Adds mesh_low peers and PRUNE 2 giving us a deficit. - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(config.mesh_n() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1725,7 +1828,7 @@ fn test_mesh_addition() { assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); } -// Tests the mesh maintenance subtraction +/// Tests the mesh maintenance subtraction #[test] fn test_mesh_subtraction() { let config = Config::default(); @@ -1733,7 +1836,7 @@ fn test_mesh_subtraction() { // Adds mesh_low peers and PRUNE 2 giving us a deficit. 
let n = config.mesh_n_high() + 10; //make all outbound connections so that we allow grafting to all - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1757,7 +1860,7 @@ fn test_mesh_subtraction() { fn test_connect_to_px_peers_on_handle_prune() { let config: Config = Config::default(); - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1813,7 +1916,7 @@ fn test_send_px_and_backoff_in_prune() { let config: Config = Config::default(); //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1829,24 +1932,25 @@ fn test_send_px_and_backoff_in_prune() { ); //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && match m { - ControlAction::Prune { + RpcOut::Prune(Prune { topic_hash, peers, backoff, - } => + }) => { topic_hash == &topics[0] && peers.len() == config.prune_peers() && //all peers are different peers.iter().collect::>().len() == config.prune_peers() && - backoff.unwrap() == config.prune_backoff().as_secs(), + backoff.unwrap() == config.prune_backoff().as_secs() + } _ => false, - }), - 1 - ); + } + }); + assert_eq!(control_msgs, 1); } #[test] @@ -1854,7 +1958,7 @@ fn test_prune_backoffed_peer_on_graft() { let config: Config = Config::default(); //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1871,28 +1975,29 @@ fn 
test_prune_backoffed_peer_on_graft() { ); //ignore all messages until now - gs.events.clear(); + let receivers = flush_events(&mut gs, receivers); //handle graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && match m { - ControlAction::Prune { + RpcOut::Prune(Prune { topic_hash, peers, backoff, - } => + }) => { topic_hash == &topics[0] && //no px in this case peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), + backoff.unwrap() == config.prune_backoff().as_secs() + } _ => false, - }), - 1 - ); + } + }); + assert_eq!(control_msgs, 1); } #[test] @@ -1903,7 +2008,7 @@ fn test_do_not_graft_within_backoff_period() { .build() .unwrap(); //only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1914,7 +2019,7 @@ fn test_do_not_graft_within_backoff_period() { gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); //forget all events until now - flush_events(&mut gs); + let receivers = flush_events(&mut gs, receivers); //call heartbeat gs.heartbeat(); @@ -1927,9 +2032,10 @@ fn test_do_not_graft_within_backoff_period() { //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). + let (control_msgs, receivers) = + count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( - count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. 
})), - 0, + control_msgs, 0, "Graft message created too early within backoff period" ); @@ -1938,8 +2044,9 @@ fn test_do_not_graft_within_backoff_period() { gs.heartbeat(); //check that graft got created + let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( - count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })) > 0, + control_msgs > 0, "No graft message was created after backoff period" ); } @@ -1954,7 +2061,7 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without .build() .unwrap(); //only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1965,7 +2072,7 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); //forget all events until now - flush_events(&mut gs); + let receivers = flush_events(&mut gs, receivers); //call heartbeat gs.heartbeat(); @@ -1976,9 +2083,10 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). + let (control_msgs, receivers) = + count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( - count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })), - 0, + control_msgs, 0, "Graft message created too early within backoff period" ); @@ -1987,8 +2095,9 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without gs.heartbeat(); //check that graft got created + let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( - count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. 
})) > 0, + control_msgs > 0, "No graft message was created after backoff period" ); } @@ -2007,7 +2116,7 @@ fn test_unsubscribe_backoff() { let topic = String::from("test"); // only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec![topic.clone()]) .to_subscribe(true) @@ -2016,19 +2125,19 @@ fn test_unsubscribe_backoff() { let _ = gs.unsubscribe(&Topic::new(topic)); + let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| match m { + RpcOut::Prune(Prune { backoff, .. }) => backoff == &Some(1), + _ => false, + }); assert_eq!( - count_control_msgs(&gs, |_, m| match m { - ControlAction::Prune { backoff, .. } => backoff == &Some(1), - _ => false, - }), - 1, + control_msgs, 1, "Peer should be pruned with `unsubscribe_backoff`." ); let _ = gs.subscribe(&Topic::new(topics[0].to_string())); // forget all events until now - flush_events(&mut gs); + let receivers = flush_events(&mut gs, receivers); // call heartbeat gs.heartbeat(); @@ -2041,9 +2150,10 @@ fn test_unsubscribe_backoff() { // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). + let (control_msgs, receivers) = + count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( - count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })), - 0, + control_msgs, 0, "Graft message created too early within backoff period" ); @@ -2052,8 +2162,9 @@ fn test_unsubscribe_backoff() { gs.heartbeat(); // check that graft got created + let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( - count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. 
})) > 0, + control_msgs > 0, "No graft message was created after backoff period" ); } @@ -2064,7 +2175,7 @@ fn test_flood_publish() { let topic = "test"; // Adds more peers than mesh can hold to test flood publishing - let (mut gs, _, _) = inject_nodes1() + let (mut gs, _, receivers, _) = inject_nodes1() .peer_no(config.mesh_n_high() + 10) .topics(vec![topic.into()]) .to_subscribe(true) @@ -2075,17 +2186,16 @@ fn test_flood_publish() { gs.publish(Topic::new(topic), publish_data).unwrap(); // Collect all publish messages - let publishes = gs - .events - .into_iter() - .fold(vec![], |mut collected_publish, e| match e { - ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(RpcOut::Publish(message)) = event { + let publishes = receivers + .into_values() + .fold(vec![], |mut collected_publish, c| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { collected_publish.push(message); } - collected_publish } - _ => collected_publish, + collected_publish }); // Transform the inbound message @@ -2120,7 +2230,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { //add more peers than in mesh to test gossipping //by default only mesh_n_low peers will get added to mesh - let (mut gs, _, topic_hashes) = inject_nodes1() + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) .topics(vec!["topic".into()]) .to_subscribe(true) @@ -2147,16 +2257,14 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { let msg_id = gs.config.message_id(message); //check that exactly config.gossip_lazy() many gossip messages were sent. 
- assert_eq!( - count_control_msgs(&gs, |_, action| match action { - ControlAction::IHave { - topic_hash, - message_ids, - } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), - _ => false, - }), - config.gossip_lazy() - ); + let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { + RpcOut::IHave(IHave { + topic_hash, + message_ids, + }) => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), + _ => false, + }); + assert_eq!(control_msgs, config.gossip_lazy()); } #[test] @@ -2165,7 +2273,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { //add a lot of peers let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; - let (mut gs, _, topic_hashes) = inject_nodes1() + let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(m) .topics(vec!["topic".into()]) .to_subscribe(true) @@ -2191,14 +2299,15 @@ fn test_gossip_to_at_most_gossip_factor_peers() { let msg_id = gs.config.message_id(message); //check that exactly config.gossip_lazy() many gossip messages were sent. 
+ let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { + RpcOut::IHave(IHave { + topic_hash, + message_ids, + }) => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), + _ => false, + }); assert_eq!( - count_control_msgs(&gs, |_, action| match action { - ControlAction::IHave { - topic_hash, - message_ids, - } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), - _ => false, - }), + control_msgs, ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize ); } @@ -2208,7 +2317,7 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() { let config: Config = Config::default(); //enough peers to fill the mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2223,8 +2332,8 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() { assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); //create an outbound and an inbound peer - let inbound = add_peer(&mut gs, &topics, false, false); - let outbound = add_peer(&mut gs, &topics, true, false); + let (inbound, _in_reciver) = add_peer(&mut gs, &topics, false, false); + let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false); //send grafts gs.handle_graft(&inbound, vec![topics[0].clone()]); @@ -2254,7 +2363,7 @@ fn test_do_not_remove_too_many_outbound_peers() { .unwrap(); //fill the mesh with inbound connections - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2269,7 +2378,7 @@ fn test_do_not_remove_too_many_outbound_peers() { //create m outbound connections and graft (we will accept the graft) let mut outbound = HashSet::new(); for _ in 0..m { - let peer = add_peer(&mut gs, &topics, true, false); + let (peer, _) = add_peer(&mut gs, &topics, true, 
false); outbound.insert(peer); gs.handle_graft(&peer, topics.clone()); } @@ -2292,7 +2401,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { let config: Config = Config::default(); // Fill full mesh with inbound peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2304,8 +2413,9 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { } //create config.mesh_outbound_min() many outbound connections without grafting + let mut peers = vec![]; for _ in 0..config.mesh_outbound_min() { - add_peer(&mut gs, &topics, true, false); + peers.push(add_peer(&mut gs, &topics, true, false)); } // Nothing changed in the mesh yet @@ -2326,7 +2436,7 @@ fn test_prune_negative_scored_peers() { let config = Config::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2349,29 +2459,30 @@ fn test_prune_negative_scored_peers() { assert!(gs.mesh[&topics[0]].is_empty()); //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[0] && match m { - ControlAction::Prune { + RpcOut::Prune(Prune { topic_hash, peers, backoff, - } => + }) => { topic_hash == &topics[0] && //no px in this case peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), + backoff.unwrap() == config.prune_backoff().as_secs() + } _ => false, - }), - 1 - ); + } + }); + assert_eq!(control_msgs, 1); } #[test] fn test_dont_graft_to_negative_scored_peers() { let config = Config::default(); //init full mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ 
-2383,8 +2494,8 @@ fn test_dont_graft_to_negative_scored_peers() { .create_network(); //add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); + let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); //reduce score of p1 to negative gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); @@ -2410,7 +2521,7 @@ fn test_ignore_px_from_negative_scored_peer() { let config = Config::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2457,7 +2568,7 @@ fn test_only_send_nonnegative_scoring_peers_in_px() { .unwrap(); // Build mesh with three peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(3) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2483,21 +2594,22 @@ fn test_only_send_nonnegative_scoring_peers_in_px() { ); // Check that px in prune message only contains third peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + peer_id == &peers[1] && match m { - ControlAction::Prune { + RpcOut::Prune(Prune { topic_hash, peers: px, .. 
- } => + }) => { topic_hash == &topics[0] && px.len() == 1 - && px[0].peer_id.as_ref().unwrap() == &peers[2], + && px[0].peer_id.as_ref().unwrap() == &peers[2] + } _ => false, - }), - 1 - ); + } + }); + assert_eq!(control_msgs, 1); } #[test] @@ -2510,7 +2622,7 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { }; // Build full mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2524,8 +2636,10 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { } // Add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); // Reduce score of p1 below peer_score_thresholds.gossip_threshold // note that penalties get squared so two penalties means a score of @@ -2556,23 +2670,21 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { gs.emit_gossip(); // Check that exactly one gossip messages got sent and it got sent to p2 - assert_eq!( - count_control_msgs(&gs, |peer, action| match action { - ControlAction::IHave { - topic_hash, - message_ids, - } => { - if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { - assert_eq!(peer, &p2); - true - } else { - false - } + let (control_msgs, _) = count_control_msgs(receivers, |peer, action| match action { + RpcOut::IHave(IHave { + topic_hash, + message_ids, + }) => { + if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false } - _ => false, - }), - 1 - ); + } + _ => false, + }); + assert_eq!(control_msgs, 1); } #[test] @@ -2585,7 +2697,7 @@ fn 
test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { }; // Build full mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2601,8 +2713,10 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { } // Add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); // Reduce score of p1 below peer_score_thresholds.gossip_threshold // note that penalties get squared so two penalties means a score of @@ -2633,18 +2747,18 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.handle_iwant(&p2, vec![msg_id.clone()]); // the messages we are sending - let sent_messages = gs - .events - .into_iter() - .fold(vec![], |mut collected_messages, e| match e { - ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(RpcOut::Forward(message)) = event { - collected_messages.push((peer_id, message)); + let sent_messages = + receivers + .into_iter() + .fold(vec![], |mut collected_messages, (peer_id, c)| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { message, .. 
}) = non_priority.try_recv() { + collected_messages.push((peer_id, message)); + } } collected_messages - } - _ => collected_messages, - }); + }); //the message got sent to p2 assert!(sent_messages @@ -2673,7 +2787,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { ..PeerScoreThresholds::default() }; //build full mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2689,8 +2803,10 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { } //add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); //reduce score of p1 below peer_score_thresholds.gossip_threshold //note that penalties get squared so two penalties means a score of @@ -2720,19 +2836,18 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); // check that we sent exactly one IWANT request to p2 - assert_eq!( - count_control_msgs(&gs, |peer, c| match c { - ControlAction::IWant { message_ids } => - if message_ids.iter().any(|m| m == &msg_id) { - assert_eq!(peer, &p2); - true - } else { - false - }, - _ => false, - }), - 1 - ); + let (control_msgs, _) = count_control_msgs(receivers, |peer, c| match c { + RpcOut::IWant(IWant { message_ids }) => { + if message_ids.iter().any(|m| m == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + } + } + _ => false, + }); + assert_eq!(control_msgs, 1); } #[test] @@ -2749,7 +2864,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { }; //build mesh with no peers and no subscribed 
topics - let (mut gs, _, _) = inject_nodes1() + let (mut gs, _, mut receivers, _) = inject_nodes1() .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); @@ -2759,8 +2874,10 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { let topics = vec![topic.hash()]; //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); //reduce score of p1 below peer_score_thresholds.publish_threshold //note that penalties get squared so two penalties means a score of @@ -2778,17 +2895,16 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { gs.publish(topic, publish_data).unwrap(); // Collect all publish messages - let publishes = gs - .events + let publishes = receivers .into_iter() - .fold(vec![], |mut collected_publish, e| match e { - ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(RpcOut::Publish(message)) = event { + .fold(vec![], |mut collected_publish, (peer_id, c)| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. 
}) = priority.try_recv() { collected_publish.push((peer_id, message)); } - collected_publish } - _ => collected_publish, + collected_publish }); //assert only published to p2 @@ -2806,15 +2922,17 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { ..PeerScoreThresholds::default() }; //build mesh with no peers - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, mut receivers, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); //reduce score of p1 below peer_score_thresholds.publish_threshold //note that penalties get squared so two penalties means a score of @@ -2832,17 +2950,16 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { gs.publish(Topic::new("test"), publish_data).unwrap(); // Collect all publish messages - let publishes = gs - .events + let publishes = receivers .into_iter() - .fold(vec![], |mut collected_publish, e| match e { - ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(RpcOut::Publish(message)) = event { - collected_publish.push((peer_id, message)); + .fold(vec![], |mut collected_publish, (peer_id, c)| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if let Ok(RpcOut::Publish { message, .. 
}) = priority.try_recv() { + collected_publish.push((peer_id, message)) } - collected_publish } - _ => collected_publish, + collected_publish }); //assert only published to p2 @@ -2862,15 +2979,15 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }; //build mesh with no peers - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, _, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config.clone()) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); + let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); //reduce score of p1 below peer_score_thresholds.graylist_threshold //note that penalties get squared so two penalties means a score of @@ -2931,10 +3048,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { topic_hash: topics[0].clone(), }; - let control_action = ControlAction::IHave { + let control_action = ControlAction::IHave(IHave { topic_hash: topics[0].clone(), message_ids: vec![config.message_id(message2)], - }; + }); //clear events gs.events.clear(); @@ -2960,10 +3077,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { ToSwarm::GenerateEvent(Event::Subscribed { .. 
}) )); - let control_action = ControlAction::IHave { + let control_action = ControlAction::IHave(IHave { topic_hash: topics[0].clone(), message_ids: vec![config.message_id(message4)], - }; + }); //receive from p2 gs.on_connection_handler_event( @@ -2992,7 +3109,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { ..PeerScoreThresholds::default() }; // Build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3063,7 +3180,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { //build mesh with more peers than mesh can hold let n = config.mesh_n_high() + 1; - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3123,7 +3240,7 @@ fn test_scoring_p1() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3205,7 +3322,7 @@ fn test_scoring_p2() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3305,7 +3422,7 @@ fn test_scoring_p3() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3406,7 +3523,7 @@ fn test_scoring_p3b() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) 
.topics(vec!["test".into()]) .to_subscribe(true) @@ -3498,7 +3615,7 @@ fn test_scoring_p4_valid_message() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3527,8 +3644,7 @@ fn test_scoring_p4_valid_message() { &config.message_id(message1), &peers[0], MessageAcceptance::Accept, - ) - .unwrap(); + ); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); } @@ -3557,7 +3673,7 @@ fn test_scoring_p4_invalid_signature() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3615,7 +3731,7 @@ fn test_scoring_p4_message_from_self() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3665,7 +3781,7 @@ fn test_scoring_p4_ignored_message() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3694,8 +3810,7 @@ fn test_scoring_p4_ignored_message() { &config.message_id(message1), &peers[0], MessageAcceptance::Ignore, - ) - .unwrap(); + ); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); } @@ -3724,7 +3839,7 @@ fn test_scoring_p4_application_invalidated_message() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() 
.peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3753,8 +3868,7 @@ fn test_scoring_p4_application_invalidated_message() { &config.message_id(message1), &peers[0], MessageAcceptance::Reject, - ) - .unwrap(); + ); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3786,7 +3900,7 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3819,8 +3933,7 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { &config.message_id(message1), &peers[0], MessageAcceptance::Reject, - ) - .unwrap(); + ); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3856,7 +3969,7 @@ fn test_scoring_p4_three_application_invalid_messages() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3894,20 +4007,19 @@ fn test_scoring_p4_three_application_invalid_messages() { &config.message_id(message1), &peers[0], MessageAcceptance::Reject, - ) - .unwrap(); + ); + gs.report_message_validation_result( &config.message_id(message2), &peers[0], MessageAcceptance::Reject, - ) - .unwrap(); + ); + gs.report_message_validation_result( &config.message_id(message3), &peers[0], MessageAcceptance::Reject, - ) - .unwrap(); + ); //number of invalid messages gets squared assert_eq!( @@ -3940,7 +4052,7 @@ fn test_scoring_p4_decay() { let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -3968,8 +4080,7 
@@ fn test_scoring_p4_decay() { &config.message_id(message1), &peers[0], MessageAcceptance::Reject, - ) - .unwrap(); + ); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3994,7 +4105,7 @@ fn test_scoring_p5() { }; //build mesh with one peer - let (mut gs, peers, _) = inject_nodes1() + let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -4020,7 +4131,7 @@ fn test_scoring_p6() { ..Default::default() }; - let (mut gs, _, _) = inject_nodes1() + let (mut gs, _, _, _) = inject_nodes1() .peer_no(0) .topics(vec![]) .to_subscribe(false) @@ -4033,20 +4144,20 @@ fn test_scoring_p6() { //create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); let peers = vec![ - add_peer_with_addr(&mut gs, &[], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &[], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &[], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &[], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &[], true, true, addr.clone()), + add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0, + add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0, ]; //create 4 other peers with other ip let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); let others = vec![ - add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()), - add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()), + add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, + add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, + add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, + 
add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, ]; //no penalties yet @@ -4153,7 +4264,7 @@ fn test_scoring_p7_grafts_before_backoff() { ..Default::default() }; - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4230,7 +4341,7 @@ fn test_opportunistic_grafting() { ..Default::default() }; - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(5) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4259,7 +4370,7 @@ fn test_opportunistic_grafting() { } //set scores for peers in the mesh - for (i, peer) in others.iter().enumerate().take(5) { + for (i, (peer, _receiver)) in others.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } @@ -4299,7 +4410,7 @@ fn test_opportunistic_grafting() { ); assert!( - gs.mesh[&topics[0]].is_disjoint(&others.iter().cloned().take(2).collect()), + gs.mesh[&topics[0]].is_disjoint(&others.iter().map(|(p, _)| p).cloned().take(2).collect()), "peers below or equal to median should not be added in opportunistic grafting" ); } @@ -4307,19 +4418,19 @@ fn test_opportunistic_grafting() { #[test] fn test_ignore_graft_from_unknown_topic() { //build gossipsub without subscribing to any topics - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) + let (mut gs, peers, receivers, _) = inject_nodes1() + .peer_no(1) .topics(vec![]) .to_subscribe(false) .create_network(); //handle an incoming graft for some topic - gs.handle_graft(&PeerId::random(), vec![Topic::new("test").hash()]); + gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]); //assert that no prune got created + let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. })); assert_eq!( - count_control_msgs(&gs, |_, a| matches!(a, ControlAction::Prune { .. 
})), - 0, + control_msgs, 0, "we should not prune after graft in unknown topic" ); } @@ -4328,14 +4439,15 @@ fn test_ignore_graft_from_unknown_topic() { fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { let config = Config::default(); //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); + let (peer, receiver) = add_peer(&mut gs, &topics, false, false); + receivers.insert(peer, receiver); //receive a message let mut seq = 0; @@ -4349,7 +4461,7 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { gs.handle_received_message(m1, &PeerId::random()); //clear events - gs.events.clear(); + let receivers = flush_events(&mut gs, receivers); //the first gossip_retransimission many iwants return the valid message, all others are // ignored. @@ -4358,16 +4470,15 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { } assert_eq!( - gs.events - .iter() - .filter(|e| matches!( - e, - ToSwarm::NotifyHandler { - event: HandlerIn::Message(RpcOut::Forward(_)), - .. + receivers.into_values().fold(0, |mut fwds, c| { + let non_priority = c.non_priority.get_ref(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { .. 
}) = non_priority.try_recv() { + fwds += 1; } - )) - .count(), + } + fwds + }), config.gossip_retransimission() as usize, "not more then gossip_retransmission many messages get sent back" ); @@ -4380,7 +4491,7 @@ fn test_ignore_too_many_ihaves() { .build() .unwrap(); //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4388,7 +4499,8 @@ fn test_ignore_too_many_ihaves() { .create_network(); //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); + let (peer, receiver) = add_peer(&mut gs, &topics, false, false); + receivers.insert(peer, receiver); //peer has 20 messages let mut seq = 0; @@ -4416,15 +4528,18 @@ fn test_ignore_too_many_ihaves() { .collect(); //we send iwant only for the first 10 messages + let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| { + p == &peer + && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0])) + }); assert_eq!( - count_control_msgs(&gs, |p, action| p == &peer - && matches!(action, ControlAction::IWant { message_ids } if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))), - 10, + control_msgs, 10, "exactly the first ten ihaves should be processed and one iwant for each created" ); //after a heartbeat everything is forgotten gs.heartbeat(); + for raw_message in messages[10..].iter() { // Transform the inbound message let message = &gs @@ -4438,13 +4553,12 @@ fn test_ignore_too_many_ihaves() { ); } - //we sent iwant for all 20 messages - assert_eq!( - count_control_msgs(&gs, |p, action| p == &peer - && matches!(action, ControlAction::IWant { message_ids } if message_ids.len() == 1)), - 20, - "all 20 should get sent" - ); + //we sent iwant for all 10 messages + let (control_msgs, _) = count_control_msgs(receivers, |p, action| { + p == 
&peer + && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1) + }); + assert_eq!(control_msgs, 10, "all 20 should get sent"); } #[test] @@ -4455,7 +4569,7 @@ fn test_ignore_too_many_messages_in_ihave() { .build() .unwrap(); //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4463,7 +4577,8 @@ fn test_ignore_too_many_messages_in_ihave() { .create_network(); //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); + let (peer, receiver) = add_peer(&mut gs, &topics, false, false); + receivers.insert(peer, receiver); //peer has 20 messages let mut seq = 0; @@ -4488,17 +4603,18 @@ fn test_ignore_too_many_messages_in_ihave() { //we send iwant only for the first 10 messages let mut sum = 0; + let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc { + RpcOut::IWant(IWant { message_ids }) => { + p == &peer && { + assert!(first_twelve.is_superset(&message_ids.iter().collect())); + sum += message_ids.len(); + true + } + } + _ => false, + }); assert_eq!( - count_control_msgs(&gs, |p, action| match action { - ControlAction::IWant { message_ids } => - p == &peer && { - assert!(first_twelve.is_superset(&message_ids.iter().collect())); - sum += message_ids.len(); - true - }, - _ => false, - }), - 2, + control_msgs, 2, "the third ihave should get ignored and no iwant sent" ); @@ -4511,20 +4627,19 @@ fn test_ignore_too_many_messages_in_ihave() { vec![(topics[0].clone(), message_ids[10..20].to_vec())], ); - //we sent 20 iwant messages + //we sent 10 iwant messages ids via a IWANT rpc. 
let mut sum = 0; - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - ControlAction::IWant { message_ids } => - p == &peer && { - sum += message_ids.len(); - true - }, - _ => false, - }), - 3 - ); - assert_eq!(sum, 20, "exactly 20 iwants should get sent"); + let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc { + RpcOut::IWant(IWant { message_ids }) => { + p == &peer && { + sum += message_ids.len(); + true + } + } + _ => false, + }); + assert_eq!(control_msgs, 1); + assert_eq!(sum, 10, "exactly 20 iwants should get sent"); } #[test] @@ -4535,7 +4650,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { .build() .unwrap(); //build gossipsub with full mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4548,8 +4663,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { } //add two other peers not in the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p1, receiver1); + let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); + receivers.insert(p2, receiver2); //receive 200 messages from another peer let mut seq = 0; @@ -4567,22 +4684,22 @@ fn test_limit_number_of_message_ids_inside_ihave() { let mut ihaves1 = HashSet::new(); let mut ihaves2 = HashSet::new(); - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - ControlAction::IHave { message_ids, .. } => { - if p == &p1 { - ihaves1 = message_ids.iter().cloned().collect(); - true - } else if p == &p2 { - ihaves2 = message_ids.iter().cloned().collect(); - true - } else { - false - } + let (control_msgs, _) = count_control_msgs(receivers, |p, action| match action { + RpcOut::IHave(IHave { message_ids, .. 
}) => { + if p == &p1 { + ihaves1 = message_ids.iter().cloned().collect(); + true + } else if p == &p2 { + ihaves2 = message_ids.iter().cloned().collect(); + true + } else { + false } - _ => false, - }), - 2, + } + _ => false, + }); + assert_eq!( + control_msgs, 2, "should have emitted one ihave to p1 and one to p2" ); @@ -4610,11 +4727,12 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { + /* use tracing_subscriber::EnvFilter; let _ = tracing_subscriber::fmt() .with_env_filter(EnvFilter::from_default_env()) .try_init(); - + */ let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) .build() @@ -4625,7 +4743,7 @@ fn test_iwant_penalties() { }; // fill the mesh - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4649,7 +4767,7 @@ fn test_iwant_penalties() { let mut first_messages = Vec::new(); let mut second_messages = Vec::new(); let mut seq = 0; - for peer in &other_peers { + for (peer, _receiver) in &other_peers { let msg1 = random_message(&mut seq, &topics); let msg2 = random_message(&mut seq, &topics); @@ -4672,19 +4790,19 @@ fn test_iwant_penalties() { } // the peers send us all the first message ids in time - for (index, peer) in other_peers.iter().enumerate() { + for (index, (peer, _receiver)) in other_peers.iter().enumerate() { gs.handle_received_message(first_messages[index].clone(), peer); } // now we do a heartbeat no penalization should have been applied yet gs.heartbeat(); - for peer in &other_peers { + for (peer, _receiver) in &other_peers { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); } // receive the first twenty of the other peers then send their response - for (index, peer) in other_peers.iter().enumerate().take(20) { + for (index, (peer, _receiver)) in other_peers.iter().enumerate().take(20) { 
gs.handle_received_message(second_messages[index].clone(), peer); } @@ -4695,7 +4813,7 @@ fn test_iwant_penalties() { gs.heartbeat(); // now we get the second messages from the last 80 peers. - for (index, peer) in other_peers.iter().enumerate() { + for (index, (peer, _receiver)) in other_peers.iter().enumerate() { if index > 19 { gs.handle_received_message(second_messages[index].clone(), peer); } @@ -4709,7 +4827,7 @@ fn test_iwant_penalties() { let mut single_penalized = 0; let mut double_penalized = 0; - for (i, peer) in other_peers.iter().enumerate() { + for (i, (peer, _receiver)) in other_peers.iter().enumerate() { let score = gs.peer_score.as_ref().unwrap().0.score(peer); if score == 0.0 { not_penalized += 1; @@ -4737,7 +4855,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .flood_publish(false) .build() .unwrap(); - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_low() - 1) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4745,7 +4863,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .create_network(); //add two floodsub peer, one explicit, one implicit - let p1 = add_peer_with_addr_and_kind( + let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, false, @@ -4753,7 +4871,11 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + receivers.insert(p1, receiver1); + + let (p2, receiver2) = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + receivers.insert(p2, receiver2); //p1 and p2 are not in the mesh assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); @@ -4763,24 +4885,22 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { gs.publish(Topic::new("test"), publish_data).unwrap(); // Collect publish 
messages to floodsub peers - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - ToSwarm::NotifyHandler { peer_id, event, .. } => { - if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(RpcOut::Publish(message)) = event { - collected_publish.push(message); - } + let publishes = receivers + .into_iter() + .fold(0, |mut collected_publish, (peer_id, c)| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if matches!(priority.try_recv(), + Ok(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) + { + collected_publish += 1; } - collected_publish } - _ => collected_publish, + collected_publish }); assert_eq!( - publishes.len(), - 2, + publishes, 2, "Should send a publish message to all floodsub peers" ); } @@ -4791,7 +4911,7 @@ fn test_do_not_use_floodsub_in_fanout() { .flood_publish(false) .build() .unwrap(); - let (mut gs, _, _) = inject_nodes1() + let (mut gs, _, mut receivers, _) = inject_nodes1() .peer_no(config.mesh_n_low() - 1) .topics(Vec::new()) .to_subscribe(false) @@ -4802,7 +4922,7 @@ fn test_do_not_use_floodsub_in_fanout() { let topics = vec![topic.hash()]; //add two floodsub peer, one explicit, one implicit - let p1 = add_peer_with_addr_and_kind( + let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, false, @@ -4810,31 +4930,33 @@ fn test_do_not_use_floodsub_in_fanout() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + receivers.insert(p1, receiver1); + let (p2, receiver2) = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + receivers.insert(p2, receiver2); //publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); // Collect publish messages to floodsub peers - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - 
ToSwarm::NotifyHandler { peer_id, event, .. } => { - if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(RpcOut::Publish(message)) = event { - collected_publish.push(message); - } + let publishes = receivers + .into_iter() + .fold(0, |mut collected_publish, (peer_id, c)| { + let priority = c.priority.get_ref(); + while !priority.is_empty() { + if matches!(priority.try_recv(), + Ok(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) + { + collected_publish += 1; } - collected_publish } - _ => collected_publish, + collected_publish }); assert_eq!( - publishes.len(), - 2, + publishes, 2, "Should send a publish message to all floodsub peers" ); @@ -4846,7 +4968,7 @@ fn test_do_not_use_floodsub_in_fanout() { #[test] fn test_dont_add_floodsub_peers_to_mesh_on_join() { - let (mut gs, _, _) = inject_nodes1() + let (mut gs, _, _, _) = inject_nodes1() .peer_no(0) .topics(Vec::new()) .to_subscribe(false) @@ -4876,14 +4998,14 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() { #[test] fn test_dont_send_px_to_old_gossipsub_peers() { - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, receivers, topics) = inject_nodes1() .peer_no(0) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); //add an old gossipsub peer - let p1 = add_peer_with_addr_and_kind( + let (p1, _receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, false, @@ -4900,20 +5022,17 @@ fn test_dont_send_px_to_old_gossipsub_peers() { ); //check that prune does not contain px - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - ControlAction::Prune { peers: px, .. } => !px.is_empty(), - _ => false, - }), - 0, - "Should not send px to floodsub peers" - ); + let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { + RpcOut::Prune(Prune { peers: px, .. 
}) => !px.is_empty(), + _ => false, + }); + assert_eq!(control_msgs, 0, "Should not send px to floodsub peers"); } #[test] fn test_dont_send_floodsub_peers_in_px() { //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() + let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -4938,19 +5057,16 @@ fn test_dont_send_floodsub_peers_in_px() { ); //check that px in prune message is empty - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - ControlAction::Prune { peers: px, .. } => !px.is_empty(), - _ => false, - }), - 0, - "Should not include floodsub peers in px" - ); + let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { + RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), + _ => false, + }); + assert_eq!(control_msgs, 0, "Should not include floodsub peers in px"); } #[test] fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { - let (mut gs, _, topics) = inject_nodes1() + let (mut gs, _, _, topics) = inject_nodes1() .peer_no(0) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4978,7 +5094,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { // Some very basic test of public api methods. 
#[test] fn test_public_api() { - let (gs, peers, topic_hashes) = inject_nodes1() + let (gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(4) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5010,7 +5126,7 @@ fn test_public_api() { fn test_subscribe_to_invalid_topic() { let t1 = Topic::new("t1"); let t2 = Topic::new("t2"); - let (mut gs, _, _) = inject_nodes::() + let (mut gs, _, _, _) = inject_nodes::() .subscription_filter(WhitelistSubscriptionFilter( vec![t1.hash()].into_iter().collect(), )) @@ -5024,7 +5140,7 @@ fn test_subscribe_to_invalid_topic() { #[test] fn test_subscribe_and_graft_with_negative_score() { //simulate a communication between two gossipsub instances - let (mut gs1, _, topic_hashes) = inject_nodes1() + let (mut gs1, _, _, topic_hashes) = inject_nodes1() .topics(vec!["test".into()]) .scoring(Some(( PeerScoreParams::default(), @@ -5032,14 +5148,14 @@ fn test_subscribe_and_graft_with_negative_score() { ))) .create_network(); - let (mut gs2, _, _) = inject_nodes1().create_network(); + let (mut gs2, _, receivers, _) = inject_nodes1().create_network(); let connection_id = ConnectionId::new_unchecked(0); let topic = Topic::new("test"); - let p2 = add_peer(&mut gs1, &Vec::new(), true, false); - let p1 = add_peer(&mut gs2, &topic_hashes, false, false); + let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false); + let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false); //add penalty to peer p2 gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); @@ -5049,43 +5165,41 @@ fn test_subscribe_and_graft_with_negative_score() { //subscribe to topic in gs2 gs2.subscribe(&topic).unwrap(); - let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, gs2: &mut Behaviour<_, _>| { - //collect messages to p1 - let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { - ToSwarm::NotifyHandler { peer_id, event, .. 
} => { - if peer_id == p1 { - if let HandlerIn::Message(m) = event { - Some(m) - } else { - None - } - } else { - None + let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, + p1: PeerId, + p2: PeerId, + connection_id: ConnectionId, + receivers: HashMap| + -> HashMap { + let new_receivers = HashMap::new(); + for (peer_id, receiver) in receivers.into_iter() { + let non_priority = receiver.non_priority.get_ref(); + match non_priority.try_recv() { + Ok(rpc) if peer_id == p1 => { + gs1.on_connection_handler_event( + p2, + connection_id, + HandlerEvent::Message { + rpc: proto_to_message(&rpc.into_protobuf()), + invalid_messages: vec![], + }, + ); } + _ => {} } - _ => None, - }); - for message in messages_to_p1 { - gs1.on_connection_handler_event( - p2, - connection_id, - HandlerEvent::Message { - rpc: proto_to_message(&message.into_protobuf()), - invalid_messages: vec![], - }, - ); } + new_receivers }; //forward the subscribe message - forward_messages_to_p1(&mut gs1, &mut gs2); + let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); //heartbeats on both gs1.heartbeat(); gs2.heartbeat(); //forward messages again - forward_messages_to_p1(&mut gs1, &mut gs2); + forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); //nobody got penalized assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); @@ -5102,7 +5216,7 @@ fn test_graft_without_subscribe() { let topic = String::from("test_subscribe"); let subscribe_topic = vec![topic.clone()]; let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()]; - let (mut gs, peers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _, topic_hashes) = inject_nodes1() .peer_no(1) .topics(subscribe_topic) .to_subscribe(false) @@ -5122,3 +5236,473 @@ fn test_graft_without_subscribe() { // We unsubscribe from the topic. 
let _ = gs.unsubscribe(&Topic::new(topic)); } + +#[test] +fn test_all_queues_full() { + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(2), + }, + ); + + let publish_data = vec![0; 42]; + gs.publish(topic_hash.clone(), publish_data.clone()) + .unwrap(); + let publish_data = vec![2; 59]; + let err = gs.publish(topic_hash, publish_data).unwrap_err(); + assert!(matches!(err, PublishError::AllQueuesFull(f) if f == 1)); +} + +#[test] +fn test_slow_peer_returns_failed_publish() { + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + let slow_peer_id = PeerId::random(); + peers.push(slow_peer_id); + gs.connected_peers.insert( + slow_peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(2), + }, + ); + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(gs.config.connection_handler_queue_len()), + }, + ); + + let 
publish_data = vec![0; 42]; + gs.publish(topic_hash.clone(), publish_data.clone()) + .unwrap(); + let publish_data = vec![2; 59]; + gs.publish(topic_hash.clone(), publish_data).unwrap(); + gs.heartbeat(); + + gs.heartbeat(); + + let slow_peer_failed_messages = match gs.events.pop_front().unwrap() { + ToSwarm::GenerateEvent(Event::SlowPeer { + peer_id, + failed_messages, + }) if peer_id == slow_peer_id => failed_messages, + _ => panic!("invalid event"), + }; + + let failed_messages = FailedMessages { + publish: 1, + forward: 0, + priority: 1, + non_priority: 0, + timeout: 0, + }; + + assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); + assert_eq!( + slow_peer_failed_messages.non_priority, + failed_messages.non_priority + ); + assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); + assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); +} + +#[test] +fn test_slow_peer_returns_failed_ihave_handling() { + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + let slow_peer_id = PeerId::random(); + peers.push(slow_peer_id); + gs.connected_peers.insert( + slow_peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(2), + }, + ); + peers.push(slow_peer_id); + let mesh = gs.mesh.entry(topic_hash.clone()).or_default(); + mesh.insert(slow_peer_id); + + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: 
Sender::new(gs.config.connection_handler_queue_len()), + }, + ); + + let publish_data = vec![1; 59]; + let transformed = gs + .data_transform + .outbound_transform(&topic_hash, publish_data.clone()) + .unwrap(); + let raw_message = gs + .build_raw_message(topic_hash.clone(), transformed) + .unwrap(); + let msg_id = gs.config.message_id(&Message { + source: raw_message.source, + data: publish_data, + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); + + gs.handle_ihave( + &slow_peer_id, + vec![(topic_hash.clone(), vec![msg_id.clone()])], + ); + gs.handle_ihave(&slow_peer_id, vec![(topic_hash, vec![msg_id.clone()])]); + + gs.heartbeat(); + + let slow_peer_failed_messages = gs + .events + .into_iter() + .find_map(|e| match e { + ToSwarm::GenerateEvent(Event::SlowPeer { + peer_id, + failed_messages, + }) if peer_id == slow_peer_id => Some(failed_messages), + _ => None, + }) + .unwrap(); + + let failed_messages = FailedMessages { + publish: 0, + forward: 0, + priority: 0, + non_priority: 1, + timeout: 0, + }; + + assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); + assert_eq!( + slow_peer_failed_messages.non_priority, + failed_messages.non_priority + ); + assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); + assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); +} + +#[test] +fn test_slow_peer_returns_failed_iwant_handling() { + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + let slow_peer_id = PeerId::random(); + peers.push(slow_peer_id); + gs.connected_peers.insert( + slow_peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: 
vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(2), + }, + ); + peers.push(slow_peer_id); + let mesh = gs.mesh.entry(topic_hash.clone()).or_default(); + mesh.insert(slow_peer_id); + + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(gs.config.connection_handler_queue_len()), + }, + ); + + let publish_data = vec![1; 59]; + let transformed = gs + .data_transform + .outbound_transform(&topic_hash, publish_data.clone()) + .unwrap(); + let raw_message = gs + .build_raw_message(topic_hash.clone(), transformed) + .unwrap(); + let msg_id = gs.config.message_id(&Message { + source: raw_message.source, + data: publish_data, + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); + + gs.mcache.put(&msg_id, raw_message); + gs.handle_iwant(&slow_peer_id, vec![msg_id.clone(), msg_id]); + + gs.heartbeat(); + + let slow_peer_failed_messages = gs + .events + .into_iter() + .find_map(|e| match e { + ToSwarm::GenerateEvent(Event::SlowPeer { + peer_id, + failed_messages, + }) if peer_id == slow_peer_id => Some(failed_messages), + _ => None, + }) + .unwrap(); + + let failed_messages = FailedMessages { + publish: 0, + forward: 1, + priority: 0, + non_priority: 1, + timeout: 0, + }; + + assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); + assert_eq!( + slow_peer_failed_messages.non_priority, + failed_messages.non_priority + ); + assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); + assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); +} + +#[test] +fn test_slow_peer_returns_failed_forward() { + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = 
Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + let slow_peer_id = PeerId::random(); + peers.push(slow_peer_id); + gs.connected_peers.insert( + slow_peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(2), + }, + ); + peers.push(slow_peer_id); + let mesh = gs.mesh.entry(topic_hash.clone()).or_default(); + mesh.insert(slow_peer_id); + + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(gs.config.connection_handler_queue_len()), + }, + ); + + let publish_data = vec![1; 59]; + let transformed = gs + .data_transform + .outbound_transform(&topic_hash, publish_data.clone()) + .unwrap(); + let raw_message = gs + .build_raw_message(topic_hash.clone(), transformed) + .unwrap(); + let msg_id = gs.config.message_id(&Message { + source: raw_message.source, + data: publish_data, + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); + + gs.forward_msg(&msg_id, raw_message.clone(), None, HashSet::new()); + gs.forward_msg(&msg_id, raw_message, None, HashSet::new()); + + gs.heartbeat(); + + let slow_peer_failed_messages = gs + .events + .into_iter() + .find_map(|e| match e { + ToSwarm::GenerateEvent(Event::SlowPeer { + peer_id, + failed_messages, + }) if peer_id == slow_peer_id => Some(failed_messages), + _ => None, + }) + .unwrap(); + + let failed_messages = FailedMessages { + publish: 0, + forward: 1, + priority: 0, + non_priority: 1, + timeout: 0, + }; + + assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); + assert_eq!( + 
slow_peer_failed_messages.non_priority, + failed_messages.non_priority + ); + assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); + assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); +} + +#[test] +fn test_slow_peer_is_downscored_on_publish() { + let gs_config = ConfigBuilder::default() + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + let slow_peer_params = PeerScoreParams::default(); + gs.with_peer_score(slow_peer_params.clone(), PeerScoreThresholds::default()) + .unwrap(); + + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + let mut topics = BTreeSet::new(); + topics.insert(topic_hash.clone()); + + let slow_peer_id = PeerId::random(); + peers.push(slow_peer_id); + let mesh = gs.mesh.entry(topic_hash.clone()).or_default(); + mesh.insert(slow_peer_id); + gs.connected_peers.insert( + slow_peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(2), + }, + ); + gs.peer_score.as_mut().unwrap().0.add_peer(slow_peer_id); + let peer_id = PeerId::random(); + peers.push(peer_id); + gs.connected_peers.insert( + peer_id, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new_unchecked(0)], + topics: topics.clone(), + sender: Sender::new(gs.config.connection_handler_queue_len()), + }, + ); + + let publish_data = vec![0; 42]; + gs.publish(topic_hash.clone(), publish_data.clone()) + .unwrap(); + let publish_data = vec![2; 59]; + gs.publish(topic_hash.clone(), publish_data).unwrap(); + gs.heartbeat(); + let slow_peer_score = gs.peer_score(&slow_peer_id).unwrap(); + assert_eq!(slow_peer_score, slow_peer_params.slow_peer_weight); +} + +#[tokio::test] +async fn test_timedout_messages_are_reported() { + let gs_config = ConfigBuilder::default() + 
.validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + + let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); + + let sender = Sender::new(2); + let topic_hash = Topic::new("Test").hash(); + let publish_data = vec![2; 59]; + let raw_message = gs.build_raw_message(topic_hash, publish_data).unwrap(); + + sender + .send_message(RpcOut::Publish { + message: raw_message, + timeout: Delay::new(Duration::from_nanos(1)), + }) + .unwrap(); + let mut receiver = sender.new_receiver(); + let stale = future::poll_fn(|cx| receiver.poll_stale(cx)).await.unwrap(); + assert!(matches!(stale, RpcOut::Publish { .. })); +} + +#[test] +fn test_priority_messages_are_always_sent() { + let sender = Sender::new(2); + let topic_hash = Topic::new("Test").hash(); + // Fill the buffer with the first message. + assert!(sender + .send_message(RpcOut::Subscribe(topic_hash.clone())) + .is_ok()); + assert!(sender + .send_message(RpcOut::Subscribe(topic_hash.clone())) + .is_ok()); + assert!(sender.send_message(RpcOut::Unsubscribe(topic_hash)).is_ok()); +} diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 1ee2e940661..6e7861bae10 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -95,6 +95,9 @@ pub struct Config { max_ihave_messages: usize, iwant_followup_time: Duration, published_message_ids_cache_time: Duration, + connection_handler_queue_len: usize, + connection_handler_publish_duration: Duration, + connection_handler_forward_duration: Duration, } impl Config { @@ -351,6 +354,23 @@ impl Config { pub fn published_message_ids_cache_time(&self) -> Duration { self.published_message_ids_cache_time } + + /// The max number of messages a `ConnectionHandler` can buffer. The default is 5000. 
+ pub fn connection_handler_queue_len(&self) -> usize { + self.connection_handler_queue_len + } + + /// The duration a message to be published can wait to be sent before it is abandoned. The + /// default is 5 seconds. + pub fn publish_queue_duration(&self) -> Duration { + self.connection_handler_publish_duration + } + + /// The duration a message to be forwarded can wait to be sent before it is abandoned. The + /// default is 1s. + pub fn forward_queue_duration(&self) -> Duration { + self.connection_handler_forward_duration + } } impl Default for Config { @@ -418,6 +438,9 @@ impl Default for ConfigBuilder { max_ihave_messages: 10, iwant_followup_time: Duration::from_secs(3), published_message_ids_cache_time: Duration::from_secs(10), + connection_handler_queue_len: 5000, + connection_handler_publish_duration: Duration::from_secs(5), + connection_handler_forward_duration: Duration::from_secs(1), }, invalid_protocol: false, } @@ -783,6 +806,26 @@ impl ConfigBuilder { self } + /// The max number of messages a `ConnectionHandler` can buffer. The default is 5000. + pub fn connection_handler_queue_len(&mut self, len: usize) -> &mut Self { + self.config.connection_handler_queue_len = len; + self + } + + /// The duration a message to be published can wait to be sent before it is abandoned. The + /// default is 5 seconds. + pub fn publish_queue_duration(&mut self, duration: Duration) -> &mut Self { + self.config.connection_handler_publish_duration = duration; + self + } + + /// The duration a message to be forwarded can wait to be sent before it is abandoned. The + /// default is 1s. + pub fn forward_queue_duration(&mut self, duration: Duration) -> &mut Self { + self.config.connection_handler_forward_duration = duration; + self + } + /// Constructs a [`Config`] from the given configuration and validates the settings. 
pub fn build(&self) -> Result { // check all constraints on config diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 8761630467b..047d50f2338 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -36,6 +36,9 @@ pub enum PublishError { MessageTooLarge, /// The compression algorithm failed. TransformFailed(std::io::Error), + /// Messages could not be sent because the queues for all peers were full. The usize represents the + /// number of peers that were attempted. + AllQueuesFull(usize), } impl std::fmt::Display for PublishError { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 0ccea667268..5f9669c02c2 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -19,6 +19,7 @@ // DEALINGS IN THE SOFTWARE. use crate::protocol::{GossipsubCodec, ProtocolConfig}; +use crate::rpc::Receiver; use crate::rpc_proto::proto; use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; use crate::ValidationError; @@ -32,7 +33,6 @@ use libp2p_swarm::handler::{ FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; use libp2p_swarm::Stream; -use smallvec::SmallVec; use std::{ pin::Pin, task::{Context, Poll}, @@ -55,14 +55,14 @@ pub enum HandlerEvent { /// An inbound or outbound substream has been established with the peer and this informs over /// which protocol. This message only occurs once per connection. PeerKind(PeerKind), + /// A message to be published was dropped because it could not be sent in time. + MessageDropped(RpcOut), } /// A message sent from the behaviour to the handler. #[allow(clippy::large_enum_variant)] #[derive(Debug)] pub enum HandlerIn { - /// A gossipsub message to send. - Message(RpcOut), /// The peer has joined the mesh. JoinedMesh, /// The peer has left the mesh. @@ -94,8 +94,8 @@ pub struct EnabledHandler { /// The single long-lived inbound substream. 
inbound_substream: Option, - /// Queue of values that we want to send to the remote. - send_queue: SmallVec<[proto::RPC; 16]>, + /// Queue of values that we want to send to the remote + send_queue: Receiver, /// Flag indicating that an outbound substream is being established to prevent duplicate /// requests. @@ -159,7 +159,7 @@ enum OutboundSubstreamState { impl Handler { /// Builds a new [`Handler`]. - pub fn new(protocol_config: ProtocolConfig) -> Self { + pub fn new(protocol_config: ProtocolConfig, message_queue: Receiver) -> Self { Handler::Enabled(EnabledHandler { listen_protocol: protocol_config, inbound_substream: None, @@ -167,7 +167,7 @@ impl Handler { outbound_substream_establishing: false, outbound_substream_attempts: 0, inbound_substream_attempts: 0, - send_queue: SmallVec::new(), + send_queue: message_queue, peer_kind: None, peer_kind_sent: false, last_io_activity: Instant::now(), @@ -232,7 +232,7 @@ impl EnabledHandler { } // determine if we need to create the outbound stream - if !self.send_queue.is_empty() + if !self.send_queue.poll_is_empty(cx) && self.outbound_substream.is_none() && !self.outbound_substream_establishing { @@ -250,10 +250,31 @@ impl EnabledHandler { ) { // outbound idle state Some(OutboundSubstreamState::WaitingOutput(substream)) => { - if let Some(message) = self.send_queue.pop() { - self.send_queue.shrink_to_fit(); - self.outbound_substream = - Some(OutboundSubstreamState::PendingSend(substream, message)); + if let Poll::Ready(Some(mut message)) = self.send_queue.poll_next_unpin(cx) { + match message { + RpcOut::Publish { + message: _, + ref mut timeout, + } + | RpcOut::Forward { + message: _, + ref mut timeout, + } => { + if Pin::new(timeout).poll(cx).is_ready() { + // Inform the behaviour and end the poll. 
+ self.outbound_substream = + Some(OutboundSubstreamState::WaitingOutput(substream)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::MessageDropped(message), + )); + } + } + _ => {} // All other messages are not time-bound. + } + self.outbound_substream = Some(OutboundSubstreamState::PendingSend( + substream, + message.into_protobuf(), + )); continue; } @@ -319,6 +340,7 @@ impl EnabledHandler { } } + // Handle inbound messages. loop { match std::mem::replace( &mut self.inbound_substream, @@ -383,6 +405,13 @@ impl EnabledHandler { } } + // Drop the next message in queue if it's stale. + if let Poll::Ready(Some(rpc)) = self.send_queue.poll_stale(cx) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::MessageDropped(rpc), + )); + } + Poll::Pending } } @@ -409,7 +438,6 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, message: HandlerIn) { match self { Handler::Enabled(handler) => match message { - HandlerIn::Message(m) => handler.send_queue.push(m.into_protobuf()), HandlerIn::JoinedMesh => { handler.in_mesh = true; } diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 3db2fa7ce51..f6a51da4a51 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -103,6 +103,7 @@ mod mcache; mod metrics; mod peer_score; mod protocol; +mod rpc; mod rpc_proto; mod subscription_filter; mod time_cache; @@ -125,7 +126,7 @@ pub use self::subscription_filter::{ }; pub use self::topic::{Hasher, Topic, TopicHash}; pub use self::transform::{DataTransform, IdentityTransform}; -pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage}; +pub use self::types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}; #[deprecated(note = "Will be removed from the public API.")] pub type Rpc = self::types::Rpc; diff --git a/protocols/gossipsub/src/metrics.rs b/protocols/gossipsub/src/metrics.rs index 7d4acada3c7..40af1af2cac 100644 --- 
a/protocols/gossipsub/src/metrics.rs +++ b/protocols/gossipsub/src/metrics.rs @@ -127,6 +127,12 @@ pub(crate) struct Metrics { ignored_messages: Family, /// The number of messages rejected by the application (validation result). rejected_messages: Family, + /// The number of publish messages dropped by the sender. + publish_messages_dropped: Family, + /// The number of forward messages dropped by the sender. + forward_messages_dropped: Family, + /// The number of messages that timed out and could not be sent. + timedout_messages_dropped: Family, /* Metrics regarding mesh state */ /// Number of peers in our mesh. This metric should be updated with the count of peers for a @@ -174,6 +180,11 @@ pub(crate) struct Metrics { /// The number of times we have decided that an IWANT control message is required for this /// topic. A very high metric might indicate an underperforming network. topic_iwant_msgs: Family, + + /// The size of the priority queue. + priority_queue_size: Histogram, + /// The size of the non-priority queue. 
+ non_priority_queue_size: Histogram, } impl Metrics { @@ -222,6 +233,21 @@ impl Metrics { "Number of rejected messages received for each topic" ); + let publish_messages_dropped = register_family!( + "publish_messages_dropped_per_topic", + "Number of publish messages dropped per topic" + ); + + let forward_messages_dropped = register_family!( + "forward_messages_dropped_per_topic", + "Number of forward messages dropped per topic" + ); + + let timedout_messages_dropped = register_family!( + "timedout_messages_dropped_per_topic", + "Number of timedout messages dropped per topic" + ); + let mesh_peer_counts = register_family!( "mesh_peer_counts", "Number of peers in each topic in our mesh" @@ -302,6 +328,20 @@ impl Metrics { metric }; + let priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100)); + registry.register( + "priority_queue_size", + "Histogram of observed priority queue sizes", + priority_queue_size.clone(), + ); + + let non_priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100)); + registry.register( + "non_priority_queue_size", + "Histogram of observed non-priority queue sizes", + non_priority_queue_size.clone(), + ); + Self { max_topics, max_never_subscribed_topics, @@ -312,6 +352,9 @@ impl Metrics { accepted_messages, ignored_messages, rejected_messages, + publish_messages_dropped, + forward_messages_dropped, + timedout_messages_dropped, mesh_peer_counts, mesh_peer_inclusion_events, mesh_peer_churn_events, @@ -327,6 +370,8 @@ impl Metrics { heartbeat_duration, memcache_misses, topic_iwant_msgs, + priority_queue_size, + non_priority_queue_size, } } @@ -457,6 +502,27 @@ impl Metrics { } } + /// Register dropping a Publish message over a topic. + pub(crate) fn publish_msg_dropped(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.publish_messages_dropped.get_or_create(topic).inc(); + } + } + + /// Register dropping a Forward message over a topic. 
+ pub(crate) fn forward_msg_dropped(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.forward_messages_dropped.get_or_create(topic).inc(); + } + } + + /// Register dropping a message that timedout over a topic. + pub(crate) fn timeout_msg_dropped(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.timedout_messages_dropped.get_or_create(topic).inc(); + } + } + /// Register that a message was received (and was not a duplicate). pub(crate) fn msg_recvd(&mut self, topic: &TopicHash) { if self.register_topic(topic).is_ok() { @@ -507,6 +573,16 @@ impl Metrics { self.heartbeat_duration.observe(millis as f64); } + /// Observes a priority queue size. + pub(crate) fn observe_priority_queue_size(&mut self, len: usize) { + self.priority_queue_size.observe(len as f64); + } + + /// Observes a non-priority queue size. + pub(crate) fn observe_non_priority_queue_size(&mut self, len: usize) { + self.non_priority_queue_size.observe(len as f64); + } + /// Observe a score of a mesh peer. pub(crate) fn observe_mesh_peers_score(&mut self, topic: &TopicHash, score: f64) { if self.register_topic(topic).is_ok() { diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index 4df8f162ed9..e8d1a6e5f97 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -68,6 +68,8 @@ struct PeerStats { behaviour_penalty: f64, /// Application specific score. Can be manipulated by calling PeerScore::set_application_score application_score: f64, + /// Scoring based on how whether this peer consumes messages fast enough or not. + slow_peer_penalty: f64, } enum ConnectionStatus { @@ -88,6 +90,7 @@ impl Default for PeerStats { known_ips: HashSet::new(), behaviour_penalty: 0f64, application_score: 0f64, + slow_peer_penalty: 0f64, } } } @@ -334,12 +337,19 @@ impl PeerScore { } } - // P7: behavioural pattern penalty + // P7: behavioural pattern penalty. 
if peer_stats.behaviour_penalty > self.params.behaviour_penalty_threshold { let excess = peer_stats.behaviour_penalty - self.params.behaviour_penalty_threshold; let p7 = excess * excess; score += p7 * self.params.behaviour_penalty_weight; } + + // Slow peer weighting. + if peer_stats.slow_peer_penalty > self.params.slow_peer_threshold { + let excess = peer_stats.slow_peer_penalty - self.params.slow_peer_threshold; + score += excess * self.params.slow_peer_weight; + } + score } @@ -429,6 +439,13 @@ impl PeerScore { if peer_stats.behaviour_penalty < params_ref.decay_to_zero { peer_stats.behaviour_penalty = 0.0; } + + // decay slow peer score + peer_stats.slow_peer_penalty *= params_ref.slow_peer_decay; + if peer_stats.slow_peer_penalty < params_ref.decay_to_zero { + peer_stats.slow_peer_penalty = 0.0; + } + true }); } @@ -456,6 +473,14 @@ impl PeerScore { self.peer_ips.entry(ip).or_default().insert(*peer_id); } + /// Indicate that a peer has been too slow to consume a message. + pub(crate) fn failed_message_slow_peer(&mut self, peer_id: &PeerId) { + if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { + peer_stats.slow_peer_penalty += 1.0; + tracing::debug!(peer=%peer_id, %peer_stats.slow_peer_penalty, "[Penalty] Expired message penalty."); + } + } + /// Removes an ip from a peer pub(crate) fn remove_ip(&mut self, peer_id: &PeerId, ip: &IpAddr) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs index 8c7fdb9bd35..ae70991f7fb 100644 --- a/protocols/gossipsub/src/peer_score/params.rs +++ b/protocols/gossipsub/src/peer_score/params.rs @@ -148,6 +148,13 @@ pub struct PeerScoreParams { /// Time to remember counters for a disconnected peer. pub retain_score: Duration, + + /// Slow peer penalty conditions, + /// by default `slow_peer_weight` is 50 times lower than `behaviour_penalty_weight` + /// i.e. 50 slow peer penalties match 1 behaviour penalty. 
+ pub slow_peer_weight: f64, + pub slow_peer_threshold: f64, + pub slow_peer_decay: f64, } impl Default for PeerScoreParams { @@ -165,6 +172,9 @@ impl Default for PeerScoreParams { decay_interval: Duration::from_secs(DEFAULT_DECAY_INTERVAL), decay_to_zero: DEFAULT_DECAY_TO_ZERO, retain_score: Duration::from_secs(3600), + slow_peer_weight: -0.2, + slow_peer_threshold: 0.0, + slow_peer_decay: 0.2, } } } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 13edecd5846..8d33fe51a90 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -23,7 +23,8 @@ use crate::handler::HandlerEvent; use crate::rpc_proto::proto; use crate::topic::TopicHash; use crate::types::{ - ControlAction, MessageId, PeerInfo, PeerKind, RawMessage, Rpc, Subscription, SubscriptionAction, + ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc, + Subscription, SubscriptionAction, }; use crate::ValidationError; use asynchronous_codec::{Decoder, Encoder, Framed}; @@ -412,33 +413,39 @@ impl Decoder for GossipsubCodec { let ihave_msgs: Vec = rpc_control .ihave .into_iter() - .map(|ihave| ControlAction::IHave { - topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), - message_ids: ihave - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), + .map(|ihave| { + ControlAction::IHave(IHave { + topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), + message_ids: ihave + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) }) .collect(); let iwant_msgs: Vec = rpc_control .iwant .into_iter() - .map(|iwant| ControlAction::IWant { - message_ids: iwant - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), + .map(|iwant| { + ControlAction::IWant(IWant { + message_ids: iwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) }) .collect(); let graft_msgs: Vec = rpc_control .graft .into_iter() - 
.map(|graft| ControlAction::Graft { - topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + .map(|graft| { + ControlAction::Graft(Graft { + topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + }) }) .collect(); @@ -462,11 +469,11 @@ impl Decoder for GossipsubCodec { .collect::>(); let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); - prune_msgs.push(ControlAction::Prune { + prune_msgs.push(ControlAction::Prune(Prune { topic_hash, peers, backoff: prune.backoff, - }); + })); } control_msgs.extend(ihave_msgs); @@ -501,7 +508,7 @@ impl Decoder for GossipsubCodec { mod tests { use super::*; use crate::config::Config; - use crate::{Behaviour, ConfigBuilder}; + use crate::{Behaviour, ConfigBuilder, MessageAuthenticity}; use crate::{IdentTopic as Topic, Version}; use libp2p_identity::Keypair; use quickcheck::*; @@ -516,8 +523,9 @@ mod tests { // generate an arbitrary GossipsubMessage using the behaviour signing functionality let config = Config::default(); let mut gs: Behaviour = - Behaviour::new(crate::MessageAuthenticity::Signed(keypair.0), config).unwrap(); - let data = (0..g.gen_range(10..10024u32)) + Behaviour::new(MessageAuthenticity::Signed(keypair.0), config).unwrap(); + let mut data_g = quickcheck::Gen::new(10024); + let data = (0..u8::arbitrary(&mut data_g)) .map(|_| u8::arbitrary(g)) .collect::>(); let topic_id = TopicId::arbitrary(g).0; @@ -530,7 +538,8 @@ mod tests { impl Arbitrary for TopicId { fn arbitrary(g: &mut Gen) -> Self { - let topic_string: String = (0..g.gen_range(20..1024u32)) + let mut data_g = quickcheck::Gen::new(1024); + let topic_string: String = (0..u8::arbitrary(&mut data_g)) .map(|_| char::arbitrary(g)) .collect::(); TopicId(Topic::new(topic_string).into()) diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs new file mode 100644 index 00000000000..c90e46a85da --- /dev/null +++ b/protocols/gossipsub/src/rpc.rs @@ -0,0 +1,192 @@ +// Copyright 2020 Sigma Prime 
Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use futures::{stream::Peekable, Stream, StreamExt}; +use std::{ + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; + +use crate::types::RpcOut; + +/// `RpcOut` sender that is priority aware. +#[derive(Debug)] +pub(crate) struct Sender { + /// Capacity of the priority channel for `Publish` messages. + priority_cap: usize, + len: Arc, + pub(crate) priority_sender: async_channel::Sender, + pub(crate) non_priority_sender: async_channel::Sender, + priority_receiver: async_channel::Receiver, + non_priority_receiver: async_channel::Receiver, +} + +impl Sender { + /// Create a RpcSender. + pub(crate) fn new(cap: usize) -> Sender { + // We intentionally do not bound the channel, as we still need to send control messages + // such as `GRAFT`, `PRUNE`, `SUBSCRIBE`, and `UNSUBSCRIBE`. 
+ // That's also why we define `cap` and divide it by two, + // to ensure there is capacity for both priority and non_priority messages. + let (priority_sender, priority_receiver) = async_channel::unbounded(); + let (non_priority_sender, non_priority_receiver) = async_channel::bounded(cap / 2); + let len = Arc::new(AtomicUsize::new(0)); + Sender { + priority_cap: cap / 2, + len, + priority_sender, + non_priority_sender, + priority_receiver, + non_priority_receiver, + } + } + + /// Create a new Receiver to the sender. + pub(crate) fn new_receiver(&self) -> Receiver { + Receiver { + priority_queue_len: self.len.clone(), + priority: Box::pin(self.priority_receiver.clone().peekable()), + non_priority: Box::pin(self.non_priority_receiver.clone().peekable()), + } + } + + #[allow(clippy::result_large_err)] + pub(crate) fn send_message(&self, rpc: RpcOut) -> Result<(), RpcOut> { + if let RpcOut::Publish { .. } = rpc { + // Update number of publish message in queue. + let len = self.len.load(Ordering::Relaxed); + if len >= self.priority_cap { + return Err(rpc); + } + self.len.store(len + 1, Ordering::Relaxed); + } + let sender = match rpc { + RpcOut::Publish { .. } + | RpcOut::Graft(_) + | RpcOut::Prune(_) + | RpcOut::Subscribe(_) + | RpcOut::Unsubscribe(_) => &self.priority_sender, + RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::IWant(_) => { + &self.non_priority_sender + } + }; + sender.try_send(rpc).map_err(|err| err.into_inner()) + } + + /// Returns the current size of the priority queue. + pub(crate) fn priority_queue_len(&self) -> usize { + self.len.load(Ordering::Relaxed) + } + + /// Returns the current size of the non-priority queue. + pub(crate) fn non_priority_queue_len(&self) -> usize { + self.non_priority_sender.len() + } +} + +/// `RpcOut` sender that is priority aware. +#[derive(Debug)] +pub struct Receiver { + /// The maximum length of the priority queue. + pub(crate) priority_queue_len: Arc, + /// The priority queue receiver. 
+ pub(crate) priority: Pin>>>, + /// The non priority queue receiver. + pub(crate) non_priority: Pin>>>, +} + +impl Receiver { + // Peek the next message in the queues and return it if its timeout has elapsed. + // Returns `None` if there aren't any more messages on the stream or none is stale. + pub(crate) fn poll_stale(&mut self, cx: &mut Context<'_>) -> Poll> { + // Peek priority queue. + let priority = match self.priority.as_mut().poll_peek_mut(cx) { + Poll::Ready(Some(RpcOut::Publish { + message: _, + ref mut timeout, + })) => { + if Pin::new(timeout).poll(cx).is_ready() { + // Return the message. + let dropped = futures::ready!(self.priority.poll_next_unpin(cx)) + .expect("There should be a message"); + return Poll::Ready(Some(dropped)); + } + Poll::Ready(None) + } + poll => poll, + }; + + let non_priority = match self.non_priority.as_mut().poll_peek_mut(cx) { + Poll::Ready(Some(RpcOut::Forward { + message: _, + ref mut timeout, + })) => { + if Pin::new(timeout).poll(cx).is_ready() { + // Return the message. + let dropped = futures::ready!(self.non_priority.poll_next_unpin(cx)) + .expect("There should be a message"); + return Poll::Ready(Some(dropped)); + } + Poll::Ready(None) + } + poll => poll, + }; + + match (priority, non_priority) { + (Poll::Ready(None), Poll::Ready(None)) => Poll::Ready(None), + _ => Poll::Pending, + } + } + + /// Poll queues and return true if both are empty. + pub(crate) fn poll_is_empty(&mut self, cx: &mut Context<'_>) -> bool { + matches!( + ( + self.priority.as_mut().poll_peek(cx), + self.non_priority.as_mut().poll_peek(cx), + ), + (Poll::Ready(None), Poll::Ready(None)) + ) + } +} + +impl Stream for Receiver { + type Item = RpcOut; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + // The priority queue is first polled. + if let Poll::Ready(rpc) = Pin::new(&mut self.priority).poll_next(cx) { + if let Some(RpcOut::Publish { .. 
}) = rpc { + self.priority_queue_len.fetch_sub(1, Ordering::Relaxed); + } + return Poll::Ready(rpc); + } + // Then we poll the non priority. + Pin::new(&mut self.non_priority).poll_next(cx) + } +} diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index a88f4822ac2..bb1916fefd0 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,7 +19,9 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. +use crate::rpc::Sender; use crate::TopicHash; +use futures_timer::Delay; use libp2p_identity::PeerId; use libp2p_swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; @@ -31,6 +33,33 @@ use crate::rpc_proto::proto; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +/// Messages that have expired while attempting to be sent to a peer. +#[derive(Clone, Debug, Default)] +pub struct FailedMessages { + /// The number of publish messages that failed to be published in a heartbeat. + pub publish: usize, + /// The number of forward messages that failed to be published in a heartbeat. + pub forward: usize, + /// The number of messages that were failed to be sent to the priority queue as it was full. + pub priority: usize, + /// The number of messages that were failed to be sent to the non-priority queue as it was full. + pub non_priority: usize, + /// The number of messages that timed out and could not be sent. + pub timeout: usize, +} + +impl FailedMessages { + /// The total number of messages that failed due to the queue being full. + pub fn total_queue_full(&self) -> usize { + self.priority + self.non_priority + } + + /// The total failed messages in a heartbeat. + pub fn total(&self) -> usize { + self.priority + self.non_priority + } +} + #[derive(Debug)] /// Validation kinds from the application for received messages. 
pub enum MessageAcceptance { @@ -71,7 +100,7 @@ impl std::fmt::Debug for MessageId { } } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub(crate) struct PeerConnections { /// The kind of protocol the peer supports. pub(crate) kind: PeerKind, @@ -79,6 +108,8 @@ pub(crate) struct PeerConnections { pub(crate) connections: Vec, /// Subscribed topics. pub(crate) topics: BTreeSet, + /// The rpc sender to the connection handler(s). + pub(crate) sender: Sender, } /// Describes the types of peers that can exist in the gossipsub context. @@ -197,8 +228,8 @@ pub enum SubscriptionAction { } #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct PeerInfo { - pub peer_id: Option, +pub(crate) struct PeerInfo { + pub(crate) peer_id: Option, //TODO add this when RFC: Signed Address Records got added to the spec (see pull request // https://github.com/libp2p/specs/pull/217) //pub signed_peer_record: ?, @@ -208,46 +239,70 @@ pub struct PeerInfo { #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum ControlAction { /// Node broadcasts known messages per topic - IHave control message. - IHave { - /// The topic of the messages. - topic_hash: TopicHash, - /// A list of known message ids (peer_id + sequence _number) as a string. - message_ids: Vec, - }, + IHave(IHave), /// The node requests specific message ids (peer_id + sequence _number) - IWant control message. - IWant { - /// A list of known message ids (peer_id + sequence _number) as a string. - message_ids: Vec, - }, + IWant(IWant), /// The node has been added to the mesh - Graft control message. - Graft { - /// The mesh topic the peer should be added to. - topic_hash: TopicHash, - }, + Graft(Graft), /// The node has been removed from the mesh - Prune control message. - Prune { - /// The mesh topic the peer should be removed from. 
- topic_hash: TopicHash, - /// A list of peers to be proposed to the removed peer as peer exchange - peers: Vec, - /// The backoff time in seconds before we allow to reconnect - backoff: Option, - }, + Prune(Prune), } -/// A Gossipsub RPC message sent. +/// Node broadcasts known messages per topic - IHave control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IHave { + /// The topic of the messages. + pub(crate) topic_hash: TopicHash, + /// A list of known message ids (peer_id + sequence _number) as a string. + pub(crate) message_ids: Vec, +} + +/// The node requests specific message ids (peer_id + sequence _number) - IWant control message. #[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IWant { + /// A list of known message ids (peer_id + sequence _number) as a string. + pub(crate) message_ids: Vec, +} + +/// The node has been added to the mesh - Graft control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Graft { + /// The mesh topic the peer should be added to. + pub(crate) topic_hash: TopicHash, +} + +/// The node has been removed from the mesh - Prune control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Prune { + /// The mesh topic the peer should be removed from. + pub(crate) topic_hash: TopicHash, + /// A list of peers to be proposed to the removed peer as peer exchange + pub(crate) peers: Vec, + /// The backoff time in seconds before we allow to reconnect + pub(crate) backoff: Option, +} + +/// A Gossipsub RPC message sent. +#[derive(Debug)] pub enum RpcOut { - /// Publish a Gossipsub message on network. - Publish(RawMessage), - /// Forward a Gossipsub message to the network. - Forward(RawMessage), + /// Publish a Gossipsub message on network.`timeout` limits the duration the message + /// can wait to be sent before it is abandoned. + Publish { message: RawMessage, timeout: Delay }, + /// Forward a Gossipsub message on network. 
`timeout` limits the duration the message + /// can wait to be sent before it is abandoned. + Forward { message: RawMessage, timeout: Delay }, /// Subscribe a topic. Subscribe(TopicHash), /// Unsubscribe a topic. Unsubscribe(TopicHash), - /// List of Gossipsub control messages. - Control(ControlAction), + /// Send a GRAFT control message. + Graft(Graft), + /// Send a PRUNE control message. + Prune(Prune), + /// Send a IHave control message. + IHave(IHave), + /// Send a IWant control message. + IWant(IWant), } impl RpcOut { @@ -262,12 +317,18 @@ impl From for proto::RPC { /// Converts the RPC into protobuf format. fn from(rpc: RpcOut) -> Self { match rpc { - RpcOut::Publish(message) => proto::RPC { + RpcOut::Publish { + message, + timeout: _, + } => proto::RPC { subscriptions: Vec::new(), publish: vec![message.into()], control: None, }, - RpcOut::Forward(message) => proto::RPC { + RpcOut::Forward { + message, + timeout: _, + } => proto::RPC { publish: vec![message.into()], subscriptions: Vec::new(), control: None, @@ -288,7 +349,7 @@ impl From for proto::RPC { }], control: None, }, - RpcOut::Control(ControlAction::IHave { + RpcOut::IHave(IHave { topic_hash, message_ids, }) => proto::RPC { @@ -304,7 +365,7 @@ impl From for proto::RPC { prune: vec![], }), }, - RpcOut::Control(ControlAction::IWant { message_ids }) => proto::RPC { + RpcOut::IWant(IWant { message_ids }) => proto::RPC { publish: Vec::new(), subscriptions: Vec::new(), control: Some(proto::ControlMessage { @@ -316,7 +377,7 @@ impl From for proto::RPC { prune: vec![], }), }, - RpcOut::Control(ControlAction::Graft { topic_hash }) => proto::RPC { + RpcOut::Graft(Graft { topic_hash }) => proto::RPC { publish: Vec::new(), subscriptions: vec![], control: Some(proto::ControlMessage { @@ -328,7 +389,7 @@ impl From for proto::RPC { prune: vec![], }), }, - RpcOut::Control(ControlAction::Prune { + RpcOut::Prune(Prune { topic_hash, peers, backoff, @@ -420,33 +481,33 @@ impl From for proto::RPC { for action in 
rpc.control_msgs { match action { // collect all ihave messages - ControlAction::IHave { + ControlAction::IHave(IHave { topic_hash, message_ids, - } => { + }) => { let rpc_ihave = proto::ControlIHave { topic_id: Some(topic_hash.into_string()), message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), }; control.ihave.push(rpc_ihave); } - ControlAction::IWant { message_ids } => { + ControlAction::IWant(IWant { message_ids }) => { let rpc_iwant = proto::ControlIWant { message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), }; control.iwant.push(rpc_iwant); } - ControlAction::Graft { topic_hash } => { + ControlAction::Graft(Graft { topic_hash }) => { let rpc_graft = proto::ControlGraft { topic_id: Some(topic_hash.into_string()), }; control.graft.push(rpc_graft); } - ControlAction::Prune { + ControlAction::Prune(Prune { topic_hash, peers, backoff, - } => { + }) => { let rpc_prune = proto::ControlPrune { topic_id: Some(topic_hash.into_string()), peers: peers From b057f918df4ca1da0d3af6e0103811618eb90c06 Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Tue, 26 Nov 2024 04:14:50 +0800 Subject: [PATCH 36/50] chore(deps): upgrade `thiserror` to 2.0 Changes: - upgrade `thiserror` crate from `1` to `2` - move `thiserror` to `workspace.dependencies` - sort `workspace.dependencies` - ~run `cargo update` to update `Cargo.lock`~ (Skipping changelog as `thiserror` does not present in any public APIs) Pull-Request: #5689. 
--- Cargo.lock | 200 ++++++++++++---------- Cargo.toml | 17 +- core/Cargo.toml | 2 +- identity/Cargo.toml | 2 +- libp2p/Cargo.toml | 2 +- misc/quick-protobuf-codec/Cargo.toml | 2 +- misc/webrtc-utils/Cargo.toml | 2 +- muxers/yamux/Cargo.toml | 2 +- protocols/autonat/Cargo.toml | 2 +- protocols/dcutr/Cargo.toml | 2 +- protocols/floodsub/Cargo.toml | 2 +- protocols/identify/Cargo.toml | 2 +- protocols/kad/Cargo.toml | 2 +- protocols/perf/Cargo.toml | 2 +- protocols/relay/Cargo.toml | 2 +- protocols/rendezvous/Cargo.toml | 2 +- transports/noise/Cargo.toml | 2 +- transports/quic/Cargo.toml | 2 +- transports/tls/Cargo.toml | 2 +- transports/webrtc-websys/Cargo.toml | 2 +- transports/webrtc/Cargo.toml | 2 +- transports/websocket-websys/Cargo.toml | 2 +- transports/websocket/Cargo.toml | 2 +- transports/webtransport-websys/Cargo.toml | 2 +- 24 files changed, 142 insertions(+), 119 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e24db6e69d4..d405464f58f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,7 +175,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.63", "time", ] @@ -191,7 +191,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.63", "time", ] @@ -215,7 +215,7 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", "synstructure 0.13.1", ] @@ -238,7 +238,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -472,7 +472,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -489,7 +489,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] 
[[package]] @@ -963,7 +963,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -1226,7 +1226,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -1366,7 +1366,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -1471,7 +1471,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -1727,7 +1727,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -1981,7 +1981,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "socket2 0.5.7", - "thiserror", + "thiserror 1.0.63", "tinyvec", "tokio", "tracing", @@ -2004,7 +2004,7 @@ dependencies = [ "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.63", "tokio", "tracing", ] @@ -2369,7 +2369,7 @@ dependencies = [ "rand 0.8.5", "rtcp", "rtp", - "thiserror", + "thiserror 1.0.63", "tokio", "waitgroup", "webrtc-srtp", @@ -2592,7 +2592,7 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing-subscriber", ] @@ -2630,7 +2630,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "rand_core 0.6.4", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", "tracing-subscriber", @@ -2677,7 +2677,7 @@ dependencies = [ "rw-stream-sink", "serde", "smallvec", - "thiserror", + "thiserror 2.0.3", "tracing", "unsigned-varint 0.8.0", "web-time 1.1.0", @@ -2709,7 +2709,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", "tracing-subscriber", @@ -2750,7 +2750,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", 
"smallvec", - "thiserror", + "thiserror 2.0.3", "tracing", ] @@ -2809,7 +2809,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror", + "thiserror 2.0.3", "tracing", "tracing-subscriber", ] @@ -2837,7 +2837,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "thiserror", + "thiserror 2.0.3", "tracing", "zeroize", ] @@ -2869,7 +2869,7 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", - "thiserror", + "thiserror 2.0.3", "tracing", "tracing-subscriber", "uint", @@ -2992,7 +2992,7 @@ dependencies = [ "sha2 0.10.8", "snow", "static_assertions", - "thiserror", + "thiserror 2.0.3", "tracing", "tracing-subscriber", "x25519-dalek", @@ -3021,7 +3021,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", "tracing-subscriber", @@ -3109,7 +3109,7 @@ dependencies = [ "ring 0.17.8", "rustls 0.23.11", "socket2 0.5.7", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", "tracing-subscriber", @@ -3137,7 +3137,7 @@ dependencies = [ "quickcheck-ext", "rand 0.8.5", "static_assertions", - "thiserror", + "thiserror 2.0.3", "tracing", "tracing-subscriber", "web-time 1.1.0", @@ -3165,7 +3165,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", "tracing-subscriber", @@ -3276,7 +3276,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -3330,7 +3330,7 @@ dependencies = [ "ring 0.17.8", "rustls 0.23.11", "rustls-webpki 0.101.7", - "thiserror", + "thiserror 2.0.3", "tokio", "x509-parser 0.16.0", "yasna", @@ -3381,7 +3381,7 @@ dependencies = [ "rcgen", "serde", "stun 0.6.0", - "thiserror", + "thiserror 2.0.3", "tinytemplate", "tokio", "tokio-util", @@ -3407,7 +3407,7 @@ dependencies = [ "rand 0.8.5", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 2.0.3", "tinytemplate", "tracing", ] @@ -3425,7 +3425,7 @@ dependencies = [ "libp2p-identity", 
"libp2p-webrtc-utils", "send_wrapper 0.6.0", - "thiserror", + "thiserror 2.0.3", "tracing", "wasm-bindgen", "wasm-bindgen-futures", @@ -3449,7 +3449,7 @@ dependencies = [ "rcgen", "rw-stream-sink", "soketto", - "thiserror", + "thiserror 2.0.3", "tracing", "url", "webpki-roots 0.25.2", @@ -3468,7 +3468,7 @@ dependencies = [ "libp2p-yamux", "parking_lot", "send_wrapper 0.6.0", - "thiserror", + "thiserror 2.0.3", "tracing", "wasm-bindgen", "web-sys", @@ -3488,7 +3488,7 @@ dependencies = [ "multihash", "once_cell", "send_wrapper 0.6.0", - "thiserror", + "thiserror 2.0.3", "tracing", "wasm-bindgen", "wasm-bindgen-futures", @@ -3504,7 +3504,7 @@ dependencies = [ "futures", "libp2p-core", "libp2p-muxer-test-harness", - "thiserror", + "thiserror 2.0.3", "tracing", "yamux 0.12.1", "yamux 0.13.3", @@ -3861,7 +3861,7 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -3875,7 +3875,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror", + "thiserror 1.0.63", "tokio", ] @@ -4066,7 +4066,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -4099,7 +4099,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.63", "urlencoding", ] @@ -4114,7 +4114,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -4146,7 +4146,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk 0.25.0", "prost", - "thiserror", + "thiserror 1.0.63", "tokio", "tonic", ] @@ -4189,7 +4189,7 @@ dependencies = [ "ordered-float 4.2.0", "percent-encoding", "rand 0.8.5", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", ] @@ -4210,7 +4210,7 @@ dependencies = [ "percent-encoding", "rand 0.8.5", "serde_json", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", ] @@ -4340,7 +4340,7 @@ 
checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -4492,9 +4492,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.85" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -4519,7 +4519,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -4542,7 +4542,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -4570,7 +4570,7 @@ dependencies = [ "futures", "quick-protobuf", "quickcheck-ext", - "thiserror", + "thiserror 2.0.3", "unsigned-varint 0.8.0", ] @@ -4608,7 +4608,7 @@ dependencies = [ "quinn-udp", "rustc-hash 1.1.0", "rustls 0.23.11", - "thiserror", + "thiserror 1.0.63", "tokio", "tracing", ] @@ -4625,7 +4625,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.11", "slab", - "thiserror", + "thiserror 1.0.63", "tinyvec", "tracing", ] @@ -4801,7 +4801,7 @@ checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.15", "redox_syscall 0.2.16", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -5006,7 +5006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3677908cadfbecb4cc1da9a56a32524fae4ebdfa7c2ea93886e1b1e846488cb9" dependencies = [ "bytes", - "thiserror", + "thiserror 1.0.63", "webrtc-util 0.8.1", ] @@ -5022,7 +5022,7 @@ dependencies = [ "netlink-packet-route", "netlink-proto", "nix 0.24.3", - "thiserror", + "thiserror 1.0.63", "tokio", ] @@ -5035,7 +5035,7 @@ dependencies = [ "bytes", "rand 0.8.5", "serde", - "thiserror", + "thiserror 
1.0.63", "webrtc-util 0.8.1", ] @@ -5060,7 +5060,7 @@ dependencies = [ "quote", "rust-embed-utils", "shellexpand", - "syn 2.0.66", + "syn 2.0.89", "walkdir", ] @@ -5294,7 +5294,7 @@ checksum = "4653054c30ebce63658762eb0d64e27673868a95564474811ae6c220cf767640" dependencies = [ "rand 0.8.5", "substring", - "thiserror", + "thiserror 1.0.63", "url", ] @@ -5373,7 +5373,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -5406,7 +5406,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -5691,7 +5691,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -5707,7 +5707,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "subtle", - "thiserror", + "thiserror 1.0.63", "tokio", "url", "webrtc-util 0.8.1", @@ -5726,7 +5726,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "subtle", - "thiserror", + "thiserror 1.0.63", "tokio", "url", "webrtc-util 0.9.0", @@ -5760,9 +5760,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.66" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -5801,7 +5801,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -5881,7 +5881,7 @@ dependencies = [ "stringmatch", "strum", "thirtyfour-macros", - "thiserror", + "thiserror 1.0.63", "tokio", "tracing", "url", @@ -5895,7 +5895,7 @@ checksum = "b72d056365e368fc57a56d0cec9e41b02fb4a3474a61c8735262b1cfebe67425" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -5904,7 +5904,16 @@ version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.63", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -5915,7 +5924,18 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] @@ -6033,7 +6053,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -6223,7 +6243,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -6346,7 +6366,7 @@ dependencies = [ "rand 0.8.5", "ring 0.16.20", "stun 0.5.1", - "thiserror", + "thiserror 1.0.63", "tokio", "webrtc-util 0.8.1", ] @@ -6577,7 +6597,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -6611,7 +6631,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6645,7 +6665,7 @@ checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -6732,7 +6752,7 @@ dependencies = [ "sha2 0.10.8", "smol_str", "stun 0.5.1", - "thiserror", + "thiserror 1.0.63", "time", "tokio", "turn", @@ -6756,7 +6776,7 @@ checksum = "a45d2461d0e0bf93f181e30eb0b40df32b8bf3efb89c53cebb1990e603e2067d" dependencies = [ "bytes", "log", - "thiserror", + "thiserror 1.0.63", "tokio", "webrtc-sctp", "webrtc-util 0.8.1", @@ -6792,7 +6812,7 @@ dependencies = [ "sha1", "sha2 0.10.8", "subtle", - "thiserror", + "thiserror 1.0.63", "tokio", "webrtc-util 0.8.1", "x25519-dalek", @@ -6813,7 +6833,7 @@ dependencies = [ "serde", "serde_json", "stun 0.5.1", - "thiserror", + "thiserror 1.0.63", "tokio", "turn", "url", @@ -6831,7 +6851,7 @@ checksum = "62bebbd40e7f8b630a0f1a74783dbfff1edfc0ccaae891c4689891156a8c4d8c" dependencies = [ "log", "socket2 0.5.7", - "thiserror", + "thiserror 1.0.63", "tokio", "webrtc-util 0.8.1", ] @@ -6846,7 +6866,7 @@ dependencies = [ "bytes", "rand 0.8.5", "rtp", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -6861,7 +6881,7 @@ dependencies = [ "crc", "log", "rand 0.8.5", - "thiserror", + "thiserror 1.0.63", "tokio", "webrtc-util 0.8.1", ] @@ -6884,7 +6904,7 @@ dependencies = [ "rtp", "sha1", "subtle", - "thiserror", + "thiserror 1.0.63", "tokio", "webrtc-util 0.8.1", ] @@ -6904,7 +6924,7 @@ dependencies = [ "log", "nix 0.26.4", "rand 0.8.5", - "thiserror", + "thiserror 1.0.63", "tokio", "winapi", ] @@ -6925,7 +6945,7 @@ dependencies = [ "nix 0.26.4", "portable-atomic", "rand 0.8.5", - "thiserror", + "thiserror 1.0.63", "tokio", "winapi", ] @@ -7210,7 +7230,7 @@ dependencies = [ "oid-registry 0.6.1", "ring 0.16.20", "rusticata-macros", - "thiserror", + "thiserror 1.0.63", "time", ] @@ -7227,7 +7247,7 @@ dependencies = [ "nom", "oid-registry 0.7.0", "rusticata-macros", - "thiserror", + "thiserror 1.0.63", "time", ] @@ -7303,7 +7323,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] [[package]] @@ -7323,5 +7343,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.89", ] diff --git a/Cargo.toml b/Cargo.toml index a7f944d22fc..b631b587dee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,9 +72,6 @@ resolver = "2" rust-version = "1.75.0" [workspace.dependencies] -asynchronous-codec = { version = "0.7.0" } -futures-bounded = { version = "0.2.4" } -futures-rustls = { version = "0.26.0", default-features = false } libp2p = { version = "0.54.2", path = "libp2p" } libp2p-allow-block-list = { version = "0.4.2", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.13.1", path = "protocols/autonat" } @@ -116,21 +113,27 @@ libp2p-websocket = { version = "0.44.1", path = "transports/websocket" } libp2p-websocket-websys = { version = "0.4.1", path = "transports/websocket-websys" } libp2p-webtransport-websys = { version = "0.4.0", path = "transports/webtransport-websys" } libp2p-yamux = { version = "0.46.0", path = "muxers/yamux" } + +# External dependencies +asynchronous-codec = { version = "0.7.0" } +futures = "0.3.30" +futures-bounded = { version = "0.2.4" } +futures-rustls = { version = "0.26.0", default-features = false } multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } prometheus-client = "0.22.2" quick-protobuf-codec = { version = "0.3.1", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } +rcgen = "0.11.3" +ring = "0.17.8" rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } -unsigned-varint = { version = "0.8.0" } +thiserror = "2" tokio = { version = "1.38", default-features = false } tracing = "0.1.37" tracing-subscriber = "0.3" -futures = "0.3.30" +unsigned-varint = { version = "0.8.0" } web-time = "1.1.0" -ring = "0.17.8" 
-rcgen = "0.11.3" [patch.crates-io] diff --git a/core/Cargo.toml b/core/Cargo.toml index c257ff25ec4..8ec0b0fc197 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -28,7 +28,7 @@ rand = "0.8" rw-stream-sink = { workspace = true } serde = { version = "1", optional = true, features = ["derive"] } smallvec = "1.13.2" -thiserror = "1.0" +thiserror = { workspace = true } tracing = { workspace = true } unsigned-varint = { workspace = true } diff --git a/identity/Cargo.toml b/identity/Cargo.toml index d3b07c5dc87..cc41abb3e24 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -25,7 +25,7 @@ rand = { version = "0.8", optional = true } sec1 = { version = "0.7", default-features = false, optional = true } serde = { version = "1", optional = true, features = ["derive"] } sha2 = { version = "0.10.8", optional = true } -thiserror = { version = "1.0", optional = true } +thiserror = { workspace = true, optional = true } zeroize = { version = "1.8", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 83ef86a4ca4..79f4b8fbb9a 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -122,7 +122,7 @@ libp2p-webtransport-websys = { workspace = true, optional = true } libp2p-yamux = { workspace = true, optional = true } multiaddr = { workspace = true } pin-project = "1.0.0" -thiserror = "1.0" +thiserror = { workspace = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] libp2p-dns = { workspace = true, optional = true } diff --git a/misc/quick-protobuf-codec/Cargo.toml b/misc/quick-protobuf-codec/Cargo.toml index 985479059a2..2501e94ca19 100644 --- a/misc/quick-protobuf-codec/Cargo.toml +++ b/misc/quick-protobuf-codec/Cargo.toml @@ -13,7 +13,7 @@ categories = ["asynchronous"] [dependencies] asynchronous-codec = { workspace = true } bytes = { version = "1" } -thiserror = "1.0" +thiserror = { workspace = true } unsigned-varint = { workspace = true, features = ["std"] } 
quick-protobuf = "0.8" diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml index 88f576f12d9..287388a49e7 100644 --- a/misc/webrtc-utils/Cargo.toml +++ b/misc/webrtc-utils/Cargo.toml @@ -23,7 +23,7 @@ quick-protobuf-codec = { workspace = true } rand = "0.8" serde = { version = "1.0", features = ["derive"] } sha2 = "0.10.8" -thiserror = "1" +thiserror = { workspace = true } tinytemplate = "1.2" tracing = { workspace = true } diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 0c52eca3fd4..cd3f8347bd0 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -14,7 +14,7 @@ categories = ["network-programming", "asynchronous"] either = "1" futures = { workspace = true } libp2p-core = { workspace = true } -thiserror = "1.0" +thiserror = { workspace = true } yamux012 = { version = "0.12.1", package = "yamux" } yamux013 = { version = "0.13.3", package = "yamux" } tracing = { workspace = true } diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index ced5dbeb4e8..92ca163d8ec 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -33,7 +33,7 @@ tracing = { workspace = true } quick-protobuf-codec = { workspace = true } rand = "0.8" rand_core = { version = "0.6", optional = true } -thiserror = { version = "1.0.52", optional = true } +thiserror = { workspace = true, optional = true } [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt", "sync"] } diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 69517181aab..a47f5400488 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -21,7 +21,7 @@ libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } -thiserror = "1.0" +thiserror = { workspace = true } tracing = { workspace = true } lru = "0.12.3" futures-bounded = { workspace = true } diff --git a/protocols/floodsub/Cargo.toml 
b/protocols/floodsub/Cargo.toml index 18d77e99e9c..dcfde6383cc 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -23,7 +23,7 @@ quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" smallvec = "1.13.2" -thiserror = "1.0.61" +thiserror = { workspace = true } tracing = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 87b3ed63774..d7f6b6eca76 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -22,7 +22,7 @@ lru = "0.12.3" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" smallvec = "1.13.2" -thiserror = "1.0" +thiserror = { workspace = true } tracing = { workspace = true } either = "1.12.0" diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 11df81afbf8..295414f6ddd 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -30,7 +30,7 @@ uint = "0.9" futures-timer = "3.0.3" web-time = { workspace = true } serde = { version = "1.0", optional = true, features = ["derive"] } -thiserror = "1" +thiserror = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index a1a6128c6ed..cd499a8c949 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -28,7 +28,7 @@ libp2p-tls = { workspace = true } libp2p-yamux = { workspace = true } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -thiserror = "1.0" +thiserror = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index c996a014845..6c2c7b90304 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -25,7 +25,7 
@@ quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8.4" static_assertions = "1" -thiserror = "1.0" +thiserror = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index 5aa70688dbe..5fa40c3785b 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -24,7 +24,7 @@ libp2p-request-response = { workspace = true } quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" -thiserror = "1" +thiserror = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index 7f8e9004cd0..9798ba1836e 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -22,7 +22,7 @@ quick-protobuf = "0.8" rand = "0.8.3" sha2 = "0.10.8" static_assertions = "1" -thiserror = "1.0.61" +thiserror = { workspace = true } tracing = { workspace = true } x25519-dalek = "2" zeroize = "1" diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index a33ef4ef0b1..17d5014b974 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -21,7 +21,7 @@ parking_lot = "0.12.3" quinn = { version = "0.11.2", default-features = false, features = ["rustls", "futures-io"] } rand = "0.8.5" rustls = { version = "0.23.9", default-features = false } -thiserror = "1.0.61" +thiserror = { workspace = true } tokio = { workspace = true, default-features = false, features = ["net", "rt", "time"], optional = true } tracing = { workspace = true } socket2 = "0.5.7" diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index c27e14bb537..fce76e2aa79 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -15,7 +15,7 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true } rcgen = { workspace = true } ring = { workspace = true } -thiserror = "1.0.61" +thiserror = { 
workspace = true } webpki = { version = "0.101.4", package = "rustls-webpki", features = ["std"] } x509-parser = "0.16.0" yasna = "0.5.2" diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 453abe57f74..4663913c849 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -21,7 +21,7 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-webrtc-utils = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } -thiserror = "1" +thiserror = { workspace = true } tracing = { workspace = true } wasm-bindgen = { version = "0.2.90" } wasm-bindgen-futures = { version = "0.4.42" } diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index fc2748d93c3..4197a9419d8 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -26,7 +26,7 @@ rand = "0.8" rcgen = { workspace = true } serde = { version = "1.0", features = ["derive"] } stun = "0.6" -thiserror = "1" +thiserror = { workspace = true } tinytemplate = "1.2" tokio = { workspace = true, features = ["net"], optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 1687d3c0fb5..1e604ba0478 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -18,7 +18,7 @@ libp2p-core = { workspace = true } tracing = { workspace = true } parking_lot = "0.12.3" send_wrapper = "0.6.0" -thiserror = "1.0.61" +thiserror = { workspace = true } wasm-bindgen = "0.2.90" web-sys = { version = "0.3.69", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window", "WorkerGlobalScope"] } diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 07f84901eda..5c9734e420a 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -21,7 +21,7 
@@ pin-project-lite = "0.2.14" rw-stream-sink = { workspace = true } soketto = "0.8.0" tracing = { workspace = true } -thiserror = "1.0.61" +thiserror = { workspace = true } url = "2.5" webpki-roots = "0.25" diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index eeb474d4a63..0cfc37bf041 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -23,7 +23,7 @@ multiaddr = { workspace = true } multihash = { workspace = true } once_cell = "1.19.0" send_wrapper = { version = "0.6.0", features = ["futures"] } -thiserror = "1.0.61" +thiserror = { workspace = true } tracing = { workspace = true } wasm-bindgen = "0.2.93" wasm-bindgen-futures = "0.4.43" From c9c44b1b1b031c215a5e20f523b202b1694fc4b4 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Wed, 27 Nov 2024 01:11:10 +0200 Subject: [PATCH 37/50] fix: typos in documentation files Pull-Request: #5693. 
--- core/CHANGELOG.md | 2 +- identity/CHANGELOG.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index dbd46a38f07..68b1f99cc2a 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -7,7 +7,7 @@ - Update `Transport::dial` function signature with a `DialOpts` param and remove `Transport::dial_as_listener`: - `DialOpts` struct contains `PortUse` and `Endpoint`, - - `PortUse` allows controling port allocation of new connections (defaults to `PortUse::Reuse`) - + - `PortUse` allows controlling port allocation of new connections (defaults to `PortUse::Reuse`) - - Add `port_use` field to `ConnectedPoint` - Set `endpoint` field in `DialOpts` to `Endpoint::Listener` to dial as a listener - Remove `Transport::address_translation` and relocate functionality to `libp2p_swarm` diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md index 8ee12c8124a..98f3e5c5636 100644 --- a/identity/CHANGELOG.md +++ b/identity/CHANGELOG.md @@ -10,7 +10,7 @@ ## 0.2.8 -- Bump `ring` to `0.17.5. +- Bump `ring` to `0.17.5`. See [PR 4779](https://github.com/libp2p/rust-libp2p/pull/4779). ## 0.2.7 From 0d890fdc65743b7c5af50905dd3e6cd26ce773ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 27 Nov 2024 17:05:29 +0000 Subject: [PATCH 38/50] fix(gossipsub): fix mesh/fanout inconsistencies When a peer unsubscribes also remove it from fanout. Pull-Request: #5690. --- protocols/gossipsub/CHANGELOG.md | 6 ++++-- protocols/gossipsub/src/behaviour.rs | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 8d95abc01a2..ddbbc7fb552 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,15 +1,17 @@ ## 0.48.0 +- Correct state inconsistencies with the mesh and fanout when unsubscribing. 
+ See [PR 5690](https://github.com/libp2p/rust-libp2p/pull/5690) + - Deprecate `futures-ticker` and use `futures-timer` instead. See [PR 5674](https://github.com/libp2p/rust-libp2p/pull/5674). + - Apply `max_transmit_size` to the inner message instead of the final payload. See [PR 5642](https://github.com/libp2p/rust-libp2p/pull/5642). - Deprecate `void` crate. See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). -## 0.47.1 - - Attempt to publish to at least mesh_n peers when flood publish is disabled. See [PR 5578](https://github.com/libp2p/rust-libp2p/pull/5578). diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index fae45ed452e..075a881db48 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -1962,8 +1962,11 @@ where } } - // remove unsubscribed peers from the mesh if it exists + // remove unsubscribed peers from the mesh and fanout if they exist there. for (peer_id, topic_hash) in unsubscribed_peers { + self.fanout + .get_mut(&topic_hash) + .map(|peers| peers.remove(&peer_id)); self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub); } From 930118ef5a6566f058d22e1614a8e96b4c287262 Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Thu, 28 Nov 2024 18:04:50 +0800 Subject: [PATCH 39/50] fix(ci): Clippy Beta Fixes CI failure in Clippy (Beta) https://github.com/libp2p/rust-libp2p/actions/runs/12055058396/job/33614543029 Pull-Request: #5700. 
--- Cargo.lock | 4 ++-- Cargo.toml | 4 ++-- protocols/gossipsub/CHANGELOG.md | 4 ++++ protocols/gossipsub/src/backoff.rs | 2 +- protocols/gossipsub/src/behaviour.rs | 2 +- protocols/kad/CHANGELOG.md | 2 ++ protocols/kad/src/jobs.rs | 2 +- protocols/kad/src/kbucket.rs | 4 ++-- protocols/kad/src/kbucket/bucket.rs | 4 ++-- protocols/kad/src/record.rs | 4 ++-- swarm/CHANGELOG.md | 3 +++ swarm/src/connection/pool.rs | 2 +- transports/noise/CHANGELOG.md | 5 +++++ transports/noise/Cargo.toml | 2 +- transports/noise/src/io/handshake.rs | 2 +- transports/websocket-websys/CHANGELOG.md | 3 +++ transports/websocket-websys/src/lib.rs | 2 ++ transports/webtransport-websys/CHANGELOG.md | 5 +++++ transports/webtransport-websys/Cargo.toml | 2 +- transports/webtransport-websys/src/lib.rs | 2 ++ wasm-tests/webtransport-tests/src/lib.rs | 2 ++ 21 files changed, 45 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d405464f58f..4093d49504b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2974,7 +2974,7 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.45.0" +version = "0.45.1" dependencies = [ "asynchronous-codec", "bytes", @@ -3476,7 +3476,7 @@ dependencies = [ [[package]] name = "libp2p-webtransport-websys" -version = "0.4.0" +version = "0.4.1" dependencies = [ "futures", "js-sys", diff --git a/Cargo.toml b/Cargo.toml index b631b587dee..dfa32628dbc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,7 +88,7 @@ libp2p-mdns = { version = "0.46.0", path = "protocols/mdns" } libp2p-memory-connection-limits = { version = "0.3.1", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.15.0", path = "misc/metrics" } libp2p-mplex = { version = "0.42.0", path = "muxers/mplex" } -libp2p-noise = { version = "0.45.0", path = "transports/noise" } +libp2p-noise = { version = "0.45.1", path = "transports/noise" } libp2p-perf = { version = "0.4.0", path = "protocols/perf" } libp2p-ping = { version = "0.45.1", path = "protocols/ping" } 
libp2p-plaintext = { version = "0.42.0", path = "transports/plaintext" } @@ -111,7 +111,7 @@ libp2p-webrtc-utils = { version = "0.3.0", path = "misc/webrtc-utils" } libp2p-webrtc-websys = { version = "0.4.0-alpha.2", path = "transports/webrtc-websys" } libp2p-websocket = { version = "0.44.1", path = "transports/websocket" } libp2p-websocket-websys = { version = "0.4.1", path = "transports/websocket-websys" } -libp2p-webtransport-websys = { version = "0.4.0", path = "transports/webtransport-websys" } +libp2p-webtransport-websys = { version = "0.4.1", path = "transports/webtransport-websys" } libp2p-yamux = { version = "0.46.0", path = "muxers/yamux" } # External dependencies diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index ddbbc7fb552..0bfee4d3e91 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -18,10 +18,14 @@ - Introduce back pressure and penalize slow peers. Drop stale messages that timeout before being delivered. See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595). + - Change `Behaviour::unsubscribe` and `Behaviour::report_message_validation_result` to `bool` they don't need to be a `Result`. See [PR 5595](https://github.com/libp2p/rust-libp2p/pull/5595). +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). 
+ ## 0.47.0 diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index 4414ffb00e6..c955ee59c65 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -124,7 +124,7 @@ impl BackoffStorage { pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { self.backoffs .get(topic) - .map_or(false, |m| m.contains_key(peer)) + .is_some_and(|m| m.contains_key(peer)) } pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option { diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 075a881db48..ae808d97261 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -1689,7 +1689,7 @@ where let self_published = !self.config.allow_self_origin() && if let Some(own_id) = self.publish_config.get_own_id() { own_id != propagation_source - && raw_message.source.as_ref().map_or(false, |s| s == own_id) + && raw_message.source.as_ref().is_some_and(|s| s == own_id) } else { self.published_message_ids.contains(msg_id) }; diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 55d269bf98f..64049c7b60b 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -6,6 +6,8 @@ See [PR 5573](https://github.com/libp2p/rust-libp2p/pull/5573). - Add `Behavior::find_closest_local_peers()`. See [PR 5645](https://github.com/libp2p/rust-libp2p/pull/5645). +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). 
## 0.46.2 diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index 537f652b7a4..fa558878a38 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -203,7 +203,7 @@ impl PutRecordJob { T: RecordStore, { if self.inner.check_ready(cx, now) { - let publish = self.next_publish.map_or(false, |t_pub| now >= t_pub); + let publish = self.next_publish.is_some_and(|t_pub| now >= t_pub); let records = store .records() .filter_map(|r| { diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 28d7df03917..99d534fa669 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -529,12 +529,12 @@ where /// Returns true if the bucket has a pending node. pub fn has_pending(&self) -> bool { - self.bucket.pending().map_or(false, |n| !n.is_ready()) + self.bucket.pending().is_some_and(|n| !n.is_ready()) } /// Tests whether the given distance falls into this bucket. pub fn contains(&self, d: &Distance) -> bool { - BucketIndex::new(d).map_or(false, |i| i == self.index) + BucketIndex::new(d).is_some_and(|i| i == self.index) } /// Generates a random distance that falls into this bucket. diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index 1426017aa7a..ec2b7756c43 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -377,7 +377,7 @@ where // Adjust `first_connected_pos` accordingly. match status { NodeStatus::Connected => { - if self.first_connected_pos.map_or(false, |p| p == pos.0) + if self.first_connected_pos.is_some_and(|p| p == pos.0) && pos.0 == self.nodes.len() { // It was the last connected node. @@ -398,7 +398,7 @@ where /// Returns the status of the node at the given position. 
pub(crate) fn status(&self, pos: Position) -> NodeStatus { - if self.first_connected_pos.map_or(false, |i| pos.0 >= i) { + if self.first_connected_pos.is_some_and(|i| pos.0 >= i) { NodeStatus::Connected } else { NodeStatus::Disconnected diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs index cb7c4b866fc..b8a644acdd6 100644 --- a/protocols/kad/src/record.rs +++ b/protocols/kad/src/record.rs @@ -101,7 +101,7 @@ impl Record { /// Checks whether the record is expired w.r.t. the given `Instant`. pub fn is_expired(&self, now: Instant) -> bool { - self.expires.map_or(false, |t| now >= t) + self.expires.is_some_and(|t| now >= t) } } @@ -154,7 +154,7 @@ impl ProviderRecord { /// Checks whether the provider record is expired w.r.t. the given `Instant`. pub fn is_expired(&self, now: Instant) -> bool { - self.expires.map_or(false, |t| now >= t) + self.expires.is_some_and(|t| now >= t) } } diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 0109a33747c..69446e62d07 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -5,6 +5,9 @@ - Deprecate `void` crate. See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). + +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). ## 0.45.1 diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index b2accf745ef..7964ecbfa69 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -207,7 +207,7 @@ struct PendingConnection { impl PendingConnection { fn is_for_same_remote_as(&self, other: PeerId) -> bool { - self.peer_id.map_or(false, |peer| peer == other) + self.peer_id == Some(other) } /// Aborts the connection attempt, closing the connection. 
diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index f599ae3533f..cda7132cb28 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.45.1 + +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + ## 0.45.0 diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index 9798ba1836e..8824adcc50c 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-noise" edition = "2021" rust-version = { workspace = true } description = "Cryptographic handshake protocol using the noise framework." -version = "0.45.0" +version = "0.45.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index 8993a5795b6..d8dfb9b802e 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -106,7 +106,7 @@ where .id_remote_pubkey .ok_or_else(|| Error::AuthenticationFailed)?; - let is_valid_signature = self.dh_remote_pubkey_sig.as_ref().map_or(false, |s| { + let is_valid_signature = self.dh_remote_pubkey_sig.as_ref().is_some_and(|s| { id_pk.verify(&[STATIC_KEY_DOMAIN.as_bytes(), pubkey.as_ref()].concat(), s) }); diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md index 9d0cb7d7726..affe9ff2551 100644 --- a/transports/websocket-websys/CHANGELOG.md +++ b/transports/websocket-websys/CHANGELOG.md @@ -3,6 +3,9 @@ - fix: Return `None` when extracting a `/dnsaddr` address See [PR 5613](https://github.com/libp2p/rust-libp2p/pull/5613) +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + ## 0.4.0 - Implement refactored `Transport`. 
diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index 17b07c71c0a..21789eeca66 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -20,6 +20,8 @@ //! Libp2p websocket transports built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html). +#![allow(unexpected_cfgs)] + mod web_context; use bytes::BytesMut; diff --git a/transports/webtransport-websys/CHANGELOG.md b/transports/webtransport-websys/CHANGELOG.md index 411117918bd..45a94495e4e 100644 --- a/transports/webtransport-websys/CHANGELOG.md +++ b/transports/webtransport-websys/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.4.1 + +- Fix `cargo clippy` warnings in `rustc 1.84.0-beta.1`. + See [PR 5700](https://github.com/libp2p/rust-libp2p/pull/5700). + ## 0.4.0 - Implement refactored `Transport`. diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 0cfc37bf041..ef2865535bf 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-webtransport-websys" edition = "2021" rust-version = { workspace = true } description = "WebTransport for libp2p under WASM environment" -version = "0.4.0" +version = "0.4.1" authors = [ "Yiannis Marangos ", "oblique ", diff --git a/transports/webtransport-websys/src/lib.rs b/transports/webtransport-websys/src/lib.rs index f9c59694fa3..dcb1010d986 100644 --- a/transports/webtransport-websys/src/lib.rs +++ b/transports/webtransport-websys/src/lib.rs @@ -1,5 +1,7 @@ //! 
Libp2p WebTransport built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html) +#![allow(unexpected_cfgs)] + mod bindings; mod connection; mod endpoint; diff --git a/wasm-tests/webtransport-tests/src/lib.rs b/wasm-tests/webtransport-tests/src/lib.rs index 938cdf0b3e1..4cf4375bf7a 100644 --- a/wasm-tests/webtransport-tests/src/lib.rs +++ b/wasm-tests/webtransport-tests/src/lib.rs @@ -1,3 +1,5 @@ +#![allow(unexpected_cfgs)] + use futures::channel::oneshot; use futures::{AsyncReadExt, AsyncWriteExt}; use getrandom::getrandom; From b187c14ef36744ea6b2f29740321b5fe896a50ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 28 Nov 2024 15:44:50 +0000 Subject: [PATCH 40/50] chore: introduce rustfmt.toml Pull-Request: #5695. --- core/src/connection.rs | 22 +- core/src/either.rs | 17 +- core/src/lib.rs | 17 +- core/src/muxing.rs | 48 +- core/src/muxing/boxed.rs | 20 +- core/src/peer_record.rs | 21 +- core/src/signed_envelope.rs | 20 +- core/src/transport.rs | 28 +- core/src/transport/and_then.rs | 15 +- core/src/transport/boxed.rs | 8 +- core/src/transport/choice.rs | 13 +- core/src/transport/dummy.rs | 13 +- core/src/transport/global_only.rs | 47 +- core/src/transport/map.rs | 13 +- core/src/transport/map_err.rs | 10 +- core/src/transport/memory.rs | 24 +- core/src/transport/optional.rs | 9 +- core/src/transport/timeout.rs | 16 +- core/src/transport/upgrade.rs | 38 +- core/src/upgrade.rs | 7 +- core/src/upgrade/apply.rs | 16 +- core/src/upgrade/denied.rs | 7 +- core/src/upgrade/either.rs | 8 +- core/src/upgrade/error.rs | 3 +- core/src/upgrade/pending.rs | 7 +- core/src/upgrade/ready.rs | 10 +- core/src/upgrade/select.rs | 16 +- core/tests/transport_upgrade.rs | 11 +- examples/autonat/src/bin/autonat_client.rs | 16 +- examples/autonat/src/bin/autonat_server.rs | 15 +- examples/browser-webrtc/src/lib.rs | 8 +- examples/browser-webrtc/src/main.rs | 25 +- examples/chat/src/main.rs | 20 +- examples/dcutr/src/main.rs | 4 +- 
.../distributed-key-value-store/src/main.rs | 9 +- examples/file-sharing/src/main.rs | 11 +- examples/file-sharing/src/network.rs | 23 +- examples/identify/src/main.rs | 3 +- examples/ipfs-kad/src/main.rs | 16 +- examples/ipfs-private/src/main.rs | 3 +- examples/metrics/src/http_service.rs | 16 +- examples/metrics/src/main.rs | 20 +- examples/ping/src/main.rs | 3 +- examples/relay-server/src/main.rs | 10 +- examples/rendezvous/src/bin/rzv-discover.rs | 4 +- examples/rendezvous/src/bin/rzv-identify.rs | 3 +- examples/rendezvous/src/bin/rzv-register.rs | 7 +- examples/rendezvous/src/main.rs | 4 +- examples/stream/src/main.rs | 11 +- examples/upnp/src/main.rs | 3 +- hole-punching-tests/src/main.rs | 23 +- identity/src/ecdsa.rs | 16 +- identity/src/ed25519.rs | 14 +- identity/src/error.rs | 3 +- identity/src/keypair.rs | 39 +- identity/src/peer_id.rs | 6 +- identity/src/rsa.rs | 20 +- identity/src/secp256k1.rs | 8 +- interop-tests/src/arch.rs | 27 +- interop-tests/src/bin/wasm_ping.rs | 31 +- interop-tests/src/lib.rs | 13 +- libp2p/src/bandwidth.rs | 19 +- libp2p/src/builder.rs | 53 +- libp2p/src/builder/phase.rs | 11 +- libp2p/src/builder/phase/bandwidth_logging.rs | 7 +- libp2p/src/builder/phase/bandwidth_metrics.rs | 7 +- libp2p/src/builder/phase/behaviour.rs | 7 +- libp2p/src/builder/phase/build.rs | 6 +- libp2p/src/builder/phase/dns.rs | 3 +- libp2p/src/builder/phase/identity.rs | 3 +- libp2p/src/builder/phase/other_transport.rs | 13 +- libp2p/src/builder/phase/provider.rs | 16 +- libp2p/src/builder/phase/quic.rs | 8 +- libp2p/src/builder/phase/relay.rs | 3 +- libp2p/src/builder/phase/tcp.rs | 8 +- libp2p/src/builder/phase/websocket.rs | 12 +- libp2p/src/builder/select_muxer.rs | 11 +- libp2p/src/builder/select_security.rs | 12 +- libp2p/src/lib.rs | 30 +- libp2p/src/transport_ext.rs | 28 +- libp2p/src/tutorials/hole_punching.rs | 21 +- libp2p/src/tutorials/ping.rs | 36 +- misc/allow-block-list/src/lib.rs | 37 +- misc/connection-limits/src/lib.rs | 43 +- 
misc/keygen/src/config.rs | 8 +- misc/keygen/src/main.rs | 13 +- misc/memory-connection-limits/src/lib.rs | 42 +- .../tests/max_bytes.rs | 9 +- .../tests/max_percentage.rs | 12 +- .../tests/util/mod.rs | 6 +- misc/metrics/src/bandwidth.rs | 16 +- misc/metrics/src/dcutr.rs | 9 +- misc/metrics/src/gossipsub.rs | 3 +- misc/metrics/src/identify.rs | 22 +- misc/metrics/src/kad.rs | 14 +- misc/metrics/src/lib.rs | 2 +- misc/metrics/src/ping.rs | 14 +- misc/metrics/src/relay.rs | 9 +- misc/metrics/src/swarm.rs | 23 +- misc/multistream-select/src/dialer_select.rs | 34 +- .../src/length_delimited.rs | 11 +- misc/multistream-select/src/lib.rs | 17 +- .../multistream-select/src/listener_select.rs | 19 +- misc/multistream-select/src/negotiated.rs | 35 +- misc/multistream-select/src/protocol.rs | 19 +- misc/quick-protobuf-codec/src/lib.rs | 13 +- .../tests/large_message.rs | 3 +- misc/quickcheck-ext/src/lib.rs | 4 +- misc/rw-stream-sink/src/lib.rs | 11 +- misc/server/src/behaviour.rs | 17 +- misc/server/src/config.rs | 4 +- misc/server/src/http_service.rs | 16 +- misc/server/src/main.rs | 24 +- misc/webrtc-utils/src/fingerprint.rs | 3 +- misc/webrtc-utils/src/noise.rs | 12 +- misc/webrtc-utils/src/sdp.rs | 11 +- misc/webrtc-utils/src/stream.rs | 29 +- misc/webrtc-utils/src/stream/drop_listener.rs | 23 +- misc/webrtc-utils/src/stream/framed_dc.rs | 6 +- misc/webrtc-utils/src/stream/state.rs | 15 +- misc/webrtc-utils/src/transport.rs | 9 +- muxers/mplex/benches/split_send_size.rs | 26 +- muxers/mplex/src/codec.rs | 7 +- muxers/mplex/src/config.rs | 3 +- muxers/mplex/src/io.rs | 51 +- muxers/mplex/src/lib.rs | 15 +- muxers/test-harness/src/lib.rs | 36 +- muxers/yamux/src/lib.rs | 32 +- protocols/autonat/src/v1.rs | 3 +- protocols/autonat/src/v1/behaviour.rs | 61 +- .../autonat/src/v1/behaviour/as_client.rs | 21 +- .../autonat/src/v1/behaviour/as_server.rs | 20 +- protocols/autonat/src/v1/protocol.rs | 12 +- protocols/autonat/src/v2.rs | 12 +- protocols/autonat/src/v2/client.rs | 3 
+- protocols/autonat/src/v2/client/behaviour.rs | 17 +- .../src/v2/client/handler/dial_back.rs | 4 +- .../src/v2/client/handler/dial_request.rs | 22 +- protocols/autonat/src/v2/protocol.rs | 10 +- protocols/autonat/src/v2/server.rs | 3 +- protocols/autonat/src/v2/server/behaviour.rs | 9 +- .../src/v2/server/handler/dial_back.rs | 3 +- protocols/autonat/tests/autonatv2.rs | 14 +- protocols/autonat/tests/test_client.rs | 6 +- protocols/autonat/tests/test_server.rs | 9 +- protocols/dcutr/src/behaviour.rs | 44 +- protocols/dcutr/src/handler/relayed.rs | 33 +- protocols/dcutr/src/protocol/inbound.rs | 6 +- protocols/dcutr/src/protocol/outbound.rs | 7 +- protocols/dcutr/tests/lib.rs | 10 +- protocols/floodsub/src/layer.rs | 38 +- protocols/floodsub/src/lib.rs | 8 +- protocols/floodsub/src/protocol.rs | 10 +- protocols/gossipsub/src/backoff.rs | 22 +- protocols/gossipsub/src/behaviour.rs | 86 +-- protocols/gossipsub/src/behaviour/tests.rs | 688 +++++++++--------- protocols/gossipsub/src/config.rs | 46 +- protocols/gossipsub/src/error.rs | 4 +- protocols/gossipsub/src/gossip_promises.rs | 8 +- protocols/gossipsub/src/handler.rs | 35 +- protocols/gossipsub/src/lib.rs | 51 +- protocols/gossipsub/src/mcache.rs | 17 +- protocols/gossipsub/src/metrics.rs | 42 +- protocols/gossipsub/src/peer_score.rs | 51 +- protocols/gossipsub/src/peer_score/params.rs | 34 +- protocols/gossipsub/src/peer_score/tests.rs | 7 +- protocols/gossipsub/src/protocol.rs | 36 +- protocols/gossipsub/src/rpc.rs | 3 +- protocols/gossipsub/src/rpc_proto.rs | 4 +- .../gossipsub/src/subscription_filter.rs | 8 +- protocols/gossipsub/src/time_cache.rs | 19 +- protocols/gossipsub/src/topic.rs | 6 +- protocols/gossipsub/src/types.rs | 20 +- protocols/gossipsub/tests/smoke.rs | 9 +- protocols/identify/src/behaviour.rs | 47 +- protocols/identify/src/handler.rs | 31 +- protocols/identify/src/lib.rs | 14 +- protocols/identify/src/protocol.rs | 12 +- protocols/identify/tests/smoke.rs | 15 +- 
protocols/kad/src/addresses.rs | 3 +- protocols/kad/src/behaviour.rs | 107 +-- protocols/kad/src/behaviour/test.rs | 25 +- protocols/kad/src/bootstrap.rs | 50 +- protocols/kad/src/handler.rs | 38 +- protocols/kad/src/jobs.rs | 37 +- protocols/kad/src/kbucket.rs | 11 +- protocols/kad/src/kbucket/bucket.rs | 24 +- protocols/kad/src/kbucket/entry.rs | 1 - protocols/kad/src/kbucket/key.rs | 19 +- protocols/kad/src/lib.rs | 26 +- protocols/kad/src/protocol.rs | 194 ++--- protocols/kad/src/query.rs | 27 +- protocols/kad/src/query/peers.rs | 13 +- protocols/kad/src/query/peers/closest.rs | 24 +- .../kad/src/query/peers/closest/disjoint.rs | 12 +- protocols/kad/src/query/peers/fixed.rs | 5 +- protocols/kad/src/record.rs | 13 +- protocols/kad/src/record/store.rs | 23 +- protocols/kad/src/record/store/memory.rs | 16 +- protocols/kad/tests/client_mode.rs | 7 +- protocols/mdns/src/behaviour.rs | 56 +- protocols/mdns/src/behaviour/iface.rs | 29 +- protocols/mdns/src/behaviour/iface/dns.rs | 13 +- protocols/mdns/src/behaviour/iface/query.rs | 21 +- protocols/mdns/src/behaviour/socket.rs | 12 +- protocols/mdns/src/behaviour/timer.rs | 16 +- protocols/mdns/src/lib.rs | 11 +- protocols/mdns/tests/use-async-std.rs | 6 +- protocols/mdns/tests/use-tokio.rs | 3 +- protocols/perf/src/bin/perf.rs | 13 +- protocols/perf/src/client.rs | 6 +- protocols/perf/src/client/behaviour.rs | 4 +- protocols/perf/src/client/handler.rs | 6 +- protocols/perf/src/protocol.rs | 4 +- protocols/perf/src/server/behaviour.rs | 3 +- protocols/perf/src/server/handler.rs | 6 +- protocols/ping/src/handler.rs | 33 +- protocols/ping/src/lib.rs | 28 +- protocols/ping/src/protocol.rs | 14 +- protocols/ping/tests/ping.rs | 6 +- protocols/relay/src/behaviour.rs | 39 +- protocols/relay/src/behaviour/handler.rs | 38 +- protocols/relay/src/behaviour/rate_limiter.rs | 17 +- protocols/relay/src/copy_future.rs | 37 +- protocols/relay/src/lib.rs | 4 +- protocols/relay/src/multiaddr_ext.rs | 3 +- 
protocols/relay/src/priv_client.rs | 45 +- protocols/relay/src/priv_client/handler.rs | 40 +- protocols/relay/src/priv_client/transport.rs | 53 +- protocols/relay/src/protocol.rs | 6 +- protocols/relay/src/protocol/inbound_hop.rs | 9 +- protocols/relay/src/protocol/inbound_stop.rs | 10 +- protocols/relay/src/protocol/outbound_hop.rs | 15 +- protocols/relay/src/protocol/outbound_stop.rs | 9 +- protocols/relay/tests/lib.rs | 30 +- protocols/rendezvous/src/client.rs | 39 +- protocols/rendezvous/src/codec.rs | 16 +- protocols/rendezvous/src/lib.rs | 3 +- protocols/rendezvous/src/server.rs | 36 +- protocols/rendezvous/tests/rendezvous.rs | 17 +- protocols/request-response/src/cbor.rs | 14 +- protocols/request-response/src/codec.rs | 3 +- protocols/request-response/src/handler.rs | 36 +- protocols/request-response/src/json.rs | 16 +- protocols/request-response/src/lib.rs | 31 +- .../request-response/tests/error_reporting.rs | 5 +- .../request-response/tests/peer_address.rs | 3 +- protocols/request-response/tests/ping.rs | 3 +- protocols/stream/src/control.rs | 11 +- protocols/stream/src/handler.rs | 3 +- protocols/stream/src/shared.rs | 9 +- protocols/upnp/src/behaviour.rs | 8 +- protocols/upnp/src/lib.rs | 1 - protocols/upnp/src/tokio.rs | 4 +- rustfmt.toml | 10 + swarm-derive/src/lib.rs | 6 +- swarm-test/src/lib.rs | 101 ++- swarm/benches/connection_handler.rs | 5 +- swarm/src/behaviour.rs | 133 ++-- swarm/src/behaviour/either.rs | 15 +- swarm/src/behaviour/external_addresses.rs | 10 +- swarm/src/behaviour/listen_addresses.rs | 9 +- swarm/src/behaviour/peer_addresses.rs | 13 +- swarm/src/behaviour/toggle.rs | 28 +- swarm/src/connection.rs | 127 ++-- swarm/src/connection/error.rs | 5 +- swarm/src/connection/pool.rs | 61 +- swarm/src/connection/pool/concurrent_dial.rs | 14 +- swarm/src/connection/pool/task.rs | 17 +- swarm/src/connection/supported_protocols.rs | 4 +- swarm/src/dial_opts.rs | 18 +- swarm/src/dummy.rs | 24 +- swarm/src/executor.rs | 11 +- 
swarm/src/handler.rs | 96 ++- swarm/src/handler/either.rs | 16 +- swarm/src/handler/map_in.rs | 7 +- swarm/src/handler/map_out.rs | 10 +- swarm/src/handler/multi.rs | 24 +- swarm/src/handler/one_shot.rs | 33 +- swarm/src/handler/pending.rs | 10 +- swarm/src/handler/select.rs | 19 +- swarm/src/lib.rs | 195 ++--- swarm/src/listen_opts.rs | 3 +- swarm/src/stream.rs | 6 +- swarm/src/stream_protocol.rs | 13 +- swarm/src/test.rs | 35 +- swarm/src/upgrade.rs | 9 +- swarm/tests/connection_close.rs | 18 +- swarm/tests/listener.rs | 1 - swarm/tests/swarm_derive.rs | 19 +- transports/dns/src/lib.rs | 65 +- transports/noise/src/io.rs | 10 +- transports/noise/src/io/framed.rs | 17 +- transports/noise/src/io/handshake.rs | 30 +- transports/noise/src/lib.rs | 33 +- transports/noise/src/protocol.rs | 3 +- transports/noise/tests/smoke.rs | 11 +- .../noise/tests/webtransport_certhashes.rs | 3 +- transports/plaintext/src/error.rs | 4 +- transports/plaintext/src/handshake.rs | 12 +- transports/plaintext/src/lib.rs | 21 +- transports/pnet/src/crypt_writer.rs | 6 +- transports/pnet/src/lib.rs | 23 +- transports/pnet/tests/smoke.rs | 7 +- transports/quic/src/config.rs | 3 +- transports/quic/src/connection.rs | 14 +- transports/quic/src/connection/connecting.rs | 13 +- transports/quic/src/hole_punching.rs | 13 +- transports/quic/src/lib.rs | 12 +- transports/quic/src/provider.rs | 8 +- transports/quic/src/provider/async_std.rs | 3 +- transports/quic/src/provider/tokio.rs | 3 +- transports/quic/src/transport.rs | 56 +- transports/quic/tests/smoke.rs | 47 +- transports/quic/tests/stream_compliance.rs | 12 +- transports/tcp/src/lib.rs | 56 +- transports/tcp/src/provider.rs | 17 +- transports/tcp/src/provider/async_io.rs | 19 +- transports/tcp/src/provider/tokio.rs | 21 +- transports/tls/src/certificate.rs | 11 +- transports/tls/src/lib.rs | 8 +- transports/tls/src/upgrade.rs | 26 +- transports/tls/src/verifier.rs | 18 +- transports/tls/tests/smoke.rs | 8 +- transports/uds/src/lib.rs | 23 +- 
transports/webrtc-websys/src/connection.rs | 26 +- transports/webrtc-websys/src/lib.rs | 10 +- transports/webrtc-websys/src/sdp.rs | 3 +- transports/webrtc-websys/src/stream.rs | 10 +- .../src/stream/poll_data_channel.rs | 39 +- transports/webrtc-websys/src/transport.rs | 23 +- transports/webrtc-websys/src/upgrade.rs | 14 +- transports/webrtc/src/lib.rs | 9 +- transports/webrtc/src/tokio/certificate.rs | 3 +- transports/webrtc/src/tokio/connection.rs | 27 +- transports/webrtc/src/tokio/req_res_chan.rs | 10 +- transports/webrtc/src/tokio/sdp.rs | 6 +- transports/webrtc/src/tokio/stream.rs | 4 +- transports/webrtc/src/tokio/transport.rs | 27 +- transports/webrtc/src/tokio/udp_mux.rs | 31 +- transports/webrtc/src/tokio/upgrade.rs | 30 +- transports/webrtc/tests/smoke.rs | 33 +- transports/websocket-websys/src/lib.rs | 36 +- transports/websocket/src/error.rs | 6 +- transports/websocket/src/framed.rs | 27 +- transports/websocket/src/lib.rs | 51 +- transports/websocket/src/quicksink.rs | 11 +- transports/websocket/src/tls.rs | 3 +- .../webtransport-websys/src/connection.rs | 31 +- .../webtransport-websys/src/endpoint.rs | 12 +- .../src/fused_js_promise.rs | 9 +- transports/webtransport-websys/src/lib.rs | 10 +- transports/webtransport-websys/src/stream.rs | 20 +- .../webtransport-websys/src/transport.rs | 19 +- transports/webtransport-websys/src/utils.rs | 5 +- wasm-tests/webtransport-tests/src/lib.rs | 12 +- 356 files changed, 4305 insertions(+), 3310 deletions(-) create mode 100644 rustfmt.toml diff --git a/core/src/connection.rs b/core/src/connection.rs index bb6639842c9..d46a6cf81e6 100644 --- a/core/src/connection.rs +++ b/core/src/connection.rs @@ -70,18 +70,16 @@ pub enum ConnectedPoint { /// /// - [`Endpoint::Dialer`] represents the default non-overriding option. /// - /// - [`Endpoint::Listener`] represents the overriding option. - /// Realization depends on the transport protocol. E.g. 
in the case of - /// TCP, both endpoints dial each other, resulting in a _simultaneous - /// open_ TCP connection. On this new connection both endpoints assume - /// to be the dialer of the connection. This is problematic during the - /// connection upgrade process where an upgrade assumes one side to be - /// the listener. With the help of this option, both peers can - /// negotiate the roles (dialer and listener) for the new connection - /// ahead of time, through some external channel, e.g. the DCUtR - /// protocol, and thus have one peer dial the other and upgrade the - /// connection as a dialer and one peer dial the other and upgrade the - /// connection _as a listener_ overriding its role. + /// - [`Endpoint::Listener`] represents the overriding option. Realization depends on the + /// transport protocol. E.g. in the case of TCP, both endpoints dial each other, + /// resulting in a _simultaneous open_ TCP connection. On this new connection both + /// endpoints assume to be the dialer of the connection. This is problematic during the + /// connection upgrade process where an upgrade assumes one side to be the listener. With + /// the help of this option, both peers can negotiate the roles (dialer and listener) for + /// the new connection ahead of time, through some external channel, e.g. the DCUtR + /// protocol, and thus have one peer dial the other and upgrade the connection as a + /// dialer and one peer dial the other and upgrade the connection _as a listener_ + /// overriding its role. role_override: Endpoint, /// Whether the port for the outgoing connection was reused from a listener /// or a new port was allocated. This is useful for address translation. diff --git a/core/src/either.rs b/core/src/either.rs index 2593174290c..aa0340a46bf 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -18,17 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::muxing::StreamMuxerEvent; -use crate::transport::DialOpts; -use crate::{ - muxing::StreamMuxer, - transport::{ListenerId, Transport, TransportError, TransportEvent}, - Multiaddr, +use std::{ + pin::Pin, + task::{Context, Poll}, }; + use either::Either; use futures::prelude::*; use pin_project::pin_project; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, + Multiaddr, +}; impl StreamMuxer for future::Either where diff --git a/core/src/lib.rs b/core/src/lib.rs index ab5afbedae4..bbe42adc26a 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,14 +22,12 @@ //! //! The main concepts of libp2p-core are: //! -//! - The [`Transport`] trait defines how to reach a remote node or listen for -//! incoming remote connections. See the [`transport`] module. -//! - The [`StreamMuxer`] trait is implemented on structs that hold a connection -//! to a remote and can subdivide this connection into multiple substreams. -//! See the [`muxing`] module. -//! - The [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] traits -//! define how to upgrade each individual substream to use a protocol. -//! See the `upgrade` module. +//! - The [`Transport`] trait defines how to reach a remote node or listen for incoming remote +//! connections. See the [`transport`] module. +//! - The [`StreamMuxer`] trait is implemented on structs that hold a connection to a remote and can +//! subdivide this connection into multiple substreams. See the [`muxing`] module. +//! - The [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] traits define how to upgrade +//! each individual substream to use a protocol. See the `upgrade` module. 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -37,7 +35,8 @@ mod proto { #![allow(unreachable_pub)] include!("generated/mod.rs"); pub use self::{ - envelope_proto::*, peer_record_proto::mod_PeerRecord::*, peer_record_proto::PeerRecord, + envelope_proto::*, + peer_record_proto::{mod_PeerRecord::*, PeerRecord}, }; } diff --git a/core/src/muxing.rs b/core/src/muxing.rs index 477e1608073..60062f899f9 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -24,7 +24,7 @@ //! has ownership of a connection, lets you open and close substreams. //! //! > **Note**: You normally don't need to use the methods of the `StreamMuxer` directly, as this -//! > is managed by the library's internals. +//! > is managed by the library's internals. //! //! Each substream of a connection is an isolated stream of data. All the substreams are muxed //! together so that the data read from or written to each substream doesn't influence the other @@ -36,9 +36,9 @@ //! require maintaining long-lived channels of communication. //! //! > **Example**: The Kademlia protocol opens a new substream for each request it wants to -//! > perform. Multiple requests can be performed simultaneously by opening multiple -//! > substreams, without having to worry about associating responses with the -//! > right request. +//! > perform. Multiple requests can be performed simultaneously by opening multiple +//! > substreams, without having to worry about associating responses with the +//! > right request. //! //! # Implementing a muxing protocol //! @@ -50,21 +50,23 @@ //! The upgrade process will take ownership of the connection, which makes it possible for the //! implementation of `StreamMuxer` to control everything that happens on the wire. 
-use futures::{task::Context, task::Poll, AsyncRead, AsyncWrite}; +use std::{future::Future, pin::Pin}; + +use futures::{ + task::{Context, Poll}, + AsyncRead, AsyncWrite, +}; use multiaddr::Multiaddr; -use std::future::Future; -use std::pin::Pin; -pub use self::boxed::StreamMuxerBox; -pub use self::boxed::SubstreamBox; +pub use self::boxed::{StreamMuxerBox, SubstreamBox}; mod boxed; /// Provides multiplexing for a connection by allowing users to open substreams. /// -/// A substream created by a [`StreamMuxer`] is a type that implements [`AsyncRead`] and [`AsyncWrite`]. -/// The [`StreamMuxer`] itself is modelled closely after [`AsyncWrite`]. It features `poll`-style -/// functions that allow the implementation to make progress on various tasks. +/// A substream created by a [`StreamMuxer`] is a type that implements [`AsyncRead`] and +/// [`AsyncWrite`]. The [`StreamMuxer`] itself is modelled closely after [`AsyncWrite`]. It features +/// `poll`-style functions that allow the implementation to make progress on various tasks. pub trait StreamMuxer { /// Type of the object that represents the raw substream where data can be read and written. type Substream: AsyncRead + AsyncWrite; @@ -90,13 +92,13 @@ pub trait StreamMuxer { /// Poll to close this [`StreamMuxer`]. /// - /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless and may be safely - /// dropped. + /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless and may be + /// safely dropped. /// /// > **Note**: You are encouraged to call this method and wait for it to return `Ready`, so - /// > that the remote is properly informed of the shutdown. However, apart from - /// > properly informing the remote, there is no difference between this and - /// > immediately dropping the muxer. + /// > that the remote is properly informed of the shutdown. 
However, apart from + /// > properly informing the remote, there is no difference between this and + /// > immediately dropping the muxer. fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; /// Poll to allow the underlying connection to make progress. @@ -120,7 +122,8 @@ pub enum StreamMuxerEvent { /// Extension trait for [`StreamMuxer`]. pub trait StreamMuxerExt: StreamMuxer + Sized { - /// Convenience function for calling [`StreamMuxer::poll_inbound`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_inbound`] + /// for [`StreamMuxer`]s that are `Unpin`. fn poll_inbound_unpin( &mut self, cx: &mut Context<'_>, @@ -131,7 +134,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll_inbound(cx) } - /// Convenience function for calling [`StreamMuxer::poll_outbound`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_outbound`] + /// for [`StreamMuxer`]s that are `Unpin`. fn poll_outbound_unpin( &mut self, cx: &mut Context<'_>, @@ -142,7 +146,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll_outbound(cx) } - /// Convenience function for calling [`StreamMuxer::poll`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll`] + /// for [`StreamMuxer`]s that are `Unpin`. fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, @@ -150,7 +155,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll(cx) } - /// Convenience function for calling [`StreamMuxer::poll_close`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_close`] + /// for [`StreamMuxer`]s that are `Unpin`. 
fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, diff --git a/core/src/muxing/boxed.rs b/core/src/muxing/boxed.rs index e909fb9fbf1..8e76c32b73e 100644 --- a/core/src/muxing/boxed.rs +++ b/core/src/muxing/boxed.rs @@ -1,12 +1,15 @@ -use crate::muxing::{StreamMuxer, StreamMuxerEvent}; +use std::{ + error::Error, + fmt, io, + io::{IoSlice, IoSliceMut}, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite}; use pin_project::pin_project; -use std::error::Error; -use std::fmt; -use std::io; -use std::io::{IoSlice, IoSliceMut}; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use crate::muxing::{StreamMuxer, StreamMuxerEvent}; /// Abstract `StreamMuxer`. pub struct StreamMuxerBox { @@ -139,7 +142,8 @@ impl StreamMuxer for StreamMuxerBox { } impl SubstreamBox { - /// Construct a new [`SubstreamBox`] from something that implements [`AsyncRead`] and [`AsyncWrite`]. + /// Construct a new [`SubstreamBox`] from something + /// that implements [`AsyncRead`] and [`AsyncWrite`]. pub fn new(stream: S) -> Self { Self(Box::pin(stream)) } diff --git a/core/src/peer_record.rs b/core/src/peer_record.rs index ac488338cc6..9c6b7f73f05 100644 --- a/core/src/peer_record.rs +++ b/core/src/peer_record.rs @@ -1,18 +1,16 @@ -use crate::signed_envelope::SignedEnvelope; -use crate::{proto, signed_envelope, DecodeError, Multiaddr}; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; -use libp2p_identity::SigningError; +use libp2p_identity::{Keypair, PeerId, SigningError}; use quick_protobuf::{BytesReader, Writer}; use web_time::SystemTime; +use crate::{proto, signed_envelope, signed_envelope::SignedEnvelope, DecodeError, Multiaddr}; + const PAYLOAD_TYPE: &str = "/libp2p/routing-state-record"; const DOMAIN_SEP: &str = "libp2p-routing-state"; /// Represents a peer routing record. /// -/// Peer records are designed to be distributable and carry a signature by being wrapped in a signed envelope. 
-/// For more information see RFC0003 of the libp2p specifications: +/// Peer records are designed to be distributable and carry a signature by being wrapped in a signed +/// envelope. For more information see RFC0003 of the libp2p specifications: #[derive(Debug, PartialEq, Eq, Clone)] pub struct PeerRecord { peer_id: PeerId, @@ -21,14 +19,16 @@ pub struct PeerRecord { /// A signed envelope representing this [`PeerRecord`]. /// - /// If this [`PeerRecord`] was constructed from a [`SignedEnvelope`], this is the original instance. + /// If this [`PeerRecord`] was constructed from a [`SignedEnvelope`], this is the original + /// instance. envelope: SignedEnvelope, } impl PeerRecord { /// Attempt to re-construct a [`PeerRecord`] from a [`SignedEnvelope`]. /// - /// If this function succeeds, the [`SignedEnvelope`] contained a peer record with a valid signature and can hence be considered authenticated. + /// If this function succeeds, the [`SignedEnvelope`] contained a peer record with a valid + /// signature and can hence be considered authenticated. pub fn from_signed_envelope(envelope: SignedEnvelope) -> Result { use quick_protobuf::MessageRead; @@ -60,7 +60,8 @@ impl PeerRecord { /// Construct a new [`PeerRecord`] by authenticating the provided addresses with the given key. /// - /// This is the same key that is used for authenticating every libp2p connection of your application, i.e. what you use when setting up your [`crate::transport::Transport`]. + /// This is the same key that is used for authenticating every libp2p connection of your + /// application, i.e. what you use when setting up your [`crate::transport::Transport`]. 
pub fn new(key: &Keypair, addresses: Vec) -> Result { use quick_protobuf::MessageWrite; diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 19a0cac4f82..754d6ec204d 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -1,11 +1,13 @@ -use crate::{proto, DecodeError}; -use libp2p_identity::SigningError; -use libp2p_identity::{Keypair, PublicKey}; -use quick_protobuf::{BytesReader, Writer}; use std::fmt; + +use libp2p_identity::{Keypair, PublicKey, SigningError}; +use quick_protobuf::{BytesReader, Writer}; use unsigned_varint::encode::usize_buffer; -/// A signed envelope contains an arbitrary byte string payload, a signature of the payload, and the public key that can be used to verify the signature. +use crate::{proto, DecodeError}; + +/// A signed envelope contains an arbitrary byte string payload, a signature of the payload, and the +/// public key that can be used to verify the signature. /// /// For more details see libp2p RFC0002: #[derive(Debug, Clone, PartialEq, Eq)] @@ -46,8 +48,9 @@ impl SignedEnvelope { /// Extract the payload and signing key of this [`SignedEnvelope`]. /// - /// You must provide the correct domain-separation string and expected payload type in order to get the payload. - /// This guards against accidental mis-use of the payload where the signature was created for a different purpose or payload type. + /// You must provide the correct domain-separation string and expected payload type in order to + /// get the payload. This guards against accidental mis-use of the payload where the + /// signature was created for a different purpose or payload type. /// /// It is the caller's responsibility to check that the signing key is what /// is expected. For example, checking that the signing key is from a @@ -156,7 +159,8 @@ pub enum DecodingError { /// Errors that occur whilst extracting the payload of a [`SignedEnvelope`]. 
#[derive(Debug)] pub enum ReadPayloadError { - /// The signature on the signed envelope does not verify with the provided domain separation string. + /// The signature on the signed envelope does not verify + /// with the provided domain separation string. InvalidSignature, /// The payload contained in the envelope is not of the expected type. UnexpectedPayloadType { expected: Vec, got: Vec }, diff --git a/core/src/transport.rs b/core/src/transport.rs index 28ce2dbf650..ecd332f28cc 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -25,8 +25,6 @@ //! any desired protocols. The rest of the module defines combinators for //! modifying a transport through composition with other transports or protocol upgrades. -use futures::prelude::*; -use multiaddr::Multiaddr; use std::{ error::Error, fmt, @@ -35,6 +33,9 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; +use multiaddr::Multiaddr; + pub mod and_then; pub mod choice; pub mod dummy; @@ -48,14 +49,12 @@ pub mod upgrade; mod boxed; mod optional; +pub use self::{ + boxed::Boxed, choice::OrTransport, memory::MemoryTransport, optional::OptionalTransport, + upgrade::Upgrade, +}; use crate::{ConnectedPoint, Endpoint}; -pub use self::boxed::Boxed; -pub use self::choice::OrTransport; -pub use self::memory::MemoryTransport; -pub use self::optional::OptionalTransport; -pub use self::upgrade::Upgrade; - static NEXT_LISTENER_ID: AtomicUsize = AtomicUsize::new(1); /// The port use policy for a new connection. @@ -75,8 +74,9 @@ pub enum PortUse { pub struct DialOpts { /// The endpoint establishing a new connection. /// - /// When attempting a hole-punch, both parties simultaneously "dial" each other but one party has to be the "listener" on the final connection. - /// This option specifies the role of this node in the final connection. + /// When attempting a hole-punch, both parties simultaneously "dial" each other but one party + /// has to be the "listener" on the final connection. 
This option specifies the role of + /// this node in the final connection. pub role: Endpoint, /// The port use policy for a new connection. pub port_use: PortUse, @@ -161,10 +161,10 @@ pub trait Transport { /// Poll for [`TransportEvent`]s. /// - /// A [`TransportEvent::Incoming`] should be produced whenever a connection is received at the lowest - /// level of the transport stack. The item must be a [`ListenerUpgrade`](Transport::ListenerUpgrade) - /// future that resolves to an [`Output`](Transport::Output) value once all protocol upgrades have - /// been applied. + /// A [`TransportEvent::Incoming`] should be produced whenever a connection is received at the + /// lowest level of the transport stack. The item must be a + /// [`ListenerUpgrade`](Transport::ListenerUpgrade) future that resolves to an + /// [`Output`](Transport::Output) value once all protocol upgrades have been applied. /// /// Transports are expected to produce [`TransportEvent::Incoming`] events only for /// listen addresses which have previously been announced via diff --git a/core/src/transport/and_then.rs b/core/src/transport/and_then.rs index e85703f77fb..5d2b7d91553 100644 --- a/core/src/transport/and_then.rs +++ b/core/src/transport/and_then.rs @@ -18,14 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{ - connection::ConnectedPoint, - transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +use std::{ + error, + marker::PhantomPinned, + pin::Pin, + task::{Context, Poll}, }; + use either::Either; use futures::prelude::*; use multiaddr::Multiaddr; -use std::{error, marker::PhantomPinned, pin::Pin, task::Context, task::Poll}; + +use crate::{ + connection::ConnectedPoint, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +}; /// See the [`Transport::and_then`] method. 
#[pin_project::pin_project] diff --git a/core/src/transport/boxed.rs b/core/src/transport/boxed.rs index 596ab262221..6894d9876aa 100644 --- a/core/src/transport/boxed.rs +++ b/core/src/transport/boxed.rs @@ -18,9 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use futures::{prelude::*, stream::FusedStream}; -use multiaddr::Multiaddr; use std::{ error::Error, fmt, io, @@ -28,6 +25,11 @@ use std::{ task::{Context, Poll}, }; +use futures::{prelude::*, stream::FusedStream}; +use multiaddr::Multiaddr; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; + /// Creates a new [`Boxed`] transport from the given transport. pub(crate) fn boxed(transport: T) -> Boxed where diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index 4339f6bba71..251091f2008 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -18,12 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::either::EitherFuture; -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use either::Either; use futures::future; use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::{ + either::EitherFuture, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +}; /// Struct returned by `or_transport()`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/dummy.rs b/core/src/transport/dummy.rs index 72558d34a79..85c5815fd37 100644 --- a/core/src/transport/dummy.rs +++ b/core/src/transport/dummy.rs @@ -18,11 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use crate::Multiaddr; -use futures::{prelude::*, task::Context, task::Poll}; use std::{fmt, io, marker::PhantomData, pin::Pin}; +use futures::{ + prelude::*, + task::{Context, Poll}, +}; + +use crate::{ + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, + Multiaddr, +}; + /// Implementation of `Transport` that doesn't support any multiaddr. /// /// Useful for testing purposes, or as a fallback implementation when no protocol is available. diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs index 83774f37004..00df6457412 100644 --- a/core/src/transport/global_only.rs +++ b/core/src/transport/global_only.rs @@ -18,15 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{ - multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, ListenerId, TransportError, TransportEvent}, -}; use std::{ pin::Pin, task::{Context, Poll}, }; +use crate::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; + /// Dropping all dial requests to non-global IP addresses. #[derive(Debug, Clone, Default)] pub struct Transport { @@ -104,7 +105,8 @@ mod ipv4_global { /// Returns [`true`] if the address appears to be globally reachable /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. + /// Whether or not an address is practically reachable will depend on your network + /// configuration. /// /// Most IPv4 addresses are globally reachable; /// unless they are specifically defined as *not* globally reachable. 
@@ -121,7 +123,8 @@ mod ipv4_global { /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// For the complete overview of which addresses are globally reachable, see the table at the + /// [IANA IPv4 Special-Purpose Address Registry]. /// /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml /// [unspecified address]: Ipv4Addr::UNSPECIFIED @@ -154,9 +157,10 @@ mod ipv6_global { /// Returns `true` if the address is a unicast address with link-local scope, /// as defined in [RFC 4291]. /// - /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4]. - /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6], - /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format: + /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 + /// section 2.4]. Note that this encompasses more addresses than those defined in [RFC 4291 + /// section 2.5.6], which describes "Link-Local IPv6 Unicast Addresses" as having the + /// following stricter format: /// /// ```text /// | 10 bits | 54 bits | 64 bits | @@ -164,12 +168,14 @@ mod ipv6_global { /// |1111111010| 0 | interface ID | /// +----------+-------------------------+----------------------------+ /// ``` - /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`, - /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated, - /// and those addresses will have link-local scope. 
+ /// So while currently the only addresses with link-local scope an application will encounter + /// are all in `fe80::/64`, this might change in the future with the publication of new + /// standards. More addresses in `fe80::/10` could be allocated, and those addresses will + /// have link-local scope. /// - /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) that "it is treated as having Link-Local scope", - /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it. + /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) + /// that "it is treated as having Link-Local scope", this does not mean that the loopback + /// address actually has link-local scope and this method will return `false` on it. /// /// [RFC 4291]: https://tools.ietf.org/html/rfc4291 /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4 @@ -207,7 +213,8 @@ mod ipv6_global { /// Returns [`true`] if the address appears to be globally reachable /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. + /// Whether or not an address is practically reachable will depend on your network + /// configuration. /// /// Most IPv6 addresses are globally reachable; /// unless they are specifically defined as *not* globally reachable. 
@@ -219,13 +226,15 @@ mod ipv6_global { /// - Addresses reserved for benchmarking /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) - /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) + /// - Unicast addresses with link-local scope + /// ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. + /// For the complete overview of which addresses are globally reachable, see the table at the + /// [IANA IPv6 Special-Purpose Address Registry]. /// /// Note that an address having global scope is not the same as being globally reachable, - /// and there is no direct relation between the two concepts: There exist addresses with global scope - /// that are not globally reachable (for example unique local addresses), + /// and there is no direct relation between the two concepts: There exist addresses with global + /// scope that are not globally reachable (for example unique local addresses), /// and addresses that are globally reachable without having global scope /// (multicast addresses with non-global scope). /// diff --git a/core/src/transport/map.rs b/core/src/transport/map.rs index 9aab84ba8b1..4f6910b141f 100644 --- a/core/src/transport/map.rs +++ b/core/src/transport/map.rs @@ -18,16 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::transport::DialOpts; -use crate::{ - connection::ConnectedPoint, - transport::{Transport, TransportError, TransportEvent}, +use std::{ + pin::Pin, + task::{Context, Poll}, }; + use futures::prelude::*; use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; use super::ListenerId; +use crate::{ + connection::ConnectedPoint, + transport::{DialOpts, Transport, TransportError, TransportEvent}, +}; /// See `Transport::map`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/map_err.rs b/core/src/transport/map_err.rs index 5d44af9af2e..f47f5713225 100644 --- a/core/src/transport/map_err.rs +++ b/core/src/transport/map_err.rs @@ -18,10 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + error, + pin::Pin, + task::{Context, Poll}, +}; + use futures::prelude::*; use multiaddr::Multiaddr; -use std::{error, pin::Pin, task::Context, task::Poll}; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; /// See `Transport::map_err`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index 85680265e8b..19197ddf714 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -18,13 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use fnv::FnvHashMap; -use futures::{channel::mpsc, future::Ready, prelude::*, task::Context, task::Poll}; -use multiaddr::{Multiaddr, Protocol}; -use once_cell::sync::Lazy; -use parking_lot::Mutex; -use rw_stream_sink::RwStreamSink; use std::{ collections::{hash_map::Entry, VecDeque}, error, fmt, io, @@ -32,6 +25,20 @@ use std::{ pin::Pin, }; +use fnv::FnvHashMap; +use futures::{ + channel::mpsc, + future::Ready, + prelude::*, + task::{Context, Poll}, +}; +use multiaddr::{Multiaddr, Protocol}; +use once_cell::sync::Lazy; +use parking_lot::Mutex; +use rw_stream_sink::RwStreamSink; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; + static HUB: Lazy = Lazy::new(|| Hub(Mutex::new(FnvHashMap::default()))); struct Hub(Mutex>); @@ -398,9 +405,8 @@ impl Drop for Chan { #[cfg(test)] mod tests { - use crate::{transport::PortUse, Endpoint}; - use super::*; + use crate::{transport::PortUse, Endpoint}; #[test] fn parse_memory_addr_works() { diff --git a/core/src/transport/optional.rs b/core/src/transport/optional.rs index f18bfa441b0..262f84f3095 100644 --- a/core/src/transport/optional.rs +++ b/core/src/transport/optional.rs @@ -18,9 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; /// Transport that is possibly disabled. /// diff --git a/core/src/transport/timeout.rs b/core/src/transport/timeout.rs index 830ed099629..ce494216279 100644 --- a/core/src/transport/timeout.rs +++ b/core/src/transport/timeout.rs @@ -24,14 +24,20 @@ //! underlying `Transport`. 
// TODO: add example -use crate::transport::DialOpts; -use crate::{ - transport::{ListenerId, TransportError, TransportEvent}, - Multiaddr, Transport, +use std::{ + error, fmt, io, + pin::Pin, + task::{Context, Poll}, + time::Duration, }; + use futures::prelude::*; use futures_timer::Delay; -use std::{error, fmt, io, pin::Pin, task::Context, task::Poll, time::Duration}; + +use crate::{ + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, + Multiaddr, Transport, +}; /// A `TransportTimeout` is a `Transport` that wraps another `Transport` and adds /// timeouts to all inbound and outbound connection attempts. diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index 66b9e7509af..480c2710020 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -20,15 +20,25 @@ //! Configuration of transport protocol upgrades. -pub use crate::upgrade::Version; +use std::{ + error::Error, + fmt, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{prelude::*, ready}; +use libp2p_identity::PeerId; +use multiaddr::Multiaddr; -use crate::transport::DialOpts; +pub use crate::upgrade::Version; use crate::{ connection::ConnectedPoint, muxing::{StreamMuxer, StreamMuxerBox}, transport::{ - and_then::AndThen, boxed::boxed, timeout::TransportTimeout, ListenerId, Transport, - TransportError, TransportEvent, + and_then::AndThen, boxed::boxed, timeout::TransportTimeout, DialOpts, ListenerId, + Transport, TransportError, TransportEvent, }, upgrade::{ self, apply_inbound, apply_outbound, InboundConnectionUpgrade, InboundUpgradeApply, @@ -36,16 +46,6 @@ use crate::{ }, Negotiated, }; -use futures::{prelude::*, ready}; -use libp2p_identity::PeerId; -use multiaddr::Multiaddr; -use std::{ - error::Error, - fmt, - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; /// A `Builder` facilitates upgrading of a [`Transport`] for use with /// a `Swarm`. 
@@ -59,13 +59,13 @@ use std::{ /// It thus enforces the following invariants on every transport /// obtained from [`multiplex`](Authenticated::multiplex): /// -/// 1. The transport must be [authenticated](Builder::authenticate) -/// and [multiplexed](Authenticated::multiplex). +/// 1. The transport must be [authenticated](Builder::authenticate) and +/// [multiplexed](Authenticated::multiplex). /// 2. Authentication must precede the negotiation of a multiplexer. /// 3. Applying a multiplexer is the last step in the upgrade process. -/// 4. The [`Transport::Output`] conforms to the requirements of a `Swarm`, -/// namely a tuple of a [`PeerId`] (from the authentication upgrade) and a -/// [`StreamMuxer`] (from the multiplexing upgrade). +/// 4. The [`Transport::Output`] conforms to the requirements of a `Swarm`, namely a tuple of a +/// [`PeerId`] (from the authentication upgrade) and a [`StreamMuxer`] (from the multiplexing +/// upgrade). #[derive(Clone)] pub struct Builder { inner: T, diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 7a1fd3724d0..93039705938 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -29,8 +29,8 @@ //! connection or substream. //! //! > **Note**: Multiple versions of the same protocol are treated as different protocols. -//! > For example, `/foo/1.0.0` and `/foo/1.1.0` are totally unrelated as far as -//! > upgrading is concerned. +//! > For example, `/foo/1.0.0` and `/foo/1.1.0` are totally unrelated as far as +//! > upgrading is concerned. //! //! # Upgrade process //! @@ -55,7 +55,6 @@ //! > connection or substream. However if you use the recommended `Swarm` or //! > `ConnectionHandler` APIs, the upgrade is automatically handled for you and you don't //! > need to use these methods. -//! 
mod apply; mod denied; @@ -70,12 +69,12 @@ pub(crate) use apply::{ }; pub(crate) use error::UpgradeError; use futures::future::Future; +pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; pub use self::{ denied::DeniedUpgrade, pending::PendingUpgrade, ready::ReadyUpgrade, select::SelectUpgrade, }; pub use crate::Negotiated; -pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; /// Common trait for upgrades that can be applied on inbound substreams, outbound substreams, /// or both. diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index f84aaaac9fa..9e090267b0c 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -18,13 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; -use crate::{connection::ConnectedPoint, Negotiated}; +use std::{ + mem, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{future::Either, prelude::*}; +pub(crate) use multistream_select::Version; use multistream_select::{DialerSelectFuture, ListenerSelectFuture}; -use std::{mem, pin::Pin, task::Context, task::Poll}; -pub(crate) use multistream_select::Version; +use crate::{ + connection::ConnectedPoint, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}, + Negotiated, +}; // TODO: Still needed? /// Applies an upgrade to the inbound and outbound direction of a connection or substream. diff --git a/core/src/upgrade/denied.rs b/core/src/upgrade/denied.rs index 568bbfb056d..9bea6fb023b 100644 --- a/core/src/upgrade/denied.rs +++ b/core/src/upgrade/denied.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; + +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; /// Dummy implementation of `UpgradeInfo`/`InboundUpgrade`/`OutboundUpgrade` that doesn't support /// any protocol. diff --git a/core/src/upgrade/either.rs b/core/src/upgrade/either.rs index db62f8d6558..9970dcb0b1d 100644 --- a/core/src/upgrade/either.rs +++ b/core/src/upgrade/either.rs @@ -18,13 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::iter::Map; + +use either::Either; +use futures::future; + use crate::{ either::EitherFuture, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, }; -use either::Either; -use futures::future; -use std::iter::Map; impl UpgradeInfo for Either where diff --git a/core/src/upgrade/error.rs b/core/src/upgrade/error.rs index 3d349587c2c..c81ed7cf75b 100644 --- a/core/src/upgrade/error.rs +++ b/core/src/upgrade/error.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use multistream_select::NegotiationError; use std::fmt; +use multistream_select::NegotiationError; + /// Error that can happen when upgrading a connection or substream to use a protocol. #[derive(Debug)] pub enum UpgradeError { diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs index 5e3c65422f1..60a9fb9aba1 100644 --- a/core/src/upgrade/pending.rs +++ b/core/src/upgrade/pending.rs @@ -19,10 +19,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; + +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; /// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always /// returns a pending upgrade. diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs index 13270aa8b6d..22708d726e7 100644 --- a/core/src/upgrade/ready.rs +++ b/core/src/upgrade/ready.rs @@ -19,12 +19,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; -/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream. +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; + +/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] +/// that directly yields the substream. #[derive(Debug, Copy, Clone)] pub struct ReadyUpgrade

{ protocol_name: P, diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs index 037045a2f29..b7fe4a53a7f 100644 --- a/core/src/upgrade/select.rs +++ b/core/src/upgrade/select.rs @@ -18,14 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::either::EitherFuture; -use crate::upgrade::{ - InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, - UpgradeInfo, -}; +use std::iter::{Chain, Map}; + use either::Either; use futures::future; -use std::iter::{Chain, Map}; + +use crate::{ + either::EitherFuture, + upgrade::{ + InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, + UpgradeInfo, + }, +}; /// Upgrade that combines two upgrades into one. Supports all the protocols supported by either /// sub-upgrade. diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index d8bec6f2b59..b9733e38322 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -18,18 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{io, pin::Pin}; + use futures::prelude::*; -use libp2p_core::transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport}; -use libp2p_core::upgrade::{ - self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo, +use libp2p_core::{ + transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport}, + upgrade::{self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, + Endpoint, }; -use libp2p_core::Endpoint; use libp2p_identity as identity; use libp2p_mplex::MplexConfig; use libp2p_noise as noise; use multiaddr::{Multiaddr, Protocol}; use rand::random; -use std::{io, pin::Pin}; #[derive(Clone)] struct HelloUpgrade {} diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index def66c4823b..80d7039eccb 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -20,15 +20,17 @@ #![doc = include_str!("../../README.md")] +use std::{error::Error, net::Ipv4Addr, time::Duration}; + use clap::Parser; use futures::StreamExt; -use libp2p::core::multiaddr::Protocol; -use libp2p::core::Multiaddr; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; -use std::error::Error; -use std::net::Ipv4Addr; -use std::time::Duration; +use libp2p::{ + autonat, + core::{multiaddr::Protocol, Multiaddr}, + identify, identity, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, PeerId, +}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 389cc0fa26f..83e456d8fda 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -20,14 +20,17 @@ #![doc = include_str!("../../README.md")] +use std::{error::Error, net::Ipv4Addr, time::Duration}; + use clap::Parser; use futures::StreamExt; -use 
libp2p::core::{multiaddr::Protocol, Multiaddr}; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux}; -use std::error::Error; -use std::net::Ipv4Addr; -use std::time::Duration; +use libp2p::{ + autonat, + core::{multiaddr::Protocol, Multiaddr}, + identify, identity, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, +}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 9499ccbd158..e2d884cb445 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -1,13 +1,11 @@ #![cfg(target_arch = "wasm32")] +use std::{io, time::Duration}; + use futures::StreamExt; use js_sys::Date; -use libp2p::core::Multiaddr; -use libp2p::ping; -use libp2p::swarm::SwarmEvent; +use libp2p::{core::Multiaddr, ping, swarm::SwarmEvent}; use libp2p_webrtc_websys as webrtc_websys; -use std::io; -use std::time::Duration; use wasm_bindgen::prelude::*; use web_sys::{Document, HtmlElement}; diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs index 7f06b0d0d99..ec6be0c066d 100644 --- a/examples/browser-webrtc/src/main.rs +++ b/examples/browser-webrtc/src/main.rs @@ -1,23 +1,27 @@ #![allow(non_upper_case_globals)] +use std::{ + net::{Ipv4Addr, SocketAddr}, + time::Duration, +}; + use anyhow::Result; -use axum::extract::{Path, State}; -use axum::http::header::CONTENT_TYPE; -use axum::http::StatusCode; -use axum::response::{Html, IntoResponse}; -use axum::{http::Method, routing::get, Router}; +use axum::{ + extract::{Path, State}, + http::{header::CONTENT_TYPE, Method, StatusCode}, + response::{Html, IntoResponse}, + routing::get, + Router, +}; use futures::StreamExt; use libp2p::{ - core::muxing::StreamMuxerBox, - core::Transport, + core::{muxing::StreamMuxerBox, Transport}, multiaddr::{Multiaddr, Protocol}, ping, swarm::SwarmEvent, }; use libp2p_webrtc as webrtc; 
use rand::thread_rng; -use std::net::{Ipv4Addr, SocketAddr}; -use std::time::Duration; use tokio::net::TcpListener; use tower_http::cors::{Any, CorsLayer}; @@ -127,7 +131,8 @@ struct Libp2pEndpoint(Multiaddr); /// Serves the index.html file for our client. /// /// Our server listens on a random UDP port for the WebRTC transport. -/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address. +/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` +/// placeholder with the actual address. async fn get_index( State(Libp2pEndpoint(libp2p_endpoint)): State, ) -> Result, StatusCode> { diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs index c785d301c2f..cda1e90bd35 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -20,12 +20,19 @@ #![doc = include_str!("../README.md")] +use std::{ + collections::hash_map::DefaultHasher, + error::Error, + hash::{Hash, Hasher}, + time::Duration, +}; + use futures::stream::StreamExt; -use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux}; -use std::collections::hash_map::DefaultHasher; -use std::error::Error; -use std::hash::{Hash, Hasher}; -use std::time::Duration; +use libp2p::{ + gossipsub, mdns, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, +}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; @@ -61,7 +68,8 @@ async fn main() -> Result<(), Box> { // Set a custom gossipsub configuration let gossipsub_config = gossipsub::ConfigBuilder::default() .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) + .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. 
The default is Strict (enforce message + // signing) .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. .build() .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 630d4b2b1f3..0ec1f2a321a 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -20,6 +20,8 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, str::FromStr, time::Duration}; + use clap::Parser; use futures::{executor::block_on, future::FutureExt, stream::StreamExt}; use libp2p::{ @@ -28,8 +30,6 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use std::str::FromStr; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 6b7947b7eb3..63944f2e9bd 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -20,17 +20,16 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::stream::StreamExt; -use libp2p::kad; -use libp2p::kad::store::MemoryStore; -use libp2p::kad::Mode; use libp2p::{ + kad, + kad::{store::MemoryStore, Mode}, mdns, noise, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use std::error::Error; -use std::time::Duration; use tokio::{ io::{self, AsyncBufReadExt}, select, diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index 5f6be83dc11..1e3b80a330c 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -22,15 +22,12 @@ mod network; -use clap::Parser; -use tokio::task::spawn; +use std::{error::Error, io::Write, path::PathBuf}; -use futures::prelude::*; -use 
futures::StreamExt; +use clap::Parser; +use futures::{prelude::*, StreamExt}; use libp2p::{core::Multiaddr, multiaddr::Protocol}; -use std::error::Error; -use std::io::Write; -use std::path::PathBuf; +use tokio::task::spawn; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index a74afd1c0da..409255ee9ec 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -1,7 +1,14 @@ -use futures::channel::{mpsc, oneshot}; -use futures::prelude::*; -use futures::StreamExt; +use std::{ + collections::{hash_map, HashMap, HashSet}, + error::Error, + time::Duration, +}; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, + StreamExt, +}; use libp2p::{ core::Multiaddr, identity, kad, @@ -9,19 +16,13 @@ use libp2p::{ noise, request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel}, swarm::{NetworkBehaviour, Swarm, SwarmEvent}, - tcp, yamux, PeerId, + tcp, yamux, PeerId, StreamProtocol, }; - -use libp2p::StreamProtocol; use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, HashMap, HashSet}; -use std::error::Error; -use std::time::Duration; /// Creates the network components, namely: /// -/// - The network client to interact with the network layer from anywhere -/// within your application. +/// - The network client to interact with the network layer from anywhere within your application. /// /// - The network event stream, e.g. for incoming requests. 
/// diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index 22474061da6..55d093c0399 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index 95921d6fa35..c2df603fcc2 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -20,15 +20,21 @@ #![doc = include_str!("../README.md")] -use std::num::NonZeroUsize; -use std::ops::Add; -use std::time::{Duration, Instant}; +use std::{ + num::NonZeroUsize, + ops::Add, + time::{Duration, Instant}, +}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::swarm::{StreamProtocol, SwarmEvent}; -use libp2p::{bytes::BufMut, identity, kad, noise, tcp, yamux, PeerId}; +use libp2p::{ + bytes::BufMut, + identity, kad, noise, + swarm::{StreamProtocol, SwarmEvent}, + tcp, yamux, PeerId, +}; use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index a57bfd465e0..19d38c767e9 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -20,6 +20,8 @@ #![doc = include_str!("../README.md")] +use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; + use either::Either; use futures::prelude::*; use libp2p::{ @@ -31,7 +33,6 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, Transport, }; -use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; diff --git a/examples/metrics/src/http_service.rs 
b/examples/metrics/src/http_service.rs index 4a9c9785bb3..f1485832d86 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -18,15 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use axum::extract::State; -use axum::http::StatusCode; -use axum::response::IntoResponse; -use axum::routing::get; -use axum::Router; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router}; +use prometheus_client::{encoding::text::encode, registry::Registry}; use tokio::net::TcpListener; const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 1755c769053..92aa90479fd 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -20,18 +20,20 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; -use libp2p::core::Multiaddr; -use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{identify, identity, noise, ping, tcp, yamux}; +use libp2p::{ + core::Multiaddr, + identify, identity, + metrics::{Metrics, Recorder}, + noise, ping, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, +}; use opentelemetry::{trace::TracerProvider, KeyValue}; use prometheus_client::registry::Registry; -use std::error::Error; -use std::time::Duration; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::{EnvFilter, Layer}; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; mod http_service; diff --git 
a/examples/ping/src/main.rs b/examples/ping/src/main.rs index 911b0384f89..565ef057c0d 100644 --- a/examples/ping/src/main.rs +++ b/examples/ping/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::prelude::*; use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 46a122d0717..b7868418fb0 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -21,17 +21,19 @@ #![doc = include_str!("../README.md")] +use std::{ + error::Error, + net::{Ipv4Addr, Ipv6Addr}, +}; + use clap::Parser; use futures::StreamExt; use libp2p::{ - core::multiaddr::Protocol, - core::Multiaddr, + core::{multiaddr::Protocol, Multiaddr}, identify, identity, noise, ping, relay, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use std::error::Error; -use std::net::{Ipv4Addr, Ipv6Addr}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index edd3d10a0ce..b133c82d158 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{ multiaddr::Protocol, @@ -25,8 +27,6 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, }; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index ff637aa6f49..ce4933a29a9 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use futures::StreamExt; use libp2p::{ identify, noise, ping, rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, }; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index bd848238d4a..8ef2d30c880 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use futures::StreamExt; use libp2p::{ noise, ping, rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, }; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -54,8 +55,8 @@ async fn main() { .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) .build(); - // In production the external address should be the publicly facing IP address of the rendezvous point. - // This address is recorded in the registration entry by the rendezvous point. + // In production the external address should be the publicly facing IP address of the rendezvous + // point. 
This address is recorded in the registration entry by the rendezvous point. let external_address = "/ip4/127.0.0.1/tcp/0".parse::().unwrap(); swarm.add_external_address(external_address); diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index a15bc1ca2d3..0f26f2c9934 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -20,14 +20,14 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{ identify, noise, ping, rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/stream/src/main.rs b/examples/stream/src/main.rs index 872ab8c3b98..71d2d2fcc76 100644 --- a/examples/stream/src/main.rs +++ b/examples/stream/src/main.rs @@ -44,12 +44,14 @@ async fn main() -> Result<()> { // Deal with incoming streams. // Spawning a dedicated task is just one way of doing this. // libp2p doesn't care how you handle incoming streams but you _must_ handle them somehow. - // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application cannot keep up processing them. + // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application + // cannot keep up processing them. tokio::spawn(async move { // This loop handles incoming streams _sequentially_ but that doesn't have to be the case. // You can also spawn a dedicated task per stream if you want to. - // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an unbounded buffer. - // Each task needs memory meaning an aggressive remote peer may force you OOM this way. + // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an + // unbounded buffer. Each task needs memory meaning an aggressive remote peer may + // force you OOM this way. 
while let Some((peer, stream)) = incoming_streams.next().await { match echo(stream).await { @@ -102,7 +104,8 @@ async fn connection_handler(peer: PeerId, mut control: stream::Control) { } Err(error) => { // Other errors may be temporary. - // In production, something like an exponential backoff / circuit-breaker may be more appropriate. + // In production, something like an exponential backoff / circuit-breaker may be + // more appropriate. tracing::debug!(%peer, %error); continue; } diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs index fd0764990d1..19de8d773ae 100644 --- a/examples/upnp/src/main.rs +++ b/examples/upnp/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::error::Error; + use futures::prelude::*; use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; -use std::error::Error; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index 02229e16262..bc5a1bae4f5 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -18,24 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + collections::HashMap, + fmt, io, + net::{IpAddr, Ipv4Addr}, + str::FromStr, + time::Duration, +}; + use anyhow::{Context, Result}; use either::Either; use futures::stream::StreamExt; -use libp2p::core::transport::ListenerId; -use libp2p::swarm::dial_opts::DialOpts; -use libp2p::swarm::ConnectionId; use libp2p::{ - core::multiaddr::{Multiaddr, Protocol}, + core::{ + multiaddr::{Multiaddr, Protocol}, + transport::ListenerId, + }, dcutr, identify, noise, ping, relay, - swarm::{NetworkBehaviour, SwarmEvent}, + swarm::{dial_opts::DialOpts, ConnectionId, NetworkBehaviour, SwarmEvent}, tcp, yamux, Swarm, }; use redis::AsyncCommands; -use std::collections::HashMap; -use std::net::{IpAddr, Ipv4Addr}; -use std::str::FromStr; -use std::time::Duration; -use std::{fmt, io}; /// The redis key we push the relay's TCP listen address to. const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS"; diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 922675097df..11cdaced795 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -20,10 +20,9 @@ //! ECDSA keys with secp256r1 curve support. -use super::error::DecodingError; -use core::cmp; -use core::fmt; -use core::hash; +use core::{cmp, fmt, hash}; +use std::convert::Infallible; + use p256::{ ecdsa::{ signature::{Signer, Verifier}, @@ -32,9 +31,10 @@ use p256::{ EncodedPoint, }; use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey}; -use std::convert::Infallible; use zeroize::Zeroize; +use super::error::DecodingError; + /// An ECDSA keypair generated using `secp256r1` curve. #[derive(Clone)] pub struct Keypair { @@ -158,7 +158,8 @@ impl PublicKey { self.0.verify(msg, &sig).is_ok() } - /// Try to parse a public key from a byte buffer containing raw components of a key with or without compression. + /// Try to parse a public key from a byte buffer containing raw + /// components of a key with or without compression. 
pub fn try_from_bytes(k: &[u8]) -> Result { let enc_pt = EncodedPoint::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; @@ -168,7 +169,8 @@ impl PublicKey { .map(PublicKey) } - /// Convert a public key into a byte buffer containing raw components of the key without compression. + /// Convert a public key into a byte buffer containing + /// raw components of the key without compression. pub fn to_bytes(&self) -> Vec { self.0.to_encoded_point(false).as_bytes().to_owned() } diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index d77c44547d6..5a1a53dd4af 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -20,13 +20,13 @@ //! Ed25519 keys. -use super::error::DecodingError; -use core::cmp; -use core::fmt; -use core::hash; +use core::{cmp, fmt, hash}; + use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; use zeroize::Zeroize; +use super::error::DecodingError; + /// An Ed25519 keypair. #[derive(Clone)] pub struct Keypair(ed25519::SigningKey); @@ -152,7 +152,8 @@ impl PublicKey { self.0.to_bytes() } - /// Try to parse a public key from a byte array containing the actual key as produced by `to_bytes`. + /// Try to parse a public key from a byte array containing + /// the actual key as produced by `to_bytes`. pub fn try_from_bytes(k: &[u8]) -> Result { let k = <[u8; 32]>::try_from(k) .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e))?; @@ -206,9 +207,10 @@ impl SecretKey { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool { kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes() } diff --git a/identity/src/error.rs b/identity/src/error.rs index 71cd78fe1ea..6e8c4d02caa 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -20,8 +20,7 @@ //! Errors during identity key operations. 
-use std::error::Error; -use std::fmt; +use std::{error::Error, fmt}; use crate::KeyType; diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index f1e8a7c2142..a1bbba00fa9 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -24,40 +24,40 @@ feature = "ed25519", feature = "rsa" ))] -#[cfg(feature = "ed25519")] -use crate::ed25519; +use quick_protobuf::{BytesReader, Writer}; + +#[cfg(feature = "ecdsa")] +use crate::ecdsa; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use crate::error::OtherVariantError; -use crate::error::{DecodingError, SigningError}; +#[cfg(feature = "ed25519")] +use crate::ed25519; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use crate::proto; +use crate::error::OtherVariantError; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use quick_protobuf::{BytesReader, Writer}; - +use crate::proto; #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] use crate::rsa; - #[cfg(feature = "secp256k1")] use crate::secp256k1; - -#[cfg(feature = "ecdsa")] -use crate::ecdsa; -use crate::KeyType; +use crate::{ + error::{DecodingError, SigningError}, + KeyType, +}; /// Identity keypair of a node. /// @@ -75,7 +75,6 @@ use crate::KeyType; /// let mut bytes = std::fs::read("private.pk8").unwrap(); /// let keypair = Keypair::rsa_from_pkcs8(&mut bytes); /// ``` -/// #[derive(Debug, Clone)] pub struct Keypair { keypair: KeyPairInner, @@ -341,7 +340,8 @@ impl Keypair { } } - /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain. + /// Deterministically derive a new secret from this [`Keypair`], + /// taking into account the provided domain. /// /// This works for all key types except RSA where it returns `None`. 
/// @@ -352,10 +352,11 @@ impl Keypair { /// # use libp2p_identity as identity; /// let key = identity::Keypair::generate_ed25519(); /// - /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); + /// let new_key = key + /// .derive_secret(b"my encryption key") + /// .expect("can derive secret for ed25519"); /// # } /// ``` - /// #[cfg(any( feature = "ecdsa", feature = "secp256k1", @@ -904,9 +905,10 @@ mod tests { #[test] fn public_key_implements_hash() { - use crate::PublicKey; use std::hash::Hash; + use crate::PublicKey; + fn assert_implements_hash() {} assert_implements_hash::(); @@ -914,9 +916,10 @@ mod tests { #[test] fn public_key_implements_ord() { - use crate::PublicKey; use std::cmp::Ord; + use crate::PublicKey; + fn assert_implements_ord() {} assert_implements_ord::(); diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 8ae6d99ae32..7f6d1f44eab 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -18,17 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt, str::FromStr}; + #[cfg(feature = "rand")] use rand::Rng; use sha2::Digest as _; -use std::{fmt, str::FromStr}; use thiserror::Error; /// Local type-alias for multihash. /// /// Must be big enough to accommodate for `MAX_INLINE_KEY_LENGTH`. /// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem typically uses. -/// Given that this appears in our type-signature, using a "common" number here makes us more compatible. +/// Given that this appears in our type-signature, +/// using a "common" number here makes us more compatible. type Multihash = multihash::Multihash<64>; #[cfg(feature = "serde")] diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 5eb78a4af75..b14d8c66d86 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -20,15 +20,20 @@ //! RSA keys. 
-use super::error::*; -use asn1_der::typed::{DerDecodable, DerEncodable, DerTypeView, Sequence}; -use asn1_der::{Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking}; -use ring::rand::SystemRandom; -use ring::signature::KeyPair; -use ring::signature::{self, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256}; use std::{fmt, sync::Arc}; + +use asn1_der::{ + typed::{DerDecodable, DerEncodable, DerTypeView, Sequence}, + Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking, +}; +use ring::{ + rand::SystemRandom, + signature::{self, KeyPair, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256}, +}; use zeroize::Zeroize; +use super::error::*; + /// An RSA keypair. #[derive(Clone)] pub struct Keypair(Arc); @@ -315,9 +320,10 @@ impl DerDecodable<'_> for Asn1SubjectPublicKeyInfo { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index a6e9e923268..e884cf1385d 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -20,15 +20,15 @@ //! Secp256k1 keys. -use super::error::DecodingError; +use core::{cmp, fmt, hash}; + use asn1_der::typed::{DerDecodable, Sequence}; -use core::cmp; -use core::fmt; -use core::hash; use libsecp256k1::{Message, Signature}; use sha2::{Digest as ShaDigestTrait, Sha256}; use zeroize::Zeroize; +use super::error::DecodingError; + /// A Secp256k1 keypair. 
#[derive(Clone)] pub struct Keypair { diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index df36f8e5baf..87a508742dc 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -1,7 +1,6 @@ // Native re-exports #[cfg(not(target_arch = "wasm32"))] pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient}; - // Wasm re-exports #[cfg(target_arch = "wasm32")] pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient}; @@ -11,11 +10,13 @@ pub(crate) mod native { use std::time::Duration; use anyhow::{bail, Context, Result}; - use futures::future::BoxFuture; - use futures::FutureExt; - use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, Swarm}; - use libp2p::{noise, tcp, tls, yamux}; + use futures::{future::BoxFuture, FutureExt}; + use libp2p::{ + identity::Keypair, + noise, + swarm::{NetworkBehaviour, Swarm}, + tcp, tls, yamux, + }; use libp2p_mplex as mplex; use libp2p_webrtc as webrtc; use redis::AsyncCommands; @@ -186,15 +187,19 @@ pub(crate) mod native { #[cfg(target_arch = "wasm32")] pub(crate) mod wasm { + use std::time::Duration; + use anyhow::{bail, Context, Result}; use futures::future::{BoxFuture, FutureExt}; - use libp2p::core::upgrade::Version; - use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, Swarm}; - use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _}; + use libp2p::{ + core::upgrade::Version, + identity::Keypair, + noise, + swarm::{NetworkBehaviour, Swarm}, + websocket_websys, webtransport_websys, yamux, Transport as _, + }; use libp2p_mplex as mplex; use libp2p_webrtc_websys as webrtc_websys; - use std::time::Duration; use crate::{BlpopRequest, Muxer, SecProtocol, Transport}; diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs index 0d697a0e2a3..7730b869456 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -1,26 +1,27 @@ 
#![allow(non_upper_case_globals)] -use std::future::IntoFuture; -use std::process::Stdio; -use std::time::Duration; +use std::{future::IntoFuture, process::Stdio, time::Duration}; use anyhow::{bail, Context, Result}; -use axum::http::{header, Uri}; -use axum::response::{Html, IntoResponse, Response}; -use axum::routing::get; -use axum::{extract::State, http::StatusCode, routing::post, Json, Router}; +use axum::{ + extract::State, + http::{header, StatusCode, Uri}, + response::{Html, IntoResponse, Response}, + routing::{get, post}, + Json, Router, +}; +use interop_tests::{BlpopRequest, Report}; use redis::{AsyncCommands, Client}; use thirtyfour::prelude::*; -use tokio::io::{AsyncBufReadExt, BufReader}; -use tokio::net::TcpListener; -use tokio::process::Child; -use tokio::sync::mpsc; -use tower_http::cors::CorsLayer; -use tower_http::trace::TraceLayer; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + net::TcpListener, + process::Child, + sync::mpsc, +}; +use tower_http::{cors::CorsLayer, trace::TraceLayer}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; -use interop_tests::{BlpopRequest, Report}; - mod config; const BIND_ADDR: &str = "127.0.0.1:8080"; diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index 0154bec51a4..a16dc4b8228 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -1,11 +1,14 @@ -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; use anyhow::{bail, Context, Result}; use futures::{FutureExt, StreamExt}; -use libp2p::identity::Keypair; -use libp2p::swarm::SwarmEvent; -use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr}; +use libp2p::{ + identify, + identity::Keypair, + ping, + swarm::{NetworkBehaviour, SwarmEvent}, + Multiaddr, +}; #[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*; diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs index 8931c5c4166..ac668e26b3f 100644 --- a/libp2p/src/bandwidth.rs +++ b/libp2p/src/bandwidth.rs @@ 
-20,13 +20,6 @@ #![allow(deprecated)] -use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; - -use futures::{ - io::{IoSlice, IoSliceMut}, - prelude::*, - ready, -}; use std::{ convert::TryFrom as _, io, @@ -38,6 +31,14 @@ use std::{ task::{Context, Poll}, }; +use futures::{ + io::{IoSlice, IoSliceMut}, + prelude::*, + ready, +}; + +use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; + /// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened /// streams. #[derive(Clone)] @@ -123,7 +124,7 @@ impl BandwidthSinks { /// Returns the total number of bytes that have been downloaded on all the streams. /// /// > **Note**: This method is by design subject to race conditions. The returned value should - /// > only ever be used for statistics purposes. + /// > only ever be used for statistics purposes. pub fn total_inbound(&self) -> u64 { self.inbound.load(Ordering::Relaxed) } @@ -131,7 +132,7 @@ impl BandwidthSinks { /// Returns the total number of bytes that have been uploaded on all the streams. /// /// > **Note**: This method is by design subject to race conditions. The returned value should - /// > only ever be used for statistics purposes. + /// > only ever be used for statistics purposes. pub fn total_outbound(&self) -> u64 { self.outbound.load(Ordering::Relaxed) } diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs index de003314cca..99c340a5e3e 100644 --- a/libp2p/src/builder.rs +++ b/libp2p/src/builder.rs @@ -33,31 +33,31 @@ mod select_security; /// # relay: libp2p_relay::client::Behaviour, /// # } /// -/// let swarm = SwarmBuilder::with_new_identity() -/// .with_tokio() -/// .with_tcp( -/// Default::default(), -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// )? -/// .with_quic() -/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? -/// .with_dns()? 
-/// .with_websocket( -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// ) -/// .await? -/// .with_relay_client( -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// )? -/// .with_behaviour(|_key, relay| MyBehaviour { relay })? -/// .with_swarm_config(|cfg| { -/// // Edit cfg here. -/// cfg -/// }) -/// .build(); +/// let swarm = SwarmBuilder::with_new_identity() +/// .with_tokio() +/// .with_tcp( +/// Default::default(), +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_quic() +/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? +/// .with_dns()? +/// .with_websocket( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// ) +/// .await? +/// .with_relay_client( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_behaviour(|_key, relay| MyBehaviour { relay })? +/// .with_swarm_config(|cfg| { +/// // Edit cfg here. 
+/// cfg +/// }) +/// .build(); /// # /// # Ok(()) /// # } @@ -70,11 +70,12 @@ pub struct SwarmBuilder { #[cfg(test)] mod tests { - use crate::SwarmBuilder; use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport}; use libp2p_identity::PeerId; use libp2p_swarm::NetworkBehaviour; + use crate::SwarmBuilder; + #[test] #[cfg(all( feature = "tokio", diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs index c9679a46767..6e3f41755ca 100644 --- a/libp2p/src/builder/phase.rs +++ b/libp2p/src/builder/phase.rs @@ -19,6 +19,8 @@ use bandwidth_metrics::*; use behaviour::*; use build::*; use dns::*; +use libp2p_core::{muxing::StreamMuxerBox, Transport}; +use libp2p_identity::Keypair; use other_transport::*; use provider::*; use quic::*; @@ -27,12 +29,9 @@ use swarm::*; use tcp::*; use websocket::*; -use super::select_muxer::SelectMuxerUpgrade; -use super::select_security::SelectSecurityUpgrade; -use super::SwarmBuilder; - -use libp2p_core::{muxing::StreamMuxerBox, Transport}; -use libp2p_identity::Keypair; +use super::{ + select_muxer::SelectMuxerUpgrade, select_security::SelectSecurityUpgrade, SwarmBuilder, +}; #[allow(unreachable_pub)] pub trait IntoSecurityUpgrade { diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs index cee9498fcaa..f24df5f3df5 100644 --- a/libp2p/src/builder/phase/bandwidth_logging.rs +++ b/libp2p/src/builder/phase/bandwidth_logging.rs @@ -1,10 +1,9 @@ +use std::{marker::PhantomData, sync::Arc}; + use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; -use crate::transport_ext::TransportExt; -use crate::SwarmBuilder; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{transport_ext::TransportExt, SwarmBuilder}; pub struct BandwidthLoggingPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs index 52daa731ddd..ddd292c140e 100644 --- 
a/libp2p/src/builder/phase/bandwidth_metrics.rs +++ b/libp2p/src/builder/phase/bandwidth_metrics.rs @@ -1,10 +1,9 @@ +use std::{marker::PhantomData, sync::Arc}; + use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; -use crate::transport_ext::TransportExt; -use crate::SwarmBuilder; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{transport_ext::TransportExt, SwarmBuilder}; pub struct BandwidthMetricsPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs index 939db935c80..22f8c617051 100644 --- a/libp2p/src/builder/phase/behaviour.rs +++ b/libp2p/src/builder/phase/behaviour.rs @@ -1,8 +1,9 @@ +use std::{convert::Infallible, marker::PhantomData}; + +use libp2p_swarm::NetworkBehaviour; + use super::*; use crate::SwarmBuilder; -use libp2p_swarm::NetworkBehaviour; -use std::convert::Infallible; -use std::marker::PhantomData; pub struct BehaviourPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs index 80a83994eeb..f9621da756b 100644 --- a/libp2p/src/builder/phase/build.rs +++ b/libp2p/src/builder/phase/build.rs @@ -1,9 +1,9 @@ +use libp2p_core::Transport; +use libp2p_swarm::Swarm; + #[allow(unused_imports)] use super::*; - use crate::SwarmBuilder; -use libp2p_core::Transport; -use libp2p_swarm::Swarm; pub struct BuildPhase { pub(crate) behaviour: B, diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs index 638064d58bb..83653836a34 100644 --- a/libp2p/src/builder/phase/dns.rs +++ b/libp2p/src/builder/phase/dns.rs @@ -1,6 +1,7 @@ +use std::marker::PhantomData; + use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; pub struct DnsPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs index ceb86819dc7..e2511267cd3 100644 --- a/libp2p/src/builder/phase/identity.rs +++ 
b/libp2p/src/builder/phase/identity.rs @@ -1,6 +1,7 @@ +use std::marker::PhantomData; + use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; pub struct IdentityPhase {} diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs index e04621b2e3f..c3b951c8c75 100644 --- a/libp2p/src/builder/phase/other_transport.rs +++ b/libp2p/src/builder/phase/other_transport.rs @@ -1,20 +1,19 @@ -use std::convert::Infallible; -use std::marker::PhantomData; -use std::sync::Arc; +use std::{convert::Infallible, marker::PhantomData, sync::Arc}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::Transport; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + Transport, +}; #[cfg(feature = "relay")] use libp2p_core::{Negotiated, UpgradeInfo}; #[cfg(feature = "relay")] use libp2p_identity::PeerId; +use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; use crate::SwarmBuilder; -use super::*; - pub struct OtherTransportPhase { pub(crate) transport: T, } diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs index 2a9154cda74..00a79e14a30 100644 --- a/libp2p/src/builder/phase/provider.rs +++ b/libp2p/src/builder/phase/provider.rs @@ -1,13 +1,15 @@ +use std::marker::PhantomData; + #[allow(unused_imports)] use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; /// Represents the phase where a provider is not yet specified. -/// This is a marker type used in the type-state pattern to ensure compile-time checks of the builder's state. +/// This is a marker type used in the type-state pattern to ensure compile-time checks of the +/// builder's state. pub enum NoProviderSpecified {} -// Define enums for each of the possible runtime environments. These are used as markers in the type-state pattern, -// allowing compile-time checks for the appropriate environment configuration. 
+// Define enums for each of the possible runtime environments. These are used as markers in the +// type-state pattern, allowing compile-time checks for the appropriate environment configuration. #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] /// Represents the AsyncStd runtime environment. @@ -26,7 +28,8 @@ pub struct ProviderPhase {} impl SwarmBuilder { /// Configures the SwarmBuilder to use the AsyncStd runtime. - /// This method is only available when compiling for non-Wasm targets with the `async-std` feature enabled. + /// This method is only available when compiling for non-Wasm + /// targets with the `async-std` feature enabled. #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] pub fn with_async_std(self) -> SwarmBuilder { SwarmBuilder { @@ -37,7 +40,8 @@ impl SwarmBuilder { } /// Configures the SwarmBuilder to use the Tokio runtime. - /// This method is only available when compiling for non-Wasm targets with the `tokio` feature enabled + /// This method is only available when compiling for non-Wasm + /// targets with the `tokio` feature enabled #[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))] pub fn with_tokio(self) -> SwarmBuilder { SwarmBuilder { diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs index e030e9493bb..1b6329c1095 100644 --- a/libp2p/src/builder/phase/quic.rs +++ b/libp2p/src/builder/phase/quic.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::{marker::PhantomData, sync::Arc}; + #[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] use libp2p_core::muxing::StreamMuxer; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; @@ -8,7 +8,9 @@ use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; all(not(target_arch = "wasm32"), feature = "websocket") ))] use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; -use std::{marker::PhantomData, sync::Arc}; + +use super::*; 
+use crate::SwarmBuilder; pub struct QuicPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs index f8305f9d246..33dbf1fb54c 100644 --- a/libp2p/src/builder/phase/relay.rs +++ b/libp2p/src/builder/phase/relay.rs @@ -10,9 +10,8 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, Upgr #[cfg(feature = "relay")] use libp2p_identity::PeerId; -use crate::SwarmBuilder; - use super::*; +use crate::SwarmBuilder; pub struct RelayPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs index 4b7cf29b3d2..f38f52441e5 100644 --- a/libp2p/src/builder/phase/tcp.rs +++ b/libp2p/src/builder/phase/tcp.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::marker::PhantomData; + #[cfg(all( not(target_arch = "wasm32"), any(feature = "tcp", feature = "websocket") @@ -14,7 +14,9 @@ use libp2p_core::Transport; use libp2p_core::{ upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo, }; -use std::marker::PhantomData; + +use super::*; +use crate::SwarmBuilder; pub struct TcpPhase {} diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs index 68a85bb77b7..a23c6eca854 100644 --- a/libp2p/src/builder/phase/websocket.rs +++ b/libp2p/src/builder/phase/websocket.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::marker::PhantomData; + #[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; @@ -15,7 +15,9 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; feature = "relay" ))] use libp2p_identity::PeerId; -use std::marker::PhantomData; + +use super::*; +use crate::SwarmBuilder; pub struct WebsocketPhase { pub(crate) transport: T, @@ -126,8 +128,8 @@ impl_websocket_builder!( 
impl_websocket_builder!( "tokio", super::provider::Tokio, - // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent - // with above AsyncStd construction. + // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be + // consistent with above AsyncStd construction. futures::future::ready(libp2p_dns::tokio::Transport::system( libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default()) )), diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs index c93ba9d9991..93ae0547269 100644 --- a/libp2p/src/builder/select_muxer.rs +++ b/libp2p/src/builder/select_muxer.rs @@ -20,12 +20,15 @@ #![allow(unreachable_pub)] +use std::iter::{Chain, Map}; + use either::Either; use futures::future; -use libp2p_core::either::EitherFuture; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; -use std::iter::{Chain, Map}; +use libp2p_core::{ + either::EitherFuture, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; #[derive(Debug, Clone)] pub struct SelectMuxerUpgrade(A, B); diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs index d6c7f8c172f..1ed760feb1b 100644 --- a/libp2p/src/builder/select_security.rs +++ b/libp2p/src/builder/select_security.rs @@ -21,13 +21,15 @@ #![allow(unreachable_pub)] +use std::iter::{Chain, Map}; + use either::Either; -use futures::future::MapOk; -use futures::{future, TryFutureExt}; -use libp2p_core::either::EitherFuture; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use futures::{future, future::MapOk, TryFutureExt}; +use libp2p_core::{ + either::EitherFuture, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, +}; use libp2p_identity::PeerId; -use std::iter::{Chain, Map}; /// Upgrade that combines two upgrades into one. 
Supports all the protocols supported by either /// sub-upgrade. diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index 58f911e9445..1ec1cc530fc 100644 --- a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -34,11 +34,6 @@ pub use bytes; pub use futures; -#[doc(inline)] -pub use libp2p_core::multihash; -#[doc(inline)] -pub use multiaddr; - #[doc(inline)] pub use libp2p_allow_block_list as allow_block_list; #[cfg(feature = "autonat")] @@ -48,6 +43,8 @@ pub use libp2p_autonat as autonat; pub use libp2p_connection_limits as connection_limits; #[doc(inline)] pub use libp2p_core as core; +#[doc(inline)] +pub use libp2p_core::multihash; #[cfg(feature = "dcutr")] #[doc(inline)] pub use libp2p_dcutr as dcutr; @@ -140,6 +137,8 @@ pub use libp2p_webtransport_websys as webtransport_websys; #[cfg(feature = "yamux")] #[doc(inline)] pub use libp2p_yamux as yamux; +#[doc(inline)] +pub use multiaddr; mod builder; mod transport_ext; @@ -149,15 +148,18 @@ pub mod bandwidth; #[cfg(doc)] pub mod tutorials; -pub use self::builder::SwarmBuilder; -pub use self::core::{ - transport::TransportError, - upgrade::{InboundUpgrade, OutboundUpgrade}, - Transport, -}; -pub use self::multiaddr::{multiaddr as build_multiaddr, Multiaddr}; -pub use self::swarm::Swarm; -pub use self::transport_ext::TransportExt; pub use libp2p_identity as identity; pub use libp2p_identity::PeerId; pub use libp2p_swarm::{Stream, StreamProtocol}; + +pub use self::{ + builder::SwarmBuilder, + core::{ + transport::TransportError, + upgrade::{InboundUpgrade, OutboundUpgrade}, + Transport, + }, + multiaddr::{multiaddr as build_multiaddr, Multiaddr}, + swarm::Swarm, + transport_ext::TransportExt, +}; diff --git a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs index 4f07484fc1f..0ed5b816903 100644 --- a/libp2p/src/transport_ext.rs +++ b/libp2p/src/transport_ext.rs @@ -20,15 +20,19 @@ //! Provides the `TransportExt` trait. 
+use std::sync::Arc; + +use libp2p_identity::PeerId; + #[allow(deprecated)] use crate::bandwidth::{BandwidthLogging, BandwidthSinks}; -use crate::core::{ - muxing::{StreamMuxer, StreamMuxerBox}, - transport::Boxed, +use crate::{ + core::{ + muxing::{StreamMuxer, StreamMuxerBox}, + transport::Boxed, + }, + Transport, }; -use crate::Transport; -use libp2p_identity::PeerId; -use std::sync::Arc; /// Trait automatically implemented on all objects that implement `Transport`. Provides some /// additional utilities. @@ -42,23 +46,17 @@ pub trait TransportExt: Transport { /// # Example /// /// ``` - /// use libp2p_yamux as yamux; + /// use libp2p::{core::upgrade, identity, Transport, TransportExt}; /// use libp2p_noise as noise; /// use libp2p_tcp as tcp; - /// use libp2p::{ - /// core::upgrade, - /// identity, - /// TransportExt, - /// Transport, - /// }; + /// use libp2p_yamux as yamux; /// /// let id_keys = identity::Keypair::generate_ed25519(); /// /// let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) /// .upgrade(upgrade::Version::V1) /// .authenticate( - /// noise::Config::new(&id_keys) - /// .expect("Signing libp2p-noise static DH keypair failed."), + /// noise::Config::new(&id_keys).expect("Signing libp2p-noise static DH keypair failed."), /// ) /// .multiplex(yamux::Config::default()) /// .boxed(); diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs index 0963c0ca59e..06a4dad4037 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -57,8 +57,8 @@ //! cargo build --bin relay-server-example //! ``` //! -//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy -//! it to your server. +//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, +//! copy it to your server. //! //! On your server, start the relay server binary: //! @@ -98,7 +98,8 @@ //! //! ``` bash //! 
$ libp2p-lookup direct --address /ip4/111.11.111.111/tcp/4001 -//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") succeeded. +//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") +//! succeeded. //! //! Protocol version: "/TODO/0.0.1" //! Agent version: "rust-libp2p/0.36.0" @@ -163,12 +164,18 @@ //! [`Multiaddr`](crate::Multiaddr). //! //! ``` ignore -//! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer } +//! [2022-01-30T12:54:10Z INFO client] Established connection to +//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: +//! "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/ +//! p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", +//! role_override: Dialer } //! ``` //! -//! 2. The direct connection upgrade, also known as hole punch, succeeding. -//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. +//! 2. The direct connection upgrade, also known as hole punch, succeeding. Reported by +//! [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with +//! the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. //! //! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } +//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: +//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } //! 
``` diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index 31bf5ba3a14..f35fef8f488 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -72,6 +72,7 @@ //! //! ```rust //! use std::error::Error; +//! //! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] @@ -98,8 +99,9 @@ //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -139,12 +141,14 @@ //! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly //! separate _how_ to send bytes from _what_ bytes and to _whom_ to send. //! -//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: +//! With the above in mind, let's extend our example, creating a +//! [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -174,8 +178,9 @@ //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -209,8 +214,9 @@ //! //! ```rust //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -226,7 +232,9 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) +//! .with_swarm_config(|cfg| { +//! 
cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) +//! }) //! .build(); //! //! Ok(()) @@ -261,8 +269,9 @@ //! //! ```rust //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux, Multiaddr}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -278,7 +287,9 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) +//! .with_swarm_config(|cfg| { +//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) +//! }) //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -305,9 +316,10 @@ //! //! ```no_run //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; -//! use libp2p::{noise, ping, tcp, yamux, Multiaddr, swarm::SwarmEvent}; +//! //! use futures::prelude::*; +//! use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -323,7 +335,9 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) +//! .with_swarm_config(|cfg| { +//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) +//! }) //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index f93cf4ffefa..ea0d56b5a67 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -31,12 +31,12 @@ //! #[derive(NetworkBehaviour)] //! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] //! struct MyBehaviour { -//! allowed_peers: allow_block_list::Behaviour, +//! 
allowed_peers: allow_block_list::Behaviour, //! } //! //! # fn main() { //! let behaviour = MyBehaviour { -//! allowed_peers: allow_block_list::Behaviour::default() +//! allowed_peers: allow_block_list::Behaviour::default(), //! }; //! # } //! ``` @@ -51,27 +51,29 @@ //! #[derive(NetworkBehaviour)] //! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] //! struct MyBehaviour { -//! blocked_peers: allow_block_list::Behaviour, +//! blocked_peers: allow_block_list::Behaviour, //! } //! //! # fn main() { //! let behaviour = MyBehaviour { -//! blocked_peers: allow_block_list::Behaviour::default() +//! blocked_peers: allow_block_list::Behaviour::default(), //! }; //! # } //! ``` -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use std::{ + collections::{HashSet, VecDeque}, + convert::Infallible, + fmt, + task::{Context, Poll, Waker}, +}; + +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{HashSet, VecDeque}; -use std::convert::Infallible; -use std::fmt; -use std::task::{Context, Poll, Waker}; /// A [`NetworkBehaviour`] that can act as an allow or block list. #[derive(Default, Debug)] @@ -101,7 +103,8 @@ impl Behaviour { /// Allow connections to the given peer. /// - /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set. + /// Returns whether the peer was newly inserted. Does nothing if the peer + /// was already present in the set. pub fn allow_peer(&mut self, peer: PeerId) -> bool { let inserted = self.state.peers.insert(peer); if inserted { @@ -116,7 +119,8 @@ impl Behaviour { /// /// All active connections to this peer will be closed immediately. /// - /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set. 
+ /// Returns whether the peer was present in the set. Does nothing if the peer + /// was not present in the set. pub fn disallow_peer(&mut self, peer: PeerId) -> bool { let removed = self.state.peers.remove(&peer); if removed { @@ -139,7 +143,8 @@ impl Behaviour { /// /// All active connections to this peer will be closed immediately. /// - /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set. + /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in + /// the set. pub fn block_peer(&mut self, peer: PeerId) -> bool { let inserted = self.state.peers.insert(peer); if inserted { @@ -153,7 +158,8 @@ impl Behaviour { /// Unblock connections to a given peer. /// - /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set. + /// Returns whether the peer was present in the set. Does nothing if the peer + /// was not present in the set. pub fn unblock_peer(&mut self, peer: PeerId) -> bool { let removed = self.state.peers.remove(&peer); if removed { @@ -294,10 +300,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_swarm::{dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; + use super::*; + #[async_std::test] async fn cannot_dial_blocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index 016a7f2cfd4..c8df5be5653 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -18,6 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + collections::{HashMap, HashSet}, + convert::Infallible, + fmt, + task::{Context, Poll}, +}; + use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ @@ -25,22 +32,22 @@ use libp2p_swarm::{ dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{HashMap, HashSet}; -use std::convert::Infallible; -use std::fmt; -use std::task::{Context, Poll}; /// A [`NetworkBehaviour`] that enforces a set of [`ConnectionLimits`]. /// -/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. +/// For these limits to take effect, this needs to be composed +/// into the behaviour tree of your application. /// -/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) -/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. -/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) **this** -/// behaviour denied the connection. +/// If a connection is denied due to a limit, either a +/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) +/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively +/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant +/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) +/// **this** behaviour denied the connection. 
/// -/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, +/// it may also be a different error. /// /// # Example /// @@ -53,9 +60,9 @@ use std::task::{Context, Poll}; /// #[derive(NetworkBehaviour)] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// ping: ping::Behaviour, -/// limits: connection_limits::Behaviour +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, +/// limits: connection_limits::Behaviour, /// } /// ``` pub struct Behaviour { @@ -367,14 +374,16 @@ impl NetworkBehaviour for Behaviour { #[cfg(test)] mod tests { - use super::*; use libp2p_swarm::{ - behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError, - ListenError, Swarm, SwarmEvent, + behaviour::toggle::Toggle, + dial_opts::{DialOpts, PeerCondition}, + DialError, ListenError, Swarm, SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use quickcheck::*; + use super::*; + #[test] fn max_outgoing() { use rand::Rng; diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index e6c563b3c32..7d46b1849bd 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -1,10 +1,8 @@ +use std::{error::Error, path::Path}; + use base64::prelude::*; +use libp2p_identity::{Keypair, PeerId}; use serde::{Deserialize, Serialize}; -use std::error::Error; -use std::path::Path; - -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; #[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index 64d98005369..4c4d3bfbf66 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -1,9 +1,12 @@ +use std::{ + error::Error, + path::PathBuf, + str::{self, FromStr}, + sync::mpsc, + thread, +}; + use base64::prelude::*; -use std::error::Error; -use 
std::path::PathBuf; -use std::str::{self, FromStr}; -use std::sync::mpsc; -use std::thread; mod config; diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index e2a89977991..0735464a67e 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -18,35 +18,40 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + convert::Infallible, + fmt, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::convert::Infallible; - -use std::{ - fmt, - task::{Context, Poll}, - time::{Duration, Instant}, -}; use sysinfo::MemoryRefreshKind; /// A [`NetworkBehaviour`] that enforces a set of memory usage based limits. /// -/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. +/// For these limits to take effect, this needs to be composed +/// into the behaviour tree of your application. /// -/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) -/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. -/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error if (and only if) **this** -/// behaviour denied the connection. 
+/// If a connection is denied due to a limit, either a +/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) +/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively +/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant +/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error +/// if (and only if) **this** behaviour denied the connection. /// -/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, +/// it may also be a different error. /// /// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are mutually exclusive. -/// If you need to employ both of them, compose two instances of [Behaviour] into your custom behaviour. +/// If you need to employ both of them, +/// compose two instances of [Behaviour] into your custom behaviour. /// /// # Example /// @@ -58,8 +63,8 @@ use sysinfo::MemoryRefreshKind; /// #[derive(NetworkBehaviour)] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// limits: memory_connection_limits::Behaviour +/// identify: identify::Behaviour, +/// limits: memory_connection_limits::Behaviour, /// } /// ``` pub struct Behaviour { @@ -68,7 +73,8 @@ pub struct Behaviour { last_refreshed: Instant, } -/// The maximum duration for which the retrieved memory-stats of the process are allowed to be stale. +/// The maximum duration for which the retrieved memory-stats +/// of the process are allowed to be stale. /// /// Once exceeded, we will retrieve new stats. 
const MAX_STALE_DURATION: Duration = Duration::from_millis(100); diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs index 7f89e2c7a9a..e82ad67d076 100644 --- a/misc/memory-connection-limits/tests/max_bytes.rs +++ b/misc/memory-connection-limits/tests/max_bytes.rs @@ -20,14 +20,14 @@ mod util; +use std::time::Duration; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_memory_connection_limits::*; -use std::time::Duration; -use util::*; - use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm}; use libp2p_swarm_test::SwarmExt; +use util::*; #[test] fn max_bytes() { @@ -69,7 +69,8 @@ fn max_bytes() { .expect("Unexpected connection limit."); } - std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. + std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try + // to exceed it. 
match network .dial( diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs index bfb1b504af5..51fe783b3c5 100644 --- a/misc/memory-connection-limits/tests/max_percentage.rs +++ b/misc/memory-connection-limits/tests/max_percentage.rs @@ -20,18 +20,18 @@ mod util; +use std::time::Duration; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_memory_connection_limits::*; -use std::time::Duration; -use sysinfo::{MemoryRefreshKind, RefreshKind}; -use util::*; - use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, DialError, Swarm, }; use libp2p_swarm_test::SwarmExt; +use sysinfo::{MemoryRefreshKind, RefreshKind}; +use util::*; #[test] fn max_percentage() { @@ -76,7 +76,9 @@ fn max_percentage() { .expect("Unexpected connection limit."); } - std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. + // Memory stats are only updated every 100ms internally, + // ensure they are up-to-date when we try to exceed it. + std::thread::sleep(Duration::from_millis(100)); match network .dial( diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs index 333b0ee135f..205f4d13bc4 100644 --- a/misc/memory-connection-limits/tests/util/mod.rs +++ b/misc/memory-connection-limits/tests/util/mod.rs @@ -18,7 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; @@ -26,7 +29,6 @@ use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::convert::Infallible; #[derive(libp2p_swarm_derive::NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs index 8a0f54e5b65..b6308ed1b51 100644 --- a/misc/metrics/src/bandwidth.rs +++ b/misc/metrics/src/bandwidth.rs @@ -1,4 +1,10 @@ -use crate::protocol_stack; +use std::{ + convert::TryFrom as _, + io, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{MapOk, TryFutureExt}, io::{IoSlice, IoSliceMut}, @@ -16,12 +22,8 @@ use prometheus_client::{ metrics::{counter::Counter, family::Family}, registry::{Registry, Unit}, }; -use std::{ - convert::TryFrom as _, - io, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::protocol_stack; #[derive(Debug, Clone)] #[pin_project::pin_project] diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index 3e60dca2cab..6a0f27394e9 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::registry::Registry; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; pub(crate) struct Metrics { events: Family, diff --git a/misc/metrics/src/gossipsub.rs b/misc/metrics/src/gossipsub.rs index 2d90b92fbc6..b3e2e11f0b0 100644 --- a/misc/metrics/src/gossipsub.rs +++ b/misc/metrics/src/gossipsub.rs @@ -18,8 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::metrics::counter::Counter; -use prometheus_client::registry::Registry; +use prometheus_client::{metrics::counter::Counter, registry::Registry}; pub(crate) struct Metrics { messages: Counter, diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 03ac3f9634e..b16c6a56ccf 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -18,17 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol_stack; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use prometheus_client::collector::Collector; -use prometheus_client::encoding::{DescriptorEncoder, EncodeMetric}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::gauge::ConstGauge; -use prometheus_client::metrics::MetricType; -use prometheus_client::registry::Registry; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use prometheus_client::{ + collector::Collector, + encoding::{DescriptorEncoder, EncodeMetric}, + metrics::{counter::Counter, gauge::ConstGauge, MetricType}, + registry::Registry, +}; + +use crate::protocol_stack; const ALLOWED_PROTOCOLS: &[StreamProtocol] = &[ #[cfg(feature = "dcutr")] diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs index bd5a6526737..0a2a8038511 100644 --- a/misc/metrics/src/kad.rs +++ b/misc/metrics/src/kad.rs @@ -18,11 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; pub(crate) struct Metrics { query_result_get_record_ok: Counter, diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index 74fd15e2181..1fd79e7846f 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -67,8 +67,8 @@ impl Metrics { /// Create a new set of Swarm and protocol [`Metrics`]. 
/// /// ``` - /// use prometheus_client::registry::Registry; /// use libp2p_metrics::Metrics; + /// use prometheus_client::registry::Registry; /// let mut registry = Registry::default(); /// let metrics = Metrics::new(&mut registry); /// ``` diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs index afdd05134a6..ce653c72ea1 100644 --- a/misc/metrics/src/ping.rs +++ b/misc/metrics/src/ping.rs @@ -18,11 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; #[derive(Clone, Hash, PartialEq, Eq, EncodeLabelSet, Debug)] struct FailureLabels { diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 607daf3f1e1..d4c25b6eb3e 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::registry::Registry; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; pub(crate) struct Metrics { events: Family, diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index 51c0a0af253..6e95d082de6 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -18,18 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; -use crate::protocol_stack; use libp2p_swarm::{ConnectionId, DialError, SwarmEvent}; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; use web_time::Instant; +use crate::protocol_stack; + pub(crate) struct Metrics { connections_incoming: Family, connections_incoming_error: Family, diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 83bb4909041..1d13e94910d 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -20,10 +20,6 @@ //! Protocol negotiation strategies for the peer acting as the dialer. 
-use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; -use crate::{Negotiated, NegotiationError, Version}; - -use futures::prelude::*; use std::{ convert::TryFrom as _, iter, mem, @@ -31,6 +27,13 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; + +use crate::{ + protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}, + Negotiated, NegotiationError, Version, +}; + /// Returns a `Future` that negotiates a protocol on the given I/O stream /// for a peer acting as the _dialer_ (or _initiator_). /// @@ -84,8 +87,9 @@ enum State { impl Future for DialerSelectFuture where - // The Unpin bound here is required because we produce a `Negotiated` as the output. - // It also makes the implementation considerably easier to write. + // The Unpin bound here is required because we produce + // a `Negotiated` as the output. It also makes + // the implementation considerably easier to write. R: AsyncRead + AsyncWrite + Unpin, I: Iterator, I::Item: AsRef, @@ -204,15 +208,19 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::listener_select_proto; - use async_std::future::timeout; - use async_std::net::{TcpListener, TcpStream}; - use quickcheck::{Arbitrary, Gen, GenRange}; use std::time::Duration; + + use async_std::{ + future::timeout, + net::{TcpListener, TcpStream}, + }; + use quickcheck::{Arbitrary, Gen, GenRange}; use tracing::metadata::LevelFilter; use tracing_subscriber::EnvFilter; + use super::*; + use crate::listener_select_proto; + #[test] fn select_proto_basic() { async fn run(version: Version) { @@ -353,8 +361,8 @@ mod tests { .unwrap(); assert_eq!(proto, "/proto1"); - // client can close the connection even though protocol negotiation is not yet done, i.e. - // `_server_connection` had been untouched. + // client can close the connection even though protocol negotiation is not yet done, + // i.e. `_server_connection` had been untouched. 
io.close().await.unwrap(); }); diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index 3a7988d0548..8062455de46 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use bytes::{Buf as _, BufMut as _, Bytes, BytesMut}; -use futures::{io::IoSlice, prelude::*}; use std::{ convert::TryFrom as _, io, @@ -27,6 +25,9 @@ use std::{ task::{Context, Poll}, }; +use bytes::{Buf as _, BufMut as _, Bytes, BytesMut}; +use futures::{io::IoSlice, prelude::*}; + const MAX_LEN_BYTES: u16 = 2; const MAX_FRAME_SIZE: u16 = (1 << (MAX_LEN_BYTES * 8 - MAX_LEN_BYTES)) - 1; const DEFAULT_BUFFER_SIZE: usize = 64; @@ -383,10 +384,12 @@ where #[cfg(test)] mod tests { - use crate::length_delimited::LengthDelimited; + use std::io::ErrorKind; + use futures::{io::Cursor, prelude::*}; use quickcheck::*; - use std::io::ErrorKind; + + use crate::length_delimited::LengthDelimited; #[test] fn basic_read() { diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs index 5565623f25e..96432de6cb0 100644 --- a/misc/multistream-select/src/lib.rs +++ b/misc/multistream-select/src/lib.rs @@ -70,20 +70,21 @@ //! //! ```no_run //! use async_std::net::TcpStream; -//! use multistream_select::{dialer_select_proto, Version}; //! use futures::prelude::*; +//! use multistream_select::{dialer_select_proto, Version}; //! //! async_std::task::block_on(async move { //! let socket = TcpStream::connect("127.0.0.1:10333").await.unwrap(); //! //! let protos = vec!["/echo/1.0.0", "/echo/2.5.0"]; -//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1).await.unwrap(); +//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1) +//! .await +//! .unwrap(); //! //! println!("Negotiated protocol: {:?}", protocol); //! 
// You can now use `_io` to communicate with the remote. //! }); //! ``` -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -93,10 +94,12 @@ mod listener_select; mod negotiated; mod protocol; -pub use self::dialer_select::{dialer_select_proto, DialerSelectFuture}; -pub use self::listener_select::{listener_select_proto, ListenerSelectFuture}; -pub use self::negotiated::{Negotiated, NegotiatedComplete, NegotiationError}; -pub use self::protocol::ProtocolError; +pub use self::{ + dialer_select::{dialer_select_proto, DialerSelectFuture}, + listener_select::{listener_select_proto, ListenerSelectFuture}, + negotiated::{Negotiated, NegotiatedComplete, NegotiationError}, + protocol::ProtocolError, +}; /// Supported multistream-select versions. #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index b4236310a1d..cd5af72a9d0 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -21,11 +21,6 @@ //! Protocol negotiation strategies for the peer acting as the listener //! in a multistream-select protocol negotiation. -use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; -use crate::{Negotiated, NegotiationError}; - -use futures::prelude::*; -use smallvec::SmallVec; use std::{ convert::TryFrom as _, mem, @@ -33,6 +28,14 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; +use smallvec::SmallVec; + +use crate::{ + protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}, + Negotiated, NegotiationError, +}; + /// Returns a `Future` that negotiates a protocol on the given I/O stream /// for a peer acting as the _listener_ (or _responder_). /// @@ -109,8 +112,10 @@ enum State { impl Future for ListenerSelectFuture where - // The Unpin bound here is required because we produce a `Negotiated` as the output. 
- // It also makes the implementation considerably easier to write. + // The Unpin bound here is required because + // we produce a `Negotiated` as the output. + // It also makes the implementation considerably + // easier to write. R: AsyncRead + AsyncWrite + Unpin, N: AsRef + Clone, { diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index a24014a4f5f..6693b3b5636 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -18,7 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError}; +use std::{ + error::Error, + fmt, io, mem, + pin::Pin, + task::{Context, Poll}, +}; use futures::{ io::{IoSlice, IoSliceMut}, @@ -26,12 +31,8 @@ use futures::{ ready, }; use pin_project::pin_project; -use std::{ - error::Error, - fmt, io, mem, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError}; /// An I/O stream that has settled on an (application-layer) protocol to use. /// @@ -59,8 +60,10 @@ pub struct NegotiatedComplete { impl Future for NegotiatedComplete where - // `Unpin` is required not because of implementation details but because we produce the - // `Negotiated` as the output of the future. + // `Unpin` is required not because of + // implementation details but because we produce + // the `Negotiated` as the output of the + // future. TInner: AsyncRead + AsyncWrite + Unpin, { type Output = Result, NegotiationError>; @@ -250,13 +253,13 @@ where } // TODO: implement once method is stabilized in the futures crate - /*unsafe fn initializer(&self) -> Initializer { - match &self.state { - State::Completed { io, .. } => io.initializer(), - State::Expecting { io, .. 
} => io.inner_ref().initializer(), - State::Invalid => panic!("Negotiated: Invalid state"), - } - }*/ + // unsafe fn initializer(&self) -> Initializer { + // match &self.state { + // State::Completed { io, .. } => io.initializer(), + // State::Expecting { io, .. } => io.inner_ref().initializer(), + // State::Invalid => panic!("Negotiated: Invalid state"), + // } + // } fn poll_read_vectored( mut self: Pin<&mut Self>, diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index 92b6acedaeb..93cd4ac02b5 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -25,19 +25,22 @@ //! `Stream` and `Sink` implementations of `MessageIO` and //! `MessageReader`. -use crate::length_delimited::{LengthDelimited, LengthDelimitedReader}; -use crate::Version; - -use bytes::{BufMut, Bytes, BytesMut}; -use futures::{io::IoSlice, prelude::*, ready}; use std::{ error::Error, fmt, io, pin::Pin, task::{Context, Poll}, }; + +use bytes::{BufMut, Bytes, BytesMut}; +use futures::{io::IoSlice, prelude::*, ready}; use unsigned_varint as uvi; +use crate::{ + length_delimited::{LengthDelimited, LengthDelimitedReader}, + Version, +}; + /// The maximum number of supported protocols that can be processed. 
const MAX_PROTOCOLS: usize = 1000; @@ -461,10 +464,12 @@ impl fmt::Display for ProtocolError { #[cfg(test)] mod tests { - use super::*; - use quickcheck::*; use std::iter; + use quickcheck::*; + + use super::*; + impl Arbitrary for Protocol { fn arbitrary(g: &mut Gen) -> Protocol { let n = g.gen_range(1..g.size()); diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index c57b7da7db8..d49315a54c3 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -1,10 +1,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use std::{io, marker::PhantomData}; + use asynchronous_codec::{Decoder, Encoder}; use bytes::{Buf, BufMut, BytesMut}; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend}; -use std::io; -use std::marker::PhantomData; mod generated; @@ -182,12 +182,13 @@ impl From for io::Error { #[cfg(test)] mod tests { - use super::*; + use std::error::Error; + use asynchronous_codec::FramedRead; - use futures::io::Cursor; - use futures::{FutureExt, StreamExt}; + use futures::{io::Cursor, FutureExt, StreamExt}; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use std::error::Error; + + use super::*; #[test] fn honors_max_message_length() { diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs index 65dafe065d1..a434d3ce17f 100644 --- a/misc/quick-protobuf-codec/tests/large_message.rs +++ b/misc/quick-protobuf-codec/tests/large_message.rs @@ -1,7 +1,6 @@ use asynchronous_codec::Encoder; use bytes::BytesMut; -use quick_protobuf_codec::proto; -use quick_protobuf_codec::Codec; +use quick_protobuf_codec::{proto, Codec}; #[test] fn encode_large_message() { diff --git a/misc/quickcheck-ext/src/lib.rs b/misc/quickcheck-ext/src/lib.rs index 4ada7e73ba1..9c2deec8743 100644 --- a/misc/quickcheck-ext/src/lib.rs +++ b/misc/quickcheck-ext/src/lib.rs @@ -1,9 +1,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] 
-pub use quickcheck::*; - use core::ops::Range; + use num_traits::sign::Unsigned; +pub use quickcheck::*; pub trait GenRange { fn gen_range(&mut self, _range: Range) -> T; diff --git a/misc/rw-stream-sink/src/lib.rs b/misc/rw-stream-sink/src/lib.rs index f10e683ad33..5fdf1987252 100644 --- a/misc/rw-stream-sink/src/lib.rs +++ b/misc/rw-stream-sink/src/lib.rs @@ -27,7 +27,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::{prelude::*, ready}; use std::{ io::{self, Read}, mem, @@ -35,6 +34,8 @@ use std::{ task::{Context, Poll}, }; +use futures::{prelude::*, ready}; + static_assertions::const_assert!(mem::size_of::() <= mem::size_of::()); /// Wraps a [`Stream`] and [`Sink`] whose items are buffers. @@ -115,14 +116,16 @@ where #[cfg(test)] mod tests { - use super::RwStreamSink; - use async_std::task; - use futures::{channel::mpsc, prelude::*}; use std::{ pin::Pin, task::{Context, Poll}, }; + use async_std::task; + use futures::{channel::mpsc, prelude::*}; + + use super::RwStreamSink; + // This struct merges a stream and a sink and is quite useful for tests. 
struct Wrapper(St, Si); diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs index 36b18c9798d..230d62a2ef3 100644 --- a/misc/server/src/behaviour.rs +++ b/misc/server/src/behaviour.rs @@ -1,13 +1,10 @@ -use libp2p::autonat; -use libp2p::identify; -use libp2p::kad; -use libp2p::ping; -use libp2p::relay; -use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::{NetworkBehaviour, StreamProtocol}; -use libp2p::{identity, Multiaddr, PeerId}; -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; + +use libp2p::{ + autonat, identify, identity, kad, ping, relay, + swarm::{behaviour::toggle::Toggle, NetworkBehaviour, StreamProtocol}, + Multiaddr, PeerId, +}; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs index c3e3ec529c1..2e4b2746d09 100644 --- a/misc/server/src/config.rs +++ b/misc/server/src/config.rs @@ -1,7 +1,7 @@ +use std::{error::Error, path::Path}; + use libp2p::Multiaddr; use serde_derive::Deserialize; -use std::error::Error; -use std::path::Path; #[derive(Clone, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs index cee1aa96e28..87a8adb94e0 100644 --- a/misc/server/src/http_service.rs +++ b/misc/server/src/http_service.rs @@ -18,15 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use axum::extract::State; -use axum::http::StatusCode; -use axum::response::IntoResponse; -use axum::routing::get; -use axum::Router; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router}; +use prometheus_client::{encoding::text::encode, registry::Registry}; use tokio::net::TcpListener; const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index 820921beaed..a633a80207e 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -1,18 +1,18 @@ +use std::{error::Error, path::PathBuf, str::FromStr}; + use base64::Engine; use clap::Parser; use futures::stream::StreamExt; -use libp2p::identity; -use libp2p::identity::PeerId; -use libp2p::kad; -use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::SwarmEvent; -use libp2p::tcp; -use libp2p::{identify, noise, yamux}; -use prometheus_client::metrics::info::Info; -use prometheus_client::registry::Registry; -use std::error::Error; -use std::path::PathBuf; -use std::str::FromStr; +use libp2p::{ + identify, identity, + identity::PeerId, + kad, + metrics::{Metrics, Recorder}, + noise, + swarm::SwarmEvent, + tcp, yamux, +}; +use prometheus_client::{metrics::info::Info, registry::Registry}; use tracing_subscriber::EnvFilter; use zeroize::Zeroizing; diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs index a02c4d1116d..c32d33d5bab 100644 --- a/misc/webrtc-utils/src/fingerprint.rs +++ b/misc/webrtc-utils/src/fingerprint.rs @@ -19,9 +19,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::fmt; + use libp2p_core::multihash; use sha2::Digest as _; -use std::fmt; pub const SHA256: &str = "sha-256"; const MULTIHASH_SHA256_CODE: u64 = 0x12; diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs index 9180acfc1ca..705db7f4697 100644 --- a/misc/webrtc-utils/src/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -19,16 +19,17 @@ // DEALINGS IN THE SOFTWARE. use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; +pub use noise::Error; use crate::fingerprint::Fingerprint; -pub use noise::Error; - pub async fn inbound( id_keys: identity::Keypair, stream: T, @@ -89,9 +90,10 @@ pub(crate) fn noise_prologue( #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + #[test] fn noise_prologue_tests() { let a = Fingerprint::raw(hex!( diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs index 0796548f449..96a07f5db95 100644 --- a/misc/webrtc-utils/src/sdp.rs +++ b/misc/webrtc-utils/src/sdp.rs @@ -18,13 +18,13 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::fingerprint::Fingerprint; -use serde::Serialize; use std::net::{IpAddr, SocketAddr}; + +use rand::{distributions::Alphanumeric, thread_rng, Rng}; +use serde::Serialize; use tinytemplate::TinyTemplate; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use crate::fingerprint::Fingerprint; pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String { let answer = render_description( @@ -71,7 +71,8 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: & // the answerer is received, which adds additional latency. setup:active allows the answer and // the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. // -// a=candidate: +// a=candidate: +// // // A transport address for a candidate that can be used for connectivity checks (RFC8839). // diff --git a/misc/webrtc-utils/src/stream.rs b/misc/webrtc-utils/src/stream.rs index 17f746a92a1..0ec420a103a 100644 --- a/misc/webrtc-utils/src/stream.rs +++ b/misc/webrtc-utils/src/stream.rs @@ -19,20 +19,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use bytes::Bytes; -use futures::{channel::oneshot, prelude::*, ready}; - use std::{ io, pin::Pin, task::{Context, Poll}, }; -use crate::proto::{Flag, Message}; +use bytes::Bytes; +use futures::{channel::oneshot, prelude::*, ready}; + use crate::{ - stream::drop_listener::GracefullyClosed, - stream::framed_dc::FramedDc, - stream::state::{Closing, State}, + proto::{Flag, Message}, + stream::{ + drop_listener::GracefullyClosed, + framed_dc::FramedDc, + state::{Closing, State}, + }, }; mod drop_listener; @@ -69,7 +71,8 @@ impl Stream where T: AsyncRead + AsyncWrite + Unpin + Clone, { - /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped. 
+ /// Returns a new [`Stream`] and a [`DropListener`], + /// which will notify the receiver when/if the stream is dropped. pub fn new(data_channel: T) -> (Self, DropListener) { let (sender, receiver) = oneshot::channel(); @@ -175,8 +178,9 @@ where buf: &[u8], ) -> Poll> { while self.state.read_flags_in_async_write() { - // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we will poll the - // underlying I/O resource once more. Is that allowed? How about introducing a state IoReadClosed? + // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we + // will poll the underlying I/O resource once more. Is that allowed? How + // about introducing a state IoReadClosed? let Self { read_buffer, @@ -265,11 +269,12 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::stream::framed_dc::codec; use asynchronous_codec::Encoder; use bytes::BytesMut; + use super::*; + use crate::stream::framed_dc::codec; + #[test] fn max_data_len() { // Largest possible message. diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs index 9745e3d4364..ea3f19d2f57 100644 --- a/misc/webrtc-utils/src/stream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -18,17 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::channel::oneshot; -use futures::channel::oneshot::Canceled; -use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt}; +use std::{ + future::Future, + io, + pin::Pin, + task::{Context, Poll}, +}; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; +use futures::{ + channel::{oneshot, oneshot::Canceled}, + AsyncRead, AsyncWrite, FutureExt, SinkExt, +}; -use crate::proto::{Flag, Message}; -use crate::stream::framed_dc::FramedDc; +use crate::{ + proto::{Flag, Message}, + stream::framed_dc::FramedDc, +}; #[must_use] pub struct DropListener { diff --git a/misc/webrtc-utils/src/stream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs index 721178fdcd3..a7b9b6214e0 100644 --- a/misc/webrtc-utils/src/stream/framed_dc.rs +++ b/misc/webrtc-utils/src/stream/framed_dc.rs @@ -21,8 +21,10 @@ use asynchronous_codec::Framed; use futures::{AsyncRead, AsyncWrite}; -use crate::proto::Message; -use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; +use crate::{ + proto::Message, + stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}, +}; pub(crate) type FramedDc = Framed>; pub(crate) fn new(inner: T) -> FramedDc diff --git a/misc/webrtc-utils/src/stream/state.rs b/misc/webrtc-utils/src/stream/state.rs index 082325e4d47..006c1610d00 100644 --- a/misc/webrtc-utils/src/stream/state.rs +++ b/misc/webrtc-utils/src/stream/state.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use bytes::Bytes; - use std::io; +use bytes::Bytes; + use crate::proto::Flag; #[derive(Debug, Copy, Clone)] @@ -46,8 +46,8 @@ pub(crate) enum State { /// Represents the state of closing one half (either read or write) of the connection. /// -/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag respectively -/// and flushing the underlying connection. 
+/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag +/// respectively and flushing the underlying connection. #[derive(Debug, Copy, Clone)] pub(crate) enum Closing { Requested, @@ -181,8 +181,8 @@ impl State { /// Whether we should read from the stream in the [`futures::AsyncWrite`] implementation. /// - /// This is necessary for read-closed streams because we would otherwise not read any more flags from - /// the socket. + /// This is necessary for read-closed streams because we would otherwise + /// not read any more flags from the socket. pub(crate) fn read_flags_in_async_write(&self) -> bool { matches!(self, Self::ReadClosed) } @@ -324,9 +324,10 @@ impl State { #[cfg(test)] mod tests { - use super::*; use std::io::ErrorKind; + use super::*; + #[test] fn cannot_read_after_receiving_fin() { let mut open = State::Open; diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs index 440ad73ed02..60b1934082f 100644 --- a/misc/webrtc-utils/src/transport.rs +++ b/misc/webrtc-utils/src/transport.rs @@ -1,7 +1,9 @@ -use crate::fingerprint::Fingerprint; -use libp2p_core::{multiaddr::Protocol, Multiaddr}; use std::net::{IpAddr, SocketAddr}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; + +use crate::fingerprint::Fingerprint; + /// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. 
pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { let mut iter = addr.iter(); @@ -38,9 +40,10 @@ pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerpri #[cfg(test)] mod tests { - use super::*; use std::net::{Ipv4Addr, Ipv6Addr}; + use super::*; + #[test] fn parse_valid_address_with_certhash_and_p2p() { let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 44eafa884ac..b0dd4babff7 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -21,21 +21,23 @@ //! A benchmark for the `split_send_size` configuration option //! using different transports. +use std::{pin::Pin, time::Duration}; + use async_std::task; use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; -use futures::future::poll_fn; -use futures::prelude::*; -use futures::{channel::oneshot, future::join}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::transport::ListenerId; -use libp2p_core::Endpoint; -use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, Transport}; +use futures::{ + channel::oneshot, + future::{join, poll_fn}, + prelude::*, +}; +use libp2p_core::{ + multiaddr::multiaddr, muxing, muxing::StreamMuxerExt, transport, transport::ListenerId, + upgrade, Endpoint, Multiaddr, Transport, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_mplex as mplex; use libp2p_plaintext as plaintext; -use std::pin::Pin; -use std::time::Duration; use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -120,7 +122,8 @@ fn run( } transport::TransportEvent::Incoming { upgrade, .. 
} => { let (_peer, mut conn) = upgrade.await.unwrap(); - // Just calling `poll_inbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though. + // Just calling `poll_inbound` without `poll` is fine here because mplex makes + // progress through all `poll_` functions. It is hacky though. let mut s = poll_fn(|cx| conn.poll_inbound_unpin(cx)) .await .expect("unexpected error"); @@ -158,7 +161,8 @@ fn run( .unwrap() .await .unwrap(); - // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though. + // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress + // through all `poll_` functions. It is hacky though. let mut stream = poll_fn(|cx| conn.poll_outbound_unpin(cx)).await.unwrap(); let mut off = 0; loop { diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs index 014ee899280..a4a04d1964d 100644 --- a/muxers/mplex/src/codec.rs +++ b/muxers/mplex/src/codec.rs @@ -18,14 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use asynchronous_codec::{Decoder, Encoder}; -use bytes::{BufMut, Bytes, BytesMut}; -use libp2p_core::Endpoint; use std::{ fmt, hash::{Hash, Hasher}, io, mem, }; + +use asynchronous_codec::{Decoder, Encoder}; +use bytes::{BufMut, Bytes, BytesMut}; +use libp2p_core::Endpoint; use unsigned_varint::{codec, encode}; // Maximum size for a packet: 1MB as per the spec. diff --git a/muxers/mplex/src/config.rs b/muxers/mplex/src/config.rs index 3bf5e703a18..45bb05b2240 100644 --- a/muxers/mplex/src/config.rs +++ b/muxers/mplex/src/config.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::codec::MAX_FRAME_SIZE; use std::cmp; +use crate::codec::MAX_FRAME_SIZE; + pub(crate) const DEFAULT_MPLEX_PROTOCOL_NAME: &str = "/mplex/6.7.0"; /// Configuration for the multiplexer. diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 50fc0fc1d3f..ac93fd3865e 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -18,23 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId}; -use crate::{MaxBufferBehaviour, MplexConfig}; +pub(crate) use std::io::{Error, Result}; +use std::{ + cmp, + collections::VecDeque, + fmt, io, mem, + sync::Arc, + task::{Context, Poll, Waker}, +}; + use asynchronous_codec::Framed; use bytes::Bytes; -use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; -use futures::{prelude::*, ready, stream::Fuse}; +use futures::{ + prelude::*, + ready, + stream::Fuse, + task::{waker_ref, ArcWake, AtomicWaker, WakerRef}, +}; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; -use std::collections::VecDeque; -use std::{ - cmp, fmt, io, mem, - sync::Arc, - task::{Context, Poll, Waker}, -}; -pub(crate) use std::io::{Error, Result}; +use crate::{ + codec::{Codec, Frame, LocalStreamId, RemoteStreamId}, + MaxBufferBehaviour, MplexConfig, +}; /// A connection identifier. /// /// Randomly generated and mainly intended to improve log output @@ -302,13 +310,11 @@ where /// reading and writing immediately. The remote is informed /// based on the current state of the substream: /// - /// * If the substream was open, a `Reset` frame is sent at - /// the next opportunity. - /// * If the substream was half-closed, i.e. a `Close` frame - /// has already been sent, nothing further happens. - /// * If the substream was half-closed by the remote, i.e. - /// a `Close` frame has already been received, a `Close` - /// frame is sent at the next opportunity. 
+ /// * If the substream was open, a `Reset` frame is sent at the next opportunity. + /// * If the substream was half-closed, i.e. a `Close` frame has already been sent, nothing + /// further happens. + /// * If the substream was half-closed by the remote, i.e. a `Close` frame has already been + /// received, a `Close` frame is sent at the next opportunity. /// /// If the multiplexed stream is closed or encountered /// an error earlier, or there is no known substream with @@ -1146,15 +1152,14 @@ const EXTRA_PENDING_FRAMES: usize = 1000; #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, num::NonZeroU8, ops::DerefMut, pin::Pin}; + use async_std::task; use asynchronous_codec::{Decoder, Encoder}; use bytes::BytesMut; use quickcheck::*; - use std::collections::HashSet; - use std::num::NonZeroU8; - use std::ops::DerefMut; - use std::pin::Pin; + + use super::*; impl Arbitrary for MaxBufferBehaviour { fn arbitrary(g: &mut Gen) -> MaxBufferBehaviour { diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index 17ca9ad46f6..1ef89dc283a 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -26,15 +26,22 @@ mod codec; mod config; mod io; -pub use config::{MaxBufferBehaviour, MplexConfig}; +use std::{ + cmp, iter, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use bytes::Bytes; use codec::LocalStreamId; +pub use config::{MaxBufferBehaviour, MplexConfig}; use futures::{prelude::*, ready}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, +}; use parking_lot::Mutex; -use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll}; impl UpgradeInfo for MplexConfig { type Info = &'static str; diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index 
d03bdbdfed7..489d476f158 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -1,15 +1,20 @@ +use std::{ + fmt, + future::Future, + mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{future, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Stream, StreamExt}; +use libp2p_core::{ + muxing::StreamMuxerExt, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + StreamMuxer, UpgradeInfo, +}; + use crate::future::{BoxFuture, Either, FutureExt}; -use futures::{future, AsyncRead, AsyncWrite}; -use futures::{AsyncReadExt, Stream}; -use futures::{AsyncWriteExt, StreamExt}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::{StreamMuxer, UpgradeInfo}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, mem}; pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M) where @@ -41,7 +46,8 @@ where .unwrap() } -/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can use `read_to_end` to read the entire message. +/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can +/// use `read_to_end` to read the entire message. pub async fn close_implies_flush(alice: A, bob: B) where A: StreamMuxer + Unpin, @@ -99,7 +105,8 @@ where .await; } -/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can be the dialer and listener. +/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can +/// be the dialer and listener. async fn run_commutative( mut alice: A, mut bob: B, @@ -120,7 +127,8 @@ async fn run_commutative( /// Runs a given protocol between the two parties. /// /// The first party will open a new substream and the second party will wait for this. 
-/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the underlying connection can make progress at all times. +/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the +/// underlying connection can make progress at all times. async fn run( dialer: &mut A, listener: &mut B, diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index bcfeb62fccf..001eb6b0348 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -22,17 +22,20 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use either::Either; -use futures::{prelude::*, ready}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; -use std::collections::VecDeque; -use std::io::{IoSlice, IoSliceMut}; -use std::task::Waker; use std::{ - io, iter, + collections::VecDeque, + io, + io::{IoSlice, IoSliceMut}, + iter, pin::Pin, - task::{Context, Poll}, + task::{Context, Poll, Waker}, +}; + +use either::Either; +use futures::{prelude::*, ready}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, }; use thiserror::Error; @@ -40,10 +43,12 @@ use thiserror::Error; #[derive(Debug)] pub struct Muxer { connection: Either, yamux013::Connection>, - /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote. + /// Temporarily buffers inbound streams in case our node is + /// performing backpressure on the remote. /// - /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. However, the - /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via + /// The only way how yamux can make progress is by calling + /// [`yamux013::Connection::poll_next_inbound`]. 
However, the [`StreamMuxer`] interface is + /// designed to allow a caller to selectively make progress via /// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general /// [`StreamMuxer::poll`] is designed to make progress on existing streams etc. /// @@ -57,7 +62,8 @@ pub struct Muxer { /// How many streams to buffer before we start resetting them. /// /// This is equal to the ACK BACKLOG in `rust-yamux`. -/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset streams because they'll voluntarily stop opening them once they hit the ACK backlog. +/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset +/// streams because they'll voluntarily stop opening them once they hit the ACK backlog. const MAX_BUFFERED_INBOUND_STREAMS: usize = 256; impl Muxer diff --git a/protocols/autonat/src/v1.rs b/protocols/autonat/src/v1.rs index c60e4805f40..4de601c5df5 100644 --- a/protocols/autonat/src/v1.rs +++ b/protocols/autonat/src/v1.rs @@ -29,6 +29,8 @@ pub(crate) mod behaviour; pub(crate) mod protocol; +pub use libp2p_request_response::{InboundFailure, OutboundFailure}; + pub use self::{ behaviour::{ Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, NatStatus, @@ -36,7 +38,6 @@ pub use self::{ }, protocol::{ResponseError, DEFAULT_PROTOCOL_NAME}, }; -pub use libp2p_request_response::{InboundFailure, OutboundFailure}; pub(crate) mod proto { #![allow(unreachable_pub)] diff --git a/protocols/autonat/src/v1/behaviour.rs b/protocols/autonat/src/v1/behaviour.rs index 7a717baed8d..24ec1b13be7 100644 --- a/protocols/autonat/src/v1/behaviour.rs +++ b/protocols/autonat/src/v1/behaviour.rs @@ -21,15 +21,19 @@ mod as_client; mod as_server; -use crate::protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError}; -use crate::DEFAULT_PROTOCOL_NAME; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + iter, + task::{Context, Poll}, + time::Duration, +}; + 
use as_client::AsClient; pub use as_client::{OutboundProbeError, OutboundProbeEvent}; use as_server::AsServer; pub use as_server::{InboundProbeError, InboundProbeEvent}; use futures_timer::Delay; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel, @@ -39,14 +43,13 @@ use libp2p_swarm::{ ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - iter, - task::{Context, Poll}, - time::Duration, -}; use web_time::Instant; +use crate::{ + protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError}, + DEFAULT_PROTOCOL_NAME, +}; + /// Config for the [`Behaviour`]. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { @@ -148,17 +151,18 @@ pub enum Event { /// [`NetworkBehaviour`] for AutoNAT. /// -/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a firewall, or -/// publicly reachable. -/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of fixed servers and -/// connected peers. Upon receiving a dial-back request, the remote tries to dial the included addresses. When a -/// first address was successfully dialed, a status Ok will be send back together with the dialed address. If no address -/// can be reached a dial-error is send back. +/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a +/// firewall, or publicly reachable. +/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of +/// fixed servers and connected peers. 
Upon receiving a dial-back request, the remote tries to dial +/// the included addresses. When a first address was successfully dialed, a status Ok will be send +/// back together with the dialed address. If no address can be reached a dial-error is send back. /// Based on the received response, the sender assumes themselves to be public or private. -/// The status is retried in a frequency of [`Config::retry_interval`] or [`Config::retry_interval`], depending on whether -/// enough confidence in the assumed NAT status was reached or not. -/// The confidence increases each time a probe confirms the assumed status, and decreases if a different status is reported. -/// If the confidence is 0, the status is flipped and the Behaviour will report the new status in an `OutEvent`. +/// The status is retried in a frequency of [`Config::retry_interval`] or +/// [`Config::retry_interval`], depending on whether enough confidence in the assumed NAT status was +/// reached or not. The confidence increases each time a probe confirms the assumed status, and +/// decreases if a different status is reported. If the confidence is 0, the status is flipped and +/// the Behaviour will report the new status in an `OutEvent`. pub struct Behaviour { // Local peer id local_peer_id: PeerId, @@ -195,11 +199,12 @@ pub struct Behaviour { ongoing_outbound: HashMap, // Connected peers with the observed address of each connection. - // If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips), - // the observed address is `None`. + // If the endpoint of a connection is relayed or not global (in case of + // Config::only_global_ips), the observed address is `None`. connected: HashMap>>, - // Used servers in recent outbound probes that are throttled through Config::throttle_server_period. + // Used servers in recent outbound probes that are throttled through + // Config::throttle_server_period. 
throttled_servers: Vec<(PeerId, Instant)>, // Recent probes done for clients @@ -264,8 +269,8 @@ impl Behaviour { } /// Add a peer to the list over servers that may be used for probes. - /// These peers are used for dial-request even if they are currently not connection, in which case a connection will be - /// establish before sending the dial-request. + /// These peers are used for dial-request even if they are currently not connection, in which + /// case a connection will be establish before sending the dial-request. pub fn add_server(&mut self, peer: PeerId, address: Option) { self.servers.insert(peer); if let Some(addr) = address { @@ -564,7 +569,8 @@ impl NetworkBehaviour for Behaviour { type Action = ToSwarm<::ToSwarm, THandlerInEvent>; -// Trait implemented for `AsClient` and `AsServer` to handle events from the inner [`request_response::Behaviour`] Protocol. +// Trait implemented for `AsClient` and `AsServer` to handle events from the inner +// [`request_response::Behaviour`] Protocol. trait HandleInnerEvent { fn handle_event( &mut self, @@ -671,7 +677,8 @@ impl GlobalIp for std::net::Ipv6Addr { // Variation of unstable method [`std::net::Ipv6Addr::multicast_scope`] that instead of the // `Ipv6MulticastScope` just returns if the scope is global or not. - // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, Ipv6MulticastScope::Global))`. + // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, + // Ipv6MulticastScope::Global))`. fn is_multicast_scope_global(addr: &std::net::Ipv6Addr) -> Option { match addr.segments()[0] & 0x000f { 14 => Some(true), // Global multicast scope. 
diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs index 385dee50ee1..3377964373c 100644 --- a/protocols/autonat/src/v1/behaviour/as_client.rs +++ b/protocols/autonat/src/v1/behaviour/as_client.rs @@ -18,12 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::ResponseError; - -use super::{ - Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus, - ProbeId, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + task::{Context, Poll}, + time::Duration, }; + use futures::FutureExt; use futures_timer::Delay; use libp2p_core::Multiaddr; @@ -31,13 +31,14 @@ use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId}; use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm}; use rand::{seq::SliceRandom, thread_rng}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - task::{Context, Poll}, - time::Duration, -}; use web_time::Instant; +use super::{ + Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus, + ProbeId, +}; +use crate::ResponseError; + /// Outbound probe failed or was aborted. #[derive(Debug)] pub enum OutboundProbeError { diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs index 01148add6e8..663f94122c7 100644 --- a/protocols/autonat/src/v1/behaviour/as_server.rs +++ b/protocols/autonat/src/v1/behaviour/as_server.rs @@ -17,10 +17,11 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::{ - Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId, - ResponseError, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + num::NonZeroU8, }; + use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ @@ -30,12 +31,13 @@ use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, ConnectionId, DialError, ToSwarm, }; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - num::NonZeroU8, -}; use web_time::Instant; +use super::{ + Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId, + ResponseError, +}; + /// Inbound probe failed. #[derive(Debug)] pub enum InboundProbeError { @@ -379,10 +381,10 @@ impl AsServer<'_> { #[cfg(test)] mod test { - use super::*; - use std::net::Ipv4Addr; + use super::*; + fn random_ip<'a>() -> Protocol<'a> { Protocol::Ip4(Ipv4Addr::new( rand::random(), diff --git a/protocols/autonat/src/v1/protocol.rs b/protocols/autonat/src/v1/protocol.rs index 2ce538fddf4..6aa0c99167b 100644 --- a/protocols/autonat/src/v1/protocol.rs +++ b/protocols/autonat/src/v1/protocol.rs @@ -18,16 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; +use std::io; + use async_trait::async_trait; use asynchronous_codec::{FramedRead, FramedWrite}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::{SinkExt, StreamExt}; +use futures::{ + io::{AsyncRead, AsyncWrite}, + SinkExt, StreamExt, +}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response}; use libp2p_swarm::StreamProtocol; -use std::io; + +use crate::proto; /// The protocol name used for negotiating with multistream-select. 
pub const DEFAULT_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/autonat/1.0.0"); diff --git a/protocols/autonat/src/v2.rs b/protocols/autonat/src/v2.rs index 48e9f25f890..94decf50a55 100644 --- a/protocols/autonat/src/v2.rs +++ b/protocols/autonat/src/v2.rs @@ -4,17 +4,17 @@ //! //! The new version fixes the issues of the first version: //! - The server now always dials back over a newly allocated port. This greatly reduces the risk of -//! false positives that often occurred in the first version, when the clinet-server connection -//! occurred over a hole-punched port. +//! false positives that often occurred in the first version, when the clinet-server connection +//! occurred over a hole-punched port. //! - The server protects against DoS attacks by requiring the client to send more data to the -//! server then the dial back puts on the client, thus making the protocol unatractive for an -//! attacker. +//! server then the dial back puts on the client, thus making the protocol unatractive for an +//! attacker. //! //! The protocol is separated into two parts: //! - The client part, which is implemented in the `client` module. (The client is the party that -//! wants to check if it is reachable from the outside.) +//! wants to check if it is reachable from the outside.) //! - The server part, which is implemented in the `server` module. (The server is the party -//! performing reachability checks on behalf of the client.) +//! performing reachability checks on behalf of the client.) //! //! The two can be used together. 
diff --git a/protocols/autonat/src/v2/client.rs b/protocols/autonat/src/v2/client.rs index d3272512f35..11ddb792839 100644 --- a/protocols/autonat/src/v2/client.rs +++ b/protocols/autonat/src/v2/client.rs @@ -1,5 +1,4 @@ mod behaviour; mod handler; -pub use behaviour::Event; -pub use behaviour::{Behaviour, Config}; +pub use behaviour::{Behaviour, Config, Event}; diff --git a/protocols/autonat/src/v2/client/behaviour.rs b/protocols/autonat/src/v2/client/behaviour.rs index 97509c05443..8e238fc9be4 100644 --- a/protocols/autonat/src/v2/client/behaviour.rs +++ b/protocols/autonat/src/v2/client/behaviour.rs @@ -1,5 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, + fmt::{Debug, Display, Formatter}, task::{Context, Poll}, time::Duration, }; @@ -15,14 +16,12 @@ use libp2p_swarm::{ }; use rand::prelude::*; use rand_core::OsRng; -use std::fmt::{Debug, Display, Formatter}; - -use crate::v2::{protocol::DialRequest, Nonce}; use super::handler::{ dial_back::{self, IncomingNonce}, dial_request, }; +use crate::v2::{protocol::DialRequest, Nonce}; #[derive(Debug, Clone, Copy)] pub struct Config { @@ -281,10 +280,12 @@ where } } - /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested candidates. + /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested + /// candidates. /// /// In the current implementation, we only send a single address to each AutoNAT server. - /// This spreads our candidates out across all servers we are connected to which should give us pretty fast feedback on all of them. + /// This spreads our candidates out across all servers we are connected to which should give us + /// pretty fast feedback on all of them. 
fn issue_dial_requests_for_untested_candidates(&mut self) { for addr in self.untested_candidates() { let Some((conn_id, peer_id)) = self.random_autonat_server() else { @@ -311,7 +312,8 @@ where /// Returns all untested candidates, sorted by the frequency they were reported at. /// - /// More frequently reported candidates are considered to more likely be external addresses and thus tested first. + /// More frequently reported candidates are considered to more likely be external addresses and + /// thus tested first. fn untested_candidates(&self) -> impl Iterator { let mut entries = self .address_candidates @@ -333,7 +335,8 @@ where .map(|(addr, _)| addr) } - /// Chooses an active connection to one of our peers that reported support for the [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol. + /// Chooses an active connection to one of our peers that reported support for the + /// [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol. fn random_autonat_server(&mut self) -> Option<(ConnectionId, PeerId)> { let (conn_id, info) = self .peer_info diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs index b3b3a59c02d..ef544a4c77a 100644 --- a/protocols/autonat/src/v2/client/handler/dial_back.rs +++ b/protocols/autonat/src/v2/client/handler/dial_back.rs @@ -1,4 +1,5 @@ use std::{ + convert::Infallible, io, task::{Context, Poll}, time::Duration, @@ -11,7 +12,6 @@ use libp2p_swarm::{ handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError}, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use std::convert::Infallible; use crate::v2::{protocol, Nonce, DIAL_BACK_PROTOCOL}; @@ -83,7 +83,7 @@ impl ConnectionHandler for Handler { tracing::warn!("Dial back request dropped, too many requests in flight"); } } - // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs + // TODO: remove when Rust 1.82 is 
MSRV #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. }) => { libp2p_core::util::unreachable(error); diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs index 0f303167523..fff83ad9453 100644 --- a/protocols/autonat/src/v2/client/handler/dial_request.rs +++ b/protocols/autonat/src/v2/client/handler/dial_request.rs @@ -1,10 +1,18 @@ +use std::{ + collections::VecDeque, + convert::Infallible, + io, + iter::{once, repeat}, + task::{Context, Poll}, + time::Duration, +}; + use futures::{channel::oneshot, AsyncWrite}; use futures_bounded::FuturesMap; use libp2p_core::{ upgrade::{DeniedUpgrade, ReadyUpgrade}, Multiaddr, }; - use libp2p_swarm::{ handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedOutbound, OutboundUpgradeSend, @@ -13,14 +21,6 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::{ - collections::VecDeque, - convert::Infallible, - io, - iter::{once, repeat}, - task::{Context, Poll}, - time::Duration, -}; use crate::v2::{ generated::structs::{mod_DialResponse::ResponseStatus, DialStatus}, @@ -261,7 +261,9 @@ async fn start_stream_handle( Ok(_) => {} Err(err) => { if err.kind() == io::ErrorKind::ConnectionReset { - // The AutoNAT server may have already closed the stream (this is normal because the probe is finished), in this case we have this error: + // The AutoNAT server may have already closed the stream + // (this is normal because the probe is finished), + // in this case we have this error: // Err(Custom { kind: ConnectionReset, error: Stopped(0) }) // so we silently ignore this error } else { diff --git a/protocols/autonat/src/v2/protocol.rs b/protocols/autonat/src/v2/protocol.rs index 4077fd65f5d..70f9f8c37af 100644 --- a/protocols/autonat/src/v2/protocol.rs +++ b/protocols/autonat/src/v2/protocol.rs @@ -1,13 +1,10 @@ // change to 
quick-protobuf-codec -use std::io; -use std::io::ErrorKind; +use std::{io, io::ErrorKind}; use asynchronous_codec::{Framed, FramedRead, FramedWrite}; - use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::Multiaddr; - use quick_protobuf_codec::Codec; use rand::Rng; @@ -103,7 +100,10 @@ impl From for proto::Message { ); proto::Message { msg: proto::mod_Message::OneOfmsg::dialDataResponse(proto::DialDataResponse { - data: vec![0; val.data_count], // One could use Cow::Borrowed here, but it will require a modification of the generated code and that will fail the CI + // One could use Cow::Borrowed here, but it will + // require a modification of the generated code + // and that will fail the CI + data: vec![0; val.data_count], }), } } diff --git a/protocols/autonat/src/v2/server.rs b/protocols/autonat/src/v2/server.rs index 25819307784..cd9b1e46b18 100644 --- a/protocols/autonat/src/v2/server.rs +++ b/protocols/autonat/src/v2/server.rs @@ -1,5 +1,4 @@ mod behaviour; mod handler; -pub use behaviour::Behaviour; -pub use behaviour::Event; +pub use behaviour::{Behaviour, Event}; diff --git a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs index 027cfff7c13..125955cb53a 100644 --- a/protocols/autonat/src/v2/server/behaviour.rs +++ b/protocols/autonat/src/v2/server/behaviour.rs @@ -4,20 +4,19 @@ use std::{ task::{Context, Poll}, }; -use crate::v2::server::handler::dial_request::DialBackStatus; use either::Either; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::dial_opts::PeerCondition; use libp2p_swarm::{ - dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, - FromSwarm, NetworkBehaviour, ToSwarm, + dial_opts::{DialOpts, PeerCondition}, + dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, FromSwarm, + NetworkBehaviour, ToSwarm, }; use rand_core::{OsRng, RngCore}; use 
crate::v2::server::handler::{ dial_back, - dial_request::{self, DialBackCommand}, + dial_request::{self, DialBackCommand, DialBackStatus}, Handler, }; diff --git a/protocols/autonat/src/v2/server/handler/dial_back.rs b/protocols/autonat/src/v2/server/handler/dial_back.rs index 3cacd4ff32b..61593da318d 100644 --- a/protocols/autonat/src/v2/server/handler/dial_back.rs +++ b/protocols/autonat/src/v2/server/handler/dial_back.rs @@ -14,13 +14,12 @@ use libp2p_swarm::{ SubstreamProtocol, }; +use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes}; use crate::v2::{ protocol::{dial_back, recv_dial_back_response}, DIAL_BACK_PROTOCOL, }; -use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes}; - pub(crate) type ToBehaviour = io::Result<()>; pub struct Handler { diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs index f22a2e51470..49866a9adb5 100644 --- a/protocols/autonat/tests/autonatv2.rs +++ b/protocols/autonat/tests/autonatv2.rs @@ -1,15 +1,15 @@ -use libp2p_autonat::v2::client::{self, Config}; -use libp2p_autonat::v2::server; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::TransportError; -use libp2p_core::Multiaddr; +use std::{sync::Arc, time::Duration}; + +use libp2p_autonat::v2::{ + client::{self, Config}, + server, +}; +use libp2p_core::{multiaddr::Protocol, transport::TransportError, Multiaddr}; use libp2p_swarm::{ DialError, FromSwarm, NetworkBehaviour, NewExternalAddrCandidate, Swarm, SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use rand_core::OsRng; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::oneshot; use tracing_subscriber::EnvFilter; diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index f5c18e3f34e..49c6c483514 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // 
DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use libp2p_autonat::{ Behaviour, Config, Event, NatStatus, OutboundProbeError, OutboundProbeEvent, ResponseError, }; @@ -25,7 +27,6 @@ use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tokio::task::JoinHandle; const MAX_CONFIDENCE: usize = 3; @@ -116,7 +117,8 @@ async fn test_auto_probe() { // It can happen that the server observed the established connection and // returned a response before the inbound established connection was reported at the client. - // In this (rare) case the `ConnectionEstablished` event occurs after the `OutboundProbeEvent::Response`. + // In this (rare) case the `ConnectionEstablished` event + // occurs after the `OutboundProbeEvent::Response`. if !had_connection_event { match client.next_swarm_event().await { SwarmEvent::ConnectionEstablished { diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index d43d14198d4..944c4301b20 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -18,15 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{num::NonZeroU32, time::Duration}; + use libp2p_autonat::{ Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError, }; use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::DialError; -use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::{num::NonZeroU32, time::Duration}; #[tokio::test] async fn test_dial_back() { @@ -340,7 +340,8 @@ async fn test_global_ips_config() { client.listen().await; tokio::spawn(client.loop_on_next()); - // Expect the probe to be refused as both peers run on the same machine and thus in the same local network. + // Expect the probe to be refused as both peers run + // on the same machine and thus in the same local network. match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Error { error, .. }) => assert!(matches!( error, diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 7d0366c98bc..989635c02ba 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -20,27 +20,29 @@ //! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node. 
-use crate::{handler, protocol}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + convert::Infallible, + num::NonZeroUsize, + task::{Context, Poll}, +}; + use either::Either; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{ + connection::ConnectedPoint, multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr, +}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm}; -use libp2p_swarm::dial_opts::{self, DialOpts}; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler, - THandlerOutEvent, + behaviour::{ConnectionClosed, DialFailure, FromSwarm}, + dial_opts::{self, DialOpts}, + dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, + NewExternalAddrCandidate, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; use lru::LruCache; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::Infallible; -use std::num::NonZeroUsize; -use std::task::{Context, Poll}; use thiserror::Error; +use crate::{handler, protocol}; + pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; /// The events produced by the [`Behaviour`]. @@ -184,7 +186,8 @@ impl NetworkBehaviour for Behaviour { handler::relayed::Handler::new(connected_point, self.observed_addresses()); handler.on_behaviour_event(handler::relayed::Command::Connect); - return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. 
+ return Ok(Either::Left(handler)); } self.direct_connections .entry(peer) @@ -217,7 +220,8 @@ impl NetworkBehaviour for Behaviour { port_use, }, self.observed_addresses(), - ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one + // outbound. } self.direct_connections @@ -255,7 +259,8 @@ impl NetworkBehaviour for Behaviour { Either::Left(_) => connection_id, Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) { None => { - // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it. + // If the connection ID is unknown to us, it means we didn't create it so ignore + // any event coming from it. return; } Some(relayed_connection_id) => *relayed_connection_id, @@ -347,8 +352,9 @@ impl NetworkBehaviour for Behaviour { /// /// We use an [`LruCache`] to favor addresses that are reported more often. /// When attempting a hole-punch, we will try more frequent addresses first. -/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol). -/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch. +/// Most of these addresses will come from observations by other nodes (via e.g. the identify +/// protocol). More common observations mean a more likely stable port-mapping and thus a higher +/// chance of a successful hole-punch. struct Candidates { inner: LruCache, me: PeerId, diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index ad12a196cb9..0d6e1b5e889 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -20,26 +20,31 @@ //! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection. 
-use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; -use crate::{protocol, PROTOCOL_NAME}; +use std::{ + collections::VecDeque, + io, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; use futures::future; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; -use libp2p_core::ConnectedPoint; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, +use libp2p_core::{ + multiaddr::Multiaddr, + upgrade::{DeniedUpgrade, ReadyUpgrade}, + ConnectedPoint, }; use libp2p_swarm::{ + handler::{ + ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ListenUpgradeError, + }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; use protocol::{inbound, outbound}; -use std::collections::VecDeque; -use std::io; -use std::task::{Context, Poll}; -use std::time::Duration; + +use crate::{behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS, protocol, PROTOCOL_NAME}; #[derive(Debug)] pub enum Command { @@ -114,8 +119,8 @@ impl Handler { } self.attempts += 1; } - // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. - // TODO: remove when Rust 1.82 is MSRV + // A connection listener denies all incoming substreams, thus none can ever be fully + // negotiated. TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] future::Either::Right(output) => libp2p_core::util::unreachable(output), } diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index 005d8394f5e..c5209930ca2 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -18,14 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; +use std::io; + use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; +use crate::proto; + pub(crate) async fn handshake( stream: Stream, candidates: Vec, diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 8639ff4f053..cdd3d5fbf0b 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -18,17 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::PROTOCOL_NAME; +use std::io; + use asynchronous_codec::Framed; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; use web_time::Instant; +use crate::{proto, PROTOCOL_NAME}; + pub(crate) async fn handshake( stream: Stream, candidates: Vec, diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 36f168fb04a..a35c9a50cfe 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -18,9 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::transport::upgrade::Version; -use libp2p_core::transport::{MemoryTransport, Transport}; +use std::time::Duration; + +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{upgrade::Version, MemoryTransport, Transport}, +}; use libp2p_dcutr as dcutr; use libp2p_identify as identify; use libp2p_identity as identity; @@ -29,7 +32,6 @@ use libp2p_plaintext as plaintext; use libp2p_relay as relay; use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 1a70d2213b2..477172b42c0 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -18,27 +18,36 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{ - FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription, - FloodsubSubscriptionAction, +use std::{ + collections::{ + hash_map::{DefaultHasher, HashMap}, + VecDeque, + }, + iter, + task::{Context, Poll}, }; -use crate::topic::Topic; -use crate::FloodsubConfig; + use bytes::Bytes; use cuckoofilter::{CuckooError, CuckooFilter}; use fnv::FnvHashSet; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::{ - dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, - NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, + CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, + 
OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; -use std::collections::hash_map::{DefaultHasher, HashMap}; -use std::task::{Context, Poll}; -use std::{collections::VecDeque, iter}; + +use crate::{ + protocol::{ + FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription, + FloodsubSubscriptionAction, + }, + topic::Topic, + FloodsubConfig, +}; /// Network behaviour that handles the floodsub protocol. pub struct Floodsub { @@ -192,7 +201,8 @@ impl Floodsub { self.publish_many_inner(topic, data, true) } - /// Publishes a message with multiple topics to the network, even if we're not subscribed to any of the topics. + /// Publishes a message with multiple topics to the network, even if we're not subscribed to any + /// of the topics. pub fn publish_many_any( &mut self, topic: impl IntoIterator>, diff --git a/protocols/floodsub/src/lib.rs b/protocols/floodsub/src/lib.rs index 94766d5fdca..d43b0c88788 100644 --- a/protocols/floodsub/src/lib.rs +++ b/protocols/floodsub/src/lib.rs @@ -35,9 +35,11 @@ mod proto { pub(crate) use self::floodsub::pb::{mod_RPC::SubOpts, Message, RPC}; } -pub use self::layer::{Floodsub, FloodsubEvent}; -pub use self::protocol::{FloodsubMessage, FloodsubRpc}; -pub use self::topic::Topic; +pub use self::{ + layer::{Floodsub, FloodsubEvent}, + protocol::{FloodsubMessage, FloodsubRpc}, + topic::Topic, +}; /// Configuration options for the Floodsub protocol. #[derive(Debug, Clone)] diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs index edc842be8ce..69cfcbd9dc7 100644 --- a/protocols/floodsub/src/protocol.rs +++ b/protocols/floodsub/src/protocol.rs @@ -18,19 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use crate::topic::Topic; +use std::{io, iter, pin::Pin}; + use asynchronous_codec::Framed; use bytes::Bytes; use futures::{ io::{AsyncRead, AsyncWrite}, - Future, + Future, SinkExt, StreamExt, }; -use futures::{SinkExt, StreamExt}; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use std::{io, iter, pin::Pin}; + +use crate::{proto, topic::Topic}; const MAX_MESSAGE_LEN_BYTES: usize = 2048; diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index c955ee59c65..ee600d22098 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -19,15 +19,19 @@ // DEALINGS IN THE SOFTWARE. //! Data structure for efficiently storing known back-off's when pruning peers. -use crate::topic::TopicHash; -use libp2p_identity::PeerId; -use std::collections::{ - hash_map::{Entry, HashMap}, - HashSet, +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + HashSet, + }, + time::Duration, }; -use std::time::Duration; + +use libp2p_identity::PeerId; use web_time::Instant; +use crate::topic::TopicHash; + #[derive(Copy, Clone)] struct HeartbeatIndex(usize); @@ -68,8 +72,8 @@ impl BackoffStorage { } } - /// Updates the backoff for a peer (if there is already a more restrictive backoff then this call - /// doesn't change anything). + /// Updates the backoff for a peer (if there is already a more restrictive backoff then this + /// call doesn't change anything). 
pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) { let instant = Instant::now() + time; let insert_into_backoffs_by_heartbeat = @@ -155,7 +159,7 @@ impl BackoffStorage { None => false, }; if !keep { - //remove from backoffs + // remove from backoffs if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) { if m.get_mut().remove(peer).is_some() && m.get().is_empty() { m.remove(); diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index ae808d97261..bb3eaaa9b5a 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -19,11 +19,10 @@ // DEALINGS IN THE SOFTWARE. use std::{ - cmp::{max, Ordering}, - collections::HashSet, - collections::VecDeque, - collections::{BTreeSet, HashMap}, + cmp::{max, Ordering, Ordering::Equal}, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, fmt, + fmt::Debug, net::IpAddr, task::{Context, Poll}, time::Duration, @@ -31,52 +30,44 @@ use std::{ use futures::FutureExt; use futures_timer::Delay; -use prometheus_client::registry::Registry; -use rand::{seq::SliceRandom, thread_rng}; - use libp2p_core::{ - multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, transport::PortUse, Endpoint, Multiaddr, + multiaddr::Protocol::{Ip4, Ip6}, + transport::PortUse, + Endpoint, Multiaddr, }; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; +use libp2p_identity::{Keypair, PeerId}; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, dial_opts::DialOpts, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; +use prometheus_client::registry::Registry; +use quick_protobuf::{MessageWrite, Writer}; +use rand::{seq::SliceRandom, thread_rng}; use web_time::{Instant, SystemTime}; -use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; -use crate::protocol::SIGNING_PREFIX; -use 
crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; -use crate::time_cache::DuplicateCache; -use crate::topic::{Hasher, Topic, TopicHash}; -use crate::transform::{DataTransform, IdentityTransform}; -use crate::types::{ - ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, - SubscriptionAction, -}; -use crate::types::{PeerConnections, PeerKind, RpcOut}; -use crate::{backoff::BackoffStorage, FailedMessages}; use crate::{ + backoff::BackoffStorage, config::{Config, ValidationMode}, - types::Graft, -}; -use crate::{gossip_promises::GossipPromises, types::Prune}; -use crate::{ + gossip_promises::GossipPromises, handler::{Handler, HandlerEvent, HandlerIn}, - types::IWant, -}; -use crate::{mcache::MessageCache, types::IHave}; -use crate::{ + mcache::MessageCache, metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}, + peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}, + protocol::SIGNING_PREFIX, rpc::Sender, + rpc_proto::proto, + subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}, + time_cache::DuplicateCache, + topic::{Hasher, Topic, TopicHash}, + transform::{DataTransform, IdentityTransform}, + types::{ + ControlAction, Graft, IHave, IWant, Message, MessageAcceptance, MessageId, PeerConnections, + PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, SubscriptionAction, + }, + FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; -use crate::{rpc_proto::proto, TopicScoreParams}; -use crate::{PublishError, SubscriptionError, ValidationError}; -use quick_protobuf::{MessageWrite, Writer}; -use std::{cmp::Ordering::Equal, fmt::Debug}; #[cfg(test)] mod tests; @@ -221,8 +212,9 @@ impl From for PublishConfig { let public_key = keypair.public(); let key_enc = public_key.encode_protobuf(); let key = if key_enc.len() <= 42 { - // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we 
don't include it - // specifically in the [`rpc_proto::proto::Message::key`] field. + // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we + // don't include it specifically in the + // [`rpc_proto::proto::Message::key`] field. None } else { // Include the protobuf encoding of the public key in the message. @@ -289,7 +281,7 @@ pub struct Behaviour { /// The last publish time for fanout topics. fanout_last_pub: HashMap, - ///Storage for backoffs + /// Storage for backoffs backoffs: BackoffStorage, /// Message cache for the last few heartbeats. @@ -1415,7 +1407,7 @@ where + self.config.graft_flood_threshold()) - self.config.prune_backoff(); if flood_cutoff > now { - //extra penalty + // extra penalty peer_score.add_penalty(peer_id, 1); } } @@ -1436,15 +1428,16 @@ where topic=%topic_hash, "GRAFT: ignoring peer with negative score" ); - // we do send them PRUNE however, because it's a matter of protocol correctness + // we do send them PRUNE however, because it's a matter of protocol + // correctness to_prune_topics.insert(topic_hash.clone()); // but we won't PX to them do_px = false; continue; } - // check mesh upper bound and only allow graft if the upper bound is not reached or - // if it is an outbound peer + // check mesh upper bound and only allow graft if the upper bound is not reached + // or if it is an outbound peer if peers.len() >= self.config.mesh_n_high() && !self.outbound_peers.contains(peer_id) { @@ -1572,7 +1565,7 @@ where self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune); if self.mesh.contains_key(&topic_hash) { - //connect to px peers + // connect to px peers if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { @@ -1604,7 +1597,7 @@ where let n = self.config.prune_peers(); // Ignore peerInfo with no ID // - //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a + // TODO: Once signed records are spec'd: Can we use 
peerInfo without any IDs if they have a // signed peer record? px.retain(|p| p.peer_id.is_some()); if px.len() > n { @@ -2867,8 +2860,8 @@ where .expect("Previously established connection to peer must be present"); peer.connections.remove(index); - // If there are more connections and this peer is in a mesh, inform the first connection - // handler. + // If there are more connections and this peer is in a mesh, inform the first + // connection handler. if !peer.connections.is_empty() { for topic in &peer.topics { if let Some(mesh_peers) = self.mesh.get(topic) { @@ -3162,7 +3155,8 @@ where } // Handle control messages - // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however) + // group some control messages, this minimises SendEvents (code is simplified to + // handle each event at a time however) let mut ihave_msgs = vec![]; let mut graft_msgs = vec![]; let mut prune_msgs = vec![]; diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 9567150382a..eaa983d214d 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -20,16 +20,17 @@ // Collection of tests for the gossipsub network behaviour -use super::*; -use crate::rpc::Receiver; -use crate::subscription_filter::WhitelistSubscriptionFilter; -use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; +use std::{future, net::Ipv4Addr, thread::sleep}; + use byteorder::{BigEndian, ByteOrder}; use libp2p_core::ConnectedPoint; use rand::Rng; -use std::future; -use std::net::Ipv4Addr; -use std::thread::sleep; + +use super::*; +use crate::{ + config::ConfigBuilder, rpc::Receiver, subscription_filter::WhitelistSubscriptionFilter, + types::Rpc, IdentTopic as Topic, +}; #[derive(Default, Debug)] struct InjectNodes @@ -311,7 +312,8 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc { messages.push(RawMessage { source: message.from.map(|x| 
PeerId::from_bytes(&x).unwrap()), data: message.data.unwrap_or_default(), - sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application + sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), /* don't inform the + * application */ topic: TopicHash::from_raw(message.topic), signature: message.signature, // don't inform the application key: None, @@ -677,7 +679,7 @@ fn test_publish_without_flood_publishing() { // - Send publish message to all peers // - Insert message into gs.mcache and gs.received - //turn off flood publish to test old behaviour + // turn off flood publish to test old behaviour let config = ConfigBuilder::default() .flood_publish(false) .build() @@ -757,7 +759,7 @@ fn test_fanout() { // - Send publish message to fanout peers // - Insert message into gs.mcache and gs.received - //turn off flood publish to test fanout behaviour + // turn off flood publish to test fanout behaviour let config = ConfigBuilder::default() .flood_publish(false) .build() @@ -1447,10 +1449,10 @@ fn test_explicit_peer_gets_connected() { .to_subscribe(true) .create_network(); - //create new peer + // create new peer let peer = PeerId::random(); - //add peer as explicit peer + // add peer as explicit peer gs.add_explicit_peer(&peer); let num_events = gs @@ -1483,17 +1485,17 @@ fn test_explicit_peer_reconnects() { let peer = others.first().unwrap(); - //add peer as explicit peer + // add peer as explicit peer gs.add_explicit_peer(peer); flush_events(&mut gs, receivers); - //disconnect peer + // disconnect peer disconnect_peer(&mut gs, peer); gs.heartbeat(); - //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` + // check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` assert_eq!( gs.events .iter() @@ -1508,7 +1510,7 @@ fn test_explicit_peer_reconnects() { gs.heartbeat(); - //check that there is a reconnect after second heartbeat + // check that there is a reconnect after second 
heartbeat assert!( gs.events .iter() @@ -1536,11 +1538,11 @@ fn test_handle_graft_explicit_peer() { gs.handle_graft(peer, topic_hashes.clone()); - //peer got not added to mesh + // peer got not added to mesh assert!(gs.mesh[&topic_hashes[0]].is_empty()); assert!(gs.mesh[&topic_hashes[1]].is_empty()); - //check prunes + // check prunes let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == peer && match m { @@ -1566,13 +1568,13 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { .explicit(1) .create_network(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!( gs.mesh[&topic_hashes[0]], vec![peers[1]].into_iter().collect() ); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1581,7 +1583,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) }); @@ -1603,10 +1605,10 @@ fn do_not_graft_explicit_peer() { gs.heartbeat(); - //mesh stays empty + // mesh stays empty assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &others[0] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1663,7 +1665,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { .explicit(1) .create_network(); - //create new topic, both peers subscribing to it but we do not subscribe to it + // create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); for peer in peers.iter().take(2) { @@ -1676,13 +1678,13 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { ); } - //subscribe now to topic + // subscribe now to topic gs.subscribe(&topic).unwrap(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1691,7 +1693,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1711,7 +1713,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { .explicit(1) .create_network(); - //create new topic, both peers subscribing to it but we do not subscribe to it + // create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); for peer in peers.iter().take(2) { @@ -1724,16 +1726,16 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { ); } - //we send a message for this topic => this will initialize the fanout + // we send a message for this topic => this will initialize the fanout gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); - //subscribe now to topic + // subscribe now to topic gs.subscribe(&topic).unwrap(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1742,7 +1744,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1774,15 +1776,15 @@ fn no_gossip_gets_sent_to_explicit_peers() { validated: true, }; - //forward the message + // forward the message gs.handle_received_message(message, &local_id); - //simulate multiple gossip calls (for randomness) + // simulate multiple gossip calls (for randomness) for _ in 0..3 { gs.emit_gossip(); } - //assert that no gossip gets sent to explicit peer + // assert that no gossip gets sent to explicit peer let receiver = receivers.remove(&peers[0]).unwrap(); let mut gossips = 0; let non_priority = receiver.non_priority.get_ref(); @@ -1835,7 +1837,7 @@ fn test_mesh_subtraction() { // Adds mesh_low peers and PRUNE 2 giving us a deficit. let n = config.mesh_n_high() + 10; - //make all outbound connections so that we allow grafting to all + // make all outbound connections so that we allow grafting to all let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) @@ -1866,10 +1868,10 @@ fn test_connect_to_px_peers_on_handle_prune() { .to_subscribe(true) .create_network(); - //handle prune from single peer with px peers + // handle prune from single peer with px peers let mut px = Vec::new(); - //propose more px peers than config.prune_peers() + // propose more px peers than config.prune_peers() for _ in 0..config.prune_peers() + 5 { px.push(PeerInfo { peer_id: Some(PeerId::random()), @@ -1885,7 +1887,7 @@ fn test_connect_to_px_peers_on_handle_prune() { )], ); - //Check DialPeer events for px peers + // Check DialPeer events for px peers let dials: Vec<_> = gs .events .iter() @@ -1903,7 +1905,7 @@ fn test_connect_to_px_peers_on_handle_prune() { // No duplicates assert_eq!(dials_set.len(), config.prune_peers()); - //all dial peers must be in px + // all dial peers must be in px assert!(dials_set.is_subset( &px.iter() .map(|i| *i.peer_id.as_ref().unwrap()) @@ -1915,14 +1917,14 @@ fn test_connect_to_px_peers_on_handle_prune() { fn test_send_px_and_backoff_in_prune() { let config: Config = 
Config::default(); - //build mesh with enough peers for px + // build mesh with enough peers for px let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //send prune to peer + // send prune to peer gs.send_graft_prune( HashMap::new(), vec![(peers[0], vec![topics[0].clone()])] @@ -1931,7 +1933,7 @@ fn test_send_px_and_backoff_in_prune() { HashSet::new(), ); - //check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -1957,14 +1959,14 @@ fn test_send_px_and_backoff_in_prune() { fn test_prune_backoffed_peer_on_graft() { let config: Config = Config::default(); - //build mesh with enough peers for px + // build mesh with enough peers for px let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //remove peer from mesh and send prune to peer => this adds a backoff for this peer + // remove peer from mesh and send prune to peer => this adds a backoff for this peer gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); gs.send_graft_prune( HashMap::new(), @@ -1974,13 +1976,13 @@ fn test_prune_backoffed_peer_on_graft() { HashSet::new(), ); - //ignore all messages until now + // ignore all messages until now let receivers = flush_events(&mut gs, receivers); - //handle graft + // handle graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); - //check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -2007,7 +2009,7 @@ fn test_do_not_graft_within_backoff_period() { .heartbeat_interval(Duration::from_millis(100)) .build() .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible + // only one peer => mesh too small and will try to regraft as 
early as possible let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2015,22 +2017,22 @@ fn test_do_not_graft_within_backoff_period() { .gs_config(config) .create_network(); - //handle prune from peer with backoff of one second + // handle prune from peer with backoff of one second gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); - //forget all events until now + // forget all events until now let receivers = flush_events(&mut gs, receivers); - //call heartbeat + // call heartbeat gs.heartbeat(); - //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). + // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). for _ in 0..10 { sleep(Duration::from_millis(100)); gs.heartbeat(); } - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); @@ -2039,11 +2041,11 @@ fn test_do_not_graft_within_backoff_period() { "Graft message created too early within backoff period" ); - //Heartbeat one more time this should graft now + // Heartbeat one more time this should graft now sleep(Duration::from_millis(100)); gs.heartbeat(); - //check that graft got created + // check that graft got created let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. 
})); assert!( control_msgs > 0, @@ -2053,14 +2055,14 @@ fn test_do_not_graft_within_backoff_period() { #[test] fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { - //set default backoff period to 1 second + // set default backoff period to 1 second let config = ConfigBuilder::default() .prune_backoff(Duration::from_millis(90)) .backoff_slack(1) .heartbeat_interval(Duration::from_millis(100)) .build() .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible + // only one peer => mesh too small and will try to regraft as early as possible let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2068,20 +2070,20 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without .gs_config(config) .create_network(); - //handle prune from peer without a specified backoff + // handle prune from peer without a specified backoff gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); - //forget all events until now + // forget all events until now let receivers = flush_events(&mut gs, receivers); - //call heartbeat + // call heartbeat gs.heartbeat(); - //Apply one more heartbeat + // Apply one more heartbeat sleep(Duration::from_millis(100)); gs.heartbeat(); - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. 
})); @@ -2090,11 +2092,11 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without "Graft message created too early within backoff period" ); - //Heartbeat one more time this should graft now + // Heartbeat one more time this should graft now sleep(Duration::from_millis(100)); gs.heartbeat(); - //check that graft got created + // check that graft got created let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( control_msgs > 0, @@ -2181,7 +2183,7 @@ fn test_flood_publish() { .to_subscribe(true) .create_network(); - //publish message + // publish message let publish_data = vec![0; 42]; gs.publish(Topic::new(topic), publish_data).unwrap(); @@ -2228,15 +2230,15 @@ fn test_flood_publish() { fn test_gossip_to_at_least_gossip_lazy_peers() { let config: Config = Config::default(); - //add more peers than in mesh to test gossipping - //by default only mesh_n_low peers will get added to mesh + // add more peers than in mesh to test gossipping + // by default only mesh_n_low peers will get added to mesh let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) .topics(vec!["topic".into()]) .to_subscribe(true) .create_network(); - //receive message + // receive message let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2248,7 +2250,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { }; gs.handle_received_message(raw_message.clone(), &PeerId::random()); - //emit gossip + // emit gossip gs.emit_gossip(); // Transform the inbound message @@ -2256,7 +2258,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // check that exactly config.gossip_lazy() many gossip messages were sent. 
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { RpcOut::IHave(IHave { topic_hash, @@ -2271,7 +2273,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { fn test_gossip_to_at_most_gossip_factor_peers() { let config: Config = Config::default(); - //add a lot of peers + // add a lot of peers let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(m) @@ -2279,7 +2281,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { .to_subscribe(true) .create_network(); - //receive message + // receive message let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2291,14 +2293,14 @@ fn test_gossip_to_at_most_gossip_factor_peers() { }; gs.handle_received_message(raw_message.clone(), &PeerId::random()); - //emit gossip + // emit gossip gs.emit_gossip(); // Transform the inbound message let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // check that exactly config.gossip_lazy() many gossip messages were sent. 
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { RpcOut::IHave(IHave { topic_hash, @@ -2316,7 +2318,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { fn test_accept_only_outbound_peer_grafts_when_mesh_full() { let config: Config = Config::default(); - //enough peers to fill the mesh + // enough peers to fill the mesh let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2328,30 +2330,30 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() { gs.handle_graft(&peer, topics.clone()); } - //assert current mesh size + // assert current mesh size assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - //create an outbound and an inbound peer + // create an outbound and an inbound peer let (inbound, _in_reciver) = add_peer(&mut gs, &topics, false, false); let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false); - //send grafts + // send grafts gs.handle_graft(&inbound, vec![topics[0].clone()]); gs.handle_graft(&outbound, vec![topics[0].clone()]); - //assert mesh size + // assert mesh size assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - //inbound is not in mesh + // inbound is not in mesh assert!(!gs.mesh[&topics[0]].contains(&inbound)); - //outbound is in mesh + // outbound is in mesh assert!(gs.mesh[&topics[0]].contains(&outbound)); } #[test] fn test_do_not_remove_too_many_outbound_peers() { - //use an extreme case to catch errors with high probability + // use an extreme case to catch errors with high probability let m = 50; let n = 2 * m; let config = ConfigBuilder::default() @@ -2362,7 +2364,7 @@ fn test_do_not_remove_too_many_outbound_peers() { .build() .unwrap(); - //fill the mesh with inbound connections + // fill the mesh with inbound connections let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) @@ -2375,7 +2377,7 @@ fn test_do_not_remove_too_many_outbound_peers() { 
gs.handle_graft(&peer, topics.clone()); } - //create m outbound connections and graft (we will accept the graft) + // create m outbound connections and graft (we will accept the graft) let mut outbound = HashSet::new(); for _ in 0..m { let (peer, _) = add_peer(&mut gs, &topics, true, false); @@ -2383,7 +2385,7 @@ fn test_do_not_remove_too_many_outbound_peers() { gs.handle_graft(&peer, topics.clone()); } - //mesh is overly full + // mesh is overly full assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); // run a heartbeat @@ -2392,7 +2394,7 @@ fn test_do_not_remove_too_many_outbound_peers() { // Peers should be removed to reach n assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); - //all outbound peers are still in the mesh + // all outbound peers are still in the mesh assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); } @@ -2412,7 +2414,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { gs.handle_graft(&peer, topics.clone()); } - //create config.mesh_outbound_min() many outbound connections without grafting + // create config.mesh_outbound_min() many outbound connections without grafting let mut peers = vec![]; for _ in 0..config.mesh_outbound_min() { peers.push(add_peer(&mut gs, &topics, true, false)); @@ -2435,7 +2437,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { fn test_prune_negative_scored_peers() { let config = Config::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2449,16 +2451,16 @@ fn test_prune_negative_scored_peers() { ))) .create_network(); - //add penalty to peer + // add penalty to peer gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - //execute heartbeat + // execute heartbeat gs.heartbeat(); - //peer should not be in mesh anymore + // peer should not be in mesh anymore assert!(gs.mesh[&topics[0]].is_empty()); - //check prune message + // check prune message let 
(control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -2481,7 +2483,7 @@ fn test_prune_negative_scored_peers() { #[test] fn test_dont_graft_to_negative_scored_peers() { let config = Config::default(); - //init full mesh + // init full mesh let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2493,34 +2495,34 @@ fn test_dont_graft_to_negative_scored_peers() { ))) .create_network(); - //add two additional peers that will not be part of the mesh + // add two additional peers that will not be part of the mesh let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); - //reduce score of p1 to negative + // reduce score of p1 to negative gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); - //handle prunes of all other peers + // handle prunes of all other peers for p in peers { gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); } - //heartbeat + // heartbeat gs.heartbeat(); - //assert that mesh only contains p2 + // assert that mesh only contains p2 assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); } -///Note that in this test also without a penalty the px would be ignored because of the +/// Note that in this test also without a penalty the px would be ignored because of the /// acceptPXThreshold, but the spec still explicitly states the rule that px from negative /// peers should get ignored, therefore we test it here. 
#[test] fn test_ignore_px_from_negative_scored_peer() { let config = Config::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2532,10 +2534,10 @@ fn test_ignore_px_from_negative_scored_peer() { ))) .create_network(); - //penalize peer + // penalize peer gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - //handle prune from single peer with px peers + // handle prune from single peer with px peers let px = vec![PeerInfo { peer_id: Some(PeerId::random()), }]; @@ -2549,7 +2551,7 @@ fn test_ignore_px_from_negative_scored_peer() { )], ); - //assert no dials + // assert no dials assert_eq!( gs.events .iter() @@ -2760,7 +2762,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { collected_messages }); - //the message got sent to p2 + // the message got sent to p2 assert!(sent_messages .iter() .map(|(peer_id, msg)| ( @@ -2768,7 +2770,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.data_transform.inbound_transform(msg.clone()).unwrap() )) .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); - //the message got not sent to p1 + // the message got not sent to p1 assert!(sent_messages .iter() .map(|(peer_id, msg)| ( @@ -2786,7 +2788,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, ..PeerScoreThresholds::default() }; - //build full mesh + // build full mesh let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2802,21 +2804,21 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.handle_graft(&peer, topics.clone()); } - //add two additional peers that will not be part of the mesh + // add two additional peers that will not be part of the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, 
false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.gossip_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + // reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //message that other peers have + // message that other peers have let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2863,31 +2865,31 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { ..PeerScoreThresholds::default() }; - //build mesh with no peers and no subscribed topics + // build mesh with no peers and no subscribed topics let (mut gs, _, mut receivers, _) = inject_nodes1() .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //create a new topic for which we are not subscribed + // create a new topic for which we are not subscribed let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.publish_threshold + // note that penalties get 
squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //a heartbeat will remove the peers from the mesh + // a heartbeat will remove the peers from the mesh gs.heartbeat(); // publish on topic @@ -2907,7 +2909,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { collected_publish }); - //assert only published to p2 + // assert only published to p2 assert_eq!(publishes.len(), 1); assert_eq!(publishes[0].0, p2); } @@ -2921,28 +2923,28 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, ..PeerScoreThresholds::default() }; - //build mesh with no peers + // build mesh with no peers let (mut gs, _, mut receivers, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.publish_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //a heartbeat will remove the peers from the mesh + // a heartbeat will remove the peers from the mesh gs.heartbeat(); // publish on topic @@ -2962,7 +2964,7 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { collected_publish }); - //assert only published to p2 + // assert only published to p2 assert_eq!(publishes.len(), 1); assert!(publishes[0].0 == p2); } @@ -2978,23 +2980,23 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { ..PeerScoreThresholds::default() }; - //build mesh with no peers + // build mesh with no peers let (mut gs, _, _, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config.clone()) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); - //reduce score of p1 below peer_score_thresholds.graylist_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.graylist_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below publish_threshold but not below graylist_threshold + // reduce score of p2 below publish_threshold but not below graylist_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); let raw_message1 = RawMessage { @@ -3053,10 +3055,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { message_ids: vec![config.message_id(message2)], }); - //clear events + // clear events gs.events.clear(); - //receive from p1 + // receive from p1 gs.on_connection_handler_event( p1, ConnectionId::new_unchecked(0), @@ -3070,7 +3072,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }, ); - //only the subscription event gets processed, the rest is dropped + // only the subscription event gets processed, the rest is dropped assert_eq!(gs.events.len(), 1); assert!(matches!( gs.events[0], @@ -3082,7 +3084,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { message_ids: vec![config.message_id(message4)], }); - //receive from p2 + // receive from p2 gs.on_connection_handler_event( p2, ConnectionId::new_unchecked(0), @@ -3096,7 +3098,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }, ); - //events got processed + // events got processed assert!(gs.events.len() > 1); } @@ -3145,7 +3147,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { 0 ); - //handle prune from peer peers[1] with px peers + // handle prune from peer peers[1] with px peers let px = vec![PeerInfo { peer_id: Some(PeerId::random()), }]; @@ -3158,7 +3160,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { )], ); - //assert there are dials now + // assert there are dials now assert!( gs.events .iter() @@ -3178,7 +3180,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { .build() .unwrap(); - //build mesh with more peers than mesh can hold + // build mesh with more peers than mesh can hold let n = config.mesh_n_high() + 1; let (mut gs, peers, _receivers, 
topics) = inject_nodes1() .peer_no(n) @@ -3198,21 +3200,21 @@ fn test_keep_best_scoring_peers_on_oversubscription() { gs.handle_graft(peer, topics.clone()); } - //assign scores to peers equalling their index + // assign scores to peers equalling their index - //set random positive scores + // set random positive scores for (index, peer) in peers.iter().enumerate() { gs.set_application_score(peer, index as f64); } assert_eq!(gs.mesh[&topics[0]].len(), n); - //heartbeat to prune some peers + // heartbeat to prune some peers gs.heartbeat(); assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); - //mesh contains retain_scores best peers + // mesh contains retain_scores best peers assert!(gs.mesh[&topics[0]].is_superset( &peers[(n - config.retain_scores())..] .iter() @@ -3239,7 +3241,7 @@ fn test_scoring_p1() { .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3250,9 +3252,9 @@ fn test_scoring_p1() { .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //sleep for 2 times the mesh_quantum + // sleep for 2 times the mesh_quantum sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]) @@ -3265,9 +3267,9 @@ fn test_scoring_p1() { "score should be less than 3 * time_in_mesh_weight * topic_weight" ); - //sleep again for 2 times the mesh_quantum + // sleep again for 2 times the mesh_quantum sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]) @@ -3275,9 +3277,9 @@ fn test_scoring_p1() { "score should be at least 4 * time_in_mesh_weight * topic_weight" ); - //sleep for 
enough periods to reach maximum + // sleep for enough periods to reach maximum sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3309,7 +3311,7 @@ fn test_scoring_p2() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh + time_in_mesh_weight: 0.0, // deactivate time in mesh first_message_deliveries_weight: 2.0, first_message_deliveries_cap: 10.0, first_message_deliveries_decay: 0.9, @@ -3321,7 +3323,7 @@ fn test_scoring_p2() { .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3338,9 +3340,9 @@ fn test_scoring_p2() { }; let m1 = random_message(&mut seq, &topics); - //peer 0 delivers message first + // peer 0 delivers message first deliver_message(&mut gs, 0, m1.clone()); - //peer 1 delivers message second + // peer 1 delivers message second deliver_message(&mut gs, 1, m1); assert_eq!( @@ -3355,7 +3357,7 @@ fn test_scoring_p2() { "there should be no score for second message deliveries * topic_weight" ); - //peer 2 delivers two new messages + // peer 2 delivers two new messages deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); assert_eq!( @@ -3364,7 +3366,7 @@ fn test_scoring_p2() { "score should be exactly 2 * first_message_deliveries_weight * topic_weight" ); - //test decaying + // test decaying gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( @@ -3385,7 +3387,7 @@ fn test_scoring_p2() { first_message_deliveries_weight * topic_weight" ); - //test cap + // test cap for _ in 
0..topic_params.first_message_deliveries_cap as u64 { deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); } @@ -3407,8 +3409,8 @@ fn test_scoring_p3() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, mesh_message_deliveries_cap: 10.0, @@ -3421,7 +3423,7 @@ fn test_scoring_p3() { peer_score_params.topics.insert(topic_hash, topic_params); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3439,35 +3441,35 @@ fn test_scoring_p3() { let mut expected_message_deliveries = 0.0; - //messages used to test window + // messages used to test window let m1 = random_message(&mut seq, &topics); let m2 = random_message(&mut seq, &topics); - //peer 1 delivers m1 + // peer 1 delivers m1 deliver_message(&mut gs, 1, m1.clone()); - //peer 0 delivers two message + // peer 0 delivers two message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 2.0; sleep(Duration::from_millis(60)); - //peer 1 delivers m2 + // peer 1 delivers m2 deliver_message(&mut gs, 1, m2.clone()); sleep(Duration::from_millis(70)); - //peer 0 delivers m1 and m2 only m2 gets counted + // peer 0 delivers m1 and m2 only m2 gets counted deliver_message(&mut gs, 0, m1); deliver_message(&mut gs, 0, m2); expected_message_deliveries += 1.0; sleep(Duration::from_millis(900)); - //message deliveries penalties get activated, peer 0 has only delivered 3 messages and + 
// message deliveries penalties get activated, peer 0 has only delivered 3 messages and // therefore gets a penalty gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3483,10 +3485,10 @@ fn test_scoring_p3() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //apply 10 decays + // apply 10 decays for _ in 0..10 { gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay } assert_eq!( @@ -3505,8 +3507,8 @@ fn test_scoring_p3b() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, mesh_message_deliveries_cap: 10.0, @@ -3522,7 +3524,7 @@ fn test_scoring_p3b() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3540,49 +3542,49 @@ fn test_scoring_p3b() { let mut expected_message_deliveries = 0.0; - //add some positive score + // add some positive score gs.peer_score .as_mut() .unwrap() .0 .set_application_score(&peers[0], 100.0); - //peer 0 delivers two message + // peer 0 delivers two message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 2.0; sleep(Duration::from_millis(1050)); - //activation kicks in + // 
activation kicks in gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay - //prune peer + // prune peer gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); - //wait backoff + // wait backoff sleep(Duration::from_millis(130)); - //regraft peer + // regraft peer gs.handle_graft(&peers[0], topics.clone()); - //the score should now consider p3b + // the score should now consider p3b let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 100.0 + expected_b3 * -3.0 * 0.7 ); - //we can also add a new p3 to the score + // we can also add a new p3 to the score - //peer 0 delivers one message + // peer 0 delivers one message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 1.0; sleep(Duration::from_millis(1050)); gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay expected_b3 *= 0.95; assert_eq!( @@ -3601,10 +3603,14 @@ fn test_scoring_p4_valid_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3614,7 +3620,7 @@ fn test_scoring_p4_valid_message() { 
peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3630,7 +3636,7 @@ fn test_scoring_p4_valid_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers valid message + // peer 0 delivers valid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3639,7 +3645,7 @@ fn test_scoring_p4_valid_message() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //message m1 gets validated + // message m1 gets validated gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3659,10 +3665,14 @@ fn test_scoring_p4_invalid_signature() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3672,7 +3682,7 @@ fn test_scoring_p4_invalid_signature() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3685,7 +3695,7 @@ fn test_scoring_p4_invalid_signature() { let mut seq 
= 0; - //peer 0 delivers message with invalid signature + // peer 0 delivers message with invalid signature let m = random_message(&mut seq, &topics); gs.on_connection_handler_event( @@ -3717,10 +3727,14 @@ fn test_scoring_p4_message_from_self() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3730,7 +3744,7 @@ fn test_scoring_p4_message_from_self() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3746,7 +3760,7 @@ fn test_scoring_p4_message_from_self() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message from self + // peer 0 delivers invalid message from self let mut m = random_message(&mut seq, &topics); m.source = Some(*gs.publish_config.get_own_id().unwrap()); @@ -3767,10 +3781,14 @@ fn test_scoring_p4_ignored_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries 
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3780,7 +3798,7 @@ fn test_scoring_p4_ignored_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3796,7 +3814,7 @@ fn test_scoring_p4_ignored_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers ignored message + // peer 0 delivers ignored message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3805,7 +3823,7 @@ fn test_scoring_p4_ignored_message() { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //message m1 gets ignored + // message m1 gets ignored gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3825,10 +3843,14 @@ fn test_scoring_p4_application_invalidated_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + 
// deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3838,7 +3860,7 @@ fn test_scoring_p4_application_invalidated_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3854,7 +3876,7 @@ fn test_scoring_p4_application_invalidated_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3863,7 +3885,7 @@ fn test_scoring_p4_application_invalidated_message() { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3886,10 +3908,14 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3899,7 +3925,7 @@ fn 
test_scoring_p4_application_invalid_message_from_two_peers() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3915,20 +3941,20 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - //peer 1 delivers same message + // peer 1 delivers same message deliver_message(&mut gs, 1, m1); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3955,10 +3981,14 @@ fn test_scoring_p4_three_application_invalid_messages() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3968,7 +3998,7 @@ fn 
test_scoring_p4_three_application_invalid_messages() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3984,7 +4014,7 @@ fn test_scoring_p4_three_application_invalid_messages() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers two invalid message + // peer 0 delivers two invalid message let m1 = random_message(&mut seq, &topics); let m2 = random_message(&mut seq, &topics); let m3 = random_message(&mut seq, &topics); @@ -4002,7 +4032,7 @@ fn test_scoring_p4_three_application_invalid_messages() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //messages gets rejected + // messages gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -4021,7 +4051,7 @@ fn test_scoring_p4_three_application_invalid_messages() { MessageAcceptance::Reject, ); - //number of invalid messages gets squared + // number of invalid messages gets squared assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 9.0 * -2.0 * 0.7 @@ -4038,10 +4068,14 @@ fn test_scoring_p4_decay() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + // deactivate time in mesh + time_in_mesh_weight: 0.0, + // deactivate first time deliveries + first_message_deliveries_weight: 0.0, + // deactivate message deliveries + mesh_message_deliveries_weight: 0.0, + // deactivate mesh failure penalties + mesh_failure_penalty_weight: 0.0, invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 
0.9, topic_weight: 0.7, @@ -4051,7 +4085,7 @@ fn test_scoring_p4_decay() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -4067,7 +4101,7 @@ fn test_scoring_p4_decay() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -4075,7 +4109,7 @@ fn test_scoring_p4_decay() { let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -4087,7 +4121,7 @@ fn test_scoring_p4_decay() { -2.0 * 0.7 ); - //we decay + // we decay gs.peer_score.as_mut().unwrap().0.refresh_scores(); // the number of invalids gets decayed to 0.9 and then squared in the score @@ -4104,7 +4138,7 @@ fn test_scoring_p5() { ..PeerScoreParams::default() }; - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -4141,7 +4175,7 @@ fn test_scoring_p6() { .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) .create_network(); - //create 5 peers with the same ip + // create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); let peers = vec![ add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, @@ -4151,7 +4185,7 @@ fn test_scoring_p6() { add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0, ]; - //create 4 other peers with other ip + // create 4 other peers with other ip let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); let others = vec![ add_peer_with_addr(&mut gs, &[], 
false, false, addr2.clone()).0, @@ -4160,12 +4194,12 @@ fn test_scoring_p6() { add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, ]; - //no penalties yet + // no penalties yet for peer in peers.iter().chain(others.iter()) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); } - //add additional connection for 3 others with addr + // add additional connection for 3 others with addr for id in others.iter().take(3) { gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: *id, @@ -4180,14 +4214,14 @@ fn test_scoring_p6() { })); } - //penalties apply squared + // penalties apply squared for peer in peers.iter().chain(others.iter().take(3)) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } - //fourth other peer still no penalty + // fourth other peer still no penalty assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - //add additional connection for 3 of the peers to addr2 + // add additional connection for 3 of the peers to addr2 for peer in peers.iter().take(3) { gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: *peer, @@ -4202,7 +4236,7 @@ fn test_scoring_p6() { })); } - //double penalties for the first three of each + // double penalties for the first three of each for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(peer), @@ -4210,7 +4244,7 @@ fn test_scoring_p6() { ); } - //single penalties for the rest + // single penalties for the rest for peer in peers.iter().skip(3) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } @@ -4219,7 +4253,7 @@ fn test_scoring_p6() { 4.0 * -2.0 ); - //two times same ip doesn't count twice + // two times same ip doesn't count twice gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: peers[0], connection_id: ConnectionId::new_unchecked(0), @@ -4232,8 +4266,8 @@ fn test_scoring_p6() { 
other_established: 2, })); - //nothing changed - //double penalties for the first three of each + // nothing changed + // double penalties for the first three of each for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(peer), @@ -4241,7 +4275,7 @@ fn test_scoring_p6() { ); } - //single penalties for the rest + // single penalties for the rest for peer in peers.iter().skip(3) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } @@ -4274,7 +4308,7 @@ fn test_scoring_p7_grafts_before_backoff() { .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) .create_network(); - //remove peers from mesh and send prune to them => this adds a backoff for the peers + // remove peers from mesh and send prune to them => this adds a backoff for the peers for peer in peers.iter().take(2) { gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); gs.send_graft_prune( @@ -4284,31 +4318,31 @@ fn test_scoring_p7_grafts_before_backoff() { ); } - //wait 50 millisecs + // wait 50 millisecs sleep(Duration::from_millis(50)); - //first peer tries to graft + // first peer tries to graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); - //double behaviour penalty for first peer (squared) + // double behaviour penalty for first peer (squared) assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 4.0 * -2.0 ); - //wait 100 millisecs + // wait 100 millisecs sleep(Duration::from_millis(100)); - //second peer tries to graft + // second peer tries to graft gs.handle_graft(&peers[1], vec![topics[0].clone()]); - //single behaviour penalty for second peer + // single behaviour penalty for second peer assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 1.0 * -2.0 ); - //test decay + // test decay gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( @@ -4327,7 +4361,7 @@ fn test_opportunistic_grafting() { .mesh_n_low(3) .mesh_n(5) .mesh_n_high(7) - .mesh_outbound_min(0) 
//deactivate outbound handling + .mesh_outbound_min(0) // deactivate outbound handling .opportunistic_graft_ticks(2) .opportunistic_graft_peers(2) .build() @@ -4351,30 +4385,30 @@ fn test_opportunistic_grafting() { .scoring(Some((peer_score_params, thresholds))) .create_network(); - //fill mesh with 5 peers + // fill mesh with 5 peers for peer in &peers { gs.handle_graft(peer, topics.clone()); } - //add additional 5 peers + // add additional 5 peers let others: Vec<_> = (0..5) .map(|_| add_peer(&mut gs, &topics, false, false)) .collect(); - //currently mesh equals peers + // currently mesh equals peers assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); - //give others high scores (but the first two have not high enough scores) + // give others high scores (but the first two have not high enough scores) for (i, peer) in peers.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } - //set scores for peers in the mesh + // set scores for peers in the mesh for (i, (peer, _receiver)) in others.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } - //this gives a median of exactly 2.0 => should not apply opportunistic grafting + // this gives a median of exactly 2.0 => should not apply opportunistic grafting gs.heartbeat(); gs.heartbeat(); @@ -4384,10 +4418,10 @@ fn test_opportunistic_grafting() { "should not apply opportunistic grafting" ); - //reduce middle score to 1.0 giving a median of 1.0 + // reduce middle score to 1.0 giving a median of 1.0 gs.set_application_score(&peers[2], 1.0); - //opportunistic grafting after two heartbeats + // opportunistic grafting after two heartbeats gs.heartbeat(); assert_eq!( @@ -4417,17 +4451,17 @@ fn test_opportunistic_grafting() { #[test] fn test_ignore_graft_from_unknown_topic() { - //build gossipsub without subscribing to any topics + // build gossipsub without subscribing to any topics let (mut gs, peers, receivers, _) = inject_nodes1() .peer_no(1) .topics(vec![]) 
.to_subscribe(false) .create_network(); - //handle an incoming graft for some topic + // handle an incoming graft for some topic gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]); - //assert that no prune got created + // assert that no prune got created let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. })); assert_eq!( control_msgs, 0, @@ -4438,18 +4472,18 @@ fn test_ignore_graft_from_unknown_topic() { #[test] fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { let config = Config::default(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //receive a message + // receive a message let mut seq = 0; let m1 = random_message(&mut seq, &topics); @@ -4460,10 +4494,10 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { gs.handle_received_message(m1, &PeerId::random()); - //clear events + // clear events let receivers = flush_events(&mut gs, receivers); - //the first gossip_retransimission many iwants return the valid message, all others are + // the first gossip_retransimission many iwants return the valid message, all others are // ignored. 
for _ in 0..(2 * config.gossip_retransimission() + 10) { gs.handle_iwant(&peer, vec![id.clone()]); @@ -4490,7 +4524,7 @@ fn test_ignore_too_many_ihaves() { .max_ihave_messages(10) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4498,15 +4532,15 @@ fn test_ignore_too_many_ihaves() { .gs_config(config.clone()) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + // peer has 20 messages let mut seq = 0; let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); - //peer sends us one ihave for each message in order + // peer sends us one ihave for each message in order for raw_message in &messages { // Transform the inbound message let message = &gs @@ -4527,7 +4561,7 @@ fn test_ignore_too_many_ihaves() { .map(|m| config.message_id(&m)) .collect(); - //we send iwant only for the first 10 messages + // we send iwant only for the first 10 messages let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0])) @@ -4537,7 +4571,7 @@ fn test_ignore_too_many_ihaves() { "exactly the first ten ihaves should be processed and one iwant for each created" ); - //after a heartbeat everything is forgotten + // after a heartbeat everything is forgotten gs.heartbeat(); for raw_message in messages[10..].iter() { @@ -4553,7 +4587,7 @@ fn test_ignore_too_many_ihaves() { ); } - //we sent iwant for all 10 messages + // we sent iwant for all 10 messages let (control_msgs, _) = count_control_msgs(receivers, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if 
message_ids.len() == 1) @@ -4568,7 +4602,7 @@ fn test_ignore_too_many_messages_in_ihave() { .max_ihave_length(10) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4576,11 +4610,11 @@ fn test_ignore_too_many_messages_in_ihave() { .gs_config(config.clone()) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + // peer has 20 messages let mut seq = 0; let message_ids: Vec<_> = (0..20) .map(|_| random_message(&mut seq, &topics)) @@ -4588,7 +4622,7 @@ fn test_ignore_too_many_messages_in_ihave() { .map(|msg| config.message_id(&msg)) .collect(); - //peer sends us three ihaves + // peer sends us three ihaves gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); gs.handle_ihave( &peer, @@ -4601,7 +4635,7 @@ fn test_ignore_too_many_messages_in_ihave() { let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); - //we send iwant only for the first 10 messages + // we send iwant only for the first 10 messages let mut sum = 0; let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { @@ -4620,14 +4654,14 @@ fn test_ignore_too_many_messages_in_ihave() { assert_eq!(sum, 10, "exactly the first ten ihaves should be processed"); - //after a heartbeat everything is forgotten + // after a heartbeat everything is forgotten gs.heartbeat(); gs.handle_ihave( &peer, vec![(topics[0].clone(), message_ids[10..20].to_vec())], ); - //we sent 10 iwant messages ids via a IWANT rpc. + // we sent 10 iwant messages ids via a IWANT rpc. 
let mut sum = 0; let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { @@ -4649,7 +4683,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { .max_ihave_length(100) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4657,24 +4691,24 @@ fn test_limit_number_of_message_ids_inside_ihave() { .gs_config(config) .create_network(); - //graft to all peers to really fill the mesh with all the peers + // graft to all peers to really fill the mesh with all the peers for peer in peers { gs.handle_graft(&peer, topics.clone()); } - //add two other peers not in the mesh + // add two other peers not in the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //receive 200 messages from another peer + // receive 200 messages from another peer let mut seq = 0; for _ in 0..200 { gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); } - //emit gossip + // emit gossip gs.emit_gossip(); // both peers should have gotten 100 random ihave messages, to assert the randomness, we @@ -4727,12 +4761,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { - /* - use tracing_subscriber::EnvFilter; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - */ + // use tracing_subscriber::EnvFilter; + // let _ = tracing_subscriber::fmt() + // .with_env_filter(EnvFilter::from_default_env()) + // .try_init(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) .build() @@ -4862,7 +4894,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .gs_config(config) .create_network(); - 
//add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -4877,10 +4909,10 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); receivers.insert(p2, receiver2); - //p1 and p2 are not in the mesh + // p1 and p2 are not in the mesh assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); - //publish a message + // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); @@ -4921,7 +4953,7 @@ fn test_do_not_use_floodsub_in_fanout() { let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -4936,7 +4968,7 @@ fn test_do_not_use_floodsub_in_fanout() { add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); receivers.insert(p2, receiver2); - //publish a message + // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); @@ -4977,7 +5009,7 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() { let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5004,7 +5036,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() { .to_subscribe(false) .create_network(); - //add an old gossipsub peer + // add an old gossipsub peer let (p1, _receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5014,14 +5046,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() { Some(PeerKind::Gossipsub), ); - //prune the peer + // prune the peer gs.send_graft_prune( 
HashMap::new(), vec![(p1, topics.clone())].into_iter().collect(), HashSet::new(), ); - //check that prune does not contain px + // check that prune does not contain px let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), _ => false, @@ -5031,14 +5063,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() { #[test] fn test_dont_send_floodsub_peers_in_px() { - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //add two floodsub peers + // add two floodsub peers let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5049,14 +5081,14 @@ fn test_dont_send_floodsub_peers_in_px() { ); let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - //prune only mesh node + // prune only mesh node gs.send_graft_prune( HashMap::new(), vec![(peers[0], topics.clone())].into_iter().collect(), HashSet::new(), ); - //check that px in prune message is empty + // check that px in prune message is empty let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. 
}) => !px.is_empty(), _ => false, @@ -5072,7 +5104,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { .to_subscribe(false) .create_network(); - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5139,7 +5171,7 @@ fn test_subscribe_to_invalid_topic() { #[test] fn test_subscribe_and_graft_with_negative_score() { - //simulate a communication between two gossipsub instances + // simulate a communication between two gossipsub instances let (mut gs1, _, _, topic_hashes) = inject_nodes1() .topics(vec!["test".into()]) .scoring(Some(( @@ -5157,12 +5189,12 @@ fn test_subscribe_and_graft_with_negative_score() { let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false); let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false); - //add penalty to peer p2 + // add penalty to peer p2 gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); - //subscribe to topic in gs2 + // subscribe to topic in gs2 gs2.subscribe(&topic).unwrap(); let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, @@ -5191,17 +5223,17 @@ fn test_subscribe_and_graft_with_negative_score() { new_receivers }; - //forward the subscribe message + // forward the subscribe message let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); - //heartbeats on both + // heartbeats on both gs1.heartbeat(); gs2.heartbeat(); - //forward messages again + // forward messages again forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); - //nobody got penalized + // nobody got penalized assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); } diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 6e7861bae10..d53908ad267 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -18,22 
+18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::borrow::Cow; -use std::sync::Arc; -use std::time::Duration; - -use crate::error::ConfigBuilderError; -use crate::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}; -use crate::types::{Message, MessageId, PeerKind}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; +use crate::{ + error::ConfigBuilderError, + protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}, + types::{Message, MessageId, PeerKind}, +}; + /// The types of message validation that can be employed by gossipsub. #[derive(Debug, Clone)] pub enum ValidationMode { - /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to - /// be present as well as the sequence number. All messages must have valid signatures. + /// This is the default setting. This requires the message author to be a valid [`PeerId`] and + /// to be present as well as the sequence number. All messages must have valid signatures. /// /// NOTE: This setting will reject messages from nodes using /// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have @@ -134,8 +134,8 @@ impl Config { /// Affects how peers are selected when pruning a mesh due to over subscription. /// - /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). + /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder + /// are chosen randomly (D_score in the spec, default is 4). pub fn retain_scores(&self) -> usize { self.retain_scores } @@ -423,7 +423,9 @@ impl Default for ConfigBuilder { }), allow_self_origin: false, do_px: false, - prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented. 
+ // NOTE: Increasing this currently has little effect until Signed + // records are implemented. + prune_peers: 0, prune_backoff: Duration::from_secs(60), unsubscribe_backoff: Duration::from_secs(10), backoff_slack: 1, @@ -457,7 +459,8 @@ impl From for ConfigBuilder { } impl ConfigBuilder { - /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and `/meshsub/1.0.0`). + /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and + /// `/meshsub/1.0.0`). pub fn protocol_id_prefix( &mut self, protocol_id_prefix: impl Into>, @@ -547,8 +550,8 @@ impl ConfigBuilder { /// Affects how peers are selected when pruning a mesh due to over subscription. /// - /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). + /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the + /// remainder are chosen randomly (D_score in the spec, default is 4). pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self { self.config.retain_scores = retain_scores; self @@ -902,12 +905,15 @@ impl std::fmt::Debug for Config { #[cfg(test)] mod test { - use super::*; - use crate::topic::IdentityHash; - use crate::Topic; + use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; + use libp2p_core::UpgradeInfo; - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; + + use super::*; + use crate::{topic::IdentityHash, Topic}; #[test] fn create_config_with_message_id_as_plain_function() { diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 047d50f2338..eae4c51214e 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -36,8 +36,8 @@ pub enum PublishError { MessageTooLarge, /// The compression algorithm failed. 
TransformFailed(std::io::Error), - /// Messages could not be sent because the queues for all peers were full. The usize represents the - /// number of peers that were attempted. + /// Messages could not be sent because the queues for all peers were full. The usize represents + /// the number of peers that were attempted. AllQueuesFull(usize), } diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index bdf58b74fc2..b64811bb062 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -18,13 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::peer_score::RejectReason; -use crate::MessageId; -use crate::ValidationError; -use libp2p_identity::PeerId; use std::collections::HashMap; + +use libp2p_identity::PeerId; use web_time::Instant; +use crate::{peer_score::RejectReason, MessageId, ValidationError}; + /// Tracks recently sent `IWANT` messages and checks if peers respond to them. #[derive(Default)] pub(crate) struct GossipPromises { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 5f9669c02c2..2936182c3f8 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -18,27 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{GossipsubCodec, ProtocolConfig}; -use crate::rpc::Receiver; -use crate::rpc_proto::proto; -use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; -use crate::ValidationError; -use asynchronous_codec::Framed; -use futures::future::Either; -use futures::prelude::*; -use futures::StreamExt; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_swarm::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, -}; -use libp2p_swarm::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + +use asynchronous_codec::Framed; +use futures::{future::Either, prelude::*, StreamExt}; +use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_swarm::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, + }, + Stream, +}; use web_time::Instant; +use crate::{ + protocol::{GossipsubCodec, ProtocolConfig}, + rpc::Receiver, + rpc_proto::proto, + types::{PeerKind, RawMessage, Rpc, RpcOut}, + ValidationError, +}; + /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. #[derive(Debug)] @@ -111,7 +115,6 @@ pub struct EnabledHandler { peer_kind: Option, /// Keeps track on whether we have sent the peer kind to the behaviour. - // // NOTE: Use this flag rather than checking the substream count each poll. peer_kind_sent: bool, diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index f6a51da4a51..87db1b771d1 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -43,22 +43,23 @@ //! implementations, due to undefined elements in the current specification. //! //! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter. -//! Topics are of type [`TopicHash`]. 
The current go implementation uses raw utf-8 strings, and this -//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 +//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and +//! this is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 //! encoded) by setting the `hash_topics` configuration parameter to true. //! //! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in -//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned -//! integers. When messages are signed, they are monotonically increasing integers starting from a -//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random. -//! NOTE: These numbers are sequential in the current go implementation. +//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence +//! numbers in this implementation are sent as raw bytes across the wire. They are 64-bit +//! big-endian unsigned integers. When messages are signed, they are monotonically increasing +//! integers starting from a random value and wrapping around u64::MAX. When messages are +//! unsigned, they are chosen at random. NOTE: These numbers are sequential in the current go +//! implementation. //! //! # Peer Discovery //! //! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which -//! peers in a p2p network exchange information about each other among other reasons to become resistant -//! against the failure or replacement of the +//! peers in a p2p network exchange information about each other among other reasons to become +//! resistant against the failure or replacement of the //! 
[boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network. //! //! Peer @@ -111,22 +112,24 @@ mod topic; mod transform; mod types; -pub use self::behaviour::{Behaviour, Event, MessageAuthenticity}; -pub use self::config::{Config, ConfigBuilder, ValidationMode, Version}; -pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}; -pub use self::metrics::Config as MetricsConfig; -pub use self::peer_score::{ - score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, - TopicScoreParams, +pub use self::{ + behaviour::{Behaviour, Event, MessageAuthenticity}, + config::{Config, ConfigBuilder, ValidationMode, Version}, + error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}, + metrics::Config as MetricsConfig, + peer_score::{ + score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, + PeerScoreThresholds, TopicScoreParams, + }, + subscription_filter::{ + AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters, + MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter, + WhitelistSubscriptionFilter, + }, + topic::{Hasher, Topic, TopicHash}, + transform::{DataTransform, IdentityTransform}, + types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}, }; -pub use self::subscription_filter::{ - AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters, - MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter, - WhitelistSubscriptionFilter, -}; -pub use self::topic::{Hasher, Topic, TopicHash}; -pub use self::transform::{DataTransform, IdentityTransform}; -pub use self::types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}; #[deprecated(note = "Will be removed from the public API.")] pub type Rpc = self::types::Rpc; diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index 
aa65e3b7f1d..8ed71ea07f2 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::topic::TopicHash; -use crate::types::{MessageId, RawMessage}; -use libp2p_identity::PeerId; -use std::collections::hash_map::Entry; -use std::fmt::Debug; use std::{ - collections::{HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet}, fmt, + fmt::Debug, +}; + +use libp2p_identity::PeerId; + +use crate::{ + topic::TopicHash, + types::{MessageId, RawMessage}, }; /// CacheEntry stored in the history. @@ -210,7 +213,7 @@ impl MessageCache { &mut self, message_id: &MessageId, ) -> Option<(RawMessage, HashSet)> { - //We only remove the message from msgs and iwant_count and keep the message_id in the + // We only remove the message from msgs and iwant_count and keep the message_id in the // history vector. Zhe id in the history vector will simply be ignored on popping. 
self.iwant_counts.remove(message_id); diff --git a/protocols/gossipsub/src/metrics.rs b/protocols/gossipsub/src/metrics.rs index 40af1af2cac..2519da64b73 100644 --- a/protocols/gossipsub/src/metrics.rs +++ b/protocols/gossipsub/src/metrics.rs @@ -23,15 +23,21 @@ use std::collections::HashMap; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::{Family, MetricConstructor}; -use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{linear_buckets, Histogram}; -use prometheus_client::registry::Registry; - -use crate::topic::TopicHash; -use crate::types::{MessageAcceptance, PeerKind}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::{Family, MetricConstructor}, + gauge::Gauge, + histogram::{linear_buckets, Histogram}, + }, + registry::Registry, +}; + +use crate::{ + topic::TopicHash, + types::{MessageAcceptance, PeerKind}, +}; // Default value that limits for how many topics do we store metrics. const DEFAULT_MAX_TOPICS: usize = 300; @@ -100,7 +106,7 @@ type EverSubscribed = bool; /// A collection of metrics used throughout the Gossipsub behaviour. pub(crate) struct Metrics { - /* Configuration parameters */ + // Configuration parameters /// Maximum number of topics for which we store metrics. This helps keep the metrics bounded. max_topics: usize, /// Maximum number of topics for which we store metrics, where the topic in not one to which we @@ -108,11 +114,11 @@ pub(crate) struct Metrics { /// from received messages and not explicit application subscriptions. max_never_subscribed_topics: usize, - /* Auxiliary variables */ + // Auxiliary variables /// Information needed to decide if a topic is allowed or not. topic_info: HashMap, - /* Metrics per known topic */ + // Metrics per known topic /// Status of our subscription to this topic. 
This metric allows analyzing other topic metrics /// filtered by our current subscription status. topic_subscription_status: Family, @@ -134,7 +140,7 @@ pub(crate) struct Metrics { /// The number of messages that timed out and could not be sent. timedout_messages_dropped: Family, - /* Metrics regarding mesh state */ + // Metrics regarding mesh state /// Number of peers in our mesh. This metric should be updated with the count of peers for a /// topic in the mesh regardless of inclusion and churn events. mesh_peer_counts: Family, @@ -143,7 +149,7 @@ pub(crate) struct Metrics { /// Number of times we remove peers in a topic mesh for different reasons. mesh_peer_churn_events: Family, - /* Metrics regarding messages sent/received */ + // Metrics regarding messages sent/received /// Number of gossip messages sent to each topic. topic_msg_sent_counts: Family, /// Bytes from gossip messages sent to each topic. @@ -158,13 +164,13 @@ pub(crate) struct Metrics { /// Bytes received from gossip messages for each topic. topic_msg_recv_bytes: Family, - /* Metrics related to scoring */ + // Metrics related to scoring /// Histogram of the scores for each mesh topic. score_per_mesh: Family, /// A counter of the kind of penalties being applied to peers. scoring_penalties: Family, - /* General Metrics */ + // General Metrics /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based /// on which protocol they support. This metric keeps track of the number of peers that are /// connected of each type. @@ -172,7 +178,7 @@ pub(crate) struct Metrics { /// The time it takes to complete one iteration of the heartbeat. heartbeat_duration: Histogram, - /* Performance metrics */ + // Performance metrics /// When the user validates a message, it tries to re propagate it to its mesh peers. If the /// message expires from the memcache before it can be validated, we count this a cache miss /// and it is an indicator that the memcache size should be increased. 
@@ -414,7 +420,7 @@ impl Metrics { } } - /* Mesh related methods */ + // Mesh related methods /// Registers the subscription to a topic if the configured limits allow it. /// Sets the registered number of peers in the mesh to 0. diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index e8d1a6e5f97..33573ebeacc 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -18,25 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! //! Manages and stores the Scoring logic of a particular peer on the gossipsub behaviour. -use crate::metrics::{Metrics, Penalty}; -use crate::time_cache::TimeCache; -use crate::{MessageId, TopicHash}; +use std::{ + collections::{hash_map, HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; + use libp2p_identity::PeerId; -use std::collections::{hash_map, HashMap, HashSet}; -use std::net::IpAddr; -use std::time::Duration; use web_time::Instant; +use crate::{ + metrics::{Metrics, Penalty}, + time_cache::TimeCache, + MessageId, TopicHash, +}; + mod params; -use crate::ValidationError; pub use params::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, }; +use crate::ValidationError; + #[cfg(test)] mod tests; @@ -96,8 +102,9 @@ impl Default for PeerStats { } impl PeerStats { - /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied parameters score the - /// topic, inserts the default stats and returns a reference to those. If neither apply, returns None. + /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied + /// parameters score the topic, inserts the default stats and returns a reference to those. + /// If neither apply, returns None. 
pub(crate) fn stats_or_default_mut( &mut self, topic_hash: TopicHash, @@ -285,12 +292,14 @@ impl PeerScore { } // P3b: - // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts. + // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so + // this detracts. let p3b = topic_stats.mesh_failure_penalty; topic_score += p3b * topic_params.mesh_failure_penalty_weight; // P4: invalid messages - // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts. + // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so + // this detracts. let p4 = topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries; topic_score += p4 * topic_params.invalid_message_deliveries_weight; @@ -391,8 +400,8 @@ impl PeerScore { } // we don't decay retained scores, as the peer is not active. - // this way the peer cannot reset a negative score by simply disconnecting and reconnecting, - // unless the retention period has elapsed. + // this way the peer cannot reset a negative score by simply disconnecting and + // reconnecting, unless the retention period has elapsed. // similarly, a well behaved peer does not lose its score by getting disconnected. return true; } @@ -638,7 +647,8 @@ impl PeerScore { } } - /// Similar to `reject_message` except does not require the message id or reason for an invalid message. + /// Similar to `reject_message` except does not require the message id or reason for an invalid + /// message. 
pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { tracing::debug!( peer=%from, @@ -679,8 +689,8 @@ impl PeerScore { } if let RejectReason::ValidationIgnored = reason { - // we were explicitly instructed by the validator to ignore the message but not penalize - // the peer + // we were explicitly instructed by the validator to ignore the message but not + // penalize the peer record.status = DeliveryStatus::Ignored; record.peers.clear(); return; @@ -882,13 +892,14 @@ impl PeerScore { .get(topic_hash) .expect("Topic must exist if there are known topic_stats"); - // check against the mesh delivery window -- if the validated time is passed as 0, then - // the message was received before we finished validation and thus falls within the mesh + // check against the mesh delivery window -- if the validated time is passed as + // 0, then the message was received before we finished + // validation and thus falls within the mesh // delivery window. let mut falls_in_mesh_deliver_window = true; if let Some(validated_time) = validated_time { if let Some(now) = &now { - //should always be true + // should always be true let window_time = validated_time .checked_add(topic_params.mesh_message_deliveries_window) .unwrap_or(*now); diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs index ae70991f7fb..cc48df8f61b 100644 --- a/protocols/gossipsub/src/peer_score/params.rs +++ b/protocols/gossipsub/src/peer_score/params.rs @@ -18,10 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; + use crate::TopicHash; -use std::collections::{HashMap, HashSet}; -use std::net::IpAddr; -use std::time::Duration; /// The default number of seconds for a decay interval. 
const DEFAULT_DECAY_INTERVAL: u64 = 1; @@ -117,12 +120,13 @@ pub struct PeerScoreParams { /// P6: IP-colocation factor. /// The parameter has an associated counter which counts the number of peers with the same IP. - /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the value - /// is the square of the difference, ie `(peers_in_same_ip - ip_colocation_threshold)^2`. - /// If the number of peers in the same IP is less than the threshold, then the value is 0. - /// The weight of the parameter MUST be negative, unless you want to disable for testing. - /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0 - /// thus disabling the IP colocation penalty. + /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the + /// value is the square of the difference, ie `(peers_in_same_ip - + /// ip_colocation_threshold)^2`. If the number of peers in the same IP is less than the + /// threshold, then the value is 0. The weight of the parameter MUST be negative, unless + /// you want to disable for testing. Note: In order to simulate many IPs in a manageable + /// manner when testing, you can set the weight to 0 thus disabling the IP + /// colocation penalty. pub ip_colocation_factor_weight: f64, pub ip_colocation_factor_threshold: f64, pub ip_colocation_factor_whitelist: HashSet, @@ -239,16 +243,16 @@ pub struct TopicScoreParams { /// P1: time in the mesh /// This is the time the peer has been grafted in the mesh. - /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap` - /// The weight of the parameter must be positive (or zero to disable). + /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by + /// `time_in_mesh_cap` The weight of the parameter must be positive (or zero to disable). 
pub time_in_mesh_weight: f64, pub time_in_mesh_quantum: Duration, pub time_in_mesh_cap: f64, /// P2: first message deliveries /// This is the number of message deliveries in the topic. - /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped - /// by `first_message_deliveries_cap`. + /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, + /// and capped by `first_message_deliveries_cap`. /// The weight of the parameter MUST be positive (or zero to disable). pub first_message_deliveries_weight: f64, pub first_message_deliveries_decay: f64, @@ -264,8 +268,8 @@ pub struct TopicScoreParams { /// before we have forwarded it to them. /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`. /// If the counter exceeds the threshold, its value is 0. - /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of - /// the deficit, ie (`message_deliveries_threshold - counter)^2` + /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square + /// of the deficit, ie (`message_deliveries_threshold - counter)^2` /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh. /// The weight of the parameter MUST be negative (or zero to disable). pub mesh_message_deliveries_weight: f64, diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 064e277eed7..9e20cea2dde 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -20,9 +20,7 @@ /// A collection of unit tests mostly ported from the go implementation. 
use super::*; - -use crate::types::RawMessage; -use crate::{IdentTopic as Topic, Message}; +use crate::{types::RawMessage, IdentTopic as Topic, Message}; // estimates a value within variance fn within_variance(value: f64, expected: f64, variance: f64) -> bool { @@ -447,7 +445,8 @@ fn test_score_mesh_message_deliveries_decay() { } let score_a = peer_score.score(&peer_id_a); - // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared. + // the penalty is the difference between the threshold and the (decayed) + // mesh deliveries, squared. let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count; let penalty = deficit * deficit; let expected = diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 8d33fe51a90..e4272737342 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -18,15 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::config::ValidationMode; -use crate::handler::HandlerEvent; -use crate::rpc_proto::proto; -use crate::topic::TopicHash; -use crate::types::{ - ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc, - Subscription, SubscriptionAction, -}; -use crate::ValidationError; +use std::{convert::Infallible, pin::Pin}; + use asynchronous_codec::{Decoder, Encoder, Framed}; use byteorder::{BigEndian, ByteOrder}; use bytes::BytesMut; @@ -35,8 +28,18 @@ use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; use quick_protobuf::Writer; -use std::convert::Infallible; -use std::pin::Pin; + +use crate::{ + config::ValidationMode, + handler::HandlerEvent, + rpc_proto::proto, + topic::TopicHash, + types::{ + ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc, + Subscription, SubscriptionAction, + }, + ValidationError, +}; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; @@ -136,7 +139,7 @@ where } } -/* Gossip codec for the framing */ +// Gossip codec for the framing pub struct GossipsubCodec { /// Determines the level of validation performed on incoming messages. 
@@ -506,13 +509,14 @@ impl Decoder for GossipsubCodec { #[cfg(test)] mod tests { - use super::*; - use crate::config::Config; - use crate::{Behaviour, ConfigBuilder, MessageAuthenticity}; - use crate::{IdentTopic as Topic, Version}; use libp2p_identity::Keypair; use quickcheck::*; + use super::*; + use crate::{ + config::Config, Behaviour, ConfigBuilder, IdentTopic as Topic, MessageAuthenticity, Version, + }; + #[derive(Clone, Debug)] struct Message(RawMessage); diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs index c90e46a85da..b5f05c7b2e5 100644 --- a/protocols/gossipsub/src/rpc.rs +++ b/protocols/gossipsub/src/rpc.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{stream::Peekable, Stream, StreamExt}; use std::{ future::Future, pin::Pin, @@ -29,6 +28,8 @@ use std::{ task::{Context, Poll}, }; +use futures::{stream::Peekable, Stream, StreamExt}; + use crate::types::RpcOut; /// `RpcOut` sender that is priority aware. 
diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs index 94c7aafbc3e..2f6832a01a1 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -26,12 +26,12 @@ pub(crate) mod proto { #[cfg(test)] mod test { - use crate::rpc_proto::proto::compat; - use crate::IdentTopic as Topic; use libp2p_identity::PeerId; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; use rand::Rng; + use crate::{rpc_proto::proto::compat, IdentTopic as Topic}; + #[test] fn test_multi_topic_message_compatibility() { let topic1 = Topic::new("t1").hash(); diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 02bb9b4eab6..c051b6c333b 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::types::Subscription; -use crate::TopicHash; use std::collections::{BTreeSet, HashMap, HashSet}; +use crate::{types::Subscription, TopicHash}; + pub trait TopicSubscriptionFilter { /// Returns true iff the topic is of interest and we can subscribe to it. fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool; @@ -82,7 +82,7 @@ pub trait TopicSubscriptionFilter { } } -//some useful implementers +// some useful implementers /// Allows all subscriptions #[derive(Default, Clone)] @@ -199,7 +199,7 @@ where } } -///A subscription filter that filters topics based on a regular expression. +/// A subscription filter that filters topics based on a regular expression. 
pub struct RegexSubscriptionFilter(pub regex::Regex); impl TopicSubscriptionFilter for RegexSubscriptionFilter { diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs index a3e5c01ac4c..ace02606e88 100644 --- a/protocols/gossipsub/src/time_cache.rs +++ b/protocols/gossipsub/src/time_cache.rs @@ -20,13 +20,18 @@ //! This implements a time-based LRU cache for checking gossipsub message duplicates. -use fnv::FnvHashMap; -use std::collections::hash_map::{ - self, - Entry::{Occupied, Vacant}, +use std::{ + collections::{ + hash_map::{ + self, + Entry::{Occupied, Vacant}, + }, + VecDeque, + }, + time::Duration, }; -use std::collections::VecDeque; -use std::time::Duration; + +use fnv::FnvHashMap; use web_time::Instant; struct ExpiringElement { @@ -206,7 +211,7 @@ mod test { cache.insert("t"); assert!(!cache.insert("t")); cache.insert("e"); - //assert!(!cache.insert("t")); + // assert!(!cache.insert("t")); assert!(!cache.insert("e")); // sleep until cache expiry std::thread::sleep(Duration::from_millis(101)); diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index a73496b53f2..4793c23a8e1 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -18,12 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::rpc_proto::proto; +use std::fmt; + use base64::prelude::*; use prometheus_client::encoding::EncodeLabelSet; use quick_protobuf::Writer; use sha2::{Digest, Sha256}; -use std::fmt; + +use crate::rpc_proto::proto; /// A generic trait that can be extended for various hashing types for a topic. pub trait Hasher { diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index bb1916fefd0..bcb1f279ae5 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,20 +19,18 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. 
-use crate::rpc::Sender; -use crate::TopicHash; +use std::{collections::BTreeSet, fmt, fmt::Debug}; + use futures_timer::Delay; use libp2p_identity::PeerId; use libp2p_swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; use quick_protobuf::MessageWrite; -use std::fmt::Debug; -use std::{collections::BTreeSet, fmt}; - -use crate::rpc_proto::proto; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use crate::{rpc::Sender, rpc_proto::proto, TopicHash}; + /// Messages that have expired while attempting to be sent to a peer. #[derive(Clone, Debug, Default)] pub struct FailedMessages { @@ -42,7 +40,8 @@ pub struct FailedMessages { pub forward: usize, /// The number of messages that were failed to be sent to the priority queue as it was full. pub priority: usize, - /// The number of messages that were failed to be sent to the non-priority queue as it was full. + /// The number of messages that were failed to be sent to the non-priority queue as it was + /// full. pub non_priority: usize, /// The number of messages that timed out and could not be sent. pub timeout: usize, @@ -230,9 +229,9 @@ pub enum SubscriptionAction { #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub(crate) struct PeerInfo { pub(crate) peer_id: Option, - //TODO add this when RFC: Signed Address Records got added to the spec (see pull request + // TODO add this when RFC: Signed Address Records got added to the spec (see pull request // https://github.com/libp2p/specs/pull/217) - //pub signed_peer_record: ?, + // pub signed_peer_record: ?, } /// A Control message received by the gossipsub system. @@ -240,7 +239,8 @@ pub(crate) struct PeerInfo { pub enum ControlAction { /// Node broadcasts known messages per topic - IHave control message. IHave(IHave), - /// The node requests specific message ids (peer_id + sequence _number) - IWant control message. + /// The node requests specific message ids (peer_id + sequence _number) - IWant control + /// message. 
IWant(IWant), /// The node has been added to the mesh - Graft control message. Graft(Graft), diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index 3b6261afa54..85038665b4d 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -18,15 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::stream::{FuturesUnordered, SelectAll}; -use futures::StreamExt; +use std::{task::Poll, time::Duration}; + +use futures::{ + stream::{FuturesUnordered, SelectAll}, + StreamExt, +}; use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; -use std::{task::Poll, time::Duration}; use tokio::{runtime::Runtime, time}; use tracing_subscriber::EnvFilter; diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index b69f2014d81..0cd27d90717 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -18,28 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::handler::{self, Handler, InEvent}; -use crate::protocol::{Info, UpgradeError}; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr, ConnectedPoint, Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + num::NonZeroUsize, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p_core::{ + multiaddr, multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr, +}; +use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::{ - ConnectionDenied, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, - NotifyHandler, PeerAddresses, StreamUpgradeError, THandlerInEvent, ToSwarm, - _address_translation, + behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, + ConnectionDenied, ConnectionId, DialError, ExternalAddresses, ListenAddresses, + NetworkBehaviour, NotifyHandler, PeerAddresses, StreamUpgradeError, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, _address_translation, }; -use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent}; -use std::collections::hash_map::Entry; -use std::num::NonZeroUsize; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - task::Context, - task::Poll, - time::Duration, +use crate::{ + handler::{self, Handler, InEvent}, + protocol::{Info, UpgradeError}, }; /// Whether an [`Multiaddr`] is a valid for the QUIC transport. @@ -323,7 +322,8 @@ impl Behaviour { .contains(&connection_id) { // Apply address translation to the candidate address. - // For TCP without port-reuse, the observed address contains an ephemeral port which needs to be replaced by the port of a listen address. 
+ // For TCP without port-reuse, the observed address contains an ephemeral port which + // needs to be replaced by the port of a listen address. let translated_addresses = { let mut addrs: Vec<_> = self .listen_addresses @@ -398,7 +398,8 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { // Contrary to inbound events, outbound events are full-p2p qualified // so we remove /p2p/ in order to be homogeneous - // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not fully-p2p-qualified) + // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not + // fully-p2p-qualified) let mut addr = addr.clone(); if matches!(addr.iter().last(), Some(multiaddr::Protocol::P2p(_))) { addr.pop(); @@ -415,7 +416,9 @@ impl NetworkBehaviour for Behaviour { self.config.local_public_key.clone(), self.config.protocol_version.clone(), self.config.agent_version.clone(), - addr.clone(), // TODO: This is weird? That is the public address we dialed, shouldn't need to tell the other party? + // TODO: This is weird? That is the public address we dialed, + // shouldn't need to tell the other party? + addr.clone(), self.all_addresses(), )) } diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index dd073d50ed6..cda49f992b8 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -18,29 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{Info, PushInfo, UpgradeError}; -use crate::{protocol, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +use std::{ + collections::HashSet, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; use futures::prelude::*; use futures_bounded::Timeout; use futures_timer::Delay; -use libp2p_core::upgrade::{ReadyUpgrade, SelectUpgrade}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ProtocolSupport, +use libp2p_core::{ + upgrade::{ReadyUpgrade, SelectUpgrade}, + Multiaddr, }; +use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::{ + handler::{ + ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ProtocolSupport, + }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; use smallvec::SmallVec; -use std::collections::HashSet; -use std::{task::Context, task::Poll, time::Duration}; use tracing::Level; +use crate::{ + protocol, + protocol::{Info, PushInfo, UpgradeError}, + PROTOCOL_NAME, PUSH_PROTOCOL_NAME, +}; + const STREAM_TIMEOUT: Duration = Duration::from_secs(60); const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; diff --git a/protocols/identify/src/lib.rs b/protocols/identify/src/lib.rs index 7d28e5b5cc7..868ace87aeb 100644 --- a/protocols/identify/src/lib.rs +++ b/protocols/identify/src/lib.rs @@ -28,10 +28,10 @@ //! //! # Important Discrepancies //! -//! - **Using Identify with other protocols** Unlike some other libp2p implementations, -//! rust-libp2p does not treat Identify as a core protocol. This means that other protocols cannot -//! rely upon the existence of Identify, and need to be manually hooked up to Identify in order to -//! make use of its capabilities. +//! 
- **Using Identify with other protocols** Unlike some other libp2p implementations, rust-libp2p +//! does not treat Identify as a core protocol. This means that other protocols cannot rely upon +//! the existence of Identify, and need to be manually hooked up to Identify in order to make use +//! of its capabilities. //! //! # Usage //! @@ -41,8 +41,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use self::behaviour::{Behaviour, Config, Event}; -pub use self::protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +pub use self::{ + behaviour::{Behaviour, Config, Event}, + protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}, +}; mod behaviour; mod handler; diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index f4dfd544dd1..33aeedb7c4f 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -18,16 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; +use std::io; + use asynchronous_codec::{FramedRead, FramedWrite}; use futures::prelude::*; use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; -use std::io; use thiserror::Error; +use crate::proto; + const MAX_MESSAGE_SIZE_BYTES: usize = 4096; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/1.0.0"); @@ -77,7 +79,8 @@ impl Info { } /// Identify push information of a peer sent in protocol messages. -/// Note that missing fields should be ignored, as peers may choose to send partial updates containing only the fields whose values have changed. +/// Note that missing fields should be ignored, as peers may choose to send partial updates +/// containing only the fields whose values have changed. 
#[derive(Debug, Clone)] pub struct PushInfo { pub public_key: Option, @@ -264,9 +267,10 @@ pub enum UpgradeError { #[cfg(test)] mod tests { - use super::*; use libp2p_identity as identity; + use super::*; + #[test] fn skip_invalid_multiaddr() { let valid_multiaddr: Multiaddr = "/ip6/2001:db8::/tcp/1234".parse().unwrap(); diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index d624005408e..dd48b314173 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -1,10 +1,13 @@ +use std::{ + collections::HashSet, + iter, + time::{Duration, Instant}, +}; + use futures::StreamExt; use libp2p_identify as identify; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::collections::HashSet; -use std::iter; -use std::time::{Duration, Instant}; use tracing_subscriber::EnvFilter; #[async_std::test] @@ -34,8 +37,7 @@ async fn periodic_identify() { let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await; swarm2.connect(&mut swarm1).await; - use identify::Event::Received; - use identify::Event::Sent; + use identify::Event::{Received, Sent}; match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { ( @@ -67,7 +69,8 @@ async fn periodic_identify() { assert_eq!(s2_info.agent_version, "b"); assert!(!s2_info.protocols.is_empty()); - // Cannot assert observed address of dialer because memory transport uses ephemeral, outgoing ports. + // Cannot assert observed address of dialer because memory transport uses ephemeral, + // outgoing ports. // assert_eq!( // s2_info.observed_addr, // swarm2_memory_listen.with(Protocol::P2p(swarm2_peer_id.into())) diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs index 0b3dc71e649..c2168be661e 100644 --- a/protocols/kad/src/addresses.rs +++ b/protocols/kad/src/addresses.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::fmt; + use libp2p_core::Multiaddr; use smallvec::SmallVec; -use std::fmt; /// A non-empty list of (unique) addresses of a peer in the routing table. /// Every address must be a fully-qualified /p2p address. diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index f577971167f..988a16dc41f 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -22,41 +22,46 @@ mod test; -use crate::addresses::Addresses; -use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId}; -use crate::kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus}; -use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig}; -use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}; -use crate::record::{ - self, - store::{self, RecordStore}, - ProviderRecord, Record, +use std::{ + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + fmt, + num::NonZeroUsize, + task::{Context, Poll, Waker}, + time::Duration, + vec, }; -use crate::{bootstrap, K_VALUE}; -use crate::{jobs::*, protocol}; + use fnv::FnvHashSet; use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm, -}; use libp2p_swarm::{ + behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, dial_opts::{self, DialOpts}, ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::fmt; -use std::num::NonZeroUsize; -use std::task::{Context, Poll, Waker}; -use std::time::Duration; -use std::vec; use thiserror::Error; use tracing::Level; use web_time::Instant; pub use crate::query::QueryStats; +use crate::{ + addresses::Addresses, + 
bootstrap, + handler::{Handler, HandlerEvent, HandlerIn, RequestId}, + jobs::*, + kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus}, + protocol, + protocol::{ConnectionType, KadPeer, ProtocolConfig}, + query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}, + record::{ + self, + store::{self, RecordStore}, + ProviderRecord, Record, + }, + K_VALUE, +}; /// `Behaviour` is a `NetworkBehaviour` that implements the libp2p /// Kademlia protocol. @@ -157,8 +162,9 @@ pub enum StoreInserts { /// the record is forwarded immediately to the [`RecordStore`]. Unfiltered, /// Whenever a (provider) record is received, an event is emitted. - /// Provider records generate a [`InboundRequest::AddProvider`] under [`Event::InboundRequest`], - /// normal records generate a [`InboundRequest::PutRecord`] under [`Event::InboundRequest`]. + /// Provider records generate a [`InboundRequest::AddProvider`] under + /// [`Event::InboundRequest`], normal records generate a [`InboundRequest::PutRecord`] + /// under [`Event::InboundRequest`]. /// /// When deemed valid, a (provider) record needs to be explicitly stored in /// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`], @@ -205,9 +211,10 @@ pub enum Caching { /// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty. Disabled, /// Up to `max_peers` peers not returning a record that are closest to the key - /// being looked up are tracked and returned in [`GetRecordOk::FinishedWithNoAdditionalRecord`]. - /// The write-back operation must be performed explicitly, if - /// desired and after choosing a record from the results, via [`Behaviour::put_record_to`]. + /// being looked up are tracked and returned in + /// [`GetRecordOk::FinishedWithNoAdditionalRecord`]. The write-back operation must be + /// performed explicitly, if desired and after choosing a record from the results, via + /// [`Behaviour::put_record_to`]. 
Enabled { max_peers: u16 }, } @@ -442,16 +449,17 @@ impl Config { self } - /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted in the routing table. - /// This prevent cascading bootstrap requests when multiple peers are inserted into the routing table "at the same time". - /// This also allows to wait a little bit for other potential peers to be inserted into the routing table before - /// triggering a bootstrap, giving more context to the future bootstrap request. + /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted + /// in the routing table. This prevent cascading bootstrap requests when multiple peers are + /// inserted into the routing table "at the same time". This also allows to wait a little + /// bit for other potential peers to be inserted into the routing table before triggering a + /// bootstrap, giving more context to the future bootstrap request. /// /// * Default to `500` ms. - /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a new peer - /// is inserted in the routing table. - /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when a new - /// peer is inserted in the routing table). + /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a + /// new peer is inserted in the routing table. + /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when + /// a new peer is inserted in the routing table). #[cfg(test)] pub(crate) fn set_automatic_bootstrap_throttle( &mut self, @@ -573,15 +581,13 @@ where /// /// Explicitly adding addresses of peers serves two purposes: /// - /// 1. In order for a node to join the DHT, it must know about at least - /// one other node of the DHT. + /// 1. In order for a node to join the DHT, it must know about at least one other node of the + /// DHT. /// - /// 2. 
When a remote peer initiates a connection and that peer is not - /// yet in the routing table, the `Kademlia` behaviour must be - /// informed of an address on which that peer is listening for - /// connections before it can be added to the routing table - /// from where it can subsequently be discovered by all peers - /// in the DHT. + /// 2. When a remote peer initiates a connection and that peer is not yet in the routing + /// table, the `Kademlia` behaviour must be informed of an address on which that peer is + /// listening for connections before it can be added to the routing table from where it can + /// subsequently be discovered by all peers in the DHT. /// /// If the routing table has been updated as a result of this operation, /// a [`Event::RoutingUpdated`] event is emitted. @@ -983,7 +989,8 @@ where /// /// > **Note**: Bootstrap does not require to be called manually. It is periodically /// > invoked at regular intervals based on the configured `periodic_bootstrap_interval` (see - /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also automatically invoked + /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also automatically + /// > invoked /// > when a new peer is inserted in the routing table. /// > This parameter is used to call [`Behaviour::bootstrap`] periodically and automatically /// > to ensure a healthy routing table. @@ -1107,10 +1114,12 @@ where /// Set the [`Mode`] in which we should operate. /// - /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`]. + /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we + /// have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`]. /// - /// Setting a mode via this function disables this automatic behaviour and unconditionally operates in the specified mode. 
- /// To reactivate the automatic configuration, pass [`None`] instead. + /// Setting a mode via this function disables this automatic behaviour and unconditionally + /// operates in the specified mode. To reactivate the automatic configuration, pass [`None`] + /// instead. pub fn set_mode(&mut self, mode: Option) { match mode { Some(mode) => { @@ -1191,8 +1200,8 @@ where "Previous match arm handled empty list" ); - // Previously, server-mode, now also server-mode because > 1 external address. Don't log anything to avoid spam. - + // Previously, server-mode, now also server-mode because > 1 external address. + // Don't log anything to avoid spam. Mode::Server } }; @@ -2157,7 +2166,8 @@ where } } - /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + /// Preloads a new [`Handler`] with requests that are waiting + /// to be sent to the newly connected peer. fn preload_new_handler( &mut self, handler: &mut Handler, @@ -2755,7 +2765,6 @@ pub struct PeerRecord { #[allow(clippy::large_enum_variant)] pub enum Event { /// An inbound request has been received and handled. - // // Note on the difference between 'request' and 'query': A request is a // single request-response style exchange with a single remote peer. A query // is made of multiple requests across multiple remote peers. 
diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 7409168ac2a..82749ffb5fd 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -20,10 +20,6 @@ #![cfg(test)] -use super::*; - -use crate::record::{store::MemoryStore, Key}; -use crate::{K_VALUE, PROTOCOL_NAME, SHA_256_MH}; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; use libp2p_core::{ @@ -39,6 +35,12 @@ use libp2p_yamux as yamux; use quickcheck::*; use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng}; +use super::*; +use crate::{ + record::{store::MemoryStore, Key}, + K_VALUE, PROTOCOL_NAME, SHA_256_MH, +}; + type TestSwarm = Swarm>; fn build_node() -> (Multiaddr, TestSwarm) { @@ -164,7 +166,8 @@ fn bootstrap() { let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); let mut cfg = Config::new(PROTOCOL_NAME); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. cfg.set_periodic_bootstrap_interval(None); cfg.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -246,7 +249,8 @@ fn query_iter() { fn run(rng: &mut impl Rng) { let num_total = rng.gen_range(2..20); let mut config = Config::new(PROTOCOL_NAME); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. 
config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); let mut swarms = build_connected_nodes_with_config(num_total, 1, config) @@ -561,7 +565,8 @@ fn put_record() { let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -933,7 +938,8 @@ fn add_provider() { let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from + // triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -1161,7 +1167,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { config.disjoint_query_paths(true); // I.e. setting the amount disjoint paths to be explored to 2. config.set_parallelism(NonZeroUsize::new(2).unwrap()); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering + // automatically. 
config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); diff --git a/protocols/kad/src/bootstrap.rs b/protocols/kad/src/bootstrap.rs index 40acdfd88ee..d6576a3ef54 100644 --- a/protocols/kad/src/bootstrap.rs +++ b/protocols/kad/src/bootstrap.rs @@ -1,7 +1,9 @@ -use futures::FutureExt; -use std::task::{Context, Poll, Waker}; -use std::time::Duration; +use std::{ + task::{Context, Poll, Waker}, + time::Duration, +}; +use futures::FutureExt; use futures_timer::Delay; /// Default value chosen at ``. @@ -9,18 +11,18 @@ pub(crate) const DEFAULT_AUTOMATIC_THROTTLE: Duration = Duration::from_millis(50 #[derive(Debug)] pub(crate) struct Status { - /// If the user did not disable periodic bootstrap (by providing `None` for `periodic_interval`) - /// this is the periodic interval and the delay of the current period. When `Delay` finishes, - /// a bootstrap will be triggered and the `Delay` will be reset. + /// If the user did not disable periodic bootstrap (by providing `None` for + /// `periodic_interval`) this is the periodic interval and the delay of the current period. + /// When `Delay` finishes, a bootstrap will be triggered and the `Delay` will be reset. interval_and_delay: Option<(Duration, Delay)>, /// Configured duration to wait before triggering a bootstrap when a new peer /// is inserted in the routing table. `None` if automatic bootstrap is disabled. automatic_throttle: Option, /// Timer that will be set (if automatic bootstrap is not disabled) when a new peer is inserted - /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to `None` - /// again. If an other new peer is inserted in the routing table before this timer finishes, - /// the timer is reset. + /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to + /// `None` again. If an other new peer is inserted in the routing table before this timer + /// finishes, the timer is reset. 
throttle_timer: Option, /// Number of bootstrap requests currently in progress. We ensure neither periodic bootstrap @@ -108,16 +110,19 @@ impl Status { // A `throttle_timer` has been registered. It means one or more peers have been // inserted into the routing table and that a bootstrap request should be triggered. // However, to not risk cascading bootstrap requests, we wait a little time to ensure - // the user will not add more peers in the routing table in the next "throttle_timer" remaining. + // the user will not add more peers in the routing table in the next "throttle_timer" + // remaining. if throttle_delay.poll_unpin(cx).is_ready() { // The `throttle_timer` is finished, triggering bootstrap right now. // The call to `on_started` will reset `throttle_delay`. return Poll::Ready(()); } - // The `throttle_timer` is not finished but the periodic interval for triggering bootstrap might be reached. + // The `throttle_timer` is not finished but the periodic interval for triggering + // bootstrap might be reached. } else { - // No new peer has recently been inserted into the routing table or automatic bootstrap is disabled. + // No new peer has recently been inserted into the routing table or automatic bootstrap + // is disabled. } // Checking if the user has enabled the periodic bootstrap feature. @@ -131,7 +136,8 @@ impl Status { // The user disabled periodic bootstrap. } - // Registering the `waker` so that we can wake up when calling `on_new_peer_in_routing_table`. + // Registering the `waker` so that we can wake up when calling + // `on_new_peer_in_routing_table`. 
self.waker = Some(cx.waker().clone()); Poll::Pending } @@ -175,9 +181,10 @@ impl futures::Future for ThrottleTimer { #[cfg(test)] mod tests { - use super::*; use web_time::Instant; + use super::*; + const MS_5: Duration = Duration::from_millis(5); const MS_100: Duration = Duration::from_millis(100); @@ -296,7 +303,8 @@ mod tests { let elapsed = Instant::now().duration_since(start); - assert!(elapsed > (i * MS_100 - Duration::from_millis(10))); // Subtract 10ms to avoid flakes. + // Subtract 10ms to avoid flakes. + assert!(elapsed > (i * MS_100 - Duration::from_millis(10))); } } @@ -308,7 +316,8 @@ mod tests { status.trigger(); for _ in 0..10 { Delay::new(MS_100 / 2).await; - status.trigger(); // should reset throttle_timer + // should reset throttle_timer + status.trigger(); } assert!( status.next().now_or_never().is_none(), @@ -330,9 +339,12 @@ mod tests { ) { let mut status = Status::new(Some(MS_100), None); - status.on_started(); // first manually triggering - status.on_started(); // second manually triggering - status.on_finish(); // one finishes + // first manually triggering + status.on_started(); + // second manually triggering + status.on_started(); + // one finishes + status.on_finish(); assert!( async_std::future::timeout(10 * MS_100, status.next()) diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 384ebc3f2b1..6b4e944e2b0 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -18,27 +18,33 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::Mode; -use crate::protocol::{ - KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, +use std::{ + collections::VecDeque, + error, fmt, io, + marker::PhantomData, + pin::Pin, + task::{Context, Poll, Waker}, + time::Duration, }; -use crate::record::{self, Record}; -use crate::QueryId; + use either::Either; -use futures::channel::oneshot; -use futures::prelude::*; -use futures::stream::SelectAll; +use futures::{channel::oneshot, prelude::*, stream::SelectAll}; use libp2p_core::{upgrade, ConnectedPoint}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p_swarm::{ + handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}, ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; -use std::collections::VecDeque; -use std::task::Waker; -use std::time::Duration; -use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; + +use crate::{ + behaviour::Mode, + protocol::{ + KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, + }, + record::{self, Record}, + QueryId, +}; const MAX_NUM_STREAMS: usize = 32; @@ -550,7 +556,8 @@ impl Handler { }); } - /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`]. + /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol + /// handshake using a [`oneshot::channel`]. 
fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) { let (sender, receiver) = oneshot::channel(); @@ -1060,10 +1067,11 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven #[cfg(test)] mod tests { - use super::*; use quickcheck::{Arbitrary, Gen}; use tracing_subscriber::EnvFilter; + use super::*; + impl Arbitrary for ProtocolStatus { fn arbitrary(g: &mut Gen) -> Self { Self { diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index fa558878a38..56b3e080d96 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -25,12 +25,11 @@ //! To ensure persistence of records in the DHT, a Kademlia node //! must periodically (re-)publish and (re-)replicate its records: //! -//! 1. (Re-)publishing: The original publisher or provider of a record -//! must regularly re-publish in order to prolong the expiration. +//! 1. (Re-)publishing: The original publisher or provider of a record must regularly re-publish +//! in order to prolong the expiration. //! -//! 2. (Re-)replication: Every node storing a replica of a record must -//! regularly re-replicate it to the closest nodes to the key in -//! order to ensure the record is present at these nodes. +//! 2. (Re-)replication: Every node storing a replica of a record must regularly re-replicate it +//! to the closest nodes to the key in order to ensure the record is present at these nodes. //! //! Re-publishing primarily ensures persistence of the record beyond its //! initial TTL, for as long as the publisher stores (or provides) the record, @@ -41,11 +40,10 @@ //! //! This module implements two periodic jobs: //! -//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of -//! regular (value-)records. +//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of regular (value-)records. //! -//! * [`AddProviderJob`]: For (re-)publication of provider records. -//! Provider records currently have no separate replication mechanism. +//! 
* [`AddProviderJob`]: For (re-)publication of provider records. Provider records currently +//! have no separate replication mechanism. //! //! A periodic job is driven like a `Future` or `Stream` by `poll`ing it. //! Once a job starts running it emits records to send to the `k` closest @@ -61,17 +59,21 @@ //! > to the size of all stored records. As a job runs, the records are moved //! > out of the job to the consumer, where they can be dropped after being sent. -use crate::record::{self, store::RecordStore, ProviderRecord, Record}; +use std::{ + collections::HashSet, + pin::Pin, + task::{Context, Poll}, + time::Duration, + vec, +}; + use futures::prelude::*; use futures_timer::Delay; use libp2p_identity::PeerId; -use std::collections::HashSet; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::vec; use web_time::Instant; +use crate::record::{self, store::RecordStore, ProviderRecord, Record}; + /// The maximum number of queries towards which background jobs /// are allowed to start new queries on an invocation of /// `Behaviour::poll`. 
@@ -335,12 +337,13 @@ impl AddProviderJob { #[cfg(test)] mod tests { - use super::*; - use crate::record::store::MemoryStore; use futures::{executor::block_on, future::poll_fn}; use quickcheck::*; use rand::Rng; + use super::*; + use crate::record::store::MemoryStore; + fn rand_put_record_job() -> PutRecordJob { let mut rng = rand::thread_rng(); let id = PeerId::random(); diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 99d534fa669..1c6d8857c9c 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -72,13 +72,11 @@ mod entry; #[allow(clippy::assign_op_pattern)] mod key; -pub use bucket::NodeStatus; -pub use entry::*; +use std::{collections::VecDeque, num::NonZeroUsize, time::Duration}; use bucket::KBucket; -use std::collections::VecDeque; -use std::num::NonZeroUsize; -use std::time::Duration; +pub use bucket::NodeStatus; +pub use entry::*; use web_time::Instant; /// Maximum number of k-buckets. @@ -561,10 +559,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; use quickcheck::*; + use super::*; + type TestTable = KBucketsTable; impl Arbitrary for TestTable { diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index ec2b7756c43..244525238ec 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -311,19 +311,18 @@ where /// /// The status of the node to insert determines the result as follows: /// - /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected - /// or there is already a pending node, insertion fails with `InsertResult::Full`. - /// If the bucket is full but at least one node is disconnected and there is no pending - /// node, the new node is inserted as pending, yielding `InsertResult::Pending`. - /// Otherwise the bucket has free slots and the new node is added to the end of the - /// bucket as the most-recently connected node. 
+ /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected or + /// there is already a pending node, insertion fails with `InsertResult::Full`. If the + /// bucket is full but at least one node is disconnected and there is no pending node, the + /// new node is inserted as pending, yielding `InsertResult::Pending`. Otherwise the bucket + /// has free slots and the new node is added to the end of the bucket as the most-recently + /// connected node. /// /// * `NodeStatus::Disconnected`: If the bucket is full, insertion fails with - /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node - /// is inserted at the position preceding the first connected node, - /// i.e. as the most-recently disconnected node. If there are no connected nodes, - /// the new node is added as the last element of the bucket. - /// + /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node is inserted + /// at the position preceding the first connected node, i.e. as the most-recently + /// disconnected node. If there are no connected nodes, the new node is added as the last + /// element of the bucket. pub(crate) fn insert( &mut self, node: Node, @@ -443,10 +442,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; use quickcheck::*; + use super::*; + impl Arbitrary for KBucket, ()> { fn arbitrary(g: &mut Gen) -> KBucket, ()> { let timeout = Duration::from_secs(g.gen_range(1..g.size()) as u64); diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs index 808db08d858..bdf8b9b5a18 100644 --- a/protocols/kad/src/kbucket/entry.rs +++ b/protocols/kad/src/kbucket/entry.rs @@ -23,7 +23,6 @@ pub(crate) use super::bucket::{AppliedPending, InsertResult, Node, K_VALUE}; pub use super::key::*; - use super::*; /// An immutable by-reference view of a bucket entry. 
diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index f35849c6b26..367dfa807d3 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -18,15 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::record; +use std::{ + borrow::Borrow, + hash::{Hash, Hasher}, +}; + use libp2p_core::multihash::Multihash; use libp2p_identity::PeerId; -use sha2::digest::generic_array::{typenum::U32, GenericArray}; -use sha2::{Digest, Sha256}; -use std::borrow::Borrow; -use std::hash::{Hash, Hasher}; +use sha2::{ + digest::generic_array::{typenum::U32, GenericArray}, + Digest, Sha256, +}; use uint::*; +use crate::record; + construct_uint! { /// 256-bit unsigned integer. pub(super) struct U256(4); @@ -200,9 +206,10 @@ impl Distance { #[cfg(test)] mod tests { + use quickcheck::*; + use super::*; use crate::SHA_256_MH; - use quickcheck::*; impl Arbitrary for Key { fn arbitrary(_: &mut Gen) -> Key { diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index 060bfc518e4..8ab45665c9b 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -54,36 +54,34 @@ mod proto { }; } +use std::num::NonZeroUsize; + pub use addresses::Addresses; pub use behaviour::{ AddProviderContext, AddProviderError, AddProviderOk, AddProviderPhase, AddProviderResult, - BootstrapError, BootstrapOk, BootstrapResult, GetClosestPeersError, GetClosestPeersOk, - GetClosestPeersResult, GetProvidersError, GetProvidersOk, GetProvidersResult, GetRecordError, - GetRecordOk, GetRecordResult, InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord, - PutRecordContext, PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, - QueryMut, QueryRef, QueryResult, QueryStats, RoutingUpdate, -}; -pub use behaviour::{ - Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, StoreInserts, + Behaviour, BootstrapError, BootstrapOk, BootstrapResult, 
BucketInserts, Caching, Config, Event, + GetClosestPeersError, GetClosestPeersOk, GetClosestPeersResult, GetProvidersError, + GetProvidersOk, GetProvidersResult, GetRecordError, GetRecordOk, GetRecordResult, + InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord, ProgressStep, PutRecordContext, + PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, QueryMut, QueryRef, + QueryResult, QueryStats, Quorum, RoutingUpdate, StoreInserts, }; pub use kbucket::{ Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, }; +use libp2p_swarm::StreamProtocol; pub use protocol::{ConnectionType, KadPeer}; pub use query::QueryId; pub use record::{store, Key as RecordKey, ProviderRecord, Record}; -use libp2p_swarm::StreamProtocol; -use std::num::NonZeroUsize; - /// The `k` parameter of the Kademlia specification. /// /// This parameter determines: /// /// 1) The (fixed) maximum number of nodes in a bucket. -/// 2) The (default) replication factor, which in turn determines: -/// a) The number of closer peers returned in response to a request. -/// b) The number of closest peers to a key to search for in an iterative query. +/// 2) The (default) replication factor, which in turn determines: a) The number of closer peers +/// returned in response to a request. b) The number of closest peers to a key to search for in +/// an iterative query. /// /// The choice of (1) is fixed to this constant. The replication factor is configurable /// but should generally be no greater than `K_VALUE`. All nodes in a Kademlia diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 9d2ef56f5d8..9d0d69b670e 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -26,21 +26,25 @@ //! to poll the underlying transport for incoming messages, and the `Sink` component //! is used to send messages to remote peers. 
-use crate::proto; -use crate::record::{self, Record}; +use std::{io, iter, marker::PhantomData, time::Duration}; + use asynchronous_codec::{Decoder, Encoder, Framed}; use bytes::BytesMut; use futures::prelude::*; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_core::Multiaddr; +use libp2p_core::{ + upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + Multiaddr, +}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use std::marker::PhantomData; -use std::time::Duration; -use std::{io, iter}; use tracing::debug; use web_time::Instant; +use crate::{ + proto, + record::{self, Record}, +}; + /// The protocol name used for negotiating with multistream-select. pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); /// The default maximum size for a varint length-delimited packet. @@ -667,92 +671,92 @@ mod tests { assert_eq!(peer.multiaddrs, vec![valid_multiaddr]) } - /*// TODO: restore - use self::libp2p_tcp::TcpTransport; - use self::tokio::runtime::current_thread::Runtime; - use futures::{Future, Sink, Stream}; - use libp2p_core::{PeerId, PublicKey, Transport}; - use multihash::{encode, Hash}; - use protocol::{ConnectionType, KadPeer, ProtocolConfig}; - use std::sync::mpsc; - use std::thread; - - #[test] - fn correct_transfer() { - // We open a server and a client, send a message between the two, and check that they were - // successfully received. 
- - test_one(KadMsg::Ping); - test_one(KadMsg::FindNodeReq { - key: PeerId::random(), - }); - test_one(KadMsg::FindNodeRes { - closer_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }], - }); - test_one(KadMsg::GetProvidersReq { - key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), - }); - test_one(KadMsg::GetProvidersRes { - closer_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }], - provider_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], - connection_ty: ConnectionType::NotConnected, - }], - }); - test_one(KadMsg::AddProvider { - key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), - provider_peer: KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }, - }); - // TODO: all messages - - fn test_one(msg_server: KadMsg) { - let msg_client = msg_server.clone(); - let (tx, rx) = mpsc::channel(); - - let bg_thread = thread::spawn(move || { - let transport = TcpTransport::default().with_upgrade(ProtocolConfig); - - let (listener, addr) = transport - .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - tx.send(addr).unwrap(); - - let future = listener - .into_future() - .map_err(|(err, _)| err) - .and_then(|(client, _)| client.unwrap().0) - .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v)) - .map(|recv_msg| { - assert_eq!(recv_msg.unwrap(), msg_server); - () - }); - let mut rt = Runtime::new().unwrap(); - let _ = rt.block_on(future).unwrap(); - }); - - let transport = TcpTransport::default().with_upgrade(ProtocolConfig); - - let future = transport - .dial(rx.recv().unwrap()) - .unwrap() 
- .and_then(|proto| proto.send(msg_client)) - .map(|_| ()); - let mut rt = Runtime::new().unwrap(); - let _ = rt.block_on(future).unwrap(); - bg_thread.join().unwrap(); - } - }*/ + // // TODO: restore + // use self::libp2p_tcp::TcpTransport; + // use self::tokio::runtime::current_thread::Runtime; + // use futures::{Future, Sink, Stream}; + // use libp2p_core::{PeerId, PublicKey, Transport}; + // use multihash::{encode, Hash}; + // use protocol::{ConnectionType, KadPeer, ProtocolConfig}; + // use std::sync::mpsc; + // use std::thread; + // + // #[test] + // fn correct_transfer() { + // We open a server and a client, send a message between the two, and check that they were + // successfully received. + // + // test_one(KadMsg::Ping); + // test_one(KadMsg::FindNodeReq { + // key: PeerId::random(), + // }); + // test_one(KadMsg::FindNodeRes { + // closer_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }], + // }); + // test_one(KadMsg::GetProvidersReq { + // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), + // }); + // test_one(KadMsg::GetProvidersRes { + // closer_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }], + // provider_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], + // connection_ty: ConnectionType::NotConnected, + // }], + // }); + // test_one(KadMsg::AddProvider { + // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), + // provider_peer: KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }, + // }); + // TODO: all messages + // + // fn test_one(msg_server: KadMsg) { 
+ // let msg_client = msg_server.clone(); + // let (tx, rx) = mpsc::channel(); + // + // let bg_thread = thread::spawn(move || { + // let transport = TcpTransport::default().with_upgrade(ProtocolConfig); + // + // let (listener, addr) = transport + // .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) + // .unwrap(); + // tx.send(addr).unwrap(); + // + // let future = listener + // .into_future() + // .map_err(|(err, _)| err) + // .and_then(|(client, _)| client.unwrap().0) + // .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v)) + // .map(|recv_msg| { + // assert_eq!(recv_msg.unwrap(), msg_server); + // () + // }); + // let mut rt = Runtime::new().unwrap(); + // let _ = rt.block_on(future).unwrap(); + // }); + // + // let transport = TcpTransport::default().with_upgrade(ProtocolConfig); + // + // let future = transport + // .dial(rx.recv().unwrap()) + // .unwrap() + // .and_then(|proto| proto.send(msg_client)) + // .map(|_| ()); + // let mut rt = Runtime::new().unwrap(); + // let _ = rt.block_on(future).unwrap(); + // bg_thread.join().unwrap(); + // } + // } } diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index 1a895d9627c..69257f73b26 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -20,24 +20,27 @@ mod peers; -use libp2p_core::Multiaddr; -use peers::closest::{ - disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig, -}; -use peers::fixed::FixedPeersIter; -use peers::PeersIterState; -use smallvec::SmallVec; +use std::{num::NonZeroUsize, time::Duration}; -use crate::behaviour::PeerInfo; -use crate::handler::HandlerIn; -use crate::kbucket::{Key, KeyBytes}; -use crate::{QueryInfo, ALPHA_VALUE, K_VALUE}; use either::Either; use fnv::FnvHashMap; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use std::{num::NonZeroUsize, time::Duration}; +use peers::{ + closest::{disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig}, + 
fixed::FixedPeersIter, + PeersIterState, +}; +use smallvec::SmallVec; use web_time::Instant; +use crate::{ + behaviour::PeerInfo, + handler::HandlerIn, + kbucket::{Key, KeyBytes}, + QueryInfo, ALPHA_VALUE, K_VALUE, +}; + /// A `QueryPool` provides an aggregate state machine for driving `Query`s to completion. /// /// Internally, a `Query` is in turn driven by an underlying `QueryPeerIter` diff --git a/protocols/kad/src/query/peers.rs b/protocols/kad/src/query/peers.rs index 11b8f974de9..fe8ada51e44 100644 --- a/protocols/kad/src/query/peers.rs +++ b/protocols/kad/src/query/peers.rs @@ -23,13 +23,11 @@ //! Using a peer iterator in a query involves performing the following steps //! repeatedly and in an alternating fashion: //! -//! 1. Calling `next` to observe the next state of the iterator and determine -//! what to do, which is to either issue new requests to peers or continue -//! waiting for responses. +//! 1. Calling `next` to observe the next state of the iterator and determine what to do, which is +//! to either issue new requests to peers or continue waiting for responses. //! -//! 2. When responses are received or requests fail, providing input to the -//! iterator via the `on_success` and `on_failure` callbacks, -//! respectively, followed by repeating step (1). +//! 2. When responses are received or requests fail, providing input to the iterator via the +//! `on_success` and `on_failure` callbacks, respectively, followed by repeating step (1). //! //! When a call to `next` returns [`Finished`], no more peers can be obtained //! from the iterator and the results can be obtained from `into_result`. @@ -40,9 +38,10 @@ pub(crate) mod closest; pub(crate) mod fixed; -use libp2p_identity::PeerId; use std::borrow::Cow; +use libp2p_identity::PeerId; + /// The state of a peer iterator. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum PeersIterState<'a> { diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 2505ee2e9b2..2d1f91f050c 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -18,14 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{ + collections::btree_map::{BTreeMap, Entry}, + num::NonZeroUsize, + time::Duration, +}; -use crate::kbucket::{Distance, Key, KeyBytes}; -use crate::{ALPHA_VALUE, K_VALUE}; -use std::collections::btree_map::{BTreeMap, Entry}; -use std::{num::NonZeroUsize, time::Duration}; use web_time::Instant; +use super::*; +use crate::{ + kbucket::{Distance, Key, KeyBytes}, + ALPHA_VALUE, K_VALUE, +}; + pub(crate) mod disjoint; /// A peer iterator for a dynamically changing list of peers, sorted by increasing /// distance to a chosen target. @@ -494,12 +500,14 @@ enum PeerState { #[cfg(test)] mod tests { - use super::*; - use crate::SHA_256_MH; + use std::iter; + use libp2p_core::multihash::Multihash; use quickcheck::*; use rand::{rngs::StdRng, Rng, SeedableRng}; - use std::iter; + + use super::*; + use crate::SHA_256_MH; fn random_peers(n: usize, g: &mut R) -> Vec { (0..n) diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index cafe87b6ef4..70ded360c7e 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; use std::{ collections::HashMap, iter::{Cycle, Map, Peekable}, ops::{Index, IndexMut, Range}, }; +use super::*; + /// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery /// path per configured parallelism according to the S/Kademlia paper. 
pub(crate) struct ClosestDisjointPeersIter { @@ -373,7 +374,6 @@ enum ResponseState { /// Iterator combining the result of multiple [`ClosestPeersIter`] into a single /// deduplicated ordered iterator. -// // Note: This operates under the assumption that `I` is ordered. #[derive(Clone, Debug)] struct ResultIter @@ -433,13 +433,13 @@ impl>> Iterator for ResultIter { #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, iter}; - use crate::SHA_256_MH; use libp2p_core::multihash::Multihash; use quickcheck::*; - use std::collections::HashSet; - use std::iter; + + use super::*; + use crate::SHA_256_MH; impl Arbitrary for ResultIter>> { fn arbitrary(g: &mut Gen) -> Self { diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index 2d0b312454d..41cb3559f1b 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; use fnv::FnvHashMap; -use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; + +use super::*; /// A peer iterator for a fixed set of peers. pub(crate) struct FixedPeersIter { diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs index b8a644acdd6..fea17f826a4 100644 --- a/protocols/kad/src/record.rs +++ b/protocols/kad/src/record.rs @@ -22,13 +22,16 @@ pub mod store; +use std::{ + borrow::Borrow, + hash::{Hash, Hasher}, +}; + use bytes::Bytes; use libp2p_core::{multihash::Multihash, Multiaddr}; use libp2p_identity::PeerId; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; -use std::hash::{Hash, Hasher}; use web_time::Instant; /// The (opaque) key of a record. 
@@ -160,10 +163,12 @@ impl ProviderRecord { #[cfg(test)] mod tests { + use std::time::Duration; + + use quickcheck::*; + use super::*; use crate::SHA_256_MH; - use quickcheck::*; - use std::time::Duration; impl Arbitrary for Key { fn arbitrary(g: &mut Gen) -> Key { diff --git a/protocols/kad/src/record/store.rs b/protocols/kad/src/record/store.rs index 5c25bc8b2fa..ee40f568bb3 100644 --- a/protocols/kad/src/record/store.rs +++ b/protocols/kad/src/record/store.rs @@ -20,12 +20,13 @@ mod memory; +use std::borrow::Cow; + pub use memory::{MemoryStore, MemoryStoreConfig}; use thiserror::Error; use super::*; use crate::K_VALUE; -use std::borrow::Cow; /// The result of an operation on a `RecordStore`. pub type Result = std::result::Result; @@ -50,20 +51,16 @@ pub enum Error { /// /// There are two types of records managed by a `RecordStore`: /// -/// 1. Regular (value-)records. These records store an arbitrary value -/// associated with a key which is distributed to the closest nodes -/// to the key in the Kademlia DHT as per the standard Kademlia "push-model". -/// These records are subject to re-replication and re-publication as +/// 1. Regular (value-)records. These records store an arbitrary value associated with a key which +/// is distributed to the closest nodes to the key in the Kademlia DHT as per the standard +/// Kademlia "push-model". These records are subject to re-replication and re-publication as /// per the standard Kademlia protocol. /// -/// 2. Provider records. These records associate the ID of a peer with a key -/// who can supposedly provide the associated value. These records are -/// mere "pointers" to the data which may be followed by contacting these -/// providers to obtain the value. These records are specific to the -/// libp2p Kademlia specification and realise a "pull-model" for distributed -/// content. Just like a regular record, a provider record is distributed -/// to the closest nodes to the key. -/// +/// 2. Provider records. 
These records associate the ID of a peer with a key who can supposedly +/// provide the associated value. These records are mere "pointers" to the data which may be +/// followed by contacting these providers to obtain the value. These records are specific to +/// the libp2p Kademlia specification and realise a "pull-model" for distributed content. Just +/// like a regular record, a provider record is distributed to the closest nodes to the key. pub trait RecordStore { type RecordsIter<'a>: Iterator> where diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index 3fb6d2be3e8..28f6a55044f 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -18,12 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{ + collections::{hash_map, hash_set, HashMap, HashSet}, + iter, +}; -use crate::kbucket; use smallvec::SmallVec; -use std::collections::{hash_map, hash_set, HashMap, HashSet}; -use std::iter; + +use super::*; +use crate::kbucket; /// In-memory implementation of a `RecordStore`. 
pub struct MemoryStore { @@ -208,11 +211,12 @@ impl RecordStore for MemoryStore { #[cfg(test)] mod tests { - use super::*; - use crate::SHA_256_MH; use quickcheck::*; use rand::Rng; + use super::*; + use crate::SHA_256_MH; + fn random_multihash() -> Multihash<64> { Multihash::wrap(SHA_256_MH, &rand::thread_rng().gen::<[u8; 32]>()).unwrap() } diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 2c8d11beac7..3275c525890 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -1,7 +1,6 @@ use libp2p_identify as identify; use libp2p_identity as identity; -use libp2p_kad::store::MemoryStore; -use libp2p_kad::{Behaviour, Config, Event, Mode}; +use libp2p_kad::{store::MemoryStore, Behaviour, Config, Event, Mode}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use tracing_subscriber::EnvFilter; @@ -104,7 +103,9 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti // Server learns its external address (this could be through AutoNAT or some other mechanism). server.add_external_address(memory_addr); - // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table and triggers a mode change to Mode::Server. + // The server reconfigured its connection to the client to be in server mode, + // pushes that information to client which as a result updates its routing + // table and triggers a mode change to Mode::Server. match libp2p_swarm_test::drive(&mut client, &mut server).await { ( [Identify(identify::Event::Received { .. }), Kad(RoutingUpdated { peer: peer1, .. 
})], diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index cecd27bf78b..b6dde8f4487 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -22,25 +22,34 @@ mod iface; mod socket; mod timer; -use self::iface::InterfaceState; -use crate::behaviour::{socket::AsyncSocket, timer::Builder}; -use crate::Config; -use futures::channel::mpsc; -use futures::{Stream, StreamExt}; +use std::{ + cmp, + collections::hash_map::{Entry, HashMap}, + fmt, + future::Future, + io, + net::IpAddr, + pin::Pin, + sync::{Arc, RwLock}, + task::{Context, Poll}, + time::Instant, +}; + +use futures::{channel::mpsc, Stream, StreamExt}; use if_watch::IfEvent; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; -use std::collections::hash_map::{Entry, HashMap}; -use std::future::Future; -use std::sync::{Arc, RwLock}; -use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, task::Poll, time::Instant}; + +use self::iface::InterfaceState; +use crate::{ + behaviour::{socket::AsyncSocket, timer::Builder}, + Config, +}; /// An abstraction to allow for compatibility with various async runtimes. pub trait Provider: 'static { @@ -68,11 +77,13 @@ pub trait Abort { /// The type of a [`Behaviour`] using the `async-io` implementation. 
#[cfg(feature = "async-io")] pub mod async_io { - use super::Provider; - use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; + use std::future::Future; + use async_std::task::JoinHandle; use if_watch::smol::IfWatcher; - use std::future::Future; + + use super::Provider; + use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; #[doc(hidden)] pub enum AsyncIo {} @@ -104,12 +115,14 @@ pub mod async_io { /// The type of a [`Behaviour`] using the `tokio` implementation. #[cfg(feature = "tokio")] pub mod tokio { - use super::Provider; - use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; - use if_watch::tokio::IfWatcher; use std::future::Future; + + use if_watch::tokio::IfWatcher; use tokio::task::JoinHandle; + use super::Provider; + use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; + #[doc(hidden)] pub enum Tokio {} @@ -170,7 +183,8 @@ where /// The current set of listen addresses. /// /// This is shared across all interface tasks using an [`RwLock`]. - /// The [`Behaviour`] updates this upon new [`FromSwarm`] events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. + /// The [`Behaviour`] updates this upon new [`FromSwarm`] + /// events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. 
listen_addresses: Arc>, local_peer_id: PeerId, diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 9302065cde2..873bb8a307b 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -21,27 +21,32 @@ mod dns; mod query; -use self::dns::{build_query, build_query_response, build_service_discovery_response}; -use self::query::MdnsPacket; -use crate::behaviour::{socket::AsyncSocket, timer::Builder}; -use crate::Config; -use futures::channel::mpsc; -use futures::{SinkExt, StreamExt}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_swarm::ListenAddresses; -use socket2::{Domain, Socket, Type}; -use std::future::Future; -use std::sync::{Arc, RwLock}; use std::{ collections::VecDeque, + future::Future, io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, pin::Pin, + sync::{Arc, RwLock}, task::{Context, Poll}, time::{Duration, Instant}, }; +use futures::{channel::mpsc, SinkExt, StreamExt}; +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::ListenAddresses; +use socket2::{Domain, Socket, Type}; + +use self::{ + dns::{build_query, build_query_response, build_service_discovery_response}, + query::MdnsPacket, +}; +use crate::{ + behaviour::{socket::AsyncSocket, timer::Builder}, + Config, +}; + /// Initial interval for starting probe const INITIAL_TIMEOUT_INTERVAL: Duration = Duration::from_millis(500); diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 39dbf08c731..35cba44f4af 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -20,12 +20,13 @@ //! (M)DNS encoding and decoding on top of the `dns_parser` library. 
-use crate::{META_QUERY_SERVICE, SERVICE_NAME}; +use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; +use rand::{distributions::Alphanumeric, thread_rng, Rng}; + +use crate::{META_QUERY_SERVICE, SERVICE_NAME}; /// DNS TXT records can have up to 255 characters as a single string value. /// @@ -293,7 +294,6 @@ fn generate_peer_name() -> Vec { /// Panics if `name` has a zero-length component or a component that is too long. /// This is fine considering that this function is not public and is only called in a controlled /// environment. -/// fn append_qname(out: &mut Vec, name: &[u8]) { debug_assert!(name.is_ascii()); @@ -394,10 +394,11 @@ impl error::Error for MdnsResponseError {} #[cfg(test)] mod tests { - use super::*; use hickory_proto::op::Message; use libp2p_identity as identity; + use super::*; + #[test] fn build_query_correct() { let query = build_query(); diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 70b84816d0f..7762ac5d214 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -18,18 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::dns; -use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; +use std::{ + fmt, + net::SocketAddr, + str, + time::{Duration, Instant}, +}; + use hickory_proto::{ op::Message, rr::{Name, RData}, }; use libp2p_core::multiaddr::{Multiaddr, Protocol}; +use libp2p_identity::PeerId; use libp2p_swarm::_address_translation; -use libp2p_identity::PeerId; -use std::time::Instant; -use std::{fmt, net::SocketAddr, str, time::Duration}; +use super::dns; +use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; /// A valid mDNS packet received by the service. #[derive(Debug)] @@ -69,7 +74,8 @@ impl MdnsPacket { .iter() .any(|q| q.name().to_utf8() == META_QUERY_SERVICE_FQDN) { - // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? + // TODO: what if multiple questions, + // one with SERVICE_NAME and one with META_QUERY_SERVICE? return Ok(Some(MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery { from, query_id: packet.header().id(), @@ -307,8 +313,7 @@ impl fmt::Debug for MdnsPeer { #[cfg(test)] mod tests { - use super::super::dns::build_query_response; - use super::*; + use super::{super::dns::build_query_response, *}; #[test] fn test_create_mdns_peer() { diff --git a/protocols/mdns/src/behaviour/socket.rs b/protocols/mdns/src/behaviour/socket.rs index ebaad17e45f..cf11450fb4b 100644 --- a/protocols/mdns/src/behaviour/socket.rs +++ b/protocols/mdns/src/behaviour/socket.rs @@ -24,7 +24,8 @@ use std::{ task::{Context, Poll}, }; -/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async mode +/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async +/// mode #[allow(unreachable_pub)] // Users should not depend on this. 
pub trait AsyncSocket: Unpin + Send + 'static { /// Create the async socket from the [`std::net::UdpSocket`] @@ -32,7 +33,8 @@ pub trait AsyncSocket: Unpin + Send + 'static { where Self: Sized; - /// Attempts to receive a single packet on the socket from the remote address to which it is connected. + /// Attempts to receive a single packet on the socket + /// from the remote address to which it is connected. fn poll_read( &mut self, _cx: &mut Context, @@ -50,10 +52,11 @@ pub trait AsyncSocket: Unpin + Send + 'static { #[cfg(feature = "async-io")] pub(crate) mod asio { - use super::*; use async_io::Async; use futures::FutureExt; + use super::*; + /// AsyncIo UdpSocket pub(crate) type AsyncUdpSocket = Async; impl AsyncSocket for AsyncUdpSocket { @@ -92,9 +95,10 @@ pub(crate) mod asio { #[cfg(feature = "tokio")] pub(crate) mod tokio { - use super::*; use ::tokio::{io::ReadBuf, net::UdpSocket as TkUdpSocket}; + use super::*; + /// Tokio ASync Socket` pub(crate) type TokioUdpSocket = TkUdpSocket; impl AsyncSocket for TokioUdpSocket { diff --git a/protocols/mdns/src/behaviour/timer.rs b/protocols/mdns/src/behaviour/timer.rs index 5e284654676..5fdb1beffae 100644 --- a/protocols/mdns/src/behaviour/timer.rs +++ b/protocols/mdns/src/behaviour/timer.rs @@ -42,14 +42,16 @@ pub trait Builder: Send + Unpin + 'static { #[cfg(feature = "async-io")] pub(crate) mod asio { - use super::*; - use async_io::Timer as AsioTimer; - use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + use async_io::Timer as AsioTimer; + use futures::Stream; + + use super::*; + /// Async Timer pub(crate) type AsyncTimer = Timer; impl Builder for AsyncTimer { @@ -83,14 +85,16 @@ pub(crate) mod asio { #[cfg(feature = "tokio")] pub(crate) mod tokio { - use super::*; - use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior}; - use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + use ::tokio::time::{self, Instant as TokioInstant, Interval, 
MissedTickBehavior}; + use futures::Stream; + + use super::*; + /// Tokio wrapper pub(crate) type TokioTimer = Timer; impl Builder for TokioTimer { diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index 4823d740272..a0086a0e2d5 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -31,21 +31,20 @@ //! This crate provides a `Mdns` and `TokioMdns`, depending on the enabled features, which //! implements the `NetworkBehaviour` trait. This struct will automatically discover other //! libp2p nodes on the local network. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use std::net::{Ipv4Addr, Ipv6Addr}; -use std::time::Duration; +use std::{ + net::{Ipv4Addr, Ipv6Addr}, + time::Duration, +}; mod behaviour; -pub use crate::behaviour::{Behaviour, Event}; - #[cfg(feature = "async-io")] pub use crate::behaviour::async_io; - #[cfg(feature = "tokio")] pub use crate::behaviour::tokio; +pub use crate::behaviour::{Behaviour, Event}; /// The DNS service name for all libp2p peers used to query for addresses. 
const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 549f70978af..df08b39af07 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -18,12 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use std::time::Duration; + use futures::future::Either; -use libp2p_mdns::Event; -use libp2p_mdns::{async_io::Behaviour, Config}; +use libp2p_mdns::{async_io::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[async_std::test] diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index cf0d9f4bed4..0ec90a52b90 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -17,11 +17,12 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use std::time::Duration; + use futures::future::Either; use libp2p_mdns::{tokio::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index 9a4cfb8bcac..506455f081a 100644 --- a/protocols/perf/src/bin/perf.rs +++ b/protocols/perf/src/bin/perf.rs @@ -23,12 +23,13 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p::identity::PeerId; -use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; -use libp2p::SwarmBuilder; -use libp2p_perf::{client, server}; -use libp2p_perf::{Final, 
Intermediate, Run, RunParams, RunUpdate}; +use libp2p::{ + core::{multiaddr::Protocol, upgrade, Multiaddr}, + identity::PeerId, + swarm::{NetworkBehaviour, Swarm, SwarmEvent}, + SwarmBuilder, +}; +use libp2p_perf::{client, server, Final, Intermediate, Run, RunParams, RunUpdate}; use serde::{Deserialize, Serialize}; use tracing_subscriber::EnvFilter; use web_time::{Duration, Instant}; diff --git a/protocols/perf/src/client.rs b/protocols/perf/src/client.rs index 9f984a5bba1..7699bc85c17 100644 --- a/protocols/perf/src/client.rs +++ b/protocols/perf/src/client.rs @@ -21,11 +21,13 @@ mod behaviour; mod handler; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::{ + convert::Infallible, + sync::atomic::{AtomicUsize, Ordering}, +}; pub use behaviour::{Behaviour, Event}; use libp2p_swarm::StreamUpgradeError; -use std::convert::Infallible; static NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1); diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index 1b181557acc..86c85d61da9 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -32,10 +32,8 @@ use libp2p_swarm::{ NetworkBehaviour, NotifyHandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use crate::RunParams; -use crate::{client::handler::Handler, RunUpdate}; - use super::{RunError, RunId}; +use crate::{client::handler::Handler, RunParams, RunUpdate}; #[derive(Debug)] pub struct Event { diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 85e864949f8..fc427d8134c 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -36,8 +36,10 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use crate::client::{RunError, RunId}; -use crate::{RunParams, RunUpdate}; +use crate::{ + client::{RunError, RunId}, + RunParams, RunUpdate, +}; #[derive(Debug)] pub struct Command { diff --git 
a/protocols/perf/src/protocol.rs b/protocols/perf/src/protocol.rs index f995bbe2d3b..d07c90fa951 100644 --- a/protocols/perf/src/protocol.rs +++ b/protocols/perf/src/protocol.rs @@ -18,14 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures_timer::Delay; use std::time::Duration; -use web_time::Instant; use futures::{ future::{select, Either}, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt, }; +use futures_timer::Delay; +use web_time::Instant; use crate::{Final, Intermediate, Run, RunDuration, RunParams, RunUpdate}; diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index 5408029e85d..22466bfe56a 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -31,8 +31,7 @@ use libp2p_swarm::{ ConnectionId, FromSwarm, NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use crate::server::handler::Handler; -use crate::Run; +use crate::{server::handler::Handler, Run}; #[derive(Debug)] pub struct Event { diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index c1363ae2380..a78485cd9b5 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -18,7 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; use futures::FutureExt; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; @@ -29,7 +32,6 @@ use libp2p_swarm::{ }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use std::convert::Infallible; use tracing::error; use crate::Run; diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 961716e934a..c7d65c64500 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -18,27 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{protocol, PROTOCOL_NAME}; -use futures::future::{BoxFuture, Either}; -use futures::prelude::*; -use futures_timer::Delay; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; -use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, - SubstreamProtocol, -}; -use std::collections::VecDeque; -use std::convert::Infallible; use std::{ + collections::VecDeque, + convert::Infallible, error::Error, fmt, io, task::{Context, Poll}, time::Duration, }; +use futures::{ + future::{BoxFuture, Either}, + prelude::*, +}; +use futures_timer::Delay; +use libp2p_core::upgrade::ReadyUpgrade; +use libp2p_swarm::{ + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, +}; + +use crate::{protocol, PROTOCOL_NAME}; + /// The configuration for outbound pings. #[derive(Debug, Clone)] pub struct Config { @@ -57,8 +59,7 @@ impl Config { /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. 
- /// * Every ping sent must yield a response within 20 seconds in order to - /// be successful. + /// * Every ping sent must yield a response within 20 seconds in order to be successful. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 82f240cab6b..d48bcbc98ab 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -27,9 +27,11 @@ //! # Usage //! //! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait. -//! It will respond to inbound ping requests and periodically send outbound ping requests on every established connection. +//! It will respond to inbound ping requests and periodically send outbound ping requests on every +//! established connection. //! -//! It is up to the user to implement a health-check / connection management policy based on the ping protocol. +//! It is up to the user to implement a health-check / connection management policy based on the +//! ping protocol. //! //! For example: //! @@ -39,8 +41,10 @@ //! //! Users should inspect emitted [`Event`]s and call APIs on [`Swarm`]: //! -//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific connection -//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all connections to a peer +//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific +//! connection +//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all +//! connections to a peer //! //! [`Swarm`]: libp2p_swarm::Swarm //! 
[`Transport`]: libp2p_core::Transport @@ -50,22 +54,22 @@ mod handler; mod protocol; +use std::{ + collections::VecDeque, + task::{Context, Poll}, + time::Duration, +}; + use handler::Handler; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +pub use handler::{Config, Failure}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::time::Duration; -use std::{ - collections::VecDeque, - task::{Context, Poll}, -}; pub use self::protocol::PROTOCOL_NAME; -pub use handler::{Config, Failure}; /// A [`NetworkBehaviour`] that responds to inbound pings and /// periodically sends outbound pings on every established connection. diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 101c219aac4..5e84f55e090 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{io, time::Duration}; + use futures::prelude::*; use libp2p_swarm::StreamProtocol; use rand::{distributions, prelude::*}; -use std::{io, time::Duration}; use web_time::Instant; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0"); @@ -40,10 +41,10 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0" /// Successful pings report the round-trip time. /// /// > **Note**: The round-trip time of a ping may be subject to delays induced -/// > by the underlying transport, e.g. in the case of TCP there is -/// > Nagle's algorithm, delayed acks and similar configuration options -/// > which can affect latencies especially on otherwise low-volume -/// > connections. +/// > by the underlying transport, e.g. 
in the case of TCP there is +/// > Nagle's algorithm, delayed acks and similar configuration options +/// > which can affect latencies especially on otherwise low-volume +/// > connections. const PING_SIZE: usize = 32; /// Sends a ping and waits for the pong. @@ -81,7 +82,6 @@ where #[cfg(test)] mod tests { - use super::*; use futures::StreamExt; use libp2p_core::{ multiaddr::multiaddr, @@ -89,6 +89,8 @@ mod tests { Endpoint, }; + use super::*; + #[tokio::test] async fn ping_pong() { let mem_addr = multiaddr![Memory(thread_rng().gen::())]; diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 0752b1fced9..210f9435e4a 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -20,12 +20,12 @@ //! Integration tests for the `Ping` network behaviour. +use std::{num::NonZeroU8, time::Duration}; + use libp2p_ping as ping; -use libp2p_swarm::dummy; -use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm::{dummy, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use quickcheck::*; -use std::{num::NonZeroU8, time::Duration}; #[tokio::test] async fn ping_pong() { diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index e854ed2a1ff..968642b3f1f 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -22,27 +22,31 @@ pub(crate) mod handler; pub(crate) mod rate_limiter; -use crate::behaviour::handler::Handler; -use crate::multiaddr_ext::MultiaddrExt; -use crate::proto; -use crate::protocol::{inbound_hop, outbound_stop}; +use std::{ + collections::{hash_map, HashMap, HashSet, VecDeque}, + num::NonZeroU32, + ops::Add, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use 
libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm}; use libp2p_swarm::{ + behaviour::{ConnectionClosed, FromSwarm}, dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{hash_map, HashMap, HashSet, VecDeque}; -use std::num::NonZeroU32; -use std::ops::Add; -use std::task::{Context, Poll}; -use std::time::Duration; use web_time::Instant; +use crate::{ + behaviour::handler::Handler, + multiaddr_ext::MultiaddrExt, + proto, + protocol::{inbound_hop, outbound_stop}, +}; + /// Configuration for the relay [`Behaviour`]. /// /// # Panics @@ -120,12 +124,14 @@ impl std::fmt::Debug for Config { impl Default for Config { fn default() -> Self { let reservation_rate_limiters = vec![ - // For each peer ID one reservation every 2 minutes with up to 30 reservations per hour. + // For each peer ID one reservation every 2 minutes with up + // to 30 reservations per hour. rate_limiter::new_per_peer(rate_limiter::GenericRateLimiterConfig { limit: NonZeroU32::new(30).expect("30 > 0"), interval: Duration::from_secs(60 * 2), }), - // For each IP address one reservation every minute with up to 60 reservations per hour. + // For each IP address one reservation every minute with up + // to 60 reservations per hour. rate_limiter::new_per_ip(rate_limiter::GenericRateLimiterConfig { limit: NonZeroU32::new(60).expect("60 > 0"), interval: Duration::from_secs(60), @@ -386,7 +392,8 @@ impl NetworkBehaviour for Behaviour { ); let action = if - // Deny if it is a new reservation and exceeds `max_reservations_per_peer`. + // Deny if it is a new reservation and exceeds + // `max_reservations_per_peer`. 
(!renewed && self .reservations diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 92e45720f3f..0a4fe11c00a 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -18,32 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::CircuitId; -use crate::copy_future::CopyFuture; -use crate::protocol::{inbound_hop, outbound_stop}; -use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use std::{ + collections::{HashMap, VecDeque}, + fmt, io, + task::{Context, Poll}, + time::Duration, +}; + use bytes::Bytes; use either::Either; -use futures::future::{BoxFuture, FutureExt, TryFutureExt}; -use futures::io::AsyncWriteExt; -use futures::stream::{FuturesUnordered, StreamExt}; +use futures::{ + future::{BoxFuture, FutureExt, TryFutureExt}, + io::AsyncWriteExt, + stream::{FuturesUnordered, StreamExt}, +}; use futures_timer::Delay; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::{ConnectedPoint, Multiaddr}; +use libp2p_core::{upgrade::ReadyUpgrade, ConnectedPoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; use libp2p_swarm::{ + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::collections::{HashMap, VecDeque}; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; use web_time::Instant; +use crate::{ + behaviour::CircuitId, + copy_future::CopyFuture, + proto, + protocol::{inbound_hop, outbound_stop}, + HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME, +}; + const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); diff --git 
a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs index 45b701c1b50..4b97c3d5090 100644 --- a/protocols/relay/src/behaviour/rate_limiter.rs +++ b/protocols/relay/src/behaviour/rate_limiter.rs @@ -18,18 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, VecDeque}, + hash::Hash, + net::IpAddr, + num::NonZeroU32, + time::Duration, +}; + use libp2p_core::multiaddr::{Multiaddr, Protocol}; use libp2p_identity::PeerId; -use std::collections::{HashMap, VecDeque}; -use std::hash::Hash; -use std::net::IpAddr; -use std::num::NonZeroU32; -use std::time::Duration; use web_time::Instant; /// Allows rate limiting access to some resource based on the [`PeerId`] and /// [`Multiaddr`] of a remote peer. -// // See [`new_per_peer`] and [`new_per_ip`] for precast implementations. Use // [`GenericRateLimiter`] to build your own, e.g. based on the autonomous system // number of a peers IP address. @@ -170,9 +172,10 @@ impl GenericRateLimiter { #[cfg(test)] mod tests { - use super::*; use quickcheck::{QuickCheck, TestResult}; + use super::*; + #[test] fn first() { let id = 1; diff --git a/protocols/relay/src/copy_future.rs b/protocols/relay/src/copy_future.rs index c0039c29534..ae7ef22d648 100644 --- a/protocols/relay/src/copy_future.rs +++ b/protocols/relay/src/copy_future.rs @@ -24,16 +24,19 @@ //! //! Inspired by [`futures::io::Copy`]. 
-use futures::future::Future; -use futures::future::FutureExt; -use futures::io::{AsyncBufRead, BufReader}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::ready; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + future::{Future, FutureExt}, + io::{AsyncBufRead, AsyncRead, AsyncWrite, BufReader}, + ready, +}; use futures_timer::Delay; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; pub(crate) struct CopyFuture { src: BufReader, @@ -161,12 +164,13 @@ fn forward_data( #[cfg(test)] mod tests { - use super::*; - use futures::executor::block_on; - use futures::io::BufWriter; - use quickcheck::QuickCheck; use std::io::ErrorKind; + use futures::{executor::block_on, io::BufWriter}; + use quickcheck::QuickCheck; + + use super::*; + #[test] fn quickcheck() { struct Connection { @@ -356,13 +360,14 @@ mod tests { } } - // The source has two reads available, handing them out on `AsyncRead::poll_read` one by one. + // The source has two reads available, handing them out + // on `AsyncRead::poll_read` one by one. let mut source = BufReader::new(NeverEndingSource { read: vec![1, 2] }); // The destination is wrapped by a `BufWriter` with a capacity of `3`, i.e. one larger than // the available reads of the source. Without an explicit `AsyncWrite::poll_flush` the two - // reads would thus never make it to the destination, but instead be stuck in the buffer of - // the `BufWrite`. + // reads would thus never make it to the destination, + // but instead be stuck in the buffer of the `BufWrite`. 
let mut destination = BufWriter::with_capacity( 3, RecordingDestination { diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index eca3578d599..dba07015765 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -32,10 +32,10 @@ mod protocol; mod proto { #![allow(unreachable_pub)] include!("generated/mod.rs"); - pub(crate) use self::message_v2::pb::mod_HopMessage::Type as HopMessageType; pub use self::message_v2::pb::mod_StopMessage::Type as StopMessageType; pub(crate) use self::message_v2::pb::{ - HopMessage, Limit, Peer, Reservation, Status, StopMessage, + mod_HopMessage::Type as HopMessageType, HopMessage, Limit, Peer, Reservation, Status, + StopMessage, }; } diff --git a/protocols/relay/src/multiaddr_ext.rs b/protocols/relay/src/multiaddr_ext.rs index 6991a8b9ded..7c06eb7eab0 100644 --- a/protocols/relay/src/multiaddr_ext.rs +++ b/protocols/relay/src/multiaddr_ext.rs @@ -1,5 +1,4 @@ -use libp2p_core::multiaddr::Protocol; -use libp2p_core::Multiaddr; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; pub(crate) trait MultiaddrExt { fn is_relayed(&self) -> bool; diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index fc9d28e66ed..7ac9b716700 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -23,33 +23,39 @@ pub(crate) mod handler; pub(crate) mod transport; -use crate::multiaddr_ext::MultiaddrExt; -use crate::priv_client::handler::Handler; -use crate::protocol::{self, inbound_stop}; +use std::{ + collections::{hash_map, HashMap, VecDeque}, + convert::Infallible, + io::{Error, ErrorKind, IoSlice}, + pin::Pin, + task::{Context, Poll}, +}; + use bytes::Bytes; use either::Either; -use futures::channel::mpsc::Receiver; -use futures::future::{BoxFuture, FutureExt}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::ready; -use futures::stream::StreamExt; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use 
libp2p_core::{Endpoint, Multiaddr}; +use futures::{ + channel::mpsc::Receiver, + future::{BoxFuture, FutureExt}, + io::{AsyncRead, AsyncWrite}, + ready, + stream::StreamExt, +}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; -use libp2p_swarm::dial_opts::DialOpts; use libp2p_swarm::{ + behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour, NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::{hash_map, HashMap, VecDeque}; -use std::convert::Infallible; -use std::io::{Error, ErrorKind, IoSlice}; -use std::pin::Pin; -use std::task::{Context, Poll}; use transport::Transport; +use crate::{ + multiaddr_ext::MultiaddrExt, + priv_client::handler::Handler, + protocol::{self, inbound_stop}, +}; + /// The events produced by the client `Behaviour`. #[derive(Debug)] pub enum Event { @@ -89,7 +95,8 @@ pub struct Behaviour { /// Stores the address of a pending or confirmed reservation. /// - /// This is indexed by the [`ConnectionId`] to a relay server and the address is the `/p2p-circuit` address we reserved on it. + /// This is indexed by the [`ConnectionId`] to a relay server and the address is the + /// `/p2p-circuit` address we reserved on it. reservation_addresses: HashMap, /// Queue of actions to return when polled. diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 77b7f94ae60..8f60b689ec8 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -18,29 +18,35 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::client::Connection; -use crate::priv_client::transport; -use crate::priv_client::transport::ToListenerMsg; -use crate::protocol::{self, inbound_stop, outbound_hop}; -use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; -use futures::channel::mpsc::Sender; -use futures::channel::{mpsc, oneshot}; -use futures::future::FutureExt; +use std::{ + collections::VecDeque, + convert::Infallible, + fmt, io, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::{mpsc, mpsc::Sender, oneshot}, + future::FutureExt, +}; use futures_timer::Delay; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::Multiaddr; +use libp2p_core::{multiaddr::Protocol, upgrade::ReadyUpgrade, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound}; use libp2p_swarm::{ + handler::{ConnectionEvent, FullyNegotiatedInbound}, ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::collections::VecDeque; -use std::convert::Infallible; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; + +use crate::{ + client::Connection, + priv_client, + priv_client::{transport, transport::ToListenerMsg}, + proto, + protocol::{self, inbound_stop, outbound_hop}, + HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME, +}; /// The maximum number of circuits being denied concurrently. /// diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index ec1e8ca5fb8..ed9faa946db 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -19,25 +19,35 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::multiaddr_ext::MultiaddrExt; -use crate::priv_client::Connection; -use crate::protocol::outbound_hop; -use crate::protocol::outbound_hop::{ConnectError, ReserveError}; -use crate::RequestId; -use futures::channel::mpsc; -use futures::channel::oneshot; -use futures::future::{ready, BoxFuture, FutureExt, Ready}; -use futures::sink::SinkExt; -use futures::stream::SelectAll; -use futures::stream::{Stream, StreamExt}; -use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::transport::{DialOpts, ListenerId, TransportError, TransportEvent}; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll, Waker}, +}; + +use futures::{ + channel::{mpsc, oneshot}, + future::{ready, BoxFuture, FutureExt, Ready}, + sink::SinkExt, + stream::{SelectAll, Stream, StreamExt}, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; use libp2p_identity::PeerId; -use std::collections::VecDeque; -use std::pin::Pin; -use std::task::{Context, Poll, Waker}; use thiserror::Error; +use crate::{ + multiaddr_ext::MultiaddrExt, + priv_client::Connection, + protocol::{ + outbound_hop, + outbound_hop::{ConnectError, ReserveError}, + }, + RequestId, +}; + /// A [`Transport`] enabling client relay capabilities. /// /// Note: The transport only handles listening and dialing on relayed [`Multiaddr`], and depends on @@ -49,7 +59,8 @@ use thiserror::Error; /// 1. Establish relayed connections by dialing `/p2p-circuit` addresses. 
/// /// ``` -/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, transport::{DialOpts, PortUse}, connection::Endpoint}; +/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, +/// # transport::{DialOpts, PortUse}, connection::Endpoint}; /// # use libp2p_core::transport::memory::MemoryTransport; /// # use libp2p_core::transport::choice::OrTransport; /// # use libp2p_relay as relay; @@ -307,8 +318,9 @@ pub(crate) struct Listener { queued_events: VecDeque<::Item>, /// Channel for messages from the behaviour [`Handler`][super::handler::Handler]. from_behaviour: mpsc::Receiver, - /// The listener can be closed either manually with [`Transport::remove_listener`](libp2p_core::Transport) or if - /// the sender side of the `from_behaviour` channel is dropped. + /// The listener can be closed either manually with + /// [`Transport::remove_listener`](libp2p_core::Transport) or if the sender side of the + /// `from_behaviour` channel is dropped. is_closed: bool, waker: Option, } @@ -344,7 +356,8 @@ impl Stream for Listener { } if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. + // Terminate the stream if the listener closed and + // all remaining events have been reported. self.waker = None; return Poll::Ready(None); } diff --git a/protocols/relay/src/protocol.rs b/protocols/relay/src/protocol.rs index b94151259cd..b1adeedaaf5 100644 --- a/protocols/relay/src/protocol.rs +++ b/protocols/relay/src/protocol.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use libp2p_swarm::StreamProtocol; use std::time::Duration; +use libp2p_swarm::StreamProtocol; + +use crate::proto; + pub(crate) mod inbound_hop; pub(crate) mod inbound_stop; pub(crate) mod outbound_hop; diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 401c6258176..01280d70897 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -19,21 +19,18 @@ // DEALINGS IN THE SOFTWARE. use std::time::Duration; -use web_time::SystemTime; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use either::Either; use futures::prelude::*; -use thiserror::Error; - use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; +use web_time::SystemTime; -use crate::proto; -use crate::proto::message_v2::pb::mod_HopMessage::Type; -use crate::protocol::MAX_MESSAGE_SIZE; +use crate::{proto, proto::message_v2::pb::mod_HopMessage::Type, protocol::MAX_MESSAGE_SIZE}; #[derive(Debug, Error)] pub enum Error { diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index b698a5ff769..8994c2cff73 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -18,16 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use crate::protocol::{self, MAX_MESSAGE_SIZE}; +use std::io; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; +use crate::{ + proto, + protocol::{self, MAX_MESSAGE_SIZE}, +}; + pub(crate) async fn handle_open_circuit(io: Stream) -> Result { let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index b349f8848be..216c6d115bf 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,22 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::io; -use std::time::Duration; +use std::{io, time::Duration}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use futures_timer::Delay; -use thiserror::Error; -use web_time::SystemTime; - use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; +use web_time::SystemTime; -use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; -use crate::{proto, HOP_PROTOCOL_NAME}; +use crate::{ + proto, + protocol::{Limit, MAX_MESSAGE_SIZE}, + HOP_PROTOCOL_NAME, +}; #[derive(Debug, Error)] pub enum ConnectError { diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index 525ebc10821..272aa24eef6 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,19 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::io; -use std::time::Duration; +use std::{io, time::Duration}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; -use thiserror::Error; - use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; -use crate::protocol::MAX_MESSAGE_SIZE; -use crate::{proto, STOP_PROTOCOL_NAME}; +use crate::{proto, protocol::MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; #[derive(Debug, Error)] pub enum Error { diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 2b28d5a50cd..125f0dbb4ad 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -18,26 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::executor::LocalPool; -use futures::future::FutureExt; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::stream::StreamExt; -use futures::task::Spawn; -use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::choice::OrTransport; -use libp2p_core::transport::{Boxed, MemoryTransport, Transport}; -use libp2p_core::upgrade; +use std::{error::Error, time::Duration}; + +use futures::{ + executor::LocalPool, + future::FutureExt, + io::{AsyncRead, AsyncWrite}, + stream::StreamExt, + task::Spawn, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + muxing::StreamMuxerBox, + transport::{choice::OrTransport, Boxed, MemoryTransport, Transport}, + upgrade, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_ping as ping; use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::dial_opts::DialOpts; -use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm::{dial_opts::DialOpts, Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[test] 
diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index a794252ff0b..019b23c092b 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -18,24 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::Message::*; -use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; -use futures::future::BoxFuture; -use futures::future::FutureExt; -use futures::stream::FuturesUnordered; -use futures::stream::StreamExt; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr, PeerRecord}; +use std::{ + collections::HashMap, + iter, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + future::{BoxFuture, FutureExt}, + stream::{FuturesUnordered, StreamExt}, +}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr, PeerRecord}; use libp2p_identity::{Keypair, PeerId, SigningError}; use libp2p_request_response::{OutboundRequestId, ProtocolSupport}; use libp2p_swarm::{ ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use std::collections::HashMap; -use std::iter; -use std::task::{Context, Poll}; -use std::time::Duration; + +use crate::codec::{ + Cookie, ErrorCode, Message, Message::*, Namespace, NewRegistration, Registration, Ttl, +}; pub struct Behaviour { inner: libp2p_request_response::Behaviour, @@ -47,12 +51,14 @@ pub struct Behaviour { /// Hold addresses of all peers that we have discovered so far. /// - /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. + /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by + /// returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. 
discovered_peers: HashMap<(PeerId, Namespace), Vec>, registered_namespaces: HashMap<(PeerId, Namespace), Ttl>, - /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` otherwise we have a memory leak. + /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` + /// otherwise we have a memory leak. expiring_registrations: FuturesUnordered>, external_addresses: ExternalAddresses, @@ -81,8 +87,9 @@ impl Behaviour { /// Register our external addresses in the given namespace with the given rendezvous peer. /// - /// External addresses are either manually added via [`libp2p_swarm::Swarm::add_external_address`] or reported - /// by other [`NetworkBehaviour`]s via [`ToSwarm::ExternalAddrConfirmed`]. + /// External addresses are either manually added via + /// [`libp2p_swarm::Swarm::add_external_address`] or reported by other [`NetworkBehaviour`]s + /// via [`ToSwarm::ExternalAddrConfirmed`]. pub fn register( &mut self, namespace: Namespace, diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs index cad3688e00b..60f9f14f332 100644 --- a/protocols/rendezvous/src/codec.rs +++ b/protocols/rendezvous/src/codec.rs @@ -18,16 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::DEFAULT_TTL; +use std::{fmt, io}; + use async_trait::async_trait; -use asynchronous_codec::{BytesMut, Decoder, Encoder}; -use asynchronous_codec::{FramedRead, FramedWrite}; +use asynchronous_codec::{BytesMut, Decoder, Encoder, FramedRead, FramedWrite}; use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::{peer_record, signed_envelope, PeerRecord, SignedEnvelope}; use libp2p_swarm::StreamProtocol; use quick_protobuf_codec::Codec as ProtobufCodec; use rand::RngCore; -use std::{fmt, io}; + +use crate::DEFAULT_TTL; pub type Ttl = u64; pub(crate) type Limit = u64; @@ -54,7 +55,9 @@ pub struct Namespace(String); impl Namespace { /// Creates a new [`Namespace`] from a static string. /// - /// This will panic if the namespace is too long. We accepting panicking in this case because we are enforcing a `static lifetime which means this value can only be a constant in the program and hence we hope the developer checked that it is of an acceptable length. + /// This will panic if the namespace is too long. We accepting panicking in this case because we + /// are enforcing a `static lifetime which means this value can only be a constant in the + /// program and hence we hope the developer checked that it is of an acceptable length. pub fn from_static(value: &'static str) -> Self { if value.len() > crate::MAX_NAMESPACE { panic!("Namespace '{value}' is too long!") @@ -109,7 +112,8 @@ pub struct Cookie { impl Cookie { /// Construct a new [`Cookie`] for a given namespace. /// - /// This cookie will only be valid for subsequent DISCOVER requests targeting the same namespace. + /// This cookie will only be valid for subsequent DISCOVER requests targeting the same + /// namespace. 
pub fn for_namespace(namespace: Namespace) -> Self { Self { id: rand::thread_rng().next_u64(), diff --git a/protocols/rendezvous/src/lib.rs b/protocols/rendezvous/src/lib.rs index 7c607085f20..221178728af 100644 --- a/protocols/rendezvous/src/lib.rs +++ b/protocols/rendezvous/src/lib.rs @@ -22,9 +22,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl}; use libp2p_swarm::StreamProtocol; +pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl}; + mod codec; /// If unspecified, rendezvous nodes should assume a TTL of 2h. diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 45a525d9573..8aafcfb48e3 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -18,25 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; -use crate::{MAX_TTL, MIN_TTL}; +use std::{ + collections::{HashMap, HashSet}, + iter, + task::{ready, Context, Poll}, + time::Duration, +}; + use bimap::BiMap; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; -use futures::{FutureExt, StreamExt}; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::ProtocolSupport; -use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, +}; + +use crate::{ + codec::{Cookie, ErrorCode, 
Message, Namespace, NewRegistration, Registration, Ttl}, + MAX_TTL, MIN_TTL, }; -use std::collections::{HashMap, HashSet}; -use std::iter; -use std::task::{ready, Context, Poll}; -use std::time::Duration; pub struct Behaviour { inner: libp2p_request_response::Behaviour, @@ -534,10 +536,9 @@ pub struct CookieNamespaceMismatch; #[cfg(test)] mod tests { - use web_time::SystemTime; - use libp2p_core::PeerRecord; use libp2p_identity as identity; + use web_time::SystemTime; use super::*; @@ -792,7 +793,8 @@ mod tests { .unwrap_err(); } - /// Polls [`Registrations`] for at most `seconds` and panics if doesn't return an event within that time. + /// Polls [`Registrations`] for at most `seconds` and panics if doesn't + /// return an event within that time. async fn next_event_in_at_most(&mut self, seconds: u64) -> ExpiredRegistration { tokio::time::timeout(Duration::from_secs(seconds), self.next_event()) .await diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index d9200780ece..2305c2ef412 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -18,16 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::stream::FuturesUnordered; -use futures::StreamExt; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::Multiaddr; +use std::time::Duration; + +use futures::{stream::FuturesUnordered, StreamExt}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity as identity; use libp2p_rendezvous as rendezvous; use libp2p_rendezvous::client::RegisterError; use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] @@ -471,9 +470,11 @@ async fn new_combined_node() -> Swarm { } async fn new_impersonating_client() -> Swarm { - // In reality, if Eve were to try and fake someones identity, she would obviously only know the public key. - // Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we actually cannot construct a bad `PeerRecord` (i.e. one that is claims to be someone else). - // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection. + // In reality, if Eve were to try and fake someones identity, she would obviously only know the + // public key. Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we + // actually cannot construct a bad `PeerRecord` (i.e. one that is claims to be someone else). + // As such, the best we can do is hand eve a completely different keypair from what she is using + // to authenticate her connection. 
let someone_else = identity::Keypair::generate_ed25519(); let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else)); eve.listen().with_memory_addr_external().await; diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs index a27d069e758..744d94cb961 100644 --- a/protocols/request-response/src/cbor.rs +++ b/protocols/request-response/src/cbor.rs @@ -37,19 +37,23 @@ /// } /// /// let behaviour = cbor::Behaviour::::new( -/// [(StreamProtocol::new("/my-cbor-protocol"), ProtocolSupport::Full)], -/// request_response::Config::default() +/// [( +/// StreamProtocol::new("/my-cbor-protocol"), +/// ProtocolSupport::Full, +/// )], +/// request_response::Config::default(), /// ); /// ``` pub type Behaviour = crate::Behaviour>; mod codec { + use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + use async_trait::async_trait; use cbor4ii::core::error::DecodeError; use futures::prelude::*; use libp2p_swarm::StreamProtocol; use serde::{de::DeserializeOwned, Serialize}; - use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; /// Max request size in bytes const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; @@ -168,13 +172,13 @@ mod codec { #[cfg(test)] mod tests { - use crate::cbor::codec::Codec; - use crate::Codec as _; use futures::AsyncWriteExt; use futures_ringbuf::Endpoint; use libp2p_swarm::StreamProtocol; use serde::{Deserialize, Serialize}; + use crate::{cbor::codec::Codec, Codec as _}; + #[async_std::test] async fn test_codec() { let expected_request = TestRequest { diff --git a/protocols/request-response/src/codec.rs b/protocols/request-response/src/codec.rs index d26b729acae..d396a75ad7b 100644 --- a/protocols/request-response/src/codec.rs +++ b/protocols/request-response/src/codec.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::io; + use async_trait::async_trait; use futures::prelude::*; -use std::io; /// A `Codec` defines the request and response types /// for a request-response [`Behaviour`](crate::Behaviour) protocol or diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index dbd7a0708ce..133cff87f40 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -20,23 +20,6 @@ pub(crate) mod protocol; -pub use protocol::ProtocolSupport; - -use crate::codec::Codec; -use crate::handler::protocol::Protocol; -use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; - -use futures::channel::mpsc; -use futures::{channel::oneshot, prelude::*}; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, -}; -use libp2p_swarm::{ - handler::{ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError}, - SubstreamProtocol, -}; -use smallvec::SmallVec; use std::{ collections::VecDeque, fmt, io, @@ -48,6 +31,25 @@ use std::{ time::Duration, }; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, +}; +use libp2p_swarm::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, StreamUpgradeError, + }, + SubstreamProtocol, +}; +pub use protocol::ProtocolSupport; +use smallvec::SmallVec; + +use crate::{ + codec::Codec, handler::protocol::Protocol, InboundRequestId, OutboundRequestId, + EMPTY_QUEUE_SHRINK_THRESHOLD, +}; + /// A connection handler for a request response [`Behaviour`](super::Behaviour) protocol. 
pub struct Handler where diff --git a/protocols/request-response/src/json.rs b/protocols/request-response/src/json.rs index 85e78e7ddda..9bd5b8c6df9 100644 --- a/protocols/request-response/src/json.rs +++ b/protocols/request-response/src/json.rs @@ -18,7 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -/// A request-response behaviour using [`serde_json`] for serializing and deserializing the messages. +/// A request-response behaviour using [`serde_json`] for serializing and deserializing the +/// messages. /// /// # Example /// @@ -36,18 +37,22 @@ /// } /// /// let behaviour = json::Behaviour::::new( -/// [(StreamProtocol::new("/my-json-protocol"), ProtocolSupport::Full)], -/// request_response::Config::default() +/// [( +/// StreamProtocol::new("/my-json-protocol"), +/// ProtocolSupport::Full, +/// )], +/// request_response::Config::default(), /// ); /// ``` pub type Behaviour = crate::Behaviour>; mod codec { + use std::{io, marker::PhantomData}; + use async_trait::async_trait; use futures::prelude::*; use libp2p_swarm::StreamProtocol; use serde::{de::DeserializeOwned, Serialize}; - use std::{io, marker::PhantomData}; /// Max request size in bytes const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; @@ -140,12 +145,13 @@ mod codec { #[cfg(test)] mod tests { - use crate::Codec; use futures::AsyncWriteExt; use futures_ringbuf::Endpoint; use libp2p_swarm::StreamProtocol; use serde::{Deserialize, Serialize}; + use crate::Codec; + #[async_std::test] async fn test_codec() { let expected_request = TestRequest { diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index e627f5668ff..052e1e87e2b 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -73,12 +73,18 @@ mod handler; #[cfg(feature = "json")] pub mod json; -pub use codec::Codec; -pub use handler::ProtocolSupport; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + fmt, 
io, + sync::{atomic::AtomicU64, Arc}, + task::{Context, Poll}, + time::Duration, +}; -use crate::handler::OutboundMessage; +pub use codec::Codec; use futures::channel::oneshot; use handler::Handler; +pub use handler::ProtocolSupport; use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ @@ -88,13 +94,8 @@ use libp2p_swarm::{ PeerAddresses, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - fmt, io, - sync::{atomic::AtomicU64, Arc}, - task::{Context, Poll}, - time::Duration, -}; + +use crate::handler::OutboundMessage; /// An inbound request or response. #[derive(Debug)] @@ -353,8 +354,8 @@ where /// Pending events to return from `poll`. pending_events: VecDeque, OutboundMessage>>, - /// The currently connected peers, their pending outbound and inbound responses and their known, - /// reachable addresses, if any. + /// The currently connected peers, their pending outbound and inbound responses and their + /// known, reachable addresses, if any. connected: HashMap>, /// Externally managed addresses via `add_address` and `remove_address`. addresses: PeerAddresses, @@ -367,7 +368,8 @@ impl Behaviour where TCodec: Codec + Default + Clone + Send + 'static, { - /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to construct the codec. + /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to + /// construct the codec. pub fn new(protocols: I, cfg: Config) -> Self where I: IntoIterator, @@ -693,7 +695,8 @@ where } } - /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + /// Preloads a new [`Handler`] with requests that are + /// waiting to be sent to the newly connected peer. 
fn preload_new_handler( &mut self, handler: &mut Handler, diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index 19f323e169f..d1f26378a77 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -1,3 +1,5 @@ +use std::{io, iter, pin::pin, time::Duration}; + use anyhow::{bail, Result}; use async_std::task::sleep; use async_trait::async_trait; @@ -10,9 +12,6 @@ use libp2p_swarm_test::SwarmExt; use request_response::{ Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId, ResponseChannel, }; -use std::pin::pin; -use std::time::Duration; -use std::{io, iter}; use tracing_subscriber::EnvFilter; #[async_std::test] diff --git a/protocols/request-response/tests/peer_address.rs b/protocols/request-response/tests/peer_address.rs index 0ed7ffe5551..603e2d09dc0 100644 --- a/protocols/request-response/tests/peer_address.rs +++ b/protocols/request-response/tests/peer_address.rs @@ -1,10 +1,11 @@ +use std::iter; + use libp2p_core::ConnectedPoint; use libp2p_request_response as request_response; use libp2p_request_response::ProtocolSupport; use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use serde::{Deserialize, Serialize}; -use std::iter; use tracing_subscriber::EnvFilter; #[async_std::test] diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 827afae249c..e53fe99d6cf 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -20,6 +20,8 @@ //! Integration tests for the `Behaviour`. 
+use std::{io, iter}; + use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_request_response as request_response; @@ -28,7 +30,6 @@ use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use rand::Rng; use serde::{Deserialize, Serialize}; -use std::{io, iter}; use tracing_subscriber::EnvFilter; #[async_std::test] diff --git a/protocols/stream/src/control.rs b/protocols/stream/src/control.rs index 036d285b2a3..2149c6bca48 100644 --- a/protocols/stream/src/control.rs +++ b/protocols/stream/src/control.rs @@ -6,9 +6,6 @@ use std::{ task::{Context, Poll}, }; -use crate::AlreadyRegistered; -use crate::{handler::NewStream, shared::Shared}; - use futures::{ channel::{mpsc, oneshot}, SinkExt as _, StreamExt as _, @@ -16,6 +13,8 @@ use futures::{ use libp2p_identity::PeerId; use libp2p_swarm::{Stream, StreamProtocol}; +use crate::{handler::NewStream, shared::Shared, AlreadyRegistered}; + /// A (remote) control for opening new streams and registration of inbound protocols. /// /// A [`Control`] can be cloned and thus allows for concurrent access. @@ -31,13 +30,15 @@ impl Control { /// Attempt to open a new stream for the given protocol and peer. /// - /// In case we are currently not connected to the peer, we will attempt to make a new connection. + /// In case we are currently not connected to the peer, + /// we will attempt to make a new connection. /// /// ## Backpressure /// /// [`Control`]s support backpressure similarly to bounded channels: /// Each [`Control`] has a guaranteed slot for internal messages. - /// A single control will always open one stream at a time which is enforced by requiring `&mut self`. + /// A single control will always open one stream at a + /// time which is enforced by requiring `&mut self`. /// /// This backpressure mechanism breaks if you clone [`Control`]s excessively. 
pub async fn open_stream( diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs index b7ec516d3b1..d626f48fb09 100644 --- a/protocols/stream/src/handler.rs +++ b/protocols/stream/src/handler.rs @@ -162,7 +162,8 @@ impl ConnectionHandler for Handler { } } -/// Message from a [`Control`](crate::Control) to a [`ConnectionHandler`] to negotiate a new outbound stream. +/// Message from a [`Control`](crate::Control) to +/// a [`ConnectionHandler`] to negotiate a new outbound stream. #[derive(Debug)] pub(crate) struct NewStream { pub(crate) protocol: StreamProtocol, diff --git a/protocols/stream/src/shared.rs b/protocols/stream/src/shared.rs index 48aa6613d83..62d7b3cfe68 100644 --- a/protocols/stream/src/shared.rs +++ b/protocols/stream/src/shared.rs @@ -12,9 +12,11 @@ use rand::seq::IteratorRandom as _; use crate::{handler::NewStream, AlreadyRegistered, IncomingStreams}; pub(crate) struct Shared { - /// Tracks the supported inbound protocols created via [`Control::accept`](crate::Control::accept). + /// Tracks the supported inbound protocols created via + /// [`Control::accept`](crate::Control::accept). /// - /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the [`mpsc::Receiver`] in [`IncomingStreams`]. + /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the + /// [`mpsc::Receiver`] in [`IncomingStreams`]. supported_inbound_protocols: HashMap>, connections: HashMap, @@ -25,7 +27,8 @@ pub(crate) struct Shared { /// Sender for peers we want to dial. /// - /// We manage this through a channel to avoid locks as part of [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). + /// We manage this through a channel to avoid locks as part of + /// [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). 
dial_sender: mpsc::Sender, } diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index ee985042b68..cea8efb1e3f 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -32,7 +32,6 @@ use std::{ time::Duration, }; -use crate::tokio::{is_addr_global, Gateway}; use futures::{channel::oneshot, Future, StreamExt}; use futures_timer::Delay; use igd_next::PortMappingProtocol; @@ -46,6 +45,8 @@ use libp2p_swarm::{ NetworkBehaviour, NewListenAddr, ToSwarm, }; +use crate::tokio::{is_addr_global, Gateway}; + /// The duration in seconds of a port mapping on the gateway. const MAPPING_DURATION: u32 = 3600; @@ -286,8 +287,9 @@ impl NetworkBehaviour for Behaviour { match &mut self.state { GatewayState::Searching(_) => { - // As the gateway is not yet available we add the mapping with `MappingState::Inactive` - // so that when and if it becomes available we map it. + // As the gateway is not yet available we add the mapping with + // `MappingState::Inactive` so that when and if it + // becomes available we map it. self.mappings.insert( Mapping { listener_id, diff --git a/protocols/upnp/src/lib.rs b/protocols/upnp/src/lib.rs index 8a74d7e8f63..d7a746f78df 100644 --- a/protocols/upnp/src/lib.rs +++ b/protocols/upnp/src/lib.rs @@ -24,7 +24,6 @@ //! implements the [`libp2p_swarm::NetworkBehaviour`] trait. //! This struct will automatically try to map the ports externally to internal //! addresses on the gateway. -//! 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs index b2cad6fa5a7..67ef52f9608 100644 --- a/protocols/upnp/src/tokio.rs +++ b/protocols/upnp/src/tokio.rs @@ -20,7 +20,6 @@ use std::{error::Error, net::IpAddr}; -use crate::behaviour::{GatewayEvent, GatewayRequest}; use futures::{ channel::{mpsc, oneshot}, SinkExt, StreamExt, @@ -28,8 +27,9 @@ use futures::{ use igd_next::SearchOptions; pub use crate::behaviour::Behaviour; +use crate::behaviour::{GatewayEvent, GatewayRequest}; -//TODO: remove when `IpAddr::is_global` stabilizes. +// TODO: remove when `IpAddr::is_global` stabilizes. pub(crate) fn is_addr_global(addr: IpAddr) -> bool { match addr { IpAddr::V4(ip) => { diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 00000000000..1e61bc16abf --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,10 @@ +# Imports +reorder_imports = true +imports_granularity = "Crate" +group_imports = "StdExternalCrate" + +# Docs +wrap_comments = true +comment_width = 100 +normalize_comments = true +format_code_in_doc_comments = true diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index 258c0b976c8..41b909f329f 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -23,12 +23,12 @@ mod syn_ext; -use crate::syn_ext::RequireStrLit; use heck::ToUpperCamelCase; use proc_macro::TokenStream; use quote::quote; -use syn::punctuated::Punctuated; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Meta, Token}; +use syn::{parse_macro_input, punctuated::Punctuated, Data, DataStruct, DeriveInput, Meta, Token}; + +use crate::syn_ext::RequireStrLit; /// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See /// the trait documentation for better description. 
diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index bcab6e5b700..0edf02473e6 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -18,27 +18,32 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt::Debug, future::IntoFuture, time::Duration}; + use async_trait::async_trait; -use futures::future::{BoxFuture, Either}; -use futures::{FutureExt, StreamExt}; +use futures::{ + future::{BoxFuture, Either}, + FutureExt, StreamExt, +}; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; -use std::fmt::Debug; -use std::future::IntoFuture; -use std::time::Duration; +use libp2p_swarm::{ + dial_opts::{DialOpts, PeerCondition}, + NetworkBehaviour, Swarm, SwarmEvent, +}; -/// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests. +/// An extension trait for [`Swarm`] that makes it +/// easier to set up a network of [`Swarm`]s for tests. #[async_trait] pub trait SwarmExt { type NB: NetworkBehaviour; /// Create a new [`Swarm`] with an ephemeral identity and the `async-std` runtime. /// - /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and - /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test - /// and may change at any time. + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a + /// [`libp2p_plaintext::Config`] authentication layer and [`libp2p_yamux::Config`] as the + /// multiplexer. However, these details should not be relied + /// upon by the test and may change at any time. 
#[cfg(feature = "async-std")] fn new_ephemeral(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where @@ -46,19 +51,22 @@ pub trait SwarmExt { /// Create a new [`Swarm`] with an ephemeral identity and the `tokio` runtime. /// - /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and - /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test - /// and may change at any time. + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a + /// [`libp2p_plaintext::Config`] authentication layer and [`libp2p_yamux::Config`] as the + /// multiplexer. However, these details should not be relied + /// upon by the test and may change at any time. #[cfg(feature = "tokio")] fn new_ephemeral_tokio(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where Self: Sized; - /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established. + /// Establishes a connection to the given [`Swarm`], polling both of them until the connection + /// is established. /// /// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`]. /// By default, this iterator will not yield any addresses. - /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. + /// To add listen addresses as external addresses, use + /// [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, @@ -66,10 +74,12 @@ pub trait SwarmExt { /// Dial the provided address and wait until a connection has been established. /// - /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always possible. 
- /// This function only abstracts away the "dial and wait for `ConnectionEstablished` event" part. + /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always + /// possible. This function only abstracts away the "dial and wait for + /// `ConnectionEstablished` event" part. /// - /// Because we don't have access to the other [`Swarm`], we can't guarantee that it makes progress. + /// Because we don't have access to the other [`Swarm`], + /// we can't guarantee that it makes progress. async fn dial_and_wait(&mut self, addr: Multiaddr) -> PeerId; /// Wait for specified condition to return `Some`. @@ -78,7 +88,8 @@ pub trait SwarmExt { P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send; - /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. + /// Listens for incoming connections, polling the [`Swarm`] until the + /// transport is ready to accept connections. /// /// The first address is for the memory transport, the second one for the TCP transport. fn listen(&mut self) -> ListenFuture<&mut Self>; @@ -102,17 +113,19 @@ pub trait SwarmExt { /// /// ## Number of events /// -/// The number of events is configured via const generics based on the array size of the return type. -/// This allows the compiler to infer how many events you are expecting based on how you use this function. -/// For example, if you expect the first [`Swarm`] to emit 2 events, you should assign the first variable of the returned tuple value to an array of size 2. -/// This works especially well if you directly pattern-match on the return value. +/// The number of events is configured via const generics based on the array size of the return +/// type. This allows the compiler to infer how many events you are expecting based on how you use +/// this function. 
For example, if you expect the first [`Swarm`] to emit 2 events, you should +/// assign the first variable of the returned tuple value to an array of size 2. This works +/// especially well if you directly pattern-match on the return value. /// /// ## Type of event /// /// This function utilizes the [`TryIntoOutput`] trait. /// Similar as to the number of expected events, the type of event is inferred based on your usage. /// If you match against a [`SwarmEvent`], the first [`SwarmEvent`] will be returned. -/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. +/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not +/// [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. /// /// You can implement the [`TryIntoOutput`] for any other type to further customize this behaviour. /// @@ -120,13 +133,16 @@ pub trait SwarmExt { /// /// This function is similar to joining two futures with two crucial differences: /// 1. As described above, it allows you to obtain more than a single event. -/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they already has emitted all expected events**. +/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they already has emitted +/// all expected events**. /// /// Especially (2) is crucial for our usage of this function. /// If a [`Swarm`] is not polled, nothing within it makes progress. -/// This can "starve" the other swarm which for example may wait for another message to be sent on a connection. +/// This can "starve" the other swarm which for example may wait for another message to be sent on a +/// connection. /// -/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be polled, even after it emitted its events. 
+/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be +/// polled, even after it emitted its events. pub async fn drive< TBehaviour1, const NUM_EVENTS_SWARM_1: usize, @@ -231,7 +247,12 @@ where behaviour_fn(identity), peer_id, libp2p_swarm::Config::with_async_std_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + // Some tests need + // connections to be kept + // alive beyond what the + // individual behaviour + // configures., + .with_idle_connection_timeout(Duration::from_secs(5)), ) } @@ -259,7 +280,11 @@ where behaviour_fn(identity), peer_id, libp2p_swarm::Config::with_tokio_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + .with_idle_connection_timeout(Duration::from_secs(5)), /* Some tests need + * connections to be kept + * alive beyond what the + * individual behaviour + * configures., */ ) } @@ -385,20 +410,24 @@ pub struct ListenFuture { } impl ListenFuture { - /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// Adds the memory address we are starting to listen on as an external address using + /// [`Swarm::add_external_address`]. /// - /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable. - /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + /// This is typically "safe" for tests because within a process, memory addresses are "globally" + /// reachable. However, some tests depend on which addresses are external and need this to + /// be configurable so it is not a good default. 
pub fn with_memory_addr_external(mut self) -> Self { self.add_memory_external = true; self } - /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// Adds the TCP address we are starting to listen on as an external address using + /// [`Swarm::add_external_address`]. /// - /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s. - /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for + /// other [`Swarm`]s. However, some tests depend on which addresses are external and need + /// this to be configurable so it is not a good default. pub fn with_tcp_addr_external(mut self) -> Self { self.add_tcp_external = true; diff --git a/swarm/benches/connection_handler.rs b/swarm/benches/connection_handler.rs index 09340421f83..a5e47528308 100644 --- a/swarm/benches/connection_handler.rs +++ b/swarm/benches/connection_handler.rs @@ -1,3 +1,5 @@ +use std::{convert::Infallible, sync::atomic::AtomicUsize}; + use async_std::stream::StreamExt; use criterion::{criterion_group, criterion_main, Criterion}; use libp2p_core::{ @@ -5,7 +7,6 @@ use libp2p_core::{ }; use libp2p_identity::PeerId; use libp2p_swarm::{ConnectionHandler, NetworkBehaviour, StreamProtocol}; -use std::{convert::Infallible, sync::atomic::AtomicUsize}; use web_time::Duration; macro_rules! gen_behaviour { @@ -82,7 +83,7 @@ benchmarks! 
{ SpinningBehaviour20::bench().name(m).poll_count(500).protocols_per_behaviour(100), ]; } -//fn main() {} +// fn main() {} trait BigBehaviour: Sized { fn behaviours(&mut self) -> &mut [SpinningBehaviour]; diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 35aed12fba5..8c8c5998f67 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -24,23 +24,22 @@ mod listen_addresses; mod peer_addresses; pub mod toggle; -pub use external_addresses::ExternalAddresses; -pub use listen_addresses::ListenAddresses; -pub use peer_addresses::PeerAddresses; +use std::task::{Context, Poll}; -use crate::connection::ConnectionId; -use crate::dial_opts::DialOpts; -use crate::listen_opts::ListenOpts; -use crate::{ - ConnectionDenied, ConnectionError, ConnectionHandler, DialError, ListenError, THandler, - THandlerInEvent, THandlerOutEvent, -}; +pub use external_addresses::ExternalAddresses; use libp2p_core::{ transport::{ListenerId, PortUse}, ConnectedPoint, Endpoint, Multiaddr, }; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; +pub use listen_addresses::ListenAddresses; +pub use peer_addresses::PeerAddresses; + +use crate::{ + connection::ConnectionId, dial_opts::DialOpts, listen_opts::ListenOpts, ConnectionDenied, + ConnectionError, ConnectionHandler, DialError, ListenError, THandler, THandlerInEvent, + THandlerOutEvent, +}; /// A [`NetworkBehaviour`] defines the behaviour of the local node on the network. 
/// @@ -101,25 +100,25 @@ use std::{task::Context, task::Poll}; /// #[behaviour(to_swarm = "Event")] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// ping: ping::Behaviour, +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, /// } /// /// enum Event { -/// Identify(identify::Event), -/// Ping(ping::Event), +/// Identify(identify::Event), +/// Ping(ping::Event), /// } /// /// impl From for Event { -/// fn from(event: identify::Event) -> Self { -/// Self::Identify(event) -/// } +/// fn from(event: identify::Event) -> Self { +/// Self::Identify(event) +/// } /// } /// /// impl From for Event { -/// fn from(event: ping::Event) -> Self { -/// Self::Ping(event) -/// } +/// fn from(event: ping::Event) -> Self { +/// Self::Ping(event) +/// } /// } /// ``` pub trait NetworkBehaviour: 'static { @@ -131,8 +130,8 @@ pub trait NetworkBehaviour: 'static { /// Callback that is invoked for every new inbound connection. /// - /// At this point in the connection lifecycle, only the remote's and our local address are known. - /// We have also already allocated a [`ConnectionId`]. + /// At this point in the connection lifecycle, only the remote's and our local address are + /// known. We have also already allocated a [`ConnectionId`]. /// /// Any error returned from this function will immediately abort the dial attempt. fn handle_pending_inbound_connection( @@ -148,9 +147,10 @@ pub trait NetworkBehaviour: 'static { /// /// This is invoked once another peer has successfully dialed us. /// - /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. - /// Returning an error will immediately close the connection. + /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] + /// succeeded in the dial. 
In order to actually use this connection, this function must + /// return a [`ConnectionHandler`]. Returning an error will immediately close the + /// connection. /// /// Note when any composed behaviour returns an error the connection will be closed and a /// [`FromSwarm::ListenFailure`] event will be emitted. @@ -168,10 +168,14 @@ pub trait NetworkBehaviour: 'static { /// /// - The [`PeerId`], if known. Remember that we can dial without a [`PeerId`]. /// - All addresses passed to [`DialOpts`] are passed in here too. - /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set to [`Endpoint::Dialer`] except if we are attempting a hole-punch. - /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if successful. + /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set + /// to [`Endpoint::Dialer`] except if we are attempting a hole-punch. + /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if + /// successful. /// - /// Note that the addresses returned from this function are only used for dialing if [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) is set. + /// Note that the addresses returned from this function are only used for dialing if + /// [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) + /// is set. /// /// Any error returned from this function will immediately abort the dial attempt. fn handle_pending_outbound_connection( @@ -187,9 +191,10 @@ pub trait NetworkBehaviour: 'static { /// Callback that is invoked for every established outbound connection. /// /// This is invoked once we have successfully dialed a peer. 
- /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. - /// Returning an error will immediately close the connection. + /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] + /// succeeded in the dial. In order to actually use this connection, this function must + /// return a [`ConnectionHandler`]. Returning an error will immediately close the + /// connection. /// /// Note when any composed behaviour returns an error the connection will be closed and a /// [`FromSwarm::DialFailure`] event will be emitted. @@ -240,8 +245,9 @@ pub enum ToSwarm { /// On failure, [`NetworkBehaviour::on_swarm_event`] with `DialFailure` is invoked. /// /// [`DialOpts`] provides access to the [`ConnectionId`] via [`DialOpts::connection_id`]. - /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate events with it. - /// This allows a [`NetworkBehaviour`] to identify a connection that resulted out of its own dial request. + /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate + /// events with it. This allows a [`NetworkBehaviour`] to identify a connection that + /// resulted out of its own dial request. Dial { opts: DialOpts }, /// Instructs the [`Swarm`](crate::Swarm) to listen on the provided address. @@ -253,8 +259,8 @@ pub enum ToSwarm { /// Instructs the `Swarm` to send an event to the handler dedicated to a /// connection with a peer. /// - /// If the `Swarm` is connected to the peer, the message is delivered to the [`ConnectionHandler`] - /// instance identified by the peer ID and connection ID. + /// If the `Swarm` is connected to the peer, the message is delivered to the + /// [`ConnectionHandler`] instance identified by the peer ID and connection ID. 
/// /// If the specified connection no longer exists, the event is silently dropped. /// @@ -278,11 +284,12 @@ pub enum ToSwarm { /// /// The emphasis on a **new** candidate is important. /// Protocols MUST take care to only emit a candidate once per "source". - /// For example, the observed address of a TCP connection does not change throughout its lifetime. - /// Thus, only one candidate should be emitted per connection. + /// For example, the observed address of a TCP connection does not change throughout its + /// lifetime. Thus, only one candidate should be emitted per connection. /// - /// This makes the report frequency of an address a meaningful data-point for consumers of this event. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. + /// This makes the report frequency of an address a meaningful data-point for consumers of this + /// event. This address will be shared with all [`NetworkBehaviour`]s via + /// [`FromSwarm::NewExternalAddrCandidate`]. /// /// This address could come from a variety of sources: /// - A protocol such as identify obtained it from a remote. @@ -290,25 +297,32 @@ pub enum ToSwarm { /// - We made an educated guess based on one of our listen addresses. NewExternalAddrCandidate(Multiaddr), - /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable. + /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be + /// externally reachable. /// - /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if we are indeed externally reachable on this address. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if + /// we are indeed externally reachable on this address. 
This address will be shared with + /// all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. ExternalAddrConfirmed(Multiaddr), - /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under the provided address. + /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under + /// the provided address. /// /// This expires an address that was earlier confirmed via [`ToSwarm::ExternalAddrConfirmed`]. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + /// This address will be shared with all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrExpired`]. ExternalAddrExpired(Multiaddr), - /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer. + /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given + /// peer. /// - /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion. - /// In most cases, stopping to "use" a connection is enough to have it closed. - /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle. + /// Closing a connection via [`ToSwarm::CloseConnection`] will poll + /// [`ConnectionHandler::poll_close`] to completion. In most cases, stopping to "use" a + /// connection is enough to have it closed. The keep-alive algorithm will close a + /// connection automatically once all [`ConnectionHandler`]s are idle. /// - /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers. + /// Use this command if you want to close a connection _despite_ it still being in use by one + /// or more handlers. CloseConnection { /// The peer to disconnect. 
peer_id: PeerId, @@ -316,7 +330,8 @@ pub enum ToSwarm { connection: CloseConnection, }, - /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that to other [`NetworkBehaviour`]s. + /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that + /// to other [`NetworkBehaviour`]s. NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr }, } @@ -440,8 +455,8 @@ pub enum FromSwarm<'a> { /// Informs the behaviour that an error /// happened on an incoming connection during its initial handshake. /// - /// This can include, for example, an error during the handshake of the encryption layer, or the - /// connection unexpectedly closed. + /// This can include, for example, an error during the handshake of the encryption layer, or + /// the connection unexpectedly closed. ListenFailure(ListenFailure<'a>), /// Informs the behaviour that a new listener was created. NewListener(NewListener), @@ -455,11 +470,13 @@ pub enum FromSwarm<'a> { ListenerError(ListenerError<'a>), /// Informs the behaviour that a listener closed. ListenerClosed(ListenerClosed<'a>), - /// Informs the behaviour that we have discovered a new candidate for an external address for us. + /// Informs the behaviour that we have discovered a new candidate for an external address for + /// us. NewExternalAddrCandidate(NewExternalAddrCandidate<'a>), /// Informs the behaviour that an external address of the local node was confirmed. ExternalAddrConfirmed(ExternalAddrConfirmed<'a>), - /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer confirmed. + /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer + /// confirmed. ExternalAddrExpired(ExternalAddrExpired<'a>), /// Informs the behaviour that we have discovered a new external address for a remote peer. 
NewExternalAddrOfPeer(NewExternalAddrOfPeer<'a>), @@ -559,7 +576,8 @@ pub struct ListenerClosed<'a> { pub reason: Result<(), &'a std::io::Error>, } -/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address for us. +/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address +/// for us. #[derive(Debug, Clone, Copy)] pub struct NewExternalAddrCandidate<'a> { pub addr: &'a Multiaddr, @@ -577,7 +595,8 @@ pub struct ExternalAddrExpired<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer was detected. +/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer +/// was detected. #[derive(Clone, Copy, Debug)] pub struct NewExternalAddrOfPeer<'a> { pub peer_id: PeerId, diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs index 7a51303e74d..b9a86e1b9d8 100644 --- a/swarm/src/behaviour/either.rs +++ b/swarm/src/behaviour/either.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::{self, NetworkBehaviour, ToSwarm}; -use crate::connection::ConnectionId; -use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; +use std::task::{Context, Poll}; + use either::Either; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; + +use crate::{ + behaviour::{self, NetworkBehaviour, ToSwarm}, + connection::ConnectionId, + ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent, +}; /// Implementation of [`NetworkBehaviour`] that can be either of two implementations. 
impl NetworkBehaviour for Either diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 579f46fe486..ba2dd3eb890 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -1,6 +1,7 @@ -use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm}; use libp2p_core::Multiaddr; +use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm}; + /// The maximum number of local external addresses. When reached any /// further externally reported addresses are ignored. The behaviour always /// tracks all its listen addresses. @@ -78,17 +79,20 @@ impl ExternalAddresses { } fn push_front(&mut self, addr: &Multiaddr) { - self.addresses.insert(0, addr.clone()); // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so this isn't very expensive. + // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so + // this isn't very expensive. + self.addresses.insert(0, addr.clone()); } } #[cfg(test)] mod tests { - use super::*; use libp2p_core::multiaddr::Protocol; use once_cell::sync::Lazy; use rand::Rng; + use super::*; + #[test] fn new_external_addr_returns_correct_changed_value() { let mut addresses = ExternalAddresses::default(); diff --git a/swarm/src/behaviour/listen_addresses.rs b/swarm/src/behaviour/listen_addresses.rs index 6076f5e7923..0c685d798c7 100644 --- a/swarm/src/behaviour/listen_addresses.rs +++ b/swarm/src/behaviour/listen_addresses.rs @@ -1,7 +1,9 @@ -use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; -use libp2p_core::Multiaddr; use std::collections::HashSet; +use libp2p_core::Multiaddr; + +use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; + /// Utility struct for tracking the addresses a [`Swarm`](crate::Swarm) is listening on. 
#[derive(Debug, Default, Clone)] pub struct ListenAddresses { @@ -32,10 +34,11 @@ impl ListenAddresses { #[cfg(test)] mod tests { - use super::*; use libp2p_core::{multiaddr::Protocol, transport::ListenerId}; use once_cell::sync::Lazy; + use super::*; + #[test] fn new_listen_addr_returns_correct_changed_value() { let mut addresses = ListenAddresses::default(); diff --git a/swarm/src/behaviour/peer_addresses.rs b/swarm/src/behaviour/peer_addresses.rs index 1eeead56ca1..5aeae7741d5 100644 --- a/swarm/src/behaviour/peer_addresses.rs +++ b/swarm/src/behaviour/peer_addresses.rs @@ -1,12 +1,10 @@ -use crate::behaviour::FromSwarm; -use crate::{DialError, DialFailure, NewExternalAddrOfPeer}; +use std::num::NonZeroUsize; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; - use lru::LruCache; -use std::num::NonZeroUsize; +use crate::{behaviour::FromSwarm, DialError, DialFailure, NewExternalAddrOfPeer}; /// Struct for tracking peers' external addresses of the [`Swarm`](crate::Swarm). #[derive(Debug)] @@ -46,7 +44,6 @@ impl PeerAddresses { /// Appends address to the existing set if peer addresses already exist. /// Creates a new cache entry for peer_id if no addresses are present. /// Returns true if the newly added address was not previously in the cache. 
- /// pub fn add(&mut self, peer: PeerId, address: Multiaddr) -> bool { match prepare_addr(&peer, &address) { Ok(address) => { @@ -98,17 +95,17 @@ impl Default for PeerAddresses { #[cfg(test)] mod tests { - use super::*; use std::io; - use crate::ConnectionId; use libp2p_core::{ multiaddr::Protocol, transport::{memory::MemoryTransportError, TransportError}, }; - use once_cell::sync::Lazy; + use super::*; + use crate::ConnectionId; + #[test] fn new_peer_addr_returns_correct_changed_value() { let mut cache = PeerAddresses::default(); diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 3dde364bf19..e70e6cf9896 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -18,22 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::FromSwarm; -use crate::connection::ConnectionId; -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::SendWrapper; -use crate::{ - ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, -}; +use std::task::{Context, Poll}; + use either::Either; use futures::future; -use libp2p_core::transport::PortUse; -use libp2p_core::{upgrade::DeniedUpgrade, Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; + +use crate::{ + behaviour::FromSwarm, + connection::ConnectionId, + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, + DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, + SubstreamProtocol, + }, + upgrade::SendWrapper, + ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, +}; /// 
Implementation of `NetworkBehaviour` that can be either in the disabled or enabled state. /// diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 78c007fd71d..32cae54a5ef 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -23,42 +23,47 @@ mod error; pub(crate) mod pool; mod supported_protocols; +use std::{ + collections::{HashMap, HashSet}, + fmt, + fmt::{Display, Formatter}, + future::Future, + io, mem, + pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, Poll, Waker}, + time::Duration, +}; + pub use error::ConnectionError; pub(crate) use error::{ PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, }; -use libp2p_core::transport::PortUse; +use futures::{future::BoxFuture, stream, stream::FuturesUnordered, FutureExt, StreamExt}; +use futures_timer::Delay; +use libp2p_core::{ + connection::ConnectedPoint, + multiaddr::Multiaddr, + muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}, + transport::PortUse, + upgrade, + upgrade::{NegotiationError, ProtocolError}, + Endpoint, +}; +use libp2p_identity::PeerId; pub use supported_protocols::SupportedProtocols; +use web_time::Instant; -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, FullyNegotiatedInbound, - FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, ProtocolsChange, UpgradeInfoSend, -}; -use crate::stream::ActiveStreamCounter; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; use crate::{ + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, + ProtocolsChange, UpgradeInfoSend, + }, + stream::ActiveStreamCounter, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend}, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use futures::future::BoxFuture; -use 
futures::stream::FuturesUnordered; -use futures::StreamExt; -use futures::{stream, FutureExt}; -use futures_timer::Delay; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}; -use libp2p_core::upgrade; -use libp2p_core::upgrade::{NegotiationError, ProtocolError}; -use libp2p_core::Endpoint; -use libp2p_identity::PeerId; -use std::collections::{HashMap, HashSet}; -use std::fmt::{Display, Formatter}; -use std::future::Future; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::task::Waker; -use std::time::Duration; -use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; -use web_time::Instant; static NEXT_CONNECTION_ID: AtomicUsize = AtomicUsize::new(1); @@ -72,7 +77,8 @@ impl ConnectionId { /// [`Swarm`](crate::Swarm) enforces that [`ConnectionId`]s are unique and not reused. /// This constructor does not, hence the _unchecked_. /// - /// It is primarily meant for allowing manual tests of [`NetworkBehaviour`](crate::NetworkBehaviour)s. + /// It is primarily meant for allowing manual tests of + /// [`NetworkBehaviour`](crate::NetworkBehaviour)s. pub fn new_unchecked(id: usize) -> Self { Self(id) } @@ -147,8 +153,8 @@ where max_negotiating_inbound_streams: usize, /// Contains all upgrades that are waiting for a new outbound substream. /// - /// The upgrade timeout is already ticking here so this may fail in case the remote is not quick - /// enough in providing us with a new stream. + /// The upgrade timeout is already ticking here so this may fail in case the remote is not + /// quick enough in providing us with a new stream. requested_substreams: FuturesUnordered< SubstreamRequested, >, @@ -223,7 +229,8 @@ where self.handler.on_behaviour_event(event); } - /// Begins an orderly shutdown of the connection, returning a stream of final events and a `Future` that resolves when connection shutdown is complete. 
+ /// Begins an orderly shutdown of the connection, returning a stream of final events and a + /// `Future` that resolves when connection shutdown is complete. pub(crate) fn close( self, ) -> ( @@ -320,7 +327,8 @@ where } } - // In case the [`ConnectionHandler`] can not make any more progress, poll the negotiating outbound streams. + // In case the [`ConnectionHandler`] can not make any more progress, poll the + // negotiating outbound streams. match negotiating_out.poll_next_unpin(cx) { Poll::Pending | Poll::Ready(None) => {} Poll::Ready(Some((info, Ok(protocol)))) => { @@ -368,7 +376,8 @@ where } // Check if the connection (and handler) should be shut down. - // As long as we're still negotiating substreams or have any active streams shutdown is always postponed. + // As long as we're still negotiating substreams or have + // any active streams shutdown is always postponed. if negotiating_in.is_empty() && negotiating_out.is_empty() && requested_substreams.is_empty() @@ -419,7 +428,9 @@ where stream_counter.clone(), )); - continue; // Go back to the top, handler can potentially make progress again. + // Go back to the top, + // handler can potentially make progress again. + continue; } } } @@ -436,7 +447,9 @@ where stream_counter.clone(), )); - continue; // Go back to the top, handler can potentially make progress again. + // Go back to the top, + // handler can potentially make progress again. + continue; } } } @@ -451,10 +464,12 @@ where for change in changes { handler.on_connection_event(ConnectionEvent::LocalProtocolsChange(change)); } - continue; // Go back to the top, handler can potentially make progress again. + // Go back to the top, handler can potentially make progress again. + continue; } - return Poll::Pending; // Nothing can make progress, return `Pending`. + // Nothing can make progress, return `Pending`. 
+ return Poll::Pending; } } @@ -482,7 +497,8 @@ fn compute_new_shutdown( ) -> Option { match (current_shutdown, handler_keep_alive) { (_, false) if idle_timeout == Duration::ZERO => Some(Shutdown::Asap), - (Shutdown::Later(_), false) => None, // Do nothing, i.e. let the shutdown timer continue to tick. + // Do nothing, i.e. let the shutdown timer continue to tick. + (Shutdown::Later(_), false) => None, (_, false) => { let now = Instant::now(); let safe_keep_alive = checked_add_fraction(now, idle_timeout); @@ -493,10 +509,12 @@ fn compute_new_shutdown( } } -/// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until [`Instant::checked_add`] succeeds. +/// Repeatedly halves and adds the [`Duration`] +/// to the [`Instant`] until [`Instant::checked_add`] succeeds. /// -/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can represent. -/// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. +/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can +/// represent. The [`Duration`] computed by the this function may not be the longest possible that +/// we can add to `now` but it will work. 
fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { while start.checked_add(duration).is_none() { tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); @@ -767,19 +785,23 @@ impl> std::hash::Hash for AsStrHashEq { #[cfg(test)] mod tests { - use super::*; - use crate::dummy; - use futures::future; - use futures::AsyncRead; - use futures::AsyncWrite; - use libp2p_core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; - use libp2p_core::StreamMuxer; + use std::{ + convert::Infallible, + sync::{Arc, Weak}, + time::Instant, + }; + + use futures::{future, AsyncRead, AsyncWrite}; + use libp2p_core::{ + upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + StreamMuxer, + }; use quickcheck::*; - use std::convert::Infallible; - use std::sync::{Arc, Weak}; - use std::time::Instant; use tracing_subscriber::EnvFilter; + use super::*; + use crate::dummy; + #[test] fn max_negotiating_inbound_streams() { let _ = tracing_subscriber::fmt() @@ -906,7 +928,8 @@ mod tests { ); assert!(connection.handler.remote_removed.is_empty()); - // Third, stop listening on a protocol it never advertised (we can't control what handlers do so this needs to be handled gracefully). + // Third, stop listening on a protocol it never advertised (we can't control what handlers + // do so this needs to be handled gracefully). connection.handler.remote_removes_support_for(&["/baz"]); let _ = connection.poll_noop_waker(); diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 33aa81c19a9..39e5a88fca6 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -18,11 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::transport::TransportError; -use crate::Multiaddr; -use crate::{ConnectedPoint, PeerId}; use std::{fmt, io}; +use crate::{transport::TransportError, ConnectedPoint, Multiaddr, PeerId}; + /// Errors that can occur in the context of an established `Connection`. #[derive(Debug)] pub enum ConnectionError { diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 7964ecbfa69..f42fd1f305c 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -18,41 +18,41 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::connection::{Connection, ConnectionId, PendingPoint}; -use crate::{ - connection::{ - Connected, ConnectionError, IncomingInfo, PendingConnectionError, - PendingInboundConnectionError, PendingOutboundConnectionError, - }, - transport::TransportError, - ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId, +use std::{ + collections::HashMap, + convert::Infallible, + fmt, + num::{NonZeroU8, NonZeroUsize}, + pin::Pin, + task::{Context, Poll, Waker}, }; + use concurrent_dial::ConcurrentDial; use fnv::FnvHashMap; -use futures::prelude::*; -use futures::stream::SelectAll; use futures::{ channel::{mpsc, oneshot}, future::{poll_fn, BoxFuture, Either}, + prelude::*, ready, - stream::FuturesUnordered, + stream::{FuturesUnordered, SelectAll}, }; -use libp2p_core::connection::Endpoint; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::PortUse; -use std::convert::Infallible; -use std::task::Waker; -use std::{ - collections::HashMap, - fmt, - num::{NonZeroU8, NonZeroUsize}, - pin::Pin, - task::Context, - task::Poll, +use libp2p_core::{ + connection::Endpoint, + muxing::{StreamMuxerBox, StreamMuxerExt}, + transport::PortUse, }; use tracing::Instrument; use web_time::{Duration, Instant}; +use crate::{ + connection::{ + Connected, Connection, 
ConnectionError, ConnectionId, IncomingInfo, PendingConnectionError, + PendingInboundConnectionError, PendingOutboundConnectionError, PendingPoint, + }, + transport::TransportError, + ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId, +}; + mod concurrent_dial; mod task; @@ -115,7 +115,8 @@ where /// See [`Connection::max_negotiating_inbound_streams`]. max_negotiating_inbound_streams: usize, - /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is back-pressured. + /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is + /// back-pressured. per_connection_event_buffer_size: usize, /// The executor to use for running connection tasks. Can either be a global executor @@ -247,13 +248,11 @@ pub(crate) enum PoolEvent { /// /// A connection may close if /// - /// * it encounters an error, which includes the connection being - /// closed by the remote. In this case `error` is `Some`. - /// * it was actively closed by [`EstablishedConnection::start_close`], - /// i.e. a successful, orderly close. - /// * it was actively closed by [`Pool::disconnect`], i.e. - /// dropped without an orderly close. - /// + /// * it encounters an error, which includes the connection being closed by the remote. In + /// this case `error` is `Some`. + /// * it was actively closed by [`EstablishedConnection::start_close`], i.e. a successful, + /// orderly close. + /// * it was actively closed by [`Pool::disconnect`], i.e. dropped without an orderly close. ConnectionClosed { id: ConnectionId, /// Information about the connection that errored. diff --git a/swarm/src/connection/pool/concurrent_dial.rs b/swarm/src/connection/pool/concurrent_dial.rs index 57e4b078098..99f0b385884 100644 --- a/swarm/src/connection/pool/concurrent_dial.rs +++ b/swarm/src/connection/pool/concurrent_dial.rs @@ -18,7 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::{transport::TransportError, Multiaddr}; +use std::{ + num::NonZeroU8, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{BoxFuture, Future}, ready, @@ -26,11 +31,8 @@ use futures::{ }; use libp2p_core::muxing::StreamMuxerBox; use libp2p_identity::PeerId; -use std::{ - num::NonZeroU8, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::{transport::TransportError, Multiaddr}; type Dial = BoxFuture< 'static, diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index 3b808a30fd1..3a82e5c11d1 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -21,6 +21,15 @@ //! Async functions driving pending and established connections in the form of a task. +use std::{convert::Infallible, pin::Pin}; + +use futures::{ + channel::{mpsc, oneshot}, + future::{poll_fn, Either, Future}, + SinkExt, StreamExt, +}; +use libp2p_core::muxing::StreamMuxerBox; + use super::concurrent_dial::ConcurrentDial; use crate::{ connection::{ @@ -30,14 +39,6 @@ use crate::{ transport::TransportError, ConnectionHandler, Multiaddr, PeerId, }; -use futures::{ - channel::{mpsc, oneshot}, - future::{poll_fn, Either, Future}, - SinkExt, StreamExt, -}; -use libp2p_core::muxing::StreamMuxerBox; -use std::convert::Infallible; -use std::pin::Pin; /// Commands that can be sent to a task driving an established connection. 
#[derive(Debug)] diff --git a/swarm/src/connection/supported_protocols.rs b/swarm/src/connection/supported_protocols.rs index 124ec93d669..c167bf88649 100644 --- a/swarm/src/connection/supported_protocols.rs +++ b/swarm/src/connection/supported_protocols.rs @@ -1,7 +1,7 @@ -use crate::handler::ProtocolsChange; -use crate::StreamProtocol; use std::collections::HashSet; +use crate::{handler::ProtocolsChange, StreamProtocol}; + #[derive(Default, Clone, Debug)] pub struct SupportedProtocols { protocols: HashSet, diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs index 4f5b621327c..cdaaeb358b2 100644 --- a/swarm/src/dial_opts.rs +++ b/swarm/src/dial_opts.rs @@ -19,14 +19,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::ConnectionId; -use libp2p_core::connection::Endpoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; use std::num::NonZeroU8; +use libp2p_core::{connection::Endpoint, multiaddr::Protocol, transport::PortUse, Multiaddr}; +use libp2p_identity::PeerId; + +use crate::ConnectionId; + macro_rules! fn_override_role { () => { /// Override role of local node on connection. I.e. execute the dial _as a @@ -130,7 +129,8 @@ impl DialOpts { /// Get the [`ConnectionId`] of this dial attempt. /// /// All future events of this dial will be associated with this ID. - /// See [`DialFailure`](crate::DialFailure) and [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished). + /// See [`DialFailure`](crate::DialFailure) and + /// [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished). 
pub fn connection_id(&self) -> ConnectionId { self.connection_id } @@ -324,8 +324,8 @@ impl WithoutPeerIdWithAddress { /// # use libp2p_identity::PeerId; /// # /// DialOpts::peer_id(PeerId::random()) -/// .condition(PeerCondition::Disconnected) -/// .build(); +/// .condition(PeerCondition::Disconnected) +/// .build(); /// ``` #[derive(Debug, Copy, Clone, Default)] pub enum PeerCondition { diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index b87ef32c8f7..5452c382cd4 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -1,19 +1,18 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}; -use crate::connection::ConnectionId; -use crate::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, +use std::{ + convert::Infallible, + task::{Context, Poll}, }; + +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; +use libp2p_identity::PeerId; + use crate::{ + behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}, + connection::ConnectionId, + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, ConnectionDenied, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, }; -use libp2p_core::transport::PortUse; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::Endpoint; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use std::convert::Infallible; -use std::task::{Context, Poll}; /// Implementation of [`NetworkBehaviour`] that doesn't do anything. pub struct Behaviour; @@ -61,7 +60,8 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, _event: FromSwarm) {} } -/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive. +/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep +/// the connection alive. 
#[derive(Clone)] pub struct ConnectionHandler; diff --git a/swarm/src/executor.rs b/swarm/src/executor.rs index a2abbbde6ef..db5ed6b2da4 100644 --- a/swarm/src/executor.rs +++ b/swarm/src/executor.rs @@ -1,14 +1,15 @@ //! Provides executors for spawning background tasks. -use futures::executor::ThreadPool; use std::{future::Future, pin::Pin}; +use futures::executor::ThreadPool; + /// Implemented on objects that can run a `Future` in the background. /// /// > **Note**: While it may be tempting to implement this trait on types such as -/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is -/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically -/// > be used as fallback by libp2p. The `Executor` trait should therefore only be -/// > about running `Future`s on a separate task. +/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is +/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically +/// > be used as fallback by libp2p. The `Executor` trait should therefore only be +/// > about running `Future`s on a separate task. pub trait Executor { /// Run the given future in the background until it ends. #[track_caller] diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 9e31592d68d..3d0407b4f70 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -34,9 +34,9 @@ //! used protocol(s) determined by the associated types of the handlers. //! //! > **Note**: A [`ConnectionHandler`] handles one or more protocols in the context of a single -//! > connection with a remote. In order to handle a protocol that requires knowledge of -//! > the network as a whole, see the -//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. +//! > connection with a remote. In order to handle a protocol that requires knowledge of +//! > the network as a whole, see the +//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. 
pub mod either; mod map_in; @@ -46,8 +46,15 @@ mod one_shot; mod pending; mod select; -use crate::connection::AsStrHashEq; -pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; +use core::slice; +use std::{ + collections::{HashMap, HashSet}, + error, fmt, io, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p_core::Multiaddr; pub use map_in::MapInEvent; pub use map_out::MapOutEvent; pub use one_shot::{OneShotHandler, OneShotHandlerConfig}; @@ -55,11 +62,8 @@ pub use pending::PendingConnectionHandler; pub use select::ConnectionHandlerSelect; use smallvec::SmallVec; -use crate::StreamProtocol; -use core::slice; -use libp2p_core::Multiaddr; -use std::collections::{HashMap, HashSet}; -use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; +pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; +use crate::{connection::AsStrHashEq, StreamProtocol}; /// A handler for a set of protocols used on a connection with a remote. /// @@ -71,17 +75,17 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; /// Communication with a remote over a set of protocols is initiated in one of two ways: /// /// 1. Dialing by initiating a new outbound substream. In order to do so, -/// [`ConnectionHandler::poll()`] must return an [`ConnectionHandlerEvent::OutboundSubstreamRequest`], -/// providing an instance of [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the -/// protocol(s). Upon success, [`ConnectionHandler::on_connection_event`] is called with +/// [`ConnectionHandler::poll()`] must return an +/// [`ConnectionHandlerEvent::OutboundSubstreamRequest`], providing an instance of +/// [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the protocol(s). Upon +/// success, [`ConnectionHandler::on_connection_event`] is called with /// [`ConnectionEvent::FullyNegotiatedOutbound`] translating the final output of the upgrade. 
/// -/// 2. Listening by accepting a new inbound substream. When a new inbound substream -/// is created on a connection, [`ConnectionHandler::listen_protocol`] is called -/// to obtain an instance of [`libp2p_core::upgrade::InboundUpgrade`] that is used to -/// negotiate the protocol(s). Upon success, -/// [`ConnectionHandler::on_connection_event`] is called with [`ConnectionEvent::FullyNegotiatedInbound`] -/// translating the final output of the upgrade. +/// 2. Listening by accepting a new inbound substream. When a new inbound substream is created on +/// a connection, [`ConnectionHandler::listen_protocol`] is called to obtain an instance of +/// [`libp2p_core::upgrade::InboundUpgrade`] that is used to negotiate the protocol(s). Upon +/// success, [`ConnectionHandler::on_connection_event`] is called with +/// [`ConnectionEvent::FullyNegotiatedInbound`] translating the final output of the upgrade. /// /// /// # Connection Keep-Alive @@ -95,9 +99,13 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; /// When a connection is closed gracefully, the substreams used by the handler may still /// continue reading data until the remote closes its side of the connection. pub trait ConnectionHandler: Send + 'static { - /// A type representing the message(s) a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) + /// A type representing the message(s) a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] + /// via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) type FromBehaviour: fmt::Debug + Send + 'static; - /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`]. 
+ /// A type representing message(s) a [`ConnectionHandler`] can send to a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via + /// [`ConnectionHandlerEvent::NotifyBehaviour`]. type ToBehaviour: fmt::Debug + Send + 'static; /// The inbound upgrade for the protocol(s) used by the handler. type InboundProtocol: InboundUpgradeSend; @@ -112,9 +120,9 @@ pub trait ConnectionHandler: Send + 'static { /// substreams to negotiate the desired protocols. /// /// > **Note**: The returned `InboundUpgrade` should always accept all the generally - /// > supported protocols, even if in a specific context a particular one is - /// > not supported, (eg. when only allowing one substream at a time for a protocol). - /// > This allows a remote to put the list of supported protocols in a cache. + /// > supported protocols, even if in a specific context a particular one is + /// > not supported, (eg. when only allowing one substream at a time for a protocol). + /// > This allows a remote to put the list of supported protocols in a cache. fn listen_protocol(&self) -> SubstreamProtocol; /// Returns whether the connection should be kept alive. @@ -127,15 +135,21 @@ pub trait ConnectionHandler: Send + 'static { /// - We are negotiating inbound or outbound streams. /// - There are active [`Stream`](crate::Stream)s on the connection. /// - /// The combination of the above means that _most_ protocols will not need to override this method. - /// This method is only invoked when all of the above are `false`, i.e. when the connection is entirely idle. + /// The combination of the above means that _most_ protocols will not need to override this + /// method. This method is only invoked when all of the above are `false`, i.e. when the + /// connection is entirely idle. 
/// /// ## Exceptions /// - /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) need to keep a connection alive beyond these circumstances and can thus override this method. - /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** want to keep a connection alive despite an active streams. + /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) + /// need to keep a connection alive beyond these circumstances and can thus override this + /// method. + /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** + /// want to keep a connection alive despite an active streams. /// - /// In that case, protocol authors can use [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt-out a particular stream from the keep-alive algorithm. + /// In that case, protocol authors can use + /// [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt-out a + /// particular stream from the keep-alive algorithm. fn connection_keep_alive(&self) -> bool { false } @@ -160,7 +174,8 @@ pub trait ConnectionHandler: Send + 'static { /// To signal completion, [`Poll::Ready(None)`] should be returned. /// /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour. - /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`]. + /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to + /// [`ConnectionHandler::poll_close`]. fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(None) } @@ -308,7 +323,8 @@ pub struct FullyNegotiatedInbound { pub info: IOI, } -/// [`ConnectionEvent`] variant that informs the handler about successful upgrade on a new outbound stream. 
+/// [`ConnectionEvent`] variant that informs the handler about successful upgrade on a new outbound +/// stream. /// /// The `protocol` field is the information that was previously passed to /// [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. @@ -318,13 +334,15 @@ pub struct FullyNegotiatedOutbound { pub info: OOI, } -/// [`ConnectionEvent`] variant that informs the handler about a change in the address of the remote. +/// [`ConnectionEvent`] variant that informs the handler about a change in the address of the +/// remote. #[derive(Debug)] pub struct AddressChange<'a> { pub new_address: &'a Multiaddr, } -/// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported on the connection. +/// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported +/// on the connection. #[derive(Debug, Clone)] pub enum ProtocolsChange<'a> { Added(ProtocolsAdded<'a>), @@ -373,9 +391,11 @@ impl<'a> ProtocolsChange<'a> { })) } - /// Compute the [`ProtocolsChange`] that results from removing `to_remove` from `existing_protocols`. Removes the protocols from `existing_protocols`. + /// Compute the [`ProtocolsChange`] that results from removing `to_remove` from + /// `existing_protocols`. Removes the protocols from `existing_protocols`. /// - /// Returns `None` if the change is a no-op, i.e. none of the protocols in `to_remove` are in `existing_protocols`. + /// Returns `None` if the change is a no-op, i.e. none of the protocols in `to_remove` are in + /// `existing_protocols`. pub(crate) fn remove( existing_protocols: &mut HashSet, to_remove: HashSet, @@ -397,7 +417,8 @@ impl<'a> ProtocolsChange<'a> { })) } - /// Compute the [`ProtocolsChange`]s required to go from `existing_protocols` to `new_protocols`. + /// Compute the [`ProtocolsChange`]s required to go from `existing_protocols` to + /// `new_protocols`. 
pub(crate) fn from_full_sets>( existing_protocols: &mut HashMap, bool>, new_protocols: impl IntoIterator, @@ -429,7 +450,8 @@ impl<'a> ProtocolsChange<'a> { let num_new_protocols = buffer.len(); // Drain all protocols that we haven't visited. - // For existing protocols that are not in `new_protocols`, the boolean will be false, meaning we need to remove it. + // For existing protocols that are not in `new_protocols`, the boolean will be false, + // meaning we need to remove it. existing_protocols.retain(|p, &mut is_supported| { if !is_supported { buffer.extend(StreamProtocol::try_from_owned(p.0.as_ref().to_owned()).ok()); diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index a5aab9b5fee..1dc62e0eb2a 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -18,14 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::SendWrapper; +use std::task::{Context, Poll}; + use either::Either; use futures::future; -use std::task::{Context, Poll}; + +use crate::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, + InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, + }, + upgrade::SendWrapper, +}; impl FullyNegotiatedInbound, SendWrapper>, Either> diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index 9316ef4d2ce..55885b351bb 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -18,10 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + fmt::Debug, + marker::PhantomData, + task::{Context, Poll}, +}; + use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; -use std::{fmt::Debug, marker::PhantomData, task::Context, task::Poll}; /// Wrapper around a protocol handler that turns the input event into something else. #[derive(Debug)] diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index f877bfa6f64..6d05551aeec 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -18,12 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + fmt::Debug, + task::{Context, Poll}, +}; + +use futures::ready; + use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; -use futures::ready; -use std::fmt::Debug; -use std::task::{Context, Poll}; /// Wrapper around a protocol handler that turns the output event into something else. #[derive(Debug)] diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index 5efcde5c2bb..73af1b1109e 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -21,14 +21,6 @@ //! A [`ConnectionHandler`] implementation that combines multiple other [`ConnectionHandler`]s //! indexed by some key. 
-use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}; -use crate::Stream; -use futures::{future::BoxFuture, prelude::*, ready}; -use rand::Rng; use std::{ cmp, collections::{HashMap, HashSet}, @@ -40,6 +32,19 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, prelude::*, ready}; +use rand::Rng; + +use crate::{ + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, + DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, + SubstreamProtocol, + }, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}, + Stream, +}; + /// A [`ConnectionHandler`] for multiple [`ConnectionHandler`]s of the same type. #[derive(Clone)] pub struct MultiHandler { @@ -248,7 +253,8 @@ where return Poll::Pending; } - // Not always polling handlers in the same order should give anyone the chance to make progress. + // Not always polling handlers in the same order + // should give anyone the chance to make progress. let pos = rand::thread_rng().gen_range(0..self.handlers.len()); for (k, h) in self.handlers.iter_mut().skip(pos) { diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index 7c84f4bb11a..29f2811522b 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -18,14 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, +use std::{ + error, + fmt::Debug, + task::{Context, Poll}, + time::Duration, }; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; -use crate::StreamUpgradeError; + use smallvec::SmallVec; -use std::{error, fmt::Debug, task::Context, task::Poll, time::Duration}; + +use crate::{ + handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, + }, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend}, + StreamUpgradeError, +}; /// A [`ConnectionHandler`] that opens a new substream for each request. // TODO: Debug @@ -71,7 +80,7 @@ where /// Returns a reference to the listen protocol configuration. /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound - /// > substreams, not the ones already being negotiated. + /// > substreams, not the ones already being negotiated. pub fn listen_protocol_ref(&self) -> &SubstreamProtocol { &self.listen_protocol } @@ -79,7 +88,7 @@ where /// Returns a mutable reference to the listen protocol configuration. /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound - /// > substreams, not the ones already being negotiated. + /// > substreams, not the ones already being negotiated. 
pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol { &mut self.listen_protocol } @@ -212,12 +221,12 @@ impl Default for OneShotHandlerConfig { #[cfg(test)] mod tests { - use super::*; + use std::convert::Infallible; - use futures::executor::block_on; - use futures::future::poll_fn; + use futures::{executor::block_on, future::poll_fn}; use libp2p_core::upgrade::DeniedUpgrade; - use std::convert::Infallible; + + use super::*; #[test] fn do_not_keep_idle_connection_alive() { diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index 656a38849d5..483c4b3d6e8 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -19,13 +19,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; + +use libp2p_core::upgrade::PendingUpgrade; + use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, }; -use libp2p_core::upgrade::PendingUpgrade; -use std::convert::Infallible; -use std::task::{Context, Poll}; /// Implementation of [`ConnectionHandler`] that returns a pending upgrade. #[derive(Clone, Debug)] diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs index e049252d448..0f6dbe988ff 100644 --- a/swarm/src/handler/select.rs +++ b/swarm/src/handler/select.rs @@ -18,16 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, ListenUpgradeError, - OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, +use std::{ + cmp, + task::{Context, Poll}, }; -use crate::upgrade::SendWrapper; + use either::Either; use futures::{future, ready}; use libp2p_core::upgrade::SelectUpgrade; -use std::{cmp, task::Context, task::Poll}; + +use crate::{ + handler::{ + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, + DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, + ListenUpgradeError, OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, + }, + upgrade::SendWrapper, +}; /// Implementation of [`ConnectionHandler`] that combines two protocols into one. #[derive(Debug, Clone)] diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 12280e99f07..639906a1a09 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -31,12 +31,11 @@ //! Creating a `Swarm` requires three things: //! //! 1. A network identity of the local node in form of a [`PeerId`]. -//! 2. An implementation of the [`Transport`] trait. This is the type that -//! will be used in order to reach nodes on the network based on their -//! address. See the `transport` module for more information. -//! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state -//! machine that defines how the swarm should behave once it is connected -//! to a node. +//! 2. An implementation of the [`Transport`] trait. This is the type that will be used in order to +//! reach nodes on the network based on their address. See the `transport` module for more +//! information. +//! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state machine that defines +//! how the swarm should behave once it is connected to a node. //! //! # Network Behaviour //! @@ -51,7 +50,6 @@ //! 
The [`ConnectionHandler`] trait defines how each active connection to a //! remote should behave: how to handle incoming substreams, which protocols //! are supported, when to open a new outbound substream, etc. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -73,69 +71,55 @@ mod translation; /// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro. #[doc(hidden)] pub mod derive_prelude { - pub use crate::behaviour::AddressChange; - pub use crate::behaviour::ConnectionClosed; - pub use crate::behaviour::ConnectionEstablished; - pub use crate::behaviour::DialFailure; - pub use crate::behaviour::ExpiredListenAddr; - pub use crate::behaviour::ExternalAddrConfirmed; - pub use crate::behaviour::ExternalAddrExpired; - pub use crate::behaviour::FromSwarm; - pub use crate::behaviour::ListenFailure; - pub use crate::behaviour::ListenerClosed; - pub use crate::behaviour::ListenerError; - pub use crate::behaviour::NewExternalAddrCandidate; - pub use crate::behaviour::NewExternalAddrOfPeer; - pub use crate::behaviour::NewListenAddr; - pub use crate::behaviour::NewListener; - pub use crate::connection::ConnectionId; - pub use crate::ConnectionDenied; - pub use crate::ConnectionHandler; - pub use crate::ConnectionHandlerSelect; - pub use crate::DialError; - pub use crate::NetworkBehaviour; - pub use crate::THandler; - pub use crate::THandlerInEvent; - pub use crate::THandlerOutEvent; - pub use crate::ToSwarm; pub use either::Either; pub use futures::prelude as futures; - pub use libp2p_core::transport::{ListenerId, PortUse}; - pub use libp2p_core::ConnectedPoint; - pub use libp2p_core::Endpoint; - pub use libp2p_core::Multiaddr; + pub use libp2p_core::{ + transport::{ListenerId, PortUse}, + ConnectedPoint, Endpoint, Multiaddr, + }; pub use libp2p_identity::PeerId; + + pub use crate::{ + behaviour::{ + AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, + ExternalAddrConfirmed, ExternalAddrExpired, 
FromSwarm, ListenFailure, ListenerClosed, + ListenerError, NewExternalAddrCandidate, NewExternalAddrOfPeer, NewListenAddr, + NewListener, + }, + connection::ConnectionId, + ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, DialError, NetworkBehaviour, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + }; } +use std::{ + collections::{HashMap, HashSet, VecDeque}, + error, fmt, io, + num::{NonZeroU32, NonZeroU8, NonZeroUsize}, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + pub use behaviour::{ AddressChange, CloseConnection, ConnectionClosed, DialFailure, ExpiredListenAddr, ExternalAddrExpired, ExternalAddresses, FromSwarm, ListenAddresses, ListenFailure, ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddrCandidate, NewExternalAddrOfPeer, NewListenAddr, NotifyHandler, PeerAddresses, ToSwarm, }; -pub use connection::pool::ConnectionCounters; -pub use connection::{ConnectionError, ConnectionId, SupportedProtocols}; +pub use connection::{pool::ConnectionCounters, ConnectionError, ConnectionId, SupportedProtocols}; +use connection::{ + pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent}, + IncomingInfo, PendingConnectionError, PendingInboundConnectionError, + PendingOutboundConnectionError, +}; +use dial_opts::{DialOpts, PeerCondition}; pub use executor::Executor; +use futures::{prelude::*, stream::FusedStream}; pub use handler::{ ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, OneShotHandler, OneShotHandlerConfig, StreamUpgradeError, SubstreamProtocol, }; -#[cfg(feature = "macros")] -pub use libp2p_swarm_derive::NetworkBehaviour; -pub use listen_opts::ListenOpts; -pub use stream::Stream; -pub use stream_protocol::{InvalidProtocol, StreamProtocol}; - -use crate::behaviour::ExternalAddrConfirmed; -use crate::handler::UpgradeInfoSend; -use connection::pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent}; -use connection::IncomingInfo; -use connection::{ - PendingConnectionError, 
PendingInboundConnectionError, PendingOutboundConnectionError, -}; -use dial_opts::{DialOpts, PeerCondition}; -use futures::{prelude::*, stream::FusedStream}; - use libp2p_core::{ connection::ConnectedPoint, muxing::StreamMuxerBox, @@ -143,20 +127,18 @@ use libp2p_core::{ Multiaddr, Transport, }; use libp2p_identity::PeerId; - +#[cfg(feature = "macros")] +pub use libp2p_swarm_derive::NetworkBehaviour; +pub use listen_opts::ListenOpts; use smallvec::SmallVec; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; -use std::time::Duration; -use std::{ - error, fmt, io, - pin::Pin, - task::{Context, Poll}, -}; +pub use stream::Stream; +pub use stream_protocol::{InvalidProtocol, StreamProtocol}; use tracing::Instrument; #[doc(hidden)] pub use translation::_address_translation; +use crate::{behaviour::ExternalAddrConfirmed, handler::UpgradeInfoSend}; + /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. type TBehaviourOutEvent = ::ToSwarm; @@ -219,8 +201,8 @@ pub enum SwarmEvent { /// Identifier of the connection. connection_id: ConnectionId, /// Local connection address. - /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) - /// event. + /// This address has been earlier reported with a + /// [`NewListenAddr`](SwarmEvent::NewListenAddr) event. local_addr: Multiaddr, /// Address used to send back data to the remote. send_back_addr: Multiaddr, @@ -233,8 +215,8 @@ pub enum SwarmEvent { /// Identifier of the connection. connection_id: ConnectionId, /// Local connection address. - /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) - /// event. + /// This address has been earlier reported with a + /// [`NewListenAddr`](SwarmEvent::NewListenAddr) event. local_addr: Multiaddr, /// Address used to send back data to the remote. 
send_back_addr: Multiaddr, @@ -308,7 +290,8 @@ pub enum SwarmEvent { } impl SwarmEvent { - /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail. + /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` + /// variant, otherwise fail. #[allow(clippy::result_large_err)] pub fn try_into_behaviour_event(self) -> Result { match self { @@ -610,7 +593,8 @@ where /// Add a **confirmed** external address for the local node. /// /// This function should only be called with addresses that are guaranteed to be reachable. - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrConfirmed`]. pub fn add_external_address(&mut self, a: Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { @@ -621,7 +605,8 @@ where /// Remove an external address for the local node. /// - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrExpired`]. pub fn remove_external_address(&mut self, addr: &Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr })); @@ -630,7 +615,8 @@ where /// Add a new external address of a remote peer. /// - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrOfPeer`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::NewExternalAddrOfPeer`]. pub fn add_peer_address(&mut self, peer_id: PeerId, addr: Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { @@ -643,8 +629,9 @@ where /// /// Returns `Ok(())` if there was one or more established connections to the peer. 
/// - /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll [`ConnectionHandler::poll_close`] to completion. - /// Use this function if you want to close a connection _despite_ it still being in use by one or more handlers. + /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll + /// [`ConnectionHandler::poll_close`] to completion. Use this function if you want to close + /// a connection _despite_ it still being in use by one or more handlers. #[allow(clippy::result_unit_err)] pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> { let was_connected = self.pool.is_connected(peer_id); @@ -660,7 +647,8 @@ where /// Attempt to gracefully close a connection. /// /// Closing a connection is asynchronous but this function will return immediately. - /// A [`SwarmEvent::ConnectionClosed`] event will be emitted once the connection is actually closed. + /// A [`SwarmEvent::ConnectionClosed`] event will be emitted + /// once the connection is actually closed. /// /// # Returns /// @@ -1204,15 +1192,16 @@ where // // (1) is polled before (2) to prioritize local work over work coming from a remote. // - // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections. + // (2) is polled before (3) to prioritize existing connections + // over upgrading new incoming connections. loop { if let Some(swarm_event) = this.pending_swarm_events.pop_front() { return Poll::Ready(swarm_event); } match this.pending_handler_event.take() { - // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous - // iteration to the connection handler(s). + // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the + // previous iteration to the connection handler(s). 
Some((peer_id, handler, event)) => match handler { PendingNotifyHandler::One(conn_id) => { match this.pool.get_established(conn_id) { @@ -1518,7 +1507,8 @@ impl Config { pub enum DialError { /// The peer identity obtained on the connection matches the local peer. LocalPeerId { endpoint: ConnectedPoint }, - /// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] and [`DialOpts`]. + /// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] + /// and [`DialOpts`]. NoAddresses, /// The provided [`dial_opts::PeerCondition`] evaluated to false and thus /// the dial was aborted. @@ -1688,7 +1678,8 @@ impl error::Error for ListenError { /// A connection was denied. /// -/// To figure out which [`NetworkBehaviour`] denied the connection, use [`ConnectionDenied::downcast`]. +/// To figure out which [`NetworkBehaviour`] denied the connection, use +/// [`ConnectionDenied::downcast`]. #[derive(Debug)] pub struct ConnectionDenied { inner: Box, @@ -1759,18 +1750,21 @@ impl NetworkInfo { #[cfg(test)] mod tests { - use super::*; - use crate::test::{CallTraceBehaviour, MockBehaviour}; - use libp2p_core::multiaddr::multiaddr; - use libp2p_core::transport::memory::MemoryTransportError; - use libp2p_core::transport::{PortUse, TransportEvent}; - use libp2p_core::Endpoint; - use libp2p_core::{multiaddr, transport, upgrade}; + use libp2p_core::{ + multiaddr, + multiaddr::multiaddr, + transport, + transport::{memory::MemoryTransportError, PortUse, TransportEvent}, + upgrade, Endpoint, + }; use libp2p_identity as identity; use libp2p_plaintext as plaintext; use libp2p_yamux as yamux; use quickcheck::*; + use super::*; + use crate::test::{CallTraceBehaviour, MockBehaviour}; + // Test execution state. // Connection => Disconnecting => Connecting. 
enum State { @@ -1842,8 +1836,9 @@ mod tests { /// Establishes multiple connections between two peers, /// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`]. /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of [`FromSwarm::ConnectionEstablished`] + /// / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_swarm_disconnect() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -1905,8 +1900,9 @@ mod tests { /// after which one peer disconnects the other /// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`]. /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of [`FromSwarm::ConnectionEstablished`] + /// / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_behaviour_disconnect_all() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -1972,8 +1968,9 @@ mod tests { /// after which one peer closes a single connection /// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`]. 
/// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of [`FromSwarm::ConnectionEstablished`] + /// / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_behaviour_disconnect_one() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -2175,8 +2172,10 @@ mod tests { // Dialing the same address we're listening should result in three events: // // - The incoming connection notification (before we know the incoming peer ID). - // - The connection error for the dialing endpoint (once we've determined that it's our own ID). - // - The connection error for the listening endpoint (once we've determined that it's our own ID). + // - The connection error for the dialing endpoint (once we've determined that it's our own + // ID). + // - The connection error for the listening endpoint (once we've determined that it's our + // own ID). // // The last two can happen in any order. @@ -2190,8 +2189,9 @@ mod tests { }) .await; - swarm.listened_addrs.clear(); // This is a hack to actually execute the dial to ourselves which would otherwise be filtered. - + // This is a hack to actually execute the dial + // to ourselves which would otherwise be filtered. + swarm.listened_addrs.clear(); swarm.dial(local_address.clone()).unwrap(); let mut got_dial_err = false; @@ -2342,7 +2342,8 @@ mod tests { let string = format!("{error}"); - // Unfortunately, we have some "empty" errors that lead to multiple colons without text but that is the best we can do. + // Unfortunately, we have some "empty" errors + // that lead to multiple colons without text but that is the best we can do. 
assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : No listener on the given port.)]", string) } } diff --git a/swarm/src/listen_opts.rs b/swarm/src/listen_opts.rs index 9c4d69a6fa0..1fcb33cd348 100644 --- a/swarm/src/listen_opts.rs +++ b/swarm/src/listen_opts.rs @@ -1,6 +1,7 @@ -use crate::ListenerId; use libp2p_core::Multiaddr; +use crate::ListenerId; + #[derive(Debug)] pub struct ListenOpts { id: ListenerId, diff --git a/swarm/src/stream.rs b/swarm/src/stream.rs index 871352f3c6a..d3936cb557a 100644 --- a/swarm/src/stream.rs +++ b/swarm/src/stream.rs @@ -1,6 +1,3 @@ -use futures::{AsyncRead, AsyncWrite}; -use libp2p_core::muxing::SubstreamBox; -use libp2p_core::Negotiated; use std::{ io::{IoSlice, IoSliceMut}, pin::Pin, @@ -8,6 +5,9 @@ use std::{ task::{Context, Poll}, }; +use futures::{AsyncRead, AsyncWrite}; +use libp2p_core::{muxing::SubstreamBox, Negotiated}; + /// Counter for the number of active streams on a connection. #[derive(Debug, Clone)] pub(crate) struct ActiveStreamCounter(Arc<()>); diff --git a/swarm/src/stream_protocol.rs b/swarm/src/stream_protocol.rs index f746429a3d7..abf8068238e 100644 --- a/swarm/src/stream_protocol.rs +++ b/swarm/src/stream_protocol.rs @@ -1,7 +1,10 @@ +use std::{ + fmt, + hash::{Hash, Hasher}, + sync::Arc, +}; + use either::Either; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::sync::Arc; /// Identifies a protocol for a stream. /// @@ -39,7 +42,9 @@ impl StreamProtocol { } Ok(StreamProtocol { - inner: Either::Right(Arc::from(protocol)), // FIXME: Can we somehow reuse the allocation from the owned string? + // FIXME: Can we somehow reuse the + // allocation from the owned string? + inner: Either::Right(Arc::from(protocol)), }) } } diff --git a/swarm/src/test.rs b/swarm/src/test.rs index a6cb7c4d4eb..59aadf7e3c7 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -18,19 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::{ - ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, ExternalAddrExpired, - FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, NewListenAddr, NewListener, +use std::{ + collections::HashMap, + task::{Context, Poll}, }; + +use libp2p_core::{ + multiaddr::Multiaddr, + transport::{ListenerId, PortUse}, + ConnectedPoint, Endpoint, +}; +use libp2p_identity::PeerId; + use crate::{ + behaviour::{ + ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, + ExternalAddrExpired, FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, + NewListenAddr, NewListener, + }, ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr::Multiaddr, transport::ListenerId, ConnectedPoint, Endpoint}; -use libp2p_identity::PeerId; -use std::collections::HashMap; -use std::task::{Context, Poll}; /// A `MockBehaviour` is a `NetworkBehaviour` that allows for /// the instrumentation of return values, without keeping @@ -42,7 +50,8 @@ where TOutEvent: Send + 'static, { /// The prototype protocols handler that is cloned for every - /// invocation of [`NetworkBehaviour::handle_established_inbound_connection`] and [`NetworkBehaviour::handle_established_outbound_connection`] + /// invocation of [`NetworkBehaviour::handle_established_inbound_connection`] and + /// [`NetworkBehaviour::handle_established_outbound_connection`] pub(crate) handler_proto: THandler, /// The addresses to return from [`NetworkBehaviour::handle_established_outbound_connection`]. pub(crate) addresses: HashMap>, @@ -266,8 +275,8 @@ where }) .take(other_established); - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this + // We are informed that there are `other_established` additional connections. 
Ensure that + // the number of previous connections is consistent with this if let Some(&prev) = other_peer_connections.next() { if prev < other_established { assert_eq!( @@ -319,8 +328,8 @@ where }) .take(remaining_established); - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this + // We are informed that there are `other_established` additional connections. Ensure that + // the number of previous connections is consistent with this if let Some(&prev) = other_closed_connections.next() { if prev < remaining_established { assert_eq!( diff --git a/swarm/src/upgrade.rs b/swarm/src/upgrade.rs index f6c6648a373..ba40e5606bb 100644 --- a/swarm/src/upgrade.rs +++ b/swarm/src/upgrade.rs @@ -18,11 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::Stream; - use futures::prelude::*; use libp2p_core::upgrade; +use crate::Stream; + /// Implemented automatically on all types that implement [`UpgradeInfo`](upgrade::UpgradeInfo) /// and `Send + 'static`. /// @@ -65,7 +65,8 @@ pub trait OutboundUpgradeSend: UpgradeInfoSend { /// Equivalent to [`OutboundUpgrade::Future`](upgrade::OutboundUpgrade::Future). type Future: Future> + Send + 'static; - /// Equivalent to [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). + /// Equivalent to + /// [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). fn upgrade_outbound(self, socket: Stream, info: Self::Info) -> Self::Future; } @@ -126,7 +127,7 @@ where /// [`InboundUpgrade`](upgrade::InboundUpgrade). /// /// > **Note**: This struct is mostly an implementation detail of the library and normally -/// > doesn't need to be used directly. +/// > doesn't need to be used directly. 
pub struct SendWrapper(pub T); impl upgrade::UpgradeInfo for SendWrapper { diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs index 1d1a25eb84b..bc71216870a 100644 --- a/swarm/tests/connection_close.rs +++ b/swarm/tests/connection_close.rs @@ -1,16 +1,16 @@ -use libp2p_core::transport::PortUse; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::{Endpoint, Multiaddr}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; + +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::ConnectionEvent; use libp2p_swarm::{ - ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, FromSwarm, - NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + handler::ConnectionEvent, ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, + ConnectionId, FromSwarm, NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p_swarm_test::SwarmExt; -use std::convert::Infallible; -use std::task::{Context, Poll}; #[async_std::test] async fn sends_remaining_events_to_behaviour_on_connection_close() { diff --git a/swarm/tests/listener.rs b/swarm/tests/listener.rs index 74b23cf3f7f..01d5784cfa5 100644 --- a/swarm/tests/listener.rs +++ b/swarm/tests/listener.rs @@ -15,7 +15,6 @@ use libp2p_swarm::{ ListenerClosed, ListenerError, NetworkBehaviour, NewListenAddr, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; - use libp2p_swarm_test::SwarmExt; #[async_std::test] diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index 334d1b9d304..a1c8bc5ff73 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::fmt::Debug; + use futures::StreamExt; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identify as identify; @@ -26,19 +28,18 @@ use libp2p_swarm::{ behaviour::FromSwarm, dummy, ConnectionDenied, NetworkBehaviour, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, }; -use std::fmt::Debug; /// Small utility to check that a type implements `NetworkBehaviour`. #[allow(dead_code)] fn require_net_behaviour() {} // TODO: doesn't compile -/*#[test] -fn empty() { - #[allow(dead_code)] - #[derive(NetworkBehaviour)] - struct Foo {} -}*/ +// #[test] +// fn empty() { +// #[allow(dead_code)] +// #[derive(NetworkBehaviour)] +// struct Foo {} +// } #[test] fn one_field() { @@ -537,10 +538,10 @@ fn multiple_behaviour_attributes() { #[test] fn custom_out_event_no_type_parameters() { + use std::task::{Context, Poll}; + use libp2p_identity::PeerId; use libp2p_swarm::{ConnectionId, ToSwarm}; - use std::task::Context; - use std::task::Poll; pub(crate) struct TemplatedBehaviour { _data: T, diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 7d92cc8ecfc..d47f1e464db 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -40,26 +40,26 @@ //! On Unix systems, if no custom configuration is given, [trust-dns-resolver] //! will try to parse the `/etc/resolv.conf` file. This approach comes with a //! few caveats to be aware of: -//! 1) This fails (panics even!) if `/etc/resolv.conf` does not exist. This is -//! the case on all versions of Android. -//! 2) DNS configuration is only evaluated during startup. Runtime changes are -//! thus ignored. -//! 3) DNS resolution is obviously done in process and consequently not using -//! any system APIs (like libc's `gethostbyname`). Again this is -//! problematic on platforms like Android, where there's a lot of -//! complexity hidden behind the system APIs. +//! 1) This fails (panics even!) if `/etc/resolv.conf` does not exist. This is the case on all +//! 
versions of Android. +//! 2) DNS configuration is only evaluated during startup. Runtime changes are thus ignored. +//! 3) DNS resolution is obviously done in process and consequently not using any system APIs +//! (like libc's `gethostbyname`). Again this is problematic on platforms like Android, where +//! there's a lot of complexity hidden behind the system APIs. //! //! If the implementation requires different characteristics, one should //! consider providing their own implementation of [`Transport`] or use //! platform specific APIs to extract the host's DNS configuration (if possible) //! and provide a custom [`ResolverConfig`]. //! -//![trust-dns-resolver]: https://docs.rs/trust-dns-resolver/latest/trust_dns_resolver/#dns-over-tls-and-dns-over-https +//! [trust-dns-resolver]: https://docs.rs/trust-dns-resolver/latest/trust_dns_resolver/#dns-over-tls-and-dns-over-https #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #[cfg(feature = "async-std")] pub mod async_std { + use std::{io, sync::Arc}; + use async_std_resolver::AsyncStdResolver; use futures::FutureExt; use hickory_resolver::{ @@ -67,7 +67,6 @@ pub mod async_std { system_conf, }; use parking_lot::Mutex; - use std::{io, sync::Arc}; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `async-std` for all async I/O. @@ -116,9 +115,10 @@ pub mod async_std { #[cfg(feature = "tokio")] pub mod tokio { + use std::sync::Arc; + use hickory_resolver::{system_conf, TokioAsyncResolver}; use parking_lot::Mutex; - use std::sync::Arc; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `tokio` for all async I/O. 
@@ -146,18 +146,9 @@ pub mod tokio { } } -use async_trait::async_trait; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{ - multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, ListenerId, TransportError, TransportEvent}, -}; -use parking_lot::Mutex; -use smallvec::SmallVec; -use std::io; -use std::net::{Ipv4Addr, Ipv6Addr}; use std::{ - error, fmt, iter, + error, fmt, io, iter, + net::{Ipv4Addr, Ipv6Addr}, ops::DerefMut, pin::Pin, str, @@ -165,12 +156,24 @@ use std::{ task::{Context, Poll}, }; -pub use hickory_resolver::config::{ResolverConfig, ResolverOpts}; -pub use hickory_resolver::error::{ResolveError, ResolveErrorKind}; -use hickory_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; -use hickory_resolver::lookup_ip::LookupIp; -use hickory_resolver::name_server::ConnectionProvider; -use hickory_resolver::AsyncResolver; +use async_trait::async_trait; +use futures::{future::BoxFuture, prelude::*}; +pub use hickory_resolver::{ + config::{ResolverConfig, ResolverOpts}, + error::{ResolveError, ResolveErrorKind}, +}; +use hickory_resolver::{ + lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}, + lookup_ip::LookupIp, + name_server::ConnectionProvider, + AsyncResolver, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; +use parking_lot::Mutex; +use smallvec::SmallVec; /// The prefix for `dnsaddr` protocol TXT record lookups. const DNSADDR_PREFIX: &str = "_dnsaddr."; @@ -191,7 +194,8 @@ const MAX_DNS_LOOKUPS: usize = 32; const MAX_TXT_RECORDS: usize = 16; /// A [`Transport`] for performing DNS lookups when dialing `Multiaddr`esses. -/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or [`async_std::Transport`] instead. +/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or +/// [`async_std::Transport`] instead. #[derive(Debug)] pub struct Transport { /// The underlying transport. 
@@ -613,7 +617,6 @@ where #[cfg(all(test, any(feature = "tokio", feature = "async-std")))] mod tests { - use super::*; use futures::future::BoxFuture; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, @@ -622,6 +625,8 @@ mod tests { }; use libp2p_identity::PeerId; + use super::*; + #[test] fn basic_resolve() { let _ = tracing_subscriber::fmt() diff --git a/transports/noise/src/io.rs b/transports/noise/src/io.rs index 9cd4cfed52a..84aad79d76b 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -22,11 +22,6 @@ mod framed; pub(crate) mod handshake; -use asynchronous_codec::Framed; -use bytes::Bytes; -use framed::{Codec, MAX_FRAME_LEN}; -use futures::prelude::*; -use futures::ready; use std::{ cmp::min, fmt, io, @@ -34,6 +29,11 @@ use std::{ task::{Context, Poll}, }; +use asynchronous_codec::Framed; +use bytes::Bytes; +use framed::{Codec, MAX_FRAME_LEN}; +use futures::{prelude::*, ready}; + /// A noise session to a remote. /// /// `T` is the type of the underlying I/O resource. diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index 17254efb0a9..5aaad6f55e7 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -23,13 +23,14 @@ //! Alongside a [`asynchronous_codec::Framed`] this provides a [Sink](futures::Sink) //! and [Stream](futures::Stream) for length-delimited Noise protocol messages. -use super::handshake::proto; -use crate::{protocol::PublicKey, Error}; +use std::{io, mem::size_of}; + use asynchronous_codec::{Decoder, Encoder}; use bytes::{Buf, Bytes, BytesMut}; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; -use std::io; -use std::mem::size_of; + +use super::handshake::proto; +use crate::{protocol::PublicKey, Error}; /// Max. size of a noise message. const MAX_NOISE_MSG_LEN: usize = 65535; @@ -170,7 +171,8 @@ impl Decoder for Codec { /// Encrypts the given cleartext to `dst`. 
/// -/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across different session states of the noise protocol. +/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across +/// different session states of the noise protocol. fn encrypt( cleartext: &[u8], dst: &mut BytesMut, @@ -191,8 +193,9 @@ fn encrypt( /// Encrypts the given ciphertext. /// -/// This is a standalone function so we can use it across different session states of the noise protocol. -/// In case `ciphertext` does not contain enough bytes to decrypt the entire frame, `Ok(None)` is returned. +/// This is a standalone function so we can use it across different session states of the noise +/// protocol. In case `ciphertext` does not contain enough bytes to decrypt the entire frame, +/// `Ok(None)` is returned. fn decrypt( ciphertext: &mut BytesMut, decrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index d8dfb9b802e..d4727b91420 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -23,21 +23,23 @@ pub(super) mod proto { #![allow(unreachable_pub)] include!("../generated/mod.rs"); - pub use self::payload::proto::NoiseExtensions; - pub use self::payload::proto::NoiseHandshakePayload; + pub use self::payload::proto::{NoiseExtensions, NoiseHandshakePayload}; } -use super::framed::Codec; -use crate::io::Output; -use crate::protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}; -use crate::Error; +use std::{collections::HashSet, io, mem}; + use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_identity as identity; use multihash::Multihash; use quick_protobuf::MessageWrite; -use std::collections::HashSet; -use std::{io, mem}; + +use super::framed::Codec; +use crate::{ + io::Output, + protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}, + Error, +}; 
////////////////////////////////////////////////////////////////////////////// // Internal @@ -142,12 +144,16 @@ where } } -/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the [`snow::TransportState`]. +/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the +/// [`snow::TransportState`]. /// -/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its [`FramedParts`](asynchronous_codec::FramedParts). -/// However, we need to retain the original [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write buffers. +/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its +/// [`FramedParts`](asynchronous_codec::FramedParts). However, we need to retain the original +/// [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write +/// buffers. /// -/// Those are likely **not** empty because the remote may directly write to the stream again after the noise handshake finishes. +/// Those are likely **not** empty because the remote may directly write to the stream again after +/// the noise handshake finishes. fn map_into_transport( framed: Framed>, ) -> Result<(PublicKey, Framed>), Error> diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index 2557e76e276..e05556744fe 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -21,14 +21,14 @@ //! [Noise protocol framework][noise] support for libp2p. //! //! > **Note**: This crate is still experimental and subject to major breaking changes -//! > both on the API and the wire protocol. +//! > both on the API and the wire protocol. //! //! This crate provides `libp2p_core::InboundUpgrade` and `libp2p_core::OutboundUpgrade` //! implementations for various noise handshake patterns (currently `IK`, `IX`, and `XX`) //! over a particular choice of Diffie–Hellman key agreement (currently only X25519). //! //! 
> **Note**: Only the `XX` handshake pattern is currently guaranteed to provide -//! > interoperability with other libp2p implementations. +//! > interoperability with other libp2p implementations. //! //! All upgrades produce as output a pair, consisting of the remote's static public key //! and a `NoiseOutput` which represents the established cryptographic session with the @@ -39,14 +39,16 @@ //! Example: //! //! ``` -//! use libp2p_core::{Transport, upgrade, transport::MemoryTransport}; -//! use libp2p_noise as noise; +//! use libp2p_core::{transport::MemoryTransport, upgrade, Transport}; //! use libp2p_identity as identity; +//! use libp2p_noise as noise; //! //! # fn main() { //! let id_keys = identity::Keypair::generate_ed25519(); //! let noise = noise::Config::new(&id_keys).unwrap(); -//! let builder = MemoryTransport::default().upgrade(upgrade::Version::V1).authenticate(noise); +//! let builder = MemoryTransport::default() +//! .upgrade(upgrade::Version::V1) +//! .authenticate(noise); //! // let transport = builder.multiplex(...); //! # } //! ``` @@ -58,22 +60,25 @@ mod io; mod protocol; -pub use io::Output; +use std::{collections::HashSet, fmt::Write, pin::Pin}; -use crate::handshake::State; -use crate::io::handshake; -use crate::protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}; use futures::prelude::*; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +pub use io::Output; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use multiaddr::Protocol; use multihash::Multihash; use snow::params::NoiseParams; -use std::collections::HashSet; -use std::fmt::Write; -use std::pin::Pin; + +use crate::{ + handshake::State, + io::handshake, + protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}, +}; /// The configuration for the noise handshake. 
#[derive(Clone)] diff --git a/transports/noise/src/protocol.rs b/transports/noise/src/protocol.rs index 29d0c81e2e4..ca47ea0dfcd 100644 --- a/transports/noise/src/protocol.rs +++ b/transports/noise/src/protocol.rs @@ -20,7 +20,6 @@ //! Components of a Noise protocol. -use crate::Error; use libp2p_identity as identity; use once_cell::sync::Lazy; use rand::{Rng as _, SeedableRng}; @@ -28,6 +27,8 @@ use snow::params::NoiseParams; use x25519_dalek::{x25519, X25519_BASEPOINT_BYTES}; use zeroize::Zeroize; +use crate::Error; + /// Prefix of static key signatures for domain separation. pub(crate) const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:"; diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index 62b5d41d6b9..abc5a038f93 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::io; + use futures::prelude::*; -use libp2p_core::transport::{MemoryTransport, Transport}; -use libp2p_core::upgrade; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::{ + transport::{MemoryTransport, Transport}, + upgrade, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, +}; use libp2p_identity as identity; use libp2p_noise as noise; use quickcheck::*; -use std::io; use tracing_subscriber::EnvFilter; #[allow(dead_code)] diff --git a/transports/noise/tests/webtransport_certhashes.rs b/transports/noise/tests/webtransport_certhashes.rs index b3c924f8188..7fa28da0ebe 100644 --- a/transports/noise/tests/webtransport_certhashes.rs +++ b/transports/noise/tests/webtransport_certhashes.rs @@ -1,8 +1,9 @@ +use std::collections::HashSet; + use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; use multihash::Multihash; -use std::collections::HashSet; const 
SHA_256_MH: u64 = 0x12; diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index 7480874a85e..2d352562528 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -18,9 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::error; -use std::fmt; -use std::io::Error as IoError; +use std::{error, fmt, io::Error as IoError}; #[derive(Debug)] pub enum Error { diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index ddd5f7f8a9b..38a56b84862 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -18,14 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::error::{DecodeError, Error}; -use crate::proto::Exchange; -use crate::Config; +use std::io::{Error as IoError, ErrorKind as IoErrorKind}; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use std::io::{Error as IoError, ErrorKind as IoErrorKind}; + +use crate::{ + error::{DecodeError, Error}, + proto::Exchange, + Config, +}; pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> where diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index 4a322d63fab..f841a859a62 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -22,22 +22,23 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::error::Error; - -use bytes::Bytes; -use futures::future::BoxFuture; -use futures::prelude::*; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; -use libp2p_identity as identity; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; use std::{ io, iter, pin::Pin, task::{Context, Poll}, }; +use 
bytes::Bytes; +use futures::{future::BoxFuture, prelude::*}; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; +use libp2p_identity as identity; +use libp2p_identity::{PeerId, PublicKey}; + +use crate::error::Error; + mod error; mod handshake; mod proto { diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index 06f932fbe71..8b302089a1d 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt, pin::Pin}; + use futures::{ io::{self, AsyncWrite}, ready, @@ -25,7 +27,6 @@ use futures::{ }; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; -use std::{fmt, pin::Pin}; /// A writer that encrypts and forwards to an inner writer #[pin_project] @@ -74,7 +75,8 @@ fn poll_flush_buf( // we made progress, so try again written += n; } else { - // we got Ok but got no progress whatsoever, so bail out so we don't spin writing 0 bytes. + // we got Ok but got no progress whatsoever, + // so bail out so we don't spin writing 0 bytes. ret = Poll::Ready(Err(io::Error::new( io::ErrorKind::WriteZero, "Failed to write buffered data", diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index 083ffff36a3..b27f9777c47 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -19,7 +19,6 @@ // DEALINGS IN THE SOFTWARE. //! Implementation of the [pnet](https://github.com/libp2p/specs/blob/master/pnet/Private-Networks-PSK-V1.md) protocol. -//! //| The `pnet` protocol implements *Pre-shared Key Based Private Networks in libp2p*. //! Libp2p nodes configured with a pre-shared key can only communicate with other nodes with //! the same key. 
@@ -27,15 +26,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod crypt_writer; -use crypt_writer::CryptWriter; -use futures::prelude::*; -use pin_project::pin_project; -use rand::RngCore; -use salsa20::{ - cipher::{KeyIvInit, StreamCipher}, - Salsa20, XSalsa20, -}; -use sha3::{digest::ExtendableOutput, Shake128}; use std::{ error, fmt::{self, Write}, @@ -47,6 +37,16 @@ use std::{ task::{Context, Poll}, }; +use crypt_writer::CryptWriter; +use futures::prelude::*; +use pin_project::pin_project; +use rand::RngCore; +use salsa20::{ + cipher::{KeyIvInit, StreamCipher}, + Salsa20, XSalsa20, +}; +use sha3::{digest::ExtendableOutput, Shake128}; + const KEY_SIZE: usize = 32; const NONCE_SIZE: usize = 24; const WRITE_BUFFER_SIZE: usize = 1024; @@ -319,9 +319,10 @@ impl fmt::Display for PnetError { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + impl Arbitrary for PreSharedKey { fn arbitrary(g: &mut Gen) -> PreSharedKey { let key = core::array::from_fn(|_| u8::arbitrary(g)); diff --git a/transports/pnet/tests/smoke.rs b/transports/pnet/tests/smoke.rs index 79ffaeab447..ae4fcc4b3fc 100644 --- a/transports/pnet/tests/smoke.rs +++ b/transports/pnet/tests/smoke.rs @@ -1,10 +1,9 @@ use std::time::Duration; use futures::{future, AsyncRead, AsyncWrite, StreamExt}; -use libp2p_core::transport::MemoryTransport; -use libp2p_core::upgrade::Version; -use libp2p_core::Transport; -use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_core::{ + multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, +}; use libp2p_pnet::{PnetConfig, PreSharedKey}; use libp2p_swarm::{dummy, Config, NetworkBehaviour, Swarm, SwarmEvent}; diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs index 2456ed3e36f..c623632ddc6 100644 --- a/transports/quic/src/config.rs +++ b/transports/quic/src/config.rs @@ -18,11 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE 
SOFTWARE. +use std::{sync::Arc, time::Duration}; + use quinn::{ crypto::rustls::{QuicClientConfig, QuicServerConfig}, MtuDiscoveryConfig, VarInt, }; -use std::{sync::Arc, time::Duration}; /// Config for the transport. #[derive(Clone)] diff --git a/transports/quic/src/connection.rs b/transports/quic/src/connection.rs index 783258a0130..a7375a1ca6d 100644 --- a/transports/quic/src/connection.rs +++ b/transports/quic/src/connection.rs @@ -21,18 +21,18 @@ mod connecting; mod stream; -pub use connecting::Connecting; -pub use stream::Stream; - -use crate::{ConnectionError, Error}; - -use futures::{future::BoxFuture, FutureExt}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use std::{ pin::Pin, task::{Context, Poll}, }; +pub use connecting::Connecting; +use futures::{future::BoxFuture, FutureExt}; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +pub use stream::Stream; + +use crate::{ConnectionError, Error}; + /// State for a single opened QUIC connection. pub struct Connection { /// Underlying connection. diff --git a/transports/quic/src/connection/connecting.rs b/transports/quic/src/connection/connecting.rs index f6e397b4d1e..0ce7f9041db 100644 --- a/transports/quic/src/connection/connecting.rs +++ b/transports/quic/src/connection/connecting.rs @@ -20,7 +20,11 @@ //! Future that drives a QUIC connection until is has performed its TLS handshake. -use crate::{Connection, ConnectionError, Error}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use futures::{ future::{select, Either, FutureExt, Select}, @@ -29,11 +33,8 @@ use futures::{ use futures_timer::Delay; use libp2p_identity::PeerId; use quinn::rustls::pki_types::CertificateDer; -use std::{ - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; + +use crate::{Connection, ConnectionError, Error}; /// A QUIC connection currently being negotiated. 
#[derive(Debug)] diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index a38d123a6a4..6f1961081d2 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -1,15 +1,14 @@ -use crate::{provider::Provider, Error}; - -use futures::future::Either; - -use rand::{distributions, Rng}; - -use std::convert::Infallible; use std::{ + convert::Infallible, net::{SocketAddr, UdpSocket}, time::Duration, }; +use futures::future::Either; +use rand::{distributions, Rng}; + +use crate::{provider::Provider, Error}; + pub(crate) async fn hole_puncher( socket: UdpSocket, remote_addr: SocketAddr, diff --git a/transports/quic/src/lib.rs b/transports/quic/src/lib.rs index 7ae649b6914..9d97e6c4319 100644 --- a/transports/quic/src/lib.rs +++ b/transports/quic/src/lib.rs @@ -31,16 +31,20 @@ //! # #[cfg(feature = "async-std")] //! # fn main() -> std::io::Result<()> { //! # +//! use libp2p_core::{transport::ListenerId, Multiaddr, Transport}; //! use libp2p_quic as quic; -//! use libp2p_core::{Multiaddr, Transport, transport::ListenerId}; //! //! let keypair = libp2p_identity::Keypair::generate_ed25519(); //! let quic_config = quic::Config::new(&keypair); //! //! let mut quic_transport = quic::async_std::Transport::new(quic_config); //! -//! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1".parse().expect("address should be valid"); -//! quic_transport.listen_on(ListenerId::next(), addr).expect("listen error."); +//! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1" +//! .parse() +//! .expect("address should be valid"); +//! quic_transport +//! .listen_on(ListenerId::next(), addr) +//! .expect("listen error."); //! # //! # Ok(()) //! # } @@ -53,7 +57,6 @@ //! Note that QUIC provides transport, security, and multiplexing in a single protocol. Therefore, //! QUIC connections do not need to be upgraded. You will get a compile-time error if you try. //! Instead, you must pass all needed configuration into the constructor. -//! 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -67,7 +70,6 @@ use std::net::SocketAddr; pub use config::Config; pub use connection::{Connecting, Connection, Stream}; - #[cfg(feature = "async-std")] pub use provider::async_std; #[cfg(feature = "tokio")] diff --git a/transports/quic/src/provider.rs b/transports/quic/src/provider.rs index 6f1122ee55f..fdf88b460e8 100644 --- a/transports/quic/src/provider.rs +++ b/transports/quic/src/provider.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::future::BoxFuture; -use if_watch::IfEvent; use std::{ io, net::{SocketAddr, UdpSocket}, @@ -27,6 +25,9 @@ use std::{ time::Duration, }; +use futures::future::BoxFuture; +use if_watch::IfEvent; + #[cfg(feature = "async-std")] pub mod async_std; #[cfg(feature = "tokio")] @@ -59,7 +60,8 @@ pub trait Provider: Unpin + Send + Sized + 'static { /// Sleep for specified amount of time. fn sleep(duration: Duration) -> BoxFuture<'static, ()>; - /// Sends data on the socket to the given address. On success, returns the number of bytes written. + /// Sends data on the socket to the given address. On success, + /// returns the number of bytes written. fn send_to<'a>( udp_socket: &'a UdpSocket, buf: &'a [u8], diff --git a/transports/quic/src/provider/async_std.rs b/transports/quic/src/provider/async_std.rs index a110058108c..b5c3ac917dc 100644 --- a/transports/quic/src/provider/async_std.rs +++ b/transports/quic/src/provider/async_std.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{future::BoxFuture, FutureExt}; use std::{ io, net::UdpSocket, @@ -26,6 +25,8 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, FutureExt}; + use crate::GenTransport; /// Transport with [`async-std`] runtime. 
diff --git a/transports/quic/src/provider/tokio.rs b/transports/quic/src/provider/tokio.rs index 9cb148d6ef2..83753faac01 100644 --- a/transports/quic/src/provider/tokio.rs +++ b/transports/quic/src/provider/tokio.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{future::BoxFuture, FutureExt}; use std::{ io, net::{SocketAddr, UdpSocket}, @@ -26,6 +25,8 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, FutureExt}; + use crate::GenTransport; /// Transport with [`tokio`] runtime. diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 057d0f978d7..63a65ce99cc 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -18,38 +18,41 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::config::{Config, QuinnConfig}; -use crate::hole_punching::hole_puncher; -use crate::provider::Provider; -use crate::{ConnectError, Connecting, Connection, Error}; - -use futures::channel::oneshot; -use futures::future::{BoxFuture, Either}; -use futures::ready; -use futures::stream::StreamExt; -use futures::{prelude::*, stream::SelectAll}; +use std::{ + collections::{ + hash_map::{DefaultHasher, Entry}, + HashMap, HashSet, + }, + fmt, + hash::{Hash, Hasher}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, + pin::Pin, + task::{Context, Poll, Waker}, + time::Duration, +}; +use futures::{ + channel::oneshot, + future::{BoxFuture, Either}, + prelude::*, + ready, + stream::{SelectAll, StreamExt}, +}; use if_watch::IfEvent; - -use libp2p_core::transport::{DialOpts, PortUse}; -use libp2p_core::Endpoint; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{ListenerId, TransportError, TransportEvent}, - Transport, + transport::{DialOpts, ListenerId, PortUse, TransportError, TransportEvent}, + Endpoint, Transport, }; use libp2p_identity::PeerId; 
use socket2::{Domain, Socket, Type}; -use std::collections::hash_map::{DefaultHasher, Entry}; -use std::collections::{HashMap, HashSet}; -use std::hash::{Hash, Hasher}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket}; -use std::time::Duration; -use std::{fmt, io}; -use std::{ - net::SocketAddr, - pin::Pin, - task::{Context, Poll, Waker}, + +use crate::{ + config::{Config, QuinnConfig}, + hole_punching::hole_puncher, + provider::Provider, + ConnectError, Connecting, Connection, Error, }; /// Implementation of the [`Transport`] trait for QUIC. @@ -745,9 +748,10 @@ fn socketaddr_to_multiaddr(socket_addr: &SocketAddr, version: ProtocolVersion) - #[cfg(test)] #[cfg(any(feature = "async-std", feature = "tokio"))] mod tests { - use super::*; use futures::future::poll_fn; + use super::*; + #[test] fn multiaddr_to_udp_conversion() { assert!(multiaddr_to_socketaddr( diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index 6a760f9997c..5fbef84649e 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -1,16 +1,31 @@ #![cfg(any(feature = "async-std", feature = "tokio"))] -use futures::channel::{mpsc, oneshot}; -use futures::future::BoxFuture; -use futures::future::{poll_fn, Either}; -use futures::stream::StreamExt; -use futures::{future, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; +use std::{ + future::Future, + io, + num::NonZeroU8, + pin::Pin, + sync::{Arc, Mutex}, + task::Poll, + time::Duration, +}; + +use futures::{ + channel::{mpsc, oneshot}, + future, + future::{poll_fn, BoxFuture, Either}, + stream::StreamExt, + AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt, +}; use futures_timer::Delay; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt, SubstreamBox}; -use libp2p_core::transport::{Boxed, DialOpts, OrTransport, PortUse, TransportEvent}; -use libp2p_core::transport::{ListenerId, TransportError}; -use libp2p_core::Endpoint; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr, Transport}; 
+use libp2p_core::{ + multiaddr::Protocol, + muxing::{StreamMuxerBox, StreamMuxerExt, SubstreamBox}, + transport::{ + Boxed, DialOpts, ListenerId, OrTransport, PortUse, TransportError, TransportEvent, + }, + upgrade, Endpoint, Multiaddr, Transport, +}; use libp2p_identity::PeerId; use libp2p_noise as noise; use libp2p_quic as quic; @@ -18,15 +33,6 @@ use libp2p_tcp as tcp; use libp2p_yamux as yamux; use quic::Provider; use rand::RngCore; -use std::future::Future; -use std::io; -use std::num::NonZeroU8; -use std::task::Poll; -use std::time::Duration; -use std::{ - pin::Pin, - sync::{Arc, Mutex}, -}; use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] @@ -200,7 +206,8 @@ async fn wrapped_with_delay() { #[cfg(feature = "async-std")] #[async_std::test] -#[ignore] // Transport currently does not validate PeerId. Enable once we make use of PeerId validation in rustls. +#[ignore] // Transport currently does not validate PeerId. + // Enable once we make use of PeerId validation in rustls. 
async fn wrong_peerid() { use libp2p_identity::PeerId; diff --git a/transports/quic/tests/stream_compliance.rs b/transports/quic/tests/stream_compliance.rs index b0536473215..13c29f2caa0 100644 --- a/transports/quic/tests/stream_compliance.rs +++ b/transports/quic/tests/stream_compliance.rs @@ -1,10 +1,12 @@ -use futures::channel::oneshot; -use futures::StreamExt; -use libp2p_core::transport::{DialOpts, ListenerId, PortUse}; -use libp2p_core::{Endpoint, Transport}; -use libp2p_quic as quic; use std::time::Duration; +use futures::{channel::oneshot, StreamExt}; +use libp2p_core::{ + transport::{DialOpts, ListenerId, PortUse}, + Endpoint, Transport, +}; +use libp2p_quic as quic; + #[async_std::test] async fn close_implies_flush() { let (alice, bob) = connected_peers().await; diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 4c4fa7c6b84..fefa18fb431 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -30,11 +30,15 @@ mod provider; -#[cfg(feature = "async-io")] -pub use provider::async_io; - -#[cfg(feature = "tokio")] -pub use provider::tokio; +use std::{ + collections::{HashSet, VecDeque}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}, + pin::Pin, + sync::{Arc, RwLock}, + task::{Context, Poll, Waker}, + time::Duration, +}; use futures::{future::Ready, prelude::*, stream::SelectAll}; use futures_timer::Delay; @@ -43,17 +47,12 @@ use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, PortUse, TransportError, TransportEvent}, }; +#[cfg(feature = "async-io")] +pub use provider::async_io; +#[cfg(feature = "tokio")] +pub use provider::tokio; use provider::{Incoming, Provider}; use socket2::{Domain, Socket, Type}; -use std::{ - collections::{HashSet, VecDeque}, - io, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}, - pin::Pin, - sync::{Arc, RwLock}, - task::{Context, Poll, Waker}, - time::Duration, -}; /// The configuration for a TCP/IP transport capability for 
libp2p. #[derive(Clone, Debug)] @@ -131,14 +130,11 @@ impl PortReuse { impl Config { /// Creates a new configuration for a TCP/IP transport: /// - /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. - /// See [`Config::nodelay`]. - /// * Reuse of listening ports is _disabled_. - /// See [`Config::port_reuse`]. - /// * No custom `IP_TTL` is set. The default of the OS TCP stack applies. - /// See [`Config::ttl`]. - /// * The size of the listen backlog for new listening sockets is `1024`. - /// See [`Config::listen_backlog`]. + /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. See [`Config::nodelay`]. + /// * Reuse of listening ports is _disabled_. See [`Config::port_reuse`]. + /// * No custom `IP_TTL` is set. The default of the OS TCP stack applies. See [`Config::ttl`]. + /// * The size of the listen backlog for new listening sockets is `1024`. See + /// [`Config::listen_backlog`]. pub fn new() -> Self { Self { ttl: None, @@ -241,8 +237,8 @@ where /// The configuration of port reuse when dialing. port_reuse: PortReuse, /// All the active listeners. - /// The [`ListenStream`] struct contains a stream that we want to be pinned. Since the `VecDeque` - /// can be resized, the only way is to use a `Pin>`. + /// The [`ListenStream`] struct contains a stream that we want to be pinned. Since the + /// `VecDeque` can be resized, the only way is to use a `Pin>`. listeners: SelectAll>, /// Pending transport events to return from [`libp2p_core::Transport::poll`]. pending_events: @@ -465,7 +461,8 @@ where pause: Option, /// Pending event to reported. pending_event: Option<::Item>, - /// The listener can be manually closed with [`Transport::remove_listener`](libp2p_core::Transport::remove_listener). + /// The listener can be manually closed with + /// [`Transport::remove_listener`](libp2p_core::Transport::remove_listener). is_closed: bool, /// The stream must be awaken after it has been closed to deliver the last event. 
close_listener_waker: Option, @@ -621,7 +618,8 @@ where } if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. + // Terminate the stream if the listener closed + // and all remaining events have been reported. return Poll::Ready(None); } @@ -705,13 +703,13 @@ fn ip_to_multiaddr(ip: IpAddr, port: u16) -> Multiaddr { #[cfg(test)] mod tests { - use super::*; use futures::{ channel::{mpsc, oneshot}, future::poll_fn, }; - use libp2p_core::Endpoint; - use libp2p_core::Transport as _; + use libp2p_core::{Endpoint, Transport as _}; + + use super::*; #[test] fn multiaddr_to_tcp_conversion() { diff --git a/transports/tcp/src/provider.rs b/transports/tcp/src/provider.rs index d94da7a6fc3..7a609d9f031 100644 --- a/transports/tcp/src/provider.rs +++ b/transports/tcp/src/provider.rs @@ -26,13 +26,18 @@ pub mod async_io; #[cfg(feature = "tokio")] pub mod tokio; -use futures::future::BoxFuture; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::Stream; +use std::{ + fmt, io, + net::{SocketAddr, TcpListener, TcpStream}, + task::{Context, Poll}, +}; + +use futures::{ + future::BoxFuture, + io::{AsyncRead, AsyncWrite}, + Stream, +}; use if_watch::{IfEvent, IpNet}; -use std::net::{SocketAddr, TcpListener, TcpStream}; -use std::task::{Context, Poll}; -use std::{fmt, io}; /// An incoming connection returned from [`Provider::poll_accept()`]. pub struct Incoming { diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs index fe0abe42d54..4df9d928fbb 100644 --- a/transports/tcp/src/provider/async_io.rs +++ b/transports/tcp/src/provider/async_io.rs @@ -18,13 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::{Incoming, Provider}; +use std::{ + io, net, + task::{Context, Poll}, +}; use async_io::Async; use futures::future::{BoxFuture, FutureExt}; -use std::io; -use std::net; -use std::task::{Context, Poll}; + +use super::{Incoming, Provider}; /// A TCP [`Transport`](libp2p_core::Transport) that works with the `async-std` ecosystem. /// @@ -40,9 +42,14 @@ use std::task::{Context, Poll}; /// # async fn main() { /// let mut transport = tcp::async_io::Transport::new(tcp::Config::default()); /// let id = ListenerId::next(); -/// transport.listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// transport +/// .listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// /// println!("Listening on {addr}"); /// # } diff --git a/transports/tcp/src/provider/tokio.rs b/transports/tcp/src/provider/tokio.rs index ec2d098e3fb..a96c4dba858 100644 --- a/transports/tcp/src/provider/tokio.rs +++ b/transports/tcp/src/provider/tokio.rs @@ -18,16 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{Incoming, Provider}; +use std::{ + io, net, + pin::Pin, + task::{Context, Poll}, +}; use futures::{ future::{BoxFuture, FutureExt}, prelude::*, }; -use std::io; -use std::net; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use super::{Incoming, Provider}; /// A TCP [`Transport`](libp2p_core::Transport) that works with the `tokio` ecosystem. 
/// @@ -42,9 +44,14 @@ use std::task::{Context, Poll}; /// # #[tokio::main] /// # async fn main() { /// let mut transport = tcp::tokio::Transport::new(tcp::Config::default()); -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// /// println!("Listening on {addr}"); /// # } diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index 65b373bcf9b..3e7eeb22bf3 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -22,12 +22,12 @@ //! //! This module handles generation, signing, and verification of certificates. +use std::sync::Arc; + use libp2p_identity as identity; use libp2p_identity::PeerId; use x509_parser::{prelude::*, signature_algorithm::SignatureAlgorithm}; -use std::sync::Arc; - /// The libp2p Public Key Extension is a X.509 extension /// with the Object Identifier 1.3.6.1.4.1.53594.1.1, /// allocated by IANA to the libp2p project at Protocol Labs. @@ -283,8 +283,8 @@ impl P2pCertificate<'_> { self.extension.public_key.to_peer_id() } - /// Verify the `signature` of the `message` signed by the private key corresponding to the public key stored - /// in the certificate. + /// Verify the `signature` of the `message` signed by the private key corresponding to the + /// public key stored in the certificate. 
pub fn verify_signature( &self, signature_scheme: rustls::SignatureScheme, @@ -492,9 +492,10 @@ impl P2pCertificate<'_> { #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + #[test] fn sanity_check() { let keypair = identity::Keypair::generate_ed25519(); diff --git a/transports/tls/src/lib.rs b/transports/tls/src/lib.rs index 3aa66db12b3..57d7d69d4bd 100644 --- a/transports/tls/src/lib.rs +++ b/transports/tls/src/lib.rs @@ -29,14 +29,12 @@ pub mod certificate; mod upgrade; mod verifier; -use certificate::AlwaysResolvesCert; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; use std::sync::Arc; +use certificate::AlwaysResolvesCert; pub use futures_rustls::TlsStream; -pub use upgrade::Config; -pub use upgrade::UpgradeError; +use libp2p_identity::{Keypair, PeerId}; +pub use upgrade::{Config, UpgradeError}; const P2P_ALPN: [u8; 6] = *b"libp2p"; diff --git a/transports/tls/src/upgrade.rs b/transports/tls/src/upgrade.rs index 1c61d265ea6..a6d81ab36c9 100644 --- a/transports/tls/src/upgrade.rs +++ b/transports/tls/src/upgrade.rs @@ -18,20 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::certificate; -use crate::certificate::P2pCertificate; -use futures::future::BoxFuture; -use futures::AsyncWrite; -use futures::{AsyncRead, FutureExt}; +use std::{ + net::{IpAddr, Ipv4Addr}, + sync::Arc, +}; + +use futures::{future::BoxFuture, AsyncRead, AsyncWrite, FutureExt}; use futures_rustls::TlsStream; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use rustls::{pki_types::ServerName, CommonState}; -use std::net::{IpAddr, Ipv4Addr}; -use std::sync::Arc; +use crate::{certificate, certificate::P2pCertificate}; #[derive(thiserror::Error, Debug)] pub enum UpgradeError { @@ -102,8 +104,10 @@ where fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { async move { - // Spec: In order to keep this flexibility for future versions, clients that only support the version of the handshake defined in this document MUST NOT send any value in the Server Name Indication. - // Setting `ServerName` to unspecified will disable the use of the SNI extension. + // Spec: In order to keep this flexibility for future versions, clients that only + // support the version of the handshake defined in this document MUST NOT send any value + // in the Server Name Indication. Setting `ServerName` to unspecified will + // disable the use of the SNI extension. let name = ServerName::IpAddress(rustls::pki_types::IpAddr::from(IpAddr::V4( Ipv4Addr::UNSPECIFIED, ))); diff --git a/transports/tls/src/verifier.rs b/transports/tls/src/verifier.rs index 65636cbe708..82b275bc7be 100644 --- a/transports/tls/src/verifier.rs +++ b/transports/tls/src/verifier.rs @@ -23,7 +23,8 @@ //! This module handles a verification of a client/server certificate chain //! and signatures allegedly by the given certificates. 
-use crate::certificate; +use std::sync::Arc; + use libp2p_identity::PeerId; use rustls::{ client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, @@ -35,7 +36,8 @@ use rustls::{ CertificateError, DigitallySignedStruct, DistinguishedName, OtherError, SignatureScheme, SupportedCipherSuite, SupportedProtocolVersion, }; -use std::sync::Arc; + +use crate::certificate; /// The protocol versions supported by this verifier. /// @@ -67,8 +69,8 @@ pub(crate) struct Libp2pCertificateVerifier { /// /// - Exactly one certificate must be presented. /// - The certificate must be self-signed. -/// - The certificate must have a valid libp2p extension that includes a -/// signature of its public key. +/// - The certificate must have a valid libp2p extension that includes a signature of its public +/// key. impl Libp2pCertificateVerifier { pub(crate) fn new() -> Self { Self { @@ -153,11 +155,11 @@ impl ServerCertVerifier for Libp2pCertificateVerifier { /// libp2p requires the following of X.509 client certificate chains: /// -/// - Exactly one certificate must be presented. In particular, client -/// authentication is mandatory in libp2p. +/// - Exactly one certificate must be presented. In particular, client authentication is mandatory +/// in libp2p. /// - The certificate must be self-signed. -/// - The certificate must have a valid libp2p extension that includes a -/// signature of its public key. +/// - The certificate must have a valid libp2p extension that includes a signature of its public +/// key. 
impl ClientCertVerifier for Libp2pCertificateVerifier { fn offer_client_auth(&self) -> bool { true diff --git a/transports/tls/tests/smoke.rs b/transports/tls/tests/smoke.rs index d488ae7846a..cf11f4c0b1d 100644 --- a/transports/tls/tests/smoke.rs +++ b/transports/tls/tests/smoke.rs @@ -1,10 +1,8 @@ +use std::time::Duration; + use futures::{future, StreamExt}; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::MemoryTransport; -use libp2p_core::upgrade::Version; -use libp2p_core::Transport; +use libp2p_core::{multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Transport}; use libp2p_swarm::{dummy, Config, Swarm, SwarmEvent}; -use std::time::Duration; #[tokio::test] async fn can_establish_connection() { diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 5c57e255b4d..74e19476595 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -38,21 +38,24 @@ ))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::stream::BoxStream; +use std::{ + collections::VecDeque, + io, + path::PathBuf, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{BoxFuture, Ready}, prelude::*, + stream::BoxStream, }; -use libp2p_core::transport::ListenerId; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, Transport, }; -use std::collections::VecDeque; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{io, path::PathBuf}; pub type Listener = BoxStream< 'static, @@ -241,14 +244,16 @@ fn multiaddr_to_path(addr: &Multiaddr) -> Result { #[cfg(all(test, feature = "async-std"))] mod tests { - use super::{multiaddr_to_path, UdsConfig}; + use std::{borrow::Cow, path::Path}; + use futures::{channel::oneshot, prelude::*}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, PortUse}, Endpoint, Transport, }; - use std::{borrow::Cow, 
path::Path}; + + use super::{multiaddr_to_path, UdsConfig}; #[test] fn multiaddr_to_path_conversion() { diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs index d0c6ccd2238..01c1a8b3b60 100644 --- a/transports/webrtc-websys/src/connection.rs +++ b/transports/webrtc-websys/src/connection.rs @@ -1,17 +1,15 @@ //! A libp2p connection backed by an [RtcPeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection). -use super::{Error, Stream}; -use crate::stream::DropListener; -use futures::channel::mpsc; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use std::{ + pin::Pin, + task::{ready, Context, Poll, Waker}, +}; + +use futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use js_sys::{Object, Reflect}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use libp2p_webrtc_utils::Fingerprint; use send_wrapper::SendWrapper; -use std::pin::Pin; -use std::task::Waker; -use std::task::{ready, Context, Poll}; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use web_sys::{ @@ -19,6 +17,9 @@ use web_sys::{ RtcSessionDescriptionInit, }; +use super::{Error, Stream}; +use crate::stream::DropListener; + /// A WebRTC Connection. /// /// All connections need to be [`Send`] which is why some fields are wrapped in [`SendWrapper`]. @@ -31,7 +32,8 @@ pub struct Connection { closed: bool, /// An [`mpsc::channel`] for all inbound data channels. /// - /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all inbound data channels. + /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all + /// inbound data channels. inbound_data_channels: SendWrapper>, /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. 
drop_listeners: FuturesUnordered, @@ -43,7 +45,8 @@ pub struct Connection { impl Connection { /// Create a new inner WebRTC Connection pub(crate) fn new(peer_connection: RtcPeerConnection) -> Self { - // An ondatachannel Future enables us to poll for incoming data channel events in poll_incoming + // An ondatachannel Future enables us to poll for incoming data channel events in + // poll_incoming let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { @@ -120,7 +123,8 @@ impl StreamMuxer for Connection { Poll::Ready(Ok(stream)) } None => { - // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. + // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed + // which means we are most likely shutting down the connection. 
tracing::debug!("`Sender` for inbound data channels has been dropped"); Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) } diff --git a/transports/webrtc-websys/src/lib.rs b/transports/webrtc-websys/src/lib.rs index 04fced4111b..07207eb0ae8 100644 --- a/transports/webrtc-websys/src/lib.rs +++ b/transports/webrtc-websys/src/lib.rs @@ -7,7 +7,9 @@ mod stream; mod transport; mod upgrade; -pub use self::connection::Connection; -pub use self::error::Error; -pub use self::stream::Stream; -pub use self::transport::{Config, Transport}; +pub use self::{ + connection::Connection, + error::Error, + stream::Stream, + transport::{Config, Transport}, +}; diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs index 9e63fd92462..628043111ee 100644 --- a/transports/webrtc-websys/src/sdp.rs +++ b/transports/webrtc-websys/src/sdp.rs @@ -1,5 +1,6 @@ -use libp2p_webrtc_utils::Fingerprint; use std::net::SocketAddr; + +use libp2p_webrtc_utils::Fingerprint; use web_sys::{RtcSdpType, RtcSessionDescriptionInit}; /// Creates the SDP answer used by the client. diff --git a/transports/webrtc-websys/src/stream.rs b/transports/webrtc-websys/src/stream.rs index 812aa5afbbf..ee0183b07f0 100644 --- a/transports/webrtc-websys/src/stream.rs +++ b/transports/webrtc-websys/src/stream.rs @@ -1,11 +1,15 @@ //! The WebRTC [Stream] over the Connection -use self::poll_data_channel::PollDataChannel; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite}; use send_wrapper::SendWrapper; -use std::pin::Pin; -use std::task::{Context, Poll}; use web_sys::RtcDataChannel; +use self::poll_data_channel::PollDataChannel; + mod poll_data_channel; /// A stream over a WebRTC connection. 
diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs index 3ec744342eb..2abe499afce 100644 --- a/transports/webrtc-websys/src/stream/poll_data_channel.rs +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -1,19 +1,23 @@ -use std::cmp::min; -use std::io; -use std::pin::Pin; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::task::{Context, Poll}; +use std::{ + cmp::min, + io, + pin::Pin, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering}, + Mutex, + }, + task::{Context, Poll}, +}; use bytes::BytesMut; -use futures::task::AtomicWaker; -use futures::{AsyncRead, AsyncWrite}; +use futures::{task::AtomicWaker, AsyncRead, AsyncWrite}; use libp2p_webrtc_utils::MAX_MSG_LEN; use wasm_bindgen::prelude::*; use web_sys::{Event, MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelState}; -/// [`PollDataChannel`] is a wrapper around [`RtcDataChannel`] which implements [`AsyncRead`] and [`AsyncWrite`]. +/// [`PollDataChannel`] is a wrapper around [`RtcDataChannel`] which implements [`AsyncRead`] and +/// [`AsyncWrite`]. #[derive(Debug, Clone)] pub(crate) struct PollDataChannel { /// The [`RtcDataChannel`] being wrapped. @@ -25,7 +29,8 @@ pub(crate) struct PollDataChannel { /// Waker for when we are waiting for the DC to be opened. open_waker: Rc, - /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the [`MAX_MSG_LEN`] threshold. + /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the + /// [`MAX_MSG_LEN`] threshold. write_waker: Rc, /// Waker for when we are waiting for the DC to be closed. @@ -33,9 +38,11 @@ pub(crate) struct PollDataChannel { /// Whether we've been overloaded with data by the remote. /// - /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us messages faster than we can read them. 
- /// In that case, we return an [`std::io::Error`] from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. - /// Failing these will (very likely), cause the application developer to drop the stream which resets it. + /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us + /// messages faster than we can read them. In that case, we return an [`std::io::Error`] + /// from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. + /// Failing these will (very likely), + /// cause the application developer to drop the stream which resets it. overloaded: Rc, // Store the closures for proper garbage collection. @@ -83,7 +90,9 @@ impl PollDataChannel { inner.set_onclose(Some(on_close_closure.as_ref().unchecked_ref())); let new_data_waker = Rc::new(AtomicWaker::new()); - let read_buffer = Rc::new(Mutex::new(BytesMut::new())); // We purposely don't use `with_capacity` so we don't eagerly allocate `MAX_READ_BUFFER` per stream. + // We purposely don't use `with_capacity` + // so we don't eagerly allocate `MAX_READ_BUFFER` per stream. 
+ let read_buffer = Rc::new(Mutex::new(BytesMut::new())); let overloaded = Rc::new(AtomicBool::new(false)); let on_message_closure = Closure::::new({ diff --git a/transports/webrtc-websys/src/transport.rs b/transports/webrtc-websys/src/transport.rs index 836acb0b9f6..abf02520244 100644 --- a/transports/webrtc-websys/src/transport.rs +++ b/transports/webrtc-websys/src/transport.rs @@ -1,15 +1,18 @@ -use super::upgrade; -use super::Connection; -use super::Error; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::future::FutureExt; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::DialOpts; -use libp2p_core::transport::{Boxed, ListenerId, Transport as _, TransportError, TransportEvent}; +use libp2p_core::{ + multiaddr::Multiaddr, + muxing::StreamMuxerBox, + transport::{Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent}, +}; use libp2p_identity::{Keypair, PeerId}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use super::{upgrade, Connection, Error}; /// Config for the [`Transport`]. 
#[derive(Clone)] diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs index d42f2e3ae18..b1de908ae82 100644 --- a/transports/webrtc-websys/src/upgrade.rs +++ b/transports/webrtc-websys/src/upgrade.rs @@ -1,13 +1,11 @@ -use super::Error; -use crate::connection::RtcPeerConnection; -use crate::error::AuthenticationError; -use crate::sdp; -use crate::Connection; +use std::net::SocketAddr; + use libp2p_identity::{Keypair, PeerId}; -use libp2p_webrtc_utils::noise; -use libp2p_webrtc_utils::Fingerprint; +use libp2p_webrtc_utils::{noise, Fingerprint}; use send_wrapper::SendWrapper; -use std::net::SocketAddr; + +use super::Error; +use crate::{connection::RtcPeerConnection, error::AuthenticationError, sdp, Connection}; /// Upgrades an outbound WebRTC connection by creating the data channel /// and conducting a Noise handshake diff --git a/transports/webrtc/src/lib.rs b/transports/webrtc/src/lib.rs index ea1e6a4d646..99f0c7da658 100644 --- a/transports/webrtc/src/lib.rs +++ b/transports/webrtc/src/lib.rs @@ -23,7 +23,7 @@ //! //! # Overview //! -//! ## ICE +//!  ## ICE //! //! RFCs: 8839, 8445 See also: //! @@ -39,10 +39,9 @@ //! //! The ICE workflow works as follows: //! -//! - An "offerer" determines ways in which it could be accessible (either an -//! IP address or through a relay using a TURN server), which are called "candidates". It then -//! generates a small text payload in a format called SDP, that describes the request for a -//! connection. +//! - An "offerer" determines ways in which it could be accessible (either an IP address or through +//! a relay using a TURN server), which are called "candidates". It then generates a small text +//! payload in a format called SDP, that describes the request for a connection. //! - The offerer sends this SDP-encoded message to the answerer. The medium through which this //! exchange is done is out of scope of the ICE protocol. //! 
- The answerer then finds its own candidates, and generates an answer, again in the SDP format. diff --git a/transports/webrtc/src/tokio/certificate.rs b/transports/webrtc/src/tokio/certificate.rs index 81197af4132..7ff35d46bdd 100644 --- a/transports/webrtc/src/tokio/certificate.rs +++ b/transports/webrtc/src/tokio/certificate.rs @@ -100,9 +100,10 @@ enum Kind { #[cfg(all(test, feature = "pem"))] mod test { - use super::*; use rand::thread_rng; + use super::*; + #[test] fn test_certificate_serialize_pem_and_from_pem() { let cert = Certificate::generate(&mut thread_rng()).unwrap(); diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 3bcc4c3193e..19232707e7f 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -18,26 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::stream::FuturesUnordered; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll, Waker}, +}; + use futures::{ channel::{ mpsc, oneshot::{self, Sender}, }, + future::BoxFuture, lock::Mutex as FutMutex, + ready, + stream::FuturesUnordered, StreamExt, - {future::BoxFuture, ready}, }; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use webrtc::data::data_channel::DataChannel as DetachedDataChannel; -use webrtc::data_channel::RTCDataChannel; -use webrtc::peer_connection::RTCPeerConnection; - -use std::task::Waker; -use std::{ - pin::Pin, - sync::Arc, - task::{Context, Poll}, +use webrtc::{ + data::data_channel::DataChannel as DetachedDataChannel, data_channel::RTCDataChannel, + peer_connection::RTCPeerConnection, }; use crate::tokio::{error::Error, stream, stream::Stream}; @@ -172,7 +173,9 @@ impl StreamMuxer for Connection { "Sender-end of channel should be owned by `RTCPeerConnection`" ); - Poll::Pending // Return `Pending` without registering a waker: If the channel is closed, we don't need to be called anymore. 
+ // Return `Pending` without registering a waker: If the channel is + // closed, we don't need to be called anymore. + Poll::Pending } } } diff --git a/transports/webrtc/src/tokio/req_res_chan.rs b/transports/webrtc/src/tokio/req_res_chan.rs index fb29e16db27..a733c86d5cc 100644 --- a/transports/webrtc/src/tokio/req_res_chan.rs +++ b/transports/webrtc/src/tokio/req_res_chan.rs @@ -18,16 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{ - channel::{mpsc, oneshot}, - SinkExt, StreamExt, -}; - use std::{ io, task::{Context, Poll}, }; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, StreamExt, +}; + pub(crate) fn new(capacity: usize) -> (Sender, Receiver) { let (sender, receiver) = mpsc::channel(capacity); diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index 4be4c19f188..d9f869d4433 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; -use libp2p_webrtc_utils::sdp::render_description; -use libp2p_webrtc_utils::Fingerprint; use std::net::SocketAddr; + +pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; +use libp2p_webrtc_utils::{sdp::render_description, Fingerprint}; use webrtc::peer_connection::sdp::session_description::RTCSessionDescription; /// Creates the SDP answer used by the client. diff --git a/transports/webrtc/src/tokio/stream.rs b/transports/webrtc/src/tokio/stream.rs index 4278a751e27..9d5a9faf440 100644 --- a/transports/webrtc/src/tokio/stream.rs +++ b/transports/webrtc/src/tokio/stream.rs @@ -40,8 +40,8 @@ pub struct Stream { pub(crate) type DropListener = libp2p_webrtc_utils::DropListener>; impl Stream { - /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream - /// is dropped. 
+ /// Returns a new `Substream` and a listener, which will notify the receiver when/if the + /// substream is dropped. pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { let mut data_channel = PollDataChannel::new(data_channel).compat(); data_channel.get_mut().set_read_buf_capacity(MAX_MSG_LEN); diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 62049c8f59b..29fad180d93 100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -18,6 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + io, + net::{IpAddr, SocketAddr}, + pin::Pin, + task::{Context, Poll, Waker}, +}; + use futures::{future::BoxFuture, prelude::*, stream::SelectAll}; use if_watch::{tokio::IfWatcher, IfEvent}; use libp2p_core::{ @@ -28,14 +35,6 @@ use libp2p_identity as identity; use libp2p_identity::PeerId; use webrtc::peer_connection::configuration::RTCConfiguration; -use std::net::IpAddr; -use std::{ - io, - net::SocketAddr, - pin::Pin, - task::{Context, Poll, Waker}, -}; - use crate::tokio::{ certificate::Certificate, connection::Connection, @@ -60,8 +59,8 @@ impl Transport { /// /// ``` /// use libp2p_identity as identity; + /// use libp2p_webrtc::tokio::{Certificate, Transport}; /// use rand::thread_rng; - /// use libp2p_webrtc::tokio::{Transport, Certificate}; /// /// let id_keys = identity::Keypair::generate_ed25519(); /// let transport = Transport::new(id_keys, Certificate::generate(&mut thread_rng()).unwrap()); @@ -124,8 +123,8 @@ impl libp2p_core::Transport for Transport { dial_opts: DialOpts, ) -> Result> { if dial_opts.role.is_listener() { - // TODO: As the listener of a WebRTC hole punch, we need to send a random UDP packet to the - // `addr`. See DCUtR specification below. + // TODO: As the listener of a WebRTC hole punch, we need to send a random UDP packet to + // the `addr`. See DCUtR specification below. 
// // https://github.com/libp2p/specs/blob/master/relay/DCUtR.md#the-protocol tracing::warn!("WebRTC hole punch is not yet supported"); @@ -426,11 +425,13 @@ fn parse_webrtc_listen_addr(addr: &Multiaddr) -> Option { #[cfg(test)] mod tests { - use super::*; + use std::net::Ipv6Addr; + use futures::future::poll_fn; use libp2p_core::Transport as _; use rand::thread_rng; - use std::net::Ipv6Addr; + + use super::*; #[test] fn missing_webrtc_protocol() { diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index 7a8d960826d..dcb88592c9b 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -18,6 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + io, + io::ErrorKind, + net::SocketAddr, + sync::Arc, + task::{Context, Poll}, +}; + use async_trait::async_trait; use futures::{ channel::oneshot, @@ -31,16 +40,9 @@ use stun::{ }; use thiserror::Error; use tokio::{io::ReadBuf, net::UdpSocket}; -use webrtc::ice::udp_mux::{UDPMux, UDPMuxConn, UDPMuxConnParams, UDPMuxWriter}; -use webrtc::util::{Conn, Error}; - -use std::{ - collections::{HashMap, HashSet}, - io, - io::ErrorKind, - net::SocketAddr, - sync::Arc, - task::{Context, Poll}, +use webrtc::{ + ice::udp_mux::{UDPMux, UDPMuxConn, UDPMuxConnParams, UDPMuxWriter}, + util::{Conn, Error}, }; use crate::tokio::req_res_chan; @@ -303,8 +305,8 @@ impl UDPMuxNewAddr { if let Poll::Ready(Some((ufrag, response))) = self.remove_conn_command.poll_next_unpin(cx) { - // Pion's ice implementation has both `RemoveConnByFrag` and `RemoveConn`, but since `conns` - // is keyed on `ufrag` their implementation is equivalent. + // Pion's ice implementation has both `RemoveConnByFrag` and `RemoveConn`, but since + // `conns` is keyed on `ufrag` their implementation is equivalent. 
if let Some(removed_conn) = self.conns.remove(&ufrag) { for address in removed_conn.get_addresses() { @@ -336,8 +338,9 @@ impl UDPMuxNewAddr { let conn = self.address_map.get(&addr); let conn = match conn { - // If we couldn't find the connection based on source address, see if - // this is a STUN message and if so if we can find the connection based on ufrag. + // If we couldn't find the connection based on source address, see + // if this is a STUN message and if + // so if we can find the connection based on ufrag. None if is_stun_message(read.filled()) => { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 4145a5e7510..9293a474084 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -18,27 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use libp2p_webrtc_utils::{noise, Fingerprint}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; -use futures::channel::oneshot; -use futures::future::Either; +use futures::{channel::oneshot, future::Either}; use futures_timer::Delay; use libp2p_identity as identity; use libp2p_identity::PeerId; -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use webrtc::api::setting_engine::SettingEngine; -use webrtc::api::APIBuilder; -use webrtc::data::data_channel::DataChannel; -use webrtc::data_channel::data_channel_init::RTCDataChannelInit; -use webrtc::dtls_transport::dtls_role::DTLSRole; -use webrtc::ice::network_type::NetworkType; -use webrtc::ice::udp_mux::UDPMux; -use webrtc::ice::udp_network::UDPNetwork; -use webrtc::peer_connection::configuration::RTCConfiguration; -use webrtc::peer_connection::RTCPeerConnection; - -use crate::tokio::sdp::random_ufrag; -use crate::tokio::{error::Error, sdp, stream::Stream, Connection}; +use libp2p_webrtc_utils::{noise, Fingerprint}; +use webrtc::{ + 
api::{setting_engine::SettingEngine, APIBuilder}, + data::data_channel::DataChannel, + data_channel::data_channel_init::RTCDataChannelInit, + dtls_transport::dtls_role::DTLSRole, + ice::{network_type::NetworkType, udp_mux::UDPMux, udp_network::UDPNetwork}, + peer_connection::{configuration::RTCConfiguration, RTCPeerConnection}, +}; + +use crate::tokio::{error::Error, sdp, sdp::random_ufrag, stream::Stream, Connection}; /// Creates a new outbound WebRTC connection. pub(crate) async fn outbound( diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index d606d66c41f..5f67c09d962 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -18,21 +18,30 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::channel::mpsc; -use futures::future::{BoxFuture, Either}; -use futures::stream::StreamExt; -use futures::{future, ready, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::{Boxed, DialOpts, ListenerId, PortUse, TransportEvent}; -use libp2p_core::{Endpoint, Multiaddr, Transport}; +use std::{ + future::Future, + num::NonZeroU8, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::mpsc, + future, + future::{BoxFuture, Either}, + ready, + stream::StreamExt, + AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt, +}; +use libp2p_core::{ + muxing::{StreamMuxerBox, StreamMuxerExt}, + transport::{Boxed, DialOpts, ListenerId, PortUse, TransportEvent}, + Endpoint, Multiaddr, Transport, +}; use libp2p_identity::PeerId; use libp2p_webrtc as webrtc; use rand::{thread_rng, RngCore}; -use std::future::Future; -use std::num::NonZeroU8; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] diff --git a/transports/websocket-websys/src/lib.rs 
b/transports/websocket-websys/src/lib.rs index 21789eeca66..72f4068610d 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -24,21 +24,25 @@ mod web_context; +use std::{ + cmp::min, + pin::Pin, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering}, + Mutex, + }, + task::{Context, Poll}, +}; + use bytes::BytesMut; -use futures::task::AtomicWaker; -use futures::{future::Ready, io, prelude::*}; +use futures::{future::Ready, io, prelude::*, task::AtomicWaker}; use js_sys::Array; -use libp2p_core::transport::DialOpts; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{ListenerId, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, }; use send_wrapper::SendWrapper; -use std::cmp::min; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::{pin::Pin, task::Context, task::Poll}; use wasm_bindgen::prelude::*; use web_sys::{CloseEvent, Event, MessageEvent, WebSocket}; @@ -62,7 +66,6 @@ use crate::web_context::WebContext; /// .multiplex(yamux::Config::default()) /// .boxed(); /// ``` -/// #[derive(Default)] pub struct Transport { _private: (), @@ -176,7 +179,8 @@ struct Inner { /// Waker for when we are waiting for the WebSocket to be opened. open_waker: Rc, - /// Waker for when we are waiting to write (again) to the WebSocket because we previously exceeded the [`MAX_BUFFER`] threshold. + /// Waker for when we are waiting to write (again) to the WebSocket because we previously + /// exceeded the [`MAX_BUFFER`] threshold. write_waker: Rc, /// Waker for when we are waiting for the WebSocket to be closed. @@ -308,7 +312,9 @@ impl Connection { .expect("to have a window or worker context") .set_interval_with_callback_and_timeout_and_arguments( on_buffered_amount_low_closure.as_ref().unchecked_ref(), - 100, // Chosen arbitrarily and likely worth tuning. 
Due to low impact of the /ws transport, no further effort was invested at the time. + // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws + // transport, no further effort was invested at the time. + 100, &Array::new(), ) .expect("to be able to set an interval"); @@ -434,7 +440,8 @@ impl AsyncWrite for Connection { impl Drop for Connection { fn drop(&mut self) { - // Unset event listeners, as otherwise they will be called by JS after the handlers have already been dropped. + // Unset event listeners, as otherwise they will be called by JS after the handlers have + // already been dropped. self.inner.socket.set_onclose(None); self.inner.socket.set_onerror(None); self.inner.socket.set_onopen(None); @@ -458,9 +465,10 @@ impl Drop for Connection { #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; + use super::*; + #[test] fn extract_url() { let peer_id = PeerId::random(); diff --git a/transports/websocket/src/error.rs b/transports/websocket/src/error.rs index 7dc22331bcd..efab95a7621 100644 --- a/transports/websocket/src/error.rs +++ b/transports/websocket/src/error.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::tls; -use libp2p_core::Multiaddr; use std::{error, fmt}; +use libp2p_core::Multiaddr; + +use crate::tls; + /// Error in WebSockets. #[derive(Debug)] pub enum Error { diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 259be6a68f8..3d2738cbc3c 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -18,11 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::{error::Error, quicksink, tls}; +use std::{ + borrow::Cow, + collections::HashMap, + fmt, io, mem, + net::IpAddr, + ops::DerefMut, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; -use futures_rustls::rustls::pki_types::ServerName; -use futures_rustls::{client, server}; +use futures_rustls::{client, rustls::pki_types::ServerName, server}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, TransportError, TransportEvent}, @@ -33,12 +42,10 @@ use soketto::{ connection::{self, CloseReason}, handshake, }; -use std::borrow::Cow; -use std::net::IpAddr; -use std::{collections::HashMap, ops::DerefMut, sync::Arc}; -use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; use url::Url; +use crate::{error::Error, quicksink, tls}; + /// Max. number of payload bytes of a single frame. const MAX_DATA_SIZE: usize = 256 * 1024 * 1024; @@ -809,10 +816,12 @@ where #[cfg(test)] mod tests { - use super::*; - use libp2p_identity::PeerId; use std::io; + use libp2p_identity::PeerId; + + use super::*; + #[test] fn listen_addr() { let tcp_addr = "/ip4/0.0.0.0/tcp/2222".parse::().unwrap(); diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index cbc923613dd..fbed8fddc66 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -27,6 +27,12 @@ pub mod framed; mod quicksink; pub mod tls; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, +}; + use error::Error; use framed::{Connection, Incoming}; use futures::{future::BoxFuture, prelude::*, ready}; @@ -37,11 +43,6 @@ use libp2p_core::{ Transport, }; use rw_stream_sink::RwStreamSink; -use std::{ - io, - pin::Pin, - task::{Context, Poll}, -}; /// A Websocket transport. 
/// @@ -75,18 +76,28 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new(dns::async_std::Transport::system( -/// tcp::async_io::Transport::new(tcp::Config::default()), -/// ).await.unwrap()); +/// let mut transport = websocket::WsConfig::new( +/// dns::async_std::Transport::system(tcp::async_io::Transport::new(tcp::Config::default())) +/// .await +/// .unwrap(), +/// ); /// /// let rcgen_cert = generate_simple_self_signed(vec!["localhost".to_string()]).unwrap(); /// let priv_key = websocket::tls::PrivateKey::new(rcgen_cert.serialize_private_key_der()); /// let cert = websocket::tls::Certificate::new(rcgen_cert.serialize_der().unwrap()); /// transport.set_tls_config(websocket::tls::Config::new(priv_key, vec![cert]).unwrap()); /// -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/tls/ws".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on( +/// ListenerId::next(), +/// "/ip4/127.0.0.1/tcp/0/tls/ws".parse().unwrap(), +/// ) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// println!("Listening on {addr}"); /// /// # } @@ -105,13 +116,20 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new( -/// tcp::async_io::Transport::new(tcp::Config::default()), -/// ); +/// let mut transport = +/// websocket::WsConfig::new(tcp::async_io::Transport::new(tcp::Config::default())); /// -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on( +/// ListenerId::next(), +/// "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap(), +/// ) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut 
transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// println!("Listening on {addr}"); /// /// # } @@ -283,7 +301,6 @@ where #[cfg(test)] mod tests { - use super::WsConfig; use futures::prelude::*; use libp2p_core::{ multiaddr::Protocol, @@ -293,6 +310,8 @@ mod tests { use libp2p_identity::PeerId; use libp2p_tcp as tcp; + use super::WsConfig; + #[test] fn dialer_connects_to_listener_ipv4() { let a = "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap(); diff --git a/transports/websocket/src/quicksink.rs b/transports/websocket/src/quicksink.rs index 4f620536ea1..a0e2fb8b0f6 100644 --- a/transports/websocket/src/quicksink.rs +++ b/transports/websocket/src/quicksink.rs @@ -19,26 +19,28 @@ // ```no_run // use async_std::io; // use futures::prelude::*; +// // use crate::quicksink::Action; // // crate::quicksink::make_sink(io::stdout(), |mut stdout, action| async move { // match action { // Action::Send(x) => stdout.write_all(x).await?, // Action::Flush => stdout.flush().await?, -// Action::Close => stdout.close().await? +// Action::Close => stdout.close().await?, // } // Ok::<_, io::Error>(stdout) // }); // ``` -use futures::{ready, sink::Sink}; -use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; +use futures::{ready, sink::Sink}; +use pin_project_lite::pin_project; + /// Returns a `Sink` impl based on the initial value and the given closure. 
/// /// The closure will be applied to the initial value and an [`Action`] that @@ -291,10 +293,11 @@ where #[cfg(test)] mod tests { - use crate::quicksink::{make_sink, Action}; use async_std::{io, task}; use futures::{channel::mpsc, prelude::*}; + use crate::quicksink::{make_sink, Action}; + #[test] fn smoke_test() { task::block_on(async { diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index 77090e21675..598dcc22765 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures_rustls::{rustls, TlsAcceptor, TlsConnector}; use std::{fmt, io, sync::Arc}; +use futures_rustls::{rustls, TlsAcceptor, TlsConnector}; + /// TLS configuration. #[derive(Clone)] pub struct Config { diff --git a/transports/webtransport-websys/src/connection.rs b/transports/webtransport-websys/src/connection.rs index 956a66288af..75c8603864a 100644 --- a/transports/webtransport-websys/src/connection.rs +++ b/transports/webtransport-websys/src/connection.rs @@ -1,22 +1,29 @@ +use std::{ + collections::HashSet, + future::poll_fn, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::FutureExt; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::OutboundConnectionUpgrade; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::OutboundConnectionUpgrade, + UpgradeInfo, +}; use libp2p_identity::{Keypair, PeerId}; use multihash::Multihash; use send_wrapper::SendWrapper; -use std::collections::HashSet; -use std::future::poll_fn; -use std::pin::Pin; -use std::task::{ready, Context, Poll}; use wasm_bindgen_futures::JsFuture; use web_sys::ReadableStreamDefaultReader; -use crate::bindings::{WebTransport, WebTransportBidirectionalStream}; -use crate::endpoint::Endpoint; -use crate::fused_js_promise::FusedJsPromise; -use 
crate::utils::{detach_promise, parse_reader_response, to_js_type}; -use crate::{Error, Stream}; +use crate::{ + bindings::{WebTransport, WebTransportBidirectionalStream}, + endpoint::Endpoint, + fused_js_promise::FusedJsPromise, + utils::{detach_promise, parse_reader_response, to_js_type}, + Error, Stream, +}; /// An opened WebTransport connection. #[derive(Debug)] diff --git a/transports/webtransport-websys/src/endpoint.rs b/transports/webtransport-websys/src/endpoint.rs index 0bff1ed6186..fd209c51664 100644 --- a/transports/webtransport-websys/src/endpoint.rs +++ b/transports/webtransport-websys/src/endpoint.rs @@ -1,11 +1,14 @@ +use std::collections::HashSet; + use js_sys::{Array, Uint8Array}; use libp2p_identity::PeerId; use multiaddr::{Multiaddr, Protocol}; use multihash::Multihash; -use std::collections::HashSet; -use crate::bindings::{WebTransportHash, WebTransportOptions}; -use crate::Error; +use crate::{ + bindings::{WebTransportHash, WebTransportOptions}, + Error, +}; pub(crate) struct Endpoint { pub(crate) host: String, @@ -149,9 +152,10 @@ impl Endpoint { #[cfg(test)] mod tests { - use super::*; use std::str::FromStr; + use super::*; + fn multihash_from_str(s: &str) -> Multihash<64> { let (_base, bytes) = multibase::decode(s).unwrap(); Multihash::from_bytes(&bytes).unwrap() diff --git a/transports/webtransport-websys/src/fused_js_promise.rs b/transports/webtransport-websys/src/fused_js_promise.rs index 0ba846501c2..d3d3858a553 100644 --- a/transports/webtransport-websys/src/fused_js_promise.rs +++ b/transports/webtransport-websys/src/fused_js_promise.rs @@ -1,8 +1,11 @@ +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::FutureExt; use js_sys::Promise; -use std::future::Future; -use std::pin::Pin; -use std::task::{ready, Context, Poll}; use wasm_bindgen::JsValue; use wasm_bindgen_futures::JsFuture; diff --git a/transports/webtransport-websys/src/lib.rs b/transports/webtransport-websys/src/lib.rs index 
dcb1010d986..126adc054a9 100644 --- a/transports/webtransport-websys/src/lib.rs +++ b/transports/webtransport-websys/src/lib.rs @@ -11,7 +11,9 @@ mod stream; mod transport; mod utils; -pub use self::connection::Connection; -pub use self::error::Error; -pub use self::stream::Stream; -pub use self::transport::{Config, Transport}; +pub use self::{ + connection::Connection, + error::Error, + stream::Stream, + transport::{Config, Transport}, +}; diff --git a/transports/webtransport-websys/src/stream.rs b/transports/webtransport-websys/src/stream.rs index ba4238ac814..b9d1669b6dc 100644 --- a/transports/webtransport-websys/src/stream.rs +++ b/transports/webtransport-websys/src/stream.rs @@ -1,16 +1,20 @@ +use std::{ + io, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite, FutureExt}; use js_sys::Uint8Array; use send_wrapper::SendWrapper; -use std::io; -use std::pin::Pin; -use std::task::ready; -use std::task::{Context, Poll}; use web_sys::{ReadableStreamDefaultReader, WritableStreamDefaultWriter}; -use crate::bindings::WebTransportBidirectionalStream; -use crate::fused_js_promise::FusedJsPromise; -use crate::utils::{detach_promise, parse_reader_response, to_io_error, to_js_type}; -use crate::Error; +use crate::{ + bindings::WebTransportBidirectionalStream, + fused_js_promise::FusedJsPromise, + utils::{detach_promise, parse_reader_response, to_io_error, to_js_type}, + Error, +}; /// A stream on a connection. 
#[derive(Debug)] diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs index 6a9a9dad954..bad9509864e 100644 --- a/transports/webtransport-websys/src/transport.rs +++ b/transports/webtransport-websys/src/transport.rs @@ -1,17 +1,18 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::future::FutureExt; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::{ - Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent, +use libp2p_core::{ + muxing::StreamMuxerBox, + transport::{Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent}, }; use libp2p_identity::{Keypair, PeerId}; use multiaddr::Multiaddr; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use crate::endpoint::Endpoint; -use crate::Connection; -use crate::Error; +use crate::{endpoint::Endpoint, Connection, Error}; /// Config for the [`Transport`]. pub struct Config { diff --git a/transports/webtransport-websys/src/utils.rs b/transports/webtransport-websys/src/utils.rs index 0b3550e5b5b..df59ee15161 100644 --- a/transports/webtransport-websys/src/utils.rs +++ b/transports/webtransport-websys/src/utils.rs @@ -1,7 +1,8 @@ +use std::io; + use js_sys::{Promise, Reflect}; use once_cell::sync::Lazy; use send_wrapper::SendWrapper; -use std::io; use wasm_bindgen::{JsCast, JsValue}; use crate::Error; @@ -17,7 +18,6 @@ static DO_NOTHING: Lazy> = Lazy::new(|| { /// A promise always runs in the background, however if you don't await it, /// or specify a `catch` handler before you drop it, it might cause some side /// effects. This function avoids any side effects. -// // Ref: https://github.com/typescript-eslint/typescript-eslint/blob/391a6702c0a9b5b3874a7a27047f2a721f090fb6/packages/eslint-plugin/docs/rules/no-floating-promises.md pub(crate) fn detach_promise(promise: Promise) { // Avoid having "floating" promise and ignore any errors. 
@@ -50,7 +50,6 @@ where } /// Parse response from `ReadableStreamDefaultReader::read`. -// // Ref: https://streams.spec.whatwg.org/#default-reader-prototype pub(crate) fn parse_reader_response(resp: &JsValue) -> Result, JsValue> { let value = Reflect::get(resp, &JsValue::from_str("value"))?; diff --git a/wasm-tests/webtransport-tests/src/lib.rs b/wasm-tests/webtransport-tests/src/lib.rs index 4cf4375bf7a..207bdb03b91 100644 --- a/wasm-tests/webtransport-tests/src/lib.rs +++ b/wasm-tests/webtransport-tests/src/lib.rs @@ -1,17 +1,17 @@ #![allow(unexpected_cfgs)] +use std::{future::poll_fn, pin::Pin}; -use futures::channel::oneshot; -use futures::{AsyncReadExt, AsyncWriteExt}; +use futures::{channel::oneshot, AsyncReadExt, AsyncWriteExt}; use getrandom::getrandom; -use libp2p_core::transport::{DialOpts, PortUse}; -use libp2p_core::{Endpoint, StreamMuxer, Transport as _}; +use libp2p_core::{ + transport::{DialOpts, PortUse}, + Endpoint, StreamMuxer, Transport as _, +}; use libp2p_identity::{Keypair, PeerId}; use libp2p_noise as noise; use libp2p_webtransport_websys::{Config, Connection, Error, Stream, Transport}; use multiaddr::{Multiaddr, Protocol}; use multihash::Multihash; -use std::future::poll_fn; -use std::pin::Pin; use wasm_bindgen::JsCast; use wasm_bindgen_futures::{spawn_local, JsFuture}; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; From d0590a7a71160dcf806d38813b74925d3217a98c Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 3 Dec 2024 21:42:56 +0800 Subject: [PATCH 41/50] feat(kad): make Distance private field public make Distance private field (U256) public So that some `network density` in Distance can be calculated as below: ```rust let density = U256::MAX / U256::from(estimated_network_size); let density_distance = Distance(estimated_distance); ``` Pull-Request: #5705. 
--- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/kad/CHANGELOG.md | 5 +++++ protocols/kad/Cargo.toml | 2 +- protocols/kad/src/kbucket/key.rs | 4 ++-- protocols/kad/src/lib.rs | 2 +- 6 files changed, 11 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4093d49504b..45f185d9780 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2844,7 +2844,7 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.47.0" +version = "0.47.1" dependencies = [ "arrayvec", "async-std", diff --git a/Cargo.toml b/Cargo.toml index dfa32628dbc..7f7b601ab82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ libp2p-floodsub = { version = "0.45.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.48.0", path = "protocols/gossipsub" } libp2p-identify = { version = "0.46.0", path = "protocols/identify" } libp2p-identity = { version = "0.2.10" } -libp2p-kad = { version = "0.47.0", path = "protocols/kad" } +libp2p-kad = { version = "0.47.1", path = "protocols/kad" } libp2p-mdns = { version = "0.46.0", path = "protocols/mdns" } libp2p-memory-connection-limits = { version = "0.3.1", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.15.0", path = "misc/metrics" } diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 64049c7b60b..22af5fb5074 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.47.1 + +- Expose Distance private field U256 to public. + See [PR 5705](https://github.com/libp2p/rust-libp2p/pull/5705). + ## 0.47.0 - Expose a kad query facility allowing specify num_results dynamicaly. 
diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 295414f6ddd..cd97c91b2bf 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-kad" edition = "2021" rust-version = { workspace = true } description = "Kademlia protocol for libp2p" -version = "0.47.0" +version = "0.47.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index 367dfa807d3..5b9590cb94c 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -35,7 +35,7 @@ use crate::record; construct_uint! { /// 256-bit unsigned integer. - pub(super) struct U256(4); + pub struct U256(4); } /// A `Key` in the DHT keyspace with preserved preimage. @@ -193,7 +193,7 @@ impl AsRef for KeyBytes { /// A distance between two keys in the DHT keyspace. #[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug)] -pub struct Distance(pub(super) U256); +pub struct Distance(pub U256); impl Distance { /// Returns the integer part of the base 2 logarithm of the [`Distance`]. diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index 8ab45665c9b..91983b9aaf7 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -67,7 +67,7 @@ pub use behaviour::{ QueryResult, QueryStats, Quorum, RoutingUpdate, StoreInserts, }; pub use kbucket::{ - Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, + Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, U256, }; use libp2p_swarm::StreamProtocol; pub use protocol::{ConnectionType, KadPeer}; From 1c3e82039a6a48414d62e8226bffc35206cf42e8 Mon Sep 17 00:00:00 2001 From: needsure <166317845+needsure@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:39:32 +0800 Subject: [PATCH 42/50] chore: fix some typos in comment fix some typos in comment Pull-Request: #5721. 
--- protocols/gossipsub/src/behaviour.rs | 2 +- swarm/src/dial_opts.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index bb3eaaa9b5a..954e87ee470 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -2761,7 +2761,7 @@ where | RpcOut::Prune(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { - unreachable!("Channel for highpriority contorl messages is unbounded and should always be open.") + unreachable!("Channel for highpriority control messages is unbounded and should always be open.") } } diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs index cdaaeb358b2..f569a38df1c 100644 --- a/swarm/src/dial_opts.rs +++ b/swarm/src/dial_opts.rs @@ -338,7 +338,7 @@ pub enum PeerCondition { NotDialing, /// A combination of [`Disconnected`](PeerCondition::Disconnected) and /// [`NotDialing`](PeerCondition::NotDialing). A new dialing attempt is - /// iniated _only if_ the peer is both considered disconnected and there + /// initiated _only if_ the peer is both considered disconnected and there /// is currently no ongoing dialing attempt. #[default] DisconnectedAndNotDialing, From 78e6f08cff260c159ea08ef3b3b8e5d207ab4b59 Mon Sep 17 00:00:00 2001 From: DrHuangMHT Date: Tue, 10 Dec 2024 19:56:39 +0800 Subject: [PATCH 43/50] fix(libp2p): expose builder phase error May close #4829 and #4824. Export three error types `BehaviourError`, `TransportError`, `WebsocketError` with rename. Feature gated `WebsocketError`. Exported at crate root as [suggested](https://github.com/libp2p/rust-libp2p/issues/4824#issuecomment-1803013514). Pull-Request: #5726. 
--- libp2p/CHANGELOG.md | 3 +++ libp2p/src/builder.rs | 4 ++++ libp2p/src/builder/phase.rs | 5 +++++ libp2p/src/lib.rs | 7 ++++++- 4 files changed, 18 insertions(+), 1 deletion(-) diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index e383cfd0cdc..e86d633b5a7 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -3,6 +3,9 @@ - Deprecate `void` crate. See [PR 5676](https://github.com/libp2p/rust-libp2p/pull/5676). +- Expose swarm builder phase errors. + See [PR 5726](https://github.com/libp2p/rust-libp2p/pull/5726). + ## 0.54.1 - Update individual crates. diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs index 99c340a5e3e..ae4d0b0d4e4 100644 --- a/libp2p/src/builder.rs +++ b/libp2p/src/builder.rs @@ -4,6 +4,10 @@ mod phase; mod select_muxer; mod select_security; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub use phase::WebsocketError; +pub use phase::{BehaviourError, TransportError}; + /// Build a [`Swarm`](libp2p_swarm::Swarm) by combining an identity, a set of /// [`Transport`](libp2p_core::Transport)s and a /// [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour). 
diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs index 6e3f41755ca..5bb0d948fc1 100644 --- a/libp2p/src/builder/phase.rs +++ b/libp2p/src/builder/phase.rs @@ -29,6 +29,11 @@ use swarm::*; use tcp::*; use websocket::*; +pub use behaviour::BehaviourError; +pub use other_transport::TransportError; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub use websocket::WebsocketError; + use super::{ select_muxer::SelectMuxerUpgrade, select_security::SelectSecurityUpgrade, SwarmBuilder, }; diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index 1ec1cc530fc..47e1142d0e9 100644 --- a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -148,12 +148,17 @@ pub mod bandwidth; #[cfg(doc)] pub mod tutorials; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub use builder::WebsocketError as WebsocketBuilderError; pub use libp2p_identity as identity; pub use libp2p_identity::PeerId; pub use libp2p_swarm::{Stream, StreamProtocol}; pub use self::{ - builder::SwarmBuilder, + builder::{ + BehaviourError as BehaviourBuilderError, SwarmBuilder, + TransportError as TransportBuilderError, + }, core::{ transport::TransportError, upgrade::{InboundUpgrade, OutboundUpgrade}, From 276ce84b28a5bb607b333d6fb8a382997f9ca64c Mon Sep 17 00:00:00 2001 From: Bastien Faivre Date: Tue, 10 Dec 2024 14:56:52 +0100 Subject: [PATCH 44/50] feat(request-response): Add connection id to behaviour events Closes #5716. Added connection id to the events emitted by a request-response Behaviour and adapted the code accordingly. Pull-Request: #5719. 
--- Cargo.lock | 6 +- Cargo.toml | 6 +- protocols/autonat/CHANGELOG.md | 4 + protocols/autonat/Cargo.toml | 2 +- .../autonat/src/v1/behaviour/as_client.rs | 2 + .../autonat/src/v1/behaviour/as_server.rs | 2 + protocols/rendezvous/CHANGELOG.md | 4 + protocols/rendezvous/Cargo.toml | 2 +- protocols/rendezvous/src/server.rs | 3 + protocols/request-response/CHANGELOG.md | 5 ++ protocols/request-response/Cargo.toml | 2 +- protocols/request-response/src/lib.rs | 81 ++++++++++++++----- .../request-response/tests/error_reporting.rs | 3 + protocols/request-response/tests/ping.rs | 9 ++- 14 files changed, 100 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45f185d9780..efa03d89d79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2611,7 +2611,7 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.13.1" +version = "0.13.2" dependencies = [ "async-trait", "asynchronous-codec", @@ -3145,7 +3145,7 @@ dependencies = [ [[package]] name = "libp2p-rendezvous" -version = "0.15.0" +version = "0.15.1" dependencies = [ "async-trait", "asynchronous-codec", @@ -3174,7 +3174,7 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.27.1" +version = "0.28.0" dependencies = [ "anyhow", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 7f7b601ab82..964f0fea240 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,7 @@ rust-version = "1.75.0" [workspace.dependencies] libp2p = { version = "0.54.2", path = "libp2p" } libp2p-allow-block-list = { version = "0.4.2", path = "misc/allow-block-list" } -libp2p-autonat = { version = "0.13.1", path = "protocols/autonat" } +libp2p-autonat = { version = "0.13.2", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.4.1", path = "misc/connection-limits" } libp2p-core = { version = "0.42.1", path = "core" } libp2p-dcutr = { version = "0.12.1", path = "protocols/dcutr" } @@ -95,8 +95,8 @@ libp2p-plaintext = { version = "0.42.0", path = "transports/plaintext" } libp2p-pnet = { 
version = "0.25.0", path = "transports/pnet" } libp2p-quic = { version = "0.11.2", path = "transports/quic" } libp2p-relay = { version = "0.18.1", path = "protocols/relay" } -libp2p-rendezvous = { version = "0.15.0", path = "protocols/rendezvous" } -libp2p-request-response = { version = "0.27.1", path = "protocols/request-response" } +libp2p-rendezvous = { version = "0.15.1", path = "protocols/rendezvous" } +libp2p-request-response = { version = "0.28.0", path = "protocols/request-response" } libp2p-server = { version = "0.12.8", path = "misc/server" } libp2p-stream = { version = "0.2.0-alpha.1", path = "protocols/stream" } libp2p-swarm = { version = "0.45.2", path = "swarm" } diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index 9b2bc4cb2ea..f946f59c9ef 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.13.2 + +- Update to `libp2p-request-response` `v0.28.0`. + ## 0.13.1 - Verify that an incoming AutoNAT dial comes from a connected peer. See [PR 5597](https://github.com/libp2p/rust-libp2p/pull/5597). diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 92ca163d8ec..88564b18541 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-autonat" edition = "2021" rust-version = { workspace = true } description = "NAT and firewall detection for libp2p" -version = "0.13.1" +version = "0.13.2" authors = [ "David Craven ", "Elena Frank ", diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs index 3377964373c..ca8daf6e1ac 100644 --- a/protocols/autonat/src/v1/behaviour/as_client.rs +++ b/protocols/autonat/src/v1/behaviour/as_client.rs @@ -112,6 +112,7 @@ impl HandleInnerEvent for AsClient<'_> { request_id, response, }, + .. 
} => { tracing::debug!(?response, "Outbound dial-back request returned response"); @@ -154,6 +155,7 @@ impl HandleInnerEvent for AsClient<'_> { peer, error, request_id, + .. } => { tracing::debug!( %peer, diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs index 663f94122c7..32b4120c552 100644 --- a/protocols/autonat/src/v1/behaviour/as_server.rs +++ b/protocols/autonat/src/v1/behaviour/as_server.rs @@ -107,6 +107,7 @@ impl HandleInnerEvent for AsServer<'_> { request, channel, }, + .. } => { let probe_id = self.probe_id.next(); if !self.connected.contains_key(&peer) { @@ -183,6 +184,7 @@ impl HandleInnerEvent for AsServer<'_> { peer, error, request_id, + .. } => { tracing::debug!( %peer, diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index 1ed9e5bc3b0..ca01538a76d 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.15.1 + +- Update to `libp2p-request-response` `v0.28.0`. + ## 0.15.0 diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index 5fa40c3785b..53a579918c5 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-rendezvous" edition = "2021" rust-version = { workspace = true } description = "Rendezvous protocol for libp2p" -version = "0.15.0" +version = "0.15.1" authors = ["The COMIT guys "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 8aafcfb48e3..1be7220cfcb 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -183,6 +183,7 @@ impl NetworkBehaviour for Behaviour { libp2p_request_response::Message::Request { request, channel, .. }, + .. 
}) => { if let Some((event, response)) = handle_request(peer_id, request, &mut self.registrations) @@ -202,6 +203,7 @@ impl NetworkBehaviour for Behaviour { peer, request_id, error, + .. }) => { tracing::warn!( %peer, @@ -217,6 +219,7 @@ impl NetworkBehaviour for Behaviour { | ToSwarm::GenerateEvent(libp2p_request_response::Event::Message { peer: _, message: libp2p_request_response::Message::Response { .. }, + .. }) | ToSwarm::GenerateEvent(libp2p_request_response::Event::OutboundFailure { .. diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 9ed658fc90f..15cb0c91797 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.28.0 + +- Add connection id to the events emitted by a request-response `Behaviour`. + See [PR 5719](https://github.com/libp2p/rust-libp2p/pull/5719). + ## 0.27.1 - Deprecate `void` crate. diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index b2e6fd0b0ac..48ef4c2c066 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = { workspace = true } description = "Generic Request/Response Protocols" -version = "0.27.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index 052e1e87e2b..39a773d99b4 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -131,6 +131,8 @@ pub enum Event { Message { /// The peer who sent the message. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The incoming message. message: Message, }, @@ -138,6 +140,8 @@ pub enum Event { OutboundFailure { /// The peer to whom the request was sent. 
peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The (local) ID of the failed request. request_id: OutboundRequestId, /// The error that occurred. @@ -147,6 +151,8 @@ pub enum Event { InboundFailure { /// The peer from whom the request was received. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The ID of the failed inbound request. request_id: InboundRequestId, /// The error that occurred. @@ -159,6 +165,8 @@ pub enum Event { ResponseSent { /// The peer to whom the response was sent. peer: PeerId, + /// The connection used. + connection_id: ConnectionId, /// The ID of the inbound request whose response was sent. request_id: InboundRequestId, }, @@ -569,10 +577,10 @@ where fn remove_pending_outbound_response( &mut self, peer: &PeerId, - connection: ConnectionId, + connection_id: ConnectionId, request: OutboundRequestId, ) -> bool { - self.get_connection_mut(peer, connection) + self.get_connection_mut(peer, connection_id) .map(|c| c.pending_outbound_responses.remove(&request)) .unwrap_or(false) } @@ -585,10 +593,10 @@ where fn remove_pending_inbound_response( &mut self, peer: &PeerId, - connection: ConnectionId, + connection_id: ConnectionId, request: InboundRequestId, ) -> bool { - self.get_connection_mut(peer, connection) + self.get_connection_mut(peer, connection_id) .map(|c| c.pending_inbound_responses.remove(&request)) .unwrap_or(false) } @@ -598,11 +606,11 @@ where fn get_connection_mut( &mut self, peer: &PeerId, - connection: ConnectionId, + connection_id: ConnectionId, ) -> Option<&mut Connection> { self.connected .get_mut(peer) - .and_then(|connections| connections.iter_mut().find(|c| c.id == connection)) + .and_then(|connections| connections.iter_mut().find(|c| c.id == connection_id)) } fn on_address_change( @@ -659,6 +667,7 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer: peer_id, + connection_id, request_id, error: InboundFailure::ConnectionClosed, })); @@ 
-668,13 +677,21 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer: peer_id, + connection_id, request_id, error: OutboundFailure::ConnectionClosed, })); } } - fn on_dial_failure(&mut self, DialFailure { peer_id, .. }: DialFailure) { + fn on_dial_failure( + &mut self, + DialFailure { + peer_id, + connection_id, + .. + }: DialFailure, + ) { if let Some(peer) = peer_id { // If there are pending outgoing requests when a dial failure occurs, // it is implied that we are not connected to the peer, since pending @@ -687,6 +704,7 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id: request.request_id, error: OutboundFailure::DialFailure, })); @@ -811,7 +829,7 @@ where fn on_connection_handler_event( &mut self, peer: PeerId, - connection: ConnectionId, + connection_id: ConnectionId, event: THandlerOutEvent, ) { match event { @@ -819,7 +837,8 @@ where request_id, response, } => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before receiving response.", @@ -830,13 +849,17 @@ where response, }; self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); + .push_back(ToSwarm::GenerateEvent(Event::Message { + peer, + connection_id, + message, + })); } handler::Event::Request { request_id, request, sender, - } => match self.get_connection_mut(&peer, connection) { + } => match self.get_connection_mut(&peer, connection_id) { Some(connection) => { let inserted = connection.pending_inbound_responses.insert(request_id); debug_assert!(inserted, "Expect id of new request to be unknown."); @@ -848,14 +871,19 @@ where channel, }; self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); + 
.push_back(ToSwarm::GenerateEvent(Event::Message { + peer, + connection_id, + message, + })); } None => { - tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + tracing::debug!("Connection ({connection_id}) closed after `Event::Request` ({request_id}) has been emitted."); } }, handler::Event::ResponseSent(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before response is sent." @@ -864,11 +892,13 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::ResponseSent { peer, + connection_id, request_id, })); } handler::Event::ResponseOmission(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before response is omitted.", @@ -877,12 +907,14 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer, + connection_id, request_id, error: InboundFailure::ResponseOmission, })); } handler::Event::OutboundTimeout(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before request times out." 
@@ -891,12 +923,14 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id, error: OutboundFailure::Timeout, })); } handler::Event::OutboundUnsupportedProtocols(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!( removed, "Expect request_id to be pending before failing to connect.", @@ -905,28 +939,33 @@ where self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id, error: OutboundFailure::UnsupportedProtocols, })); } handler::Event::OutboundStreamFailed { request_id, error } => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_outbound_response(&peer, connection_id, request_id); debug_assert!(removed, "Expect request_id to be pending upon failure"); self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, + connection_id, request_id, error: OutboundFailure::Io(error), })) } handler::Event::InboundTimeout(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); if removed { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer, + connection_id, request_id, error: InboundFailure::Timeout, })); @@ -938,12 +977,14 @@ where } } handler::Event::InboundStreamFailed { request_id, error } => { - let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + let removed = + self.remove_pending_inbound_response(&peer, connection_id, request_id); if removed { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer, + connection_id, request_id, error: InboundFailure::Io(error), })); diff 
--git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index d1f26378a77..2108b6006c5 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -566,6 +566,7 @@ async fn wait_request( request, channel, }, + .. }) => { return Ok((peer, request_id, request, channel)); } @@ -600,6 +601,7 @@ async fn wait_inbound_failure( peer, request_id, error, + .. }) => { return Ok((peer, request_id, error)); } @@ -618,6 +620,7 @@ async fn wait_outbound_failure( peer, request_id, error, + .. }) => { return Ok((peer, request_id, error)); } diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index e53fe99d6cf..94adedac2d7 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -65,6 +65,7 @@ async fn is_response_outbound() { peer, request_id: req_id, error: _error, + .. } => { assert_eq!(&offline_peer, &peer); assert_eq!(req_id, request_id1); @@ -116,6 +117,7 @@ async fn ping_protocol() { request_response::Message::Request { request, channel, .. }, + .. }) => { assert_eq!(&request, &expected_ping); assert_eq!(&peer, &peer2_id); @@ -157,6 +159,7 @@ async fn ping_protocol() { request_id, response, }, + .. } => { count += 1; assert_eq!(&response, &expected_pong); @@ -205,7 +208,8 @@ async fn emits_inbound_connection_closed_failure() { event = swarm1.select_next_some() => match event { SwarmEvent::Behaviour(request_response::Event::Message { peer, - message: request_response::Message::Request { request, channel, .. } + message: request_response::Message::Request { request, channel, .. }, + .. 
}) => { assert_eq!(&request, &ping); assert_eq!(&peer, &peer2_id); @@ -270,7 +274,8 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { event = swarm1.select_next_some() => { if let SwarmEvent::Behaviour(request_response::Event::Message { peer, - message: request_response::Message::Request { request, channel, .. } + message: request_response::Message::Request { request, channel, .. }, + .. }) = event { assert_eq!(&request, &ping); assert_eq!(&peer, &peer2_id); From bb9c3692bcea6c2b93d5459ef8db7e79426ca0a3 Mon Sep 17 00:00:00 2001 From: Elena Frank Date: Wed, 11 Dec 2024 17:21:43 +0700 Subject: [PATCH 45/50] chore(ci): update Rust stable version Update Rust stable version in our CI to the latest stable version 1.83.0. Pull-Request: #5730. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aad5b39aec7..0a849da9c97 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -225,7 +225,7 @@ jobs: fail-fast: false matrix: rust-version: [ - 1.80.0, # current stable + 1.83.0, # current stable beta, ] steps: From 1e9bb4ce2fb3c839c289b26e2d9db9dad4392eb1 Mon Sep 17 00:00:00 2001 From: lfg2 Date: Wed, 11 Dec 2024 19:05:35 +0800 Subject: [PATCH 46/50] chore(roadmap): fix typo Pull-Request: #5732. --- ROADMAP.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROADMAP.md b/ROADMAP.md index 0d422a6d385..a8df8242730 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -61,7 +61,7 @@ The project supports Wasm already today, though the developer experience is cumb Properly supporting Wasm opens rust-libp2p to a whole new set of use-cases. I would love for this to happen earlier. Though (a) I think we should prioritize improving existing functionality over new functionality and (b) we don't have high demand for this feature from the community. -(One could argue that that demand follows this roadmap item and not the other way round.) 
+(One could argue that the demand follows this roadmap item and not the other way round.) ### WebRTC in the browser via WASM From 524afb4e90cc667263149879fed051c07f5b4b66 Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Wed, 11 Dec 2024 20:15:03 +0800 Subject: [PATCH 47/50] chore(deps): upgrade uint to 0.10 This PR upgrade `uint` to `0.10` https://github.com/paritytech/parity-common/blob/master/uint/CHANGELOG.md#0100---2024-09-11 (Skipping changelog as there's no changes in public APIs) Pull-Request: #5699. --- Cargo.lock | 4 ++-- protocols/kad/Cargo.toml | 2 +- protocols/kad/src/kbucket.rs | 4 ++-- protocols/kad/src/kbucket/key.rs | 8 ++++---- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efa03d89d79..c0b635584ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6379,9 +6379,9 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "uint" -version = "0.9.5" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" dependencies = [ "byteorder", "crunchy", diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index cd97c91b2bf..dd93da2a01a 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -26,7 +26,7 @@ libp2p-identity = { workspace = true, features = ["rand"] } rand = "0.8" sha2 = "0.10.8" smallvec = "1.13.2" -uint = "0.9" +uint = "0.10" futures-timer = "3.0.3" web-time = { workspace = true } serde = { version = "1.0", optional = true, features = ["derive"] } diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 1c6d8857c9c..3f4b1281c3a 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -175,7 +175,7 @@ impl BucketIndex { let lower = usize::pow(2, rem); let upper = usize::pow(2, rem + 1); bytes[31 - quot] = 
rng.gen_range(lower..upper) as u8; - Distance(U256::from(bytes)) + Distance(U256::from_big_endian(bytes.as_slice())) } } @@ -650,7 +650,7 @@ mod tests { fn rand_distance() { fn prop(ix: u8) -> bool { let d = BucketIndex(ix as usize).rand_distance(&mut rand::thread_rng()); - let n = U256::from(<[u8; 32]>::from(d.0)); + let n = d.0; let b = U256::from(2); let e = U256::from(ix); let lower = b.pow(e); diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index 5b9590cb94c..ce14a3f779a 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -169,8 +169,8 @@ impl KeyBytes { where U: AsRef, { - let a = U256::from(self.0.as_slice()); - let b = U256::from(other.as_ref().0.as_slice()); + let a = U256::from_big_endian(self.0.as_slice()); + let b = U256::from_big_endian(other.as_ref().0.as_slice()); Distance(a ^ b) } @@ -180,8 +180,8 @@ impl KeyBytes { /// /// `self xor other = distance <==> other = self xor distance` pub fn for_distance(&self, d: Distance) -> KeyBytes { - let key_int = U256::from(self.0.as_slice()) ^ d.0; - KeyBytes(GenericArray::from(<[u8; 32]>::from(key_int))) + let key_int = U256::from_big_endian(self.0.as_slice()) ^ d.0; + KeyBytes(GenericArray::from(key_int.to_big_endian())) } } From cda1470dee3e29c040c53aa577317e1479ccf5c8 Mon Sep 17 00:00:00 2001 From: hanabi1224 Date: Wed, 11 Dec 2024 20:35:44 +0800 Subject: [PATCH 48/50] fix: RUSTSEC-2024-0421 by upgrading idna https://rustsec.org/advisories/RUSTSEC-2024-0421.html Pull-Request: #5727. 
--- Cargo.lock | 382 +++++++++++++++++--- Cargo.toml | 7 +- protocols/mdns/CHANGELOG.md | 5 + protocols/mdns/Cargo.toml | 4 +- protocols/mdns/src/behaviour/iface/query.rs | 8 +- transports/dns/CHANGELOG.md | 5 + transports/dns/Cargo.toml | 6 +- transports/dns/src/lib.rs | 15 +- 8 files changed, 358 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0b635584ef..15ea38544d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -410,6 +410,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "async-std" version = "1.12.0" @@ -440,9 +451,9 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.24.0" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0ed2b6671c13d2c28756c5a64e04759c1e0b5d3d7ac031f521c3561e21fbcb" +checksum = "f42964492d88a2a555cc65d8ab30e5e1178c1776f40b2717643c1aebb4297a1a" dependencies = [ "async-std", "async-trait", @@ -1965,10 +1976,11 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" dependencies = [ + "async-recursion", "async-trait", "cfg-if", "data-encoding", @@ -1976,12 +1988,12 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", "rand 0.8.5", "socket2 0.5.7", - "thiserror 1.0.63", + "thiserror 2.0.3", "tinyvec", "tokio", "tracing", @@ -1990,21 +2002,21 @@ dependencies = [ [[package]] 
name = "hickory-resolver" -version = "0.24.1" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" dependencies = [ "cfg-if", "futures-util", "hickory-proto", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot", "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror 1.0.63", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -2230,6 +2242,124 @@ dependencies = [ "tracing", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + 
"icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "identify-example" version = "0.1.0" @@ -2243,22 +2373,23 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + 
"smallvec", + "utf8_iter", ] [[package]] -name = "idna" -version = "0.5.0" +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -2718,7 +2849,7 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.42.0" +version = "0.42.1" dependencies = [ "async-std", "async-std-resolver", @@ -2878,7 +3009,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.46.0" +version = "0.46.1" dependencies = [ "async-io 2.3.3", "async-std", @@ -3558,12 +3689,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -3576,6 +3701,12 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "lock_api" version = "0.4.10" @@ -3604,15 +3735,6 @@ dependencies = [ "hashbrown 0.14.3", ] -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -3746,6 +3868,26 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "moka" +version = "0.12.8" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.63", + "triomphe", + "uuid", +] + [[package]] name = "multiaddr" version = "0.18.1" @@ -4027,9 +4169,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -4545,6 +4687,21 @@ dependencies = [ "syn 2.0.89", ] +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 0.11.0+wasi-snapshot-preview1", + "web-sys", + "winapi", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -4723,6 +4880,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "raw-cpuid" +version = "11.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" +dependencies = [ + "bitflags 2.4.1", +] + [[package]] name = "rayon" version = "1.10.0" @@ -5637,6 +5803,12 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -5840,6 +6012,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tempfile" version = "3.10.1" @@ -6001,6 +6179,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -6332,6 +6520,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "triomphe" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" + [[package]] name = "try-lock" version = "0.2.4" @@ -6398,27 +6592,12 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - [[package]] name = "unicode-ident" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-xid" version = "0.2.4" @@ -6475,12 +6654,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + 
"idna", "percent-encoding", ] @@ -6490,6 +6669,18 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.1" @@ -7204,6 +7395,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "x25519-dalek" version = "2.0.1" @@ -7306,6 +7509,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure 0.13.1", +] + [[package]] name = "zerocopy" version = "0.7.32" @@ -7326,6 +7553,27 @@ dependencies = [ "syn 2.0.89", ] +[[package]] +name = "zerofrom" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure 0.13.1", +] + [[package]] name = "zeroize" version = "1.8.1" @@ -7345,3 +7593,25 @@ dependencies = [ "quote", "syn 2.0.89", ] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] diff --git a/Cargo.toml b/Cargo.toml index 964f0fea240..e0feda0392a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,13 +78,13 @@ libp2p-autonat = { version = "0.13.2", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.4.1", path = "misc/connection-limits" } libp2p-core = { version = "0.42.1", path = "core" } libp2p-dcutr = { version = "0.12.1", path = "protocols/dcutr" } -libp2p-dns = { version = "0.42.0", path = "transports/dns" } +libp2p-dns = { version = "0.42.1", path = "transports/dns" } libp2p-floodsub = { version = "0.45.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.48.0", path = "protocols/gossipsub" } libp2p-identify = { version = "0.46.0", path = "protocols/identify" } libp2p-identity = { version = "0.2.10" } libp2p-kad = { version = "0.47.1", path = "protocols/kad" } -libp2p-mdns = { version = "0.46.0", path = 
"protocols/mdns" } +libp2p-mdns = { version = "0.46.1", path = "protocols/mdns" } libp2p-memory-connection-limits = { version = "0.3.1", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.15.0", path = "misc/metrics" } libp2p-mplex = { version = "0.42.0", path = "muxers/mplex" } @@ -115,10 +115,13 @@ libp2p-webtransport-websys = { version = "0.4.1", path = "transports/webtranspor libp2p-yamux = { version = "0.46.0", path = "muxers/yamux" } # External dependencies +async-std-resolver = { version = "0.25.0-alpha.4", default-features = false } asynchronous-codec = { version = "0.7.0" } futures = "0.3.30" futures-bounded = { version = "0.2.4" } futures-rustls = { version = "0.26.0", default-features = false } +hickory-proto = { version = "0.25.0-alpha.4", default-features = false } +hickory-resolver = { version = "0.25.0-alpha.4", default-features = false } multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 67b1d669f60..61290703c34 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.46.1 + +- Upgrade `hickory-proto`. 
+ See [PR 5727](https://github.com/libp2p/rust-libp2p/pull/5727) + ## 0.46.0 diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 338501aa896..16436848efe 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = { workspace = true } -version = "0.46.0" +version = "0.46.1" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -24,7 +24,7 @@ smallvec = "1.13.2" socket2 = { version = "0.5.7", features = ["all"] } tokio = { workspace = true, default-features = false, features = ["net", "time"], optional = true} tracing = { workspace = true } -hickory-proto = { version = "0.24.1", default-features = false, features = ["mdns"] } +hickory-proto = { workspace = true, features = ["mdns"] } [features] tokio = ["dep:tokio", "if-watch/tokio"] diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 7762ac5d214..a2a2c200b3b 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -51,7 +51,7 @@ impl MdnsPacket { pub(crate) fn new_from_bytes( buf: &[u8], from: SocketAddr, - ) -> Result, hickory_proto::error::ProtoError> { + ) -> Result, hickory_proto::ProtoError> { let packet = Message::from_vec(buf)?; if packet.query().is_none() { @@ -161,7 +161,7 @@ impl MdnsResponse { return None; } - let RData::PTR(record_value) = record.data()? 
else { + let RData::PTR(record_value) = record.data() else { return None; }; @@ -243,7 +243,7 @@ impl MdnsPeer { return None; } - if let Some(RData::TXT(ref txt)) = add_record.data() { + if let RData::TXT(ref txt) = add_record.data() { Some(txt) } else { None @@ -341,7 +341,7 @@ mod tests { if record.name().to_utf8() != SERVICE_NAME_FQDN { return None; } - let Some(RData::PTR(record_value)) = record.data() else { + let RData::PTR(record_value) = record.data() else { return None; }; Some(record_value) diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index e4f951f157f..b46b0413403 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.42.1 + +- Upgrade `async-std-resolver` and `hickory-resolver`. + See [PR 5727](https://github.com/libp2p/rust-libp2p/pull/5727) + ## 0.42.0 - Implement refactored `Transport`. diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 707b67fc935..2a12c34a383 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = { workspace = true } description = "DNS transport implementation for libp2p" -version = "0.42.0" +version = "0.42.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,13 +11,13 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-std-resolver = { version = "0.24", optional = true } +async-std-resolver = { workspace = true, features = ["system-config"], optional = true } async-trait = "0.1.80" futures = { workspace = true } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } parking_lot = "0.12.3" -hickory-resolver = { version = "0.24.1", default-features = false, features = ["system-config"] } +hickory-resolver = { workspace = true, features = ["system-config"] } smallvec = "1.13.2" tracing = { workspace = true 
} diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index d47f1e464db..d777d54a5f2 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -117,12 +117,12 @@ pub mod async_std { pub mod tokio { use std::sync::Arc; - use hickory_resolver::{system_conf, TokioAsyncResolver}; + use hickory_resolver::{system_conf, TokioResolver}; use parking_lot::Mutex; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `tokio` for all async I/O. - pub type Transport = crate::Transport; + pub type Transport = crate::Transport; impl Transport { /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. @@ -140,7 +140,7 @@ pub mod tokio { ) -> Transport { Transport { inner: Arc::new(Mutex::new(inner)), - resolver: TokioAsyncResolver::tokio(cfg, opts), + resolver: TokioResolver::tokio(cfg, opts), } } } @@ -160,13 +160,12 @@ use async_trait::async_trait; use futures::{future::BoxFuture, prelude::*}; pub use hickory_resolver::{ config::{ResolverConfig, ResolverOpts}, - error::{ResolveError, ResolveErrorKind}, + {ResolveError, ResolveErrorKind}, }; use hickory_resolver::{ lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}, lookup_ip::LookupIp, name_server::ConnectionProvider, - AsyncResolver, }; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, @@ -594,7 +593,7 @@ pub trait Resolver { } #[async_trait] -impl Resolver for AsyncResolver +impl Resolver for hickory_resolver::Resolver where C: ConnectionProvider, { @@ -618,6 +617,7 @@ where #[cfg(all(test, any(feature = "tokio", feature = "async-std")))] mod tests { use futures::future::BoxFuture; + use hickory_resolver::proto::{ProtoError, ProtoErrorKind}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{PortUse, TransportError, TransportEvent}, @@ -750,7 +750,8 @@ mod tests { .await { Err(Error::ResolveError(e)) => match e.kind() { - ResolveErrorKind::NoRecordsFound { .. } => {} + ResolveErrorKind::Proto(ProtoError { kind, .. 
}) + if matches!(kind.as_ref(), ProtoErrorKind::NoRecordsFound { .. }) => {} _ => panic!("Unexpected DNS error: {e:?}"), }, Err(e) => panic!("Unexpected error: {e:?}"), From 99544c40366d124a701c818582774265a341e440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 12 Dec 2024 16:15:34 +0000 Subject: [PATCH 49/50] deps(metrics-example): update opentelemetry to 0.27 this will help fix the `cargo deny` situation as `opentelemetry-otlp` `0.25` has `tokio` [locked to `~1.38.0`](https://crates.io/crates/opentelemetry-otlp/0.25.0/dependencies) :man_shrugging: which then impedes us tfrom updating `netlink-sys` Pull-Request: #5735. --- Cargo.lock | 45 ++++++++++++++++++------------------ examples/metrics/Cargo.toml | 8 +++---- examples/metrics/src/main.rs | 22 ++++++++++-------- 3 files changed, 40 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15ea38544d2..b50e0058a8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3806,13 +3806,13 @@ dependencies = [ "axum", "futures", "libp2p", - "opentelemetry 0.25.0", + "opentelemetry 0.27.1", "opentelemetry-otlp", - "opentelemetry_sdk 0.25.0", + "opentelemetry_sdk 0.27.1", "prometheus-client", "tokio", "tracing", - "tracing-opentelemetry 0.26.0", + "tracing-opentelemetry 0.28.0", "tracing-subscriber", ] @@ -4247,16 +4247,16 @@ dependencies = [ [[package]] name = "opentelemetry" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "803801d3d3b71cd026851a53f974ea03df3d179cb758b260136a6c9e22e196af" +checksum = "ab70038c28ed37b97d8ed414b6429d343a8bbf44c9f79ec854f3a643029ba6d7" dependencies = [ "futures-core", "futures-sink", "js-sys", - "once_cell", "pin-project-lite", "thiserror 1.0.63", + "tracing", ] [[package]] @@ -4277,30 +4277,31 @@ dependencies = [ [[package]] name = "opentelemetry-otlp" -version = "0.25.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"596b1719b3cab83addb20bcbffdf21575279d9436d9ccccfe651a3bf0ab5ab06" +checksum = "91cf61a1868dacc576bf2b2a1c3e9ab150af7272909e80085c3173384fe11f76" dependencies = [ "async-trait", "futures-core", "http 1.1.0", - "opentelemetry 0.25.0", + "opentelemetry 0.27.1", "opentelemetry-proto", - "opentelemetry_sdk 0.25.0", + "opentelemetry_sdk 0.27.1", "prost", "thiserror 1.0.63", "tokio", "tonic", + "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.25.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c43620e8f93359eb7e627a3b16ee92d8585774986f24f2ab010817426c5ce61" +checksum = "a6e05acbfada5ec79023c85368af14abd0b307c015e9064d249b2a950ef459a6" dependencies = [ - "opentelemetry 0.25.0", - "opentelemetry_sdk 0.25.0", + "opentelemetry 0.27.1", + "opentelemetry_sdk 0.27.1", "prost", "tonic", ] @@ -4338,23 +4339,23 @@ dependencies = [ [[package]] name = "opentelemetry_sdk" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0da0d6b47a3dbc6e9c9e36a0520e25cf943e046843818faaa3f87365a548c82" +checksum = "231e9d6ceef9b0b2546ddf52335785ce41252bc7474ee8ba05bfad277be13ab8" dependencies = [ "async-trait", "futures-channel", "futures-executor", "futures-util", "glob", - "once_cell", - "opentelemetry 0.25.0", + "opentelemetry 0.27.1", "percent-encoding", "rand 0.8.5", "serde_json", "thiserror 1.0.63", "tokio", "tokio-stream", + "tracing", ] [[package]] @@ -6475,14 +6476,14 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.26.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eabc56d23707ad55ba2a0750fc24767125d5a0f51993ba41ad2c441cc7b8dea" +checksum = "97a971f6058498b5c0f1affa23e7ea202057a7301dbff68e968b2d578bcbd053" dependencies = [ "js-sys", "once_cell", - "opentelemetry 0.25.0", - "opentelemetry_sdk 0.25.0", + "opentelemetry 0.27.1", + "opentelemetry_sdk 0.27.1", "smallvec", 
"tracing", "tracing-core", diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index 129b1abb1f3..ad2941e3761 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -12,13 +12,13 @@ release = false futures = { workspace = true } axum = "0.7" libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -opentelemetry = { version = "0.25.0", features = ["metrics"] } -opentelemetry-otlp = { version = "0.25.0", features = ["metrics"] } -opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio", "metrics"] } +opentelemetry = { version = "0.27.0", features = ["metrics"] } +opentelemetry-otlp = { version = "0.27.0", features = ["metrics"] } +opentelemetry_sdk = { version = "0.27.0", features = ["rt-tokio", "metrics"] } prometheus-client = { workspace = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } -tracing-opentelemetry = "0.26.0" +tracing-opentelemetry = "0.28.0" tracing-subscriber = { workspace = true, features = ["env-filter"] } [lints] diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 92aa90479fd..62e5b06251d 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -31,7 +31,9 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, }; -use opentelemetry::{trace::TracerProvider, KeyValue}; +use opentelemetry::{trace::TracerProvider as _, KeyValue}; +use opentelemetry_otlp::SpanExporter; +use opentelemetry_sdk::{runtime, trace::TracerProvider}; use prometheus_client::registry::Registry; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; @@ -92,14 +94,16 @@ async fn main() -> Result<(), Box> { } fn setup_tracing() -> Result<(), Box> { - let provider = opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter(opentelemetry_otlp::new_exporter().tonic()) - 
.with_trace_config(opentelemetry_sdk::trace::Config::default().with_resource( - opentelemetry_sdk::Resource::new(vec![KeyValue::new("service.name", "libp2p")]), - )) - .install_batch(opentelemetry_sdk::runtime::Tokio)?; - + let provider = TracerProvider::builder() + .with_batch_exporter( + SpanExporter::builder().with_tonic().build()?, + runtime::Tokio, + ) + .with_resource(opentelemetry_sdk::Resource::new(vec![KeyValue::new( + "service.name", + "libp2p", + )])) + .build(); tracing_subscriber::registry() .with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) .with( From 596da237deab0b9a734216e6be115a3ee9f02fa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 12 Dec 2024 16:20:52 +0000 Subject: [PATCH 50/50] fix: update Cargo.lock --- Cargo.lock | 493 ++++++++++++++++++++++------------------------------- 1 file changed, 205 insertions(+), 288 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b50e0058a8d..18b579f6d2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -87,15 +87,16 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.7" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd2405b3ac1faab2990b74d728624cd9fd115651fcecc7c2d8daf01376275ba" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] @@ -282,65 +283,43 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.1" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ - "async-lock 2.7.0", "async-task", "concurrent-queue", - "fastrand 1.9.0", - 
"futures-lite 1.13.0", + "fastrand", + "futures-lite", "slab", ] [[package]] name = "async-fs" -version = "1.6.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" dependencies = [ - "async-lock 2.7.0", - "autocfg", + "async-lock 3.1.0", "blocking", - "futures-lite 1.13.0", + "futures-lite", ] [[package]] name = "async-global-executor" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +checksum = "9b4353121d5644cdf2beb5726ab752e79a8db1ebb52031770ec47db31d245526" dependencies = [ - "async-channel 1.9.0", + "async-channel 2.3.1", "async-executor", - "async-io 1.13.0", - "async-lock 2.7.0", + "async-io", + "async-lock 3.1.0", "blocking", - "futures-lite 1.13.0", + "futures-lite", "once_cell", ] -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.7.0", - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.25", - "slab", - "socket2 0.4.9", - "waker-fn", -] - [[package]] name = "async-io" version = "2.3.3" @@ -351,10 +330,10 @@ dependencies = [ "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.0.1", + "futures-lite", "parking", - "polling 3.3.0", - "rustix 0.38.31", + "polling", + "rustix", "slab", "tracing", "windows-sys 0.52.0", @@ -382,32 +361,31 @@ dependencies = [ [[package]] name = "async-net" -version = "1.7.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" +checksum = 
"b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" dependencies = [ - "async-io 1.13.0", - "autocfg", + "async-io", "blocking", - "futures-lite 1.13.0", + "futures-lite", ] [[package]] name = "async-process" -version = "1.7.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" +checksum = "451e3cf68011bd56771c79db04a9e333095ab6349f7e47592b788e9b98720cc8" dependencies = [ - "async-io 1.13.0", - "async-lock 2.7.0", - "autocfg", + "async-channel 2.3.1", + "async-io", + "async-lock 3.1.0", + "async-signal", "blocking", "cfg-if", - "event-listener 2.5.3", - "futures-lite 1.13.0", - "rustix 0.37.25", - "signal-hook", - "windows-sys 0.48.0", + "event-listener 5.3.1", + "futures-lite", + "rustix", + "windows-sys 0.52.0", ] [[package]] @@ -421,24 +399,42 @@ dependencies = [ "syn 2.0.89", ] +[[package]] +name = "async-signal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +dependencies = [ + "async-io", + "async-lock 2.7.0", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.48.0", +] + [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.7.0", + "async-io", + "async-lock 3.1.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", - "gloo-timers", + "futures-lite", + "gloo-timers 0.3.0", "kv-log-macro", "log", "memchr", @@ -461,7 +457,7 @@ 
dependencies = [ "futures-util", "hickory-resolver", "pin-utils", - "socket2 0.5.7", + "socket2", ] [[package]] @@ -739,17 +735,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.3.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 1.9.0", - "async-lock 2.7.0", + "async-channel 2.3.1", "async-task", - "atomic-waker", - "fastrand 1.9.0", - "futures-lite 1.13.0", - "log", + "futures-io", + "futures-lite", + "piper", ] [[package]] @@ -811,9 +805,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -1572,15 +1566,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - [[package]] name = "fastrand" version = "2.0.1" @@ -1648,9 +1633,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1673,9 +1658,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1683,15 +1668,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1701,40 +1686,29 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "1.13.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +checksum = "d3831c2651acb5177cbd83943f3d9c8912c5ad03c76afcc0e9511ba568ec5ebb" dependencies = [ - "fastrand 1.9.0", + "fastrand", "futures-core", "futures-io", "memchr", "parking", "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3831c2651acb5177cbd83943f3d9c8912c5ad03c76afcc0e9511ba568ec5ebb" -dependencies = [ - "futures-core", - "pin-project-lite", ] [[package]] name = "futures-macro" -version = "0.3.30" +version = 
"0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1754,15 +1728,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1770,15 +1744,15 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper 0.4.0", ] [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1886,6 +1860,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "group" version = "0.13.0" @@ -1992,7 +1978,7 @@ dependencies = [ 
"ipnet", "once_cell", "rand 0.8.5", - "socket2 0.5.7", + "socket2", "thiserror 2.0.3", "tinyvec", "tokio", @@ -2236,7 +2222,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -2404,22 +2390,26 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ - "async-io 2.3.3", + "async-io", "core-foundation", "fnv", "futures", "if-addrs", "ipnet", "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", "rtnetlink", "smol", - "system-configuration", + "system-configuration 0.6.1", "tokio", - "windows 0.51.1", + "windows", ] [[package]] @@ -2473,15 +2463,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - [[package]] name = "integer-encoding" version = "3.0.4" @@ -2541,24 +2522,13 @@ dependencies = [ "web-time 1.1.0", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipconfig" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -2605,10 +2575,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.31", + "rustix", "windows-sys 0.48.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.10.5" @@ -2672,9 +2648,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libp2p" @@ -3011,7 +2987,7 @@ dependencies = [ name = "libp2p-mdns" version = "0.46.1" dependencies = [ - "async-io 2.3.3", + "async-io", "async-std", "data-encoding", "futures", @@ -3026,7 +3002,7 @@ dependencies = [ "libp2p-yamux", "rand 0.8.5", "smallvec", - "socket2 0.5.7", + "socket2", "tokio", "tracing", "tracing-subscriber", @@ -3239,7 +3215,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustls 0.23.11", - "socket2 0.5.7", + "socket2", "thiserror 2.0.3", "tokio", "tracing", @@ -3431,7 +3407,7 @@ dependencies = [ name = "libp2p-tcp" version = "0.42.0" dependencies = [ - "async-io 2.3.3", + "async-io", "async-std", "futures", "futures-timer", @@ -3439,7 +3415,7 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "socket2 0.5.7", + "socket2", "tokio", "tracing", "tracing-subscriber", @@ -3689,12 +3665,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.12" @@ -3859,13 +3829,13 @@ 
dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3970,21 +3940,20 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.4.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" dependencies = [ "anyhow", "byteorder", - "libc", "netlink-packet-utils", ] [[package]] name = "netlink-packet-route" -version = "0.12.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -4008,9 +3977,9 @@ dependencies = [ [[package]] name = "netlink-proto" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" dependencies = [ "bytes", "futures", @@ -4023,11 +3992,11 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" dependencies = [ - "async-io 1.13.0", + "async-io", "bytes", "futures", "libc", @@ -4035,17 
+4004,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", -] - [[package]] name = "nix" version = "0.26.4" @@ -4408,9 +4366,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -4509,6 +4467,17 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -4553,22 +4522,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - [[package]] name = "polling" version = "3.3.0" @@ -4578,7 +4531,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.31", + "rustix", "tracing", "windows-sys 0.48.0", ] @@ -4757,7 +4710,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" dependencies = [ - "async-io 2.3.3", + "async-io", "async-std", "bytes", "futures-io", @@ -4796,7 +4749,7 @@ checksum = 
"cb7ad7bc932e4968523fa7d9c320ee135ff779de720e9350fee8728838551764" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.52.0", ] @@ -5072,7 +5025,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", "tokio-rustls", @@ -5179,16 +5132,19 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" dependencies = [ "async-global-executor", "futures", "log", + "netlink-packet-core", "netlink-packet-route", + "netlink-packet-utils", "netlink-proto", - "nix 0.24.3", + "netlink-sys", + "nix", "thiserror 1.0.63", "tokio", ] @@ -5278,20 +5234,6 @@ dependencies = [ "nom", ] -[[package]] -name = "rustix" -version = "0.37.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - [[package]] name = "rustix" version = "0.38.31" @@ -5301,7 +5243,7 @@ dependencies = [ "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.12", + "linux-raw-sys", "windows-sys 0.52.0", ] @@ -5660,16 +5602,6 @@ dependencies = [ "dirs", ] -[[package]] -name = "signal-hook" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -5706,19 +5638,19 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol" 
-version = "1.3.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" dependencies = [ - "async-channel 1.9.0", + "async-channel 2.3.1", "async-executor", "async-fs", - "async-io 1.13.0", - "async-lock 2.7.0", + "async-io", + "async-lock 3.1.0", "async-net", "async-process", "blocking", - "futures-lite 1.13.0", + "futures-lite", ] [[package]] @@ -5747,16 +5679,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -5989,7 +5911,7 @@ dependencies = [ "ntapi", "once_cell", "rayon", - "windows 0.52.0", + "windows", ] [[package]] @@ -6000,7 +5922,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.4.1", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -6013,6 +5946,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tagptr" version = "0.2.0" @@ -6026,8 +5969,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.1", - "rustix 0.38.31", + "fastrand", + "rustix", "windows-sys 0.52.0", ] @@ -6217,28 +6160,27 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", @@ -6346,7 +6288,7 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "socket2 0.5.7", + "socket2", "tokio", "tokio-stream", "tower", @@ -6730,12 +6672,6 @@ dependencies = [ "atomic-waker", ] -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "walkdir" version = "2.3.3" @@ -7042,7 +6978,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62bebbd40e7f8b630a0f1a74783dbfff1edfc0ccaae891c4689891156a8c4d8c" dependencies = [ "log", - "socket2 0.5.7", + "socket2", "thiserror 1.0.63", "tokio", "webrtc-util 0.8.1", @@ -7114,7 +7050,7 @@ dependencies = [ "lazy_static", "libc", "log", - "nix 0.26.4", + "nix", "rand 0.8.5", "thiserror 1.0.63", "tokio", @@ 
-7134,7 +7070,7 @@ dependencies = [ "lazy_static", "libc", "log", - "nix 0.26.4", + "nix", "portable-atomic", "rand 0.8.5", "thiserror 1.0.63", @@ -7197,35 +7133,16 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" -dependencies = [ - "windows-core 0.51.1", - "windows-targets 0.48.5", -] - [[package]] name = "windows" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core 0.52.0", + "windows-core", "windows-targets 0.52.0", ] -[[package]] -name = "windows-core" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows-core" version = "0.52.0"