diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml deleted file mode 100644 index 3833fc229..000000000 --- a/.github/workflows/automerge.yml +++ /dev/null @@ -1,11 +0,0 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. - -name: Automerge -on: [ pull_request ] - -jobs: - automerge: - uses: protocol/.github/.github/workflows/automerge.yml@master - with: - job: 'automerge' diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml index cc65ce68a..26f63bc12 100644 --- a/.github/workflows/go-check.yml +++ b/.github/workflows/go-check.yml @@ -1,67 +1,18 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. - -on: [push, pull_request] name: Go Checks +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + jobs: - unit: - runs-on: ubuntu-latest - name: All - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - id: config - uses: protocol/.github/.github/actions/read-config@master - - uses: actions/setup-go@v3 - with: - go-version: 1.20.x - - name: Run repo-specific setup - uses: ./.github/actions/go-check-setup - if: hashFiles('./.github/actions/go-check-setup') != '' - - name: Install staticcheck - run: go install honnef.co/go/tools/cmd/staticcheck@4970552d932f48b71485287748246cf3237cebdf # 2023.1 (v0.4.0) - - name: Check that go.mod is tidy - uses: protocol/multiple-go-modules@v1.2 - with: - run: | - go mod tidy - if [[ -n $(git ls-files --other --exclude-standard --directory -- go.sum) ]]; then - echo "go.sum was added by go mod tidy" - exit 1 - fi - git diff --exit-code -- go.sum go.mod - - name: gofmt - if: success() || failure() # run this step even if the previous one failed - run: | - 
out=$(gofmt -s -l .) - if [[ -n "$out" ]]; then - echo $out | awk '{print "::error file=" $0 ",line=0,col=0::File is not gofmt-ed."}' - exit 1 - fi - - name: go vet - if: success() || failure() # run this step even if the previous one failed - uses: protocol/multiple-go-modules@v1.2 - with: - run: go vet ./... - - name: staticcheck - if: success() || failure() # run this step even if the previous one failed - uses: protocol/multiple-go-modules@v1.2 - with: - run: | - set -o pipefail - staticcheck ./... | sed -e 's@\(.*\)\.go@./\1.go@g' - - name: go generate - uses: protocol/multiple-go-modules@v1.2 - if: (success() || failure()) && fromJSON(steps.config.outputs.json).gogenerate == true - with: - run: | - git clean -fd # make sure there aren't untracked files / directories - go generate -x ./... - # check if go generate modified or added any files - if ! $(git add . && git diff-index HEAD --exit-code --quiet); then - echo "go generated caused changes to the repository:" - git status --short - exit 1 - fi + go-check: + uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml index c5cb3efc7..778de6ed4 100644 --- a/.github/workflows/go-test.yml +++ b/.github/workflows/go-test.yml @@ -1,76 +1,20 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. 
- -on: [push, pull_request] name: Go Test +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + jobs: - unit: - strategy: - fail-fast: false - matrix: - os: [ "ubuntu", "windows", "macos" ] - go: ["1.19.x","1.20.x"] - env: - COVERAGES: "" - runs-on: ${{ fromJSON(vars[format('UCI_GO_TEST_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }} - name: ${{ matrix.os }} (go ${{ matrix.go }}) - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - id: config - uses: protocol/.github/.github/actions/read-config@master - - uses: actions/setup-go@v3 - with: - go-version: ${{ matrix.go }} - - name: Go information - run: | - go version - go env - - name: Use msys2 on windows - if: matrix.os == 'windows' - shell: bash - # The executable for msys2 is also called bash.cmd - # https://github.com/actions/virtual-environments/blob/main/images/win/Windows2019-Readme.md#shells - # If we prepend its location to the PATH - # subsequent 'shell: bash' steps will use msys2 instead of gitbash - run: echo "C:/msys64/usr/bin" >> $GITHUB_PATH - - name: Run repo-specific setup - uses: ./.github/actions/go-test-setup - if: hashFiles('./.github/actions/go-test-setup') != '' - - name: Run tests - if: contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false - uses: protocol/multiple-go-modules@v1.2 - with: - # Use -coverpkg=./..., so that we include cross-package coverage. - # If package ./A imports ./B, and ./A's tests also cover ./B, - # this means ./B's coverage will be significantly higher than 0%. - run: go test -v -shuffle=on -coverprofile=module-coverage.txt -coverpkg=./... ./... - - name: Run tests (32 bit) - # can't run 32 bit tests on OSX. 
- if: matrix.os != 'macos' && - fromJSON(steps.config.outputs.json).skip32bit != true && - contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false - uses: protocol/multiple-go-modules@v1.2 - env: - GOARCH: 386 - with: - run: | - export "PATH=$PATH_386:$PATH" - go test -v -shuffle=on ./... - - name: Run tests with race detector - # speed things up. Windows and OSX VMs are slow - if: matrix.os == 'ubuntu' && - contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false - uses: protocol/multiple-go-modules@v1.2 - with: - run: go test -v -race ./... - - name: Collect coverage files - shell: bash - run: echo "COVERAGES=$(find . -type f -name 'module-coverage.txt' | tr -s '\n' ',' | sed 's/,$//')" >> $GITHUB_ENV - - name: Upload coverage to Codecov - uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1 - with: - files: '${{ env.COVERAGES }}' - env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }} + go-test: + uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/release-check.yml b/.github/workflows/release-check.yml index e2408e37c..0b5ff6070 100644 --- a/.github/workflows/release-check.yml +++ b/.github/workflows/release-check.yml @@ -1,13 +1,19 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. 
- name: Release Checker + on: pull_request_target: paths: [ 'version.json' ] + types: [ opened, synchronize, reopened, labeled, unlabeled ] + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: release-check: - uses: protocol/.github/.github/workflows/release-check.yml@master - with: - go-version: 1.20.x + uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml index cdccbf873..2ebdbed31 100644 --- a/.github/workflows/releaser.yml +++ b/.github/workflows/releaser.yml @@ -1,11 +1,17 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. - name: Releaser + on: push: paths: [ 'version.json' ] + workflow_dispatch: + +permissions: + contents: write + +concurrency: + group: ${{ github.workflow }}-${{ github.sha }} + cancel-in-progress: true jobs: releaser: - uses: protocol/.github/.github/workflows/releaser.yml@master + uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 6f6d895d1..16d65d721 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -2,25 +2,12 @@ name: Close and mark stale issue on: schedule: - - cron: '0 0 * * *' + - cron: '0 0 * * *' + +permissions: + issues: write + pull-requests: write jobs: stale: - - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - - steps: - - uses: actions/stale@v3 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 7 days.' - close-issue-message: 'This issue was closed because it is missing author input.' 
- stale-issue-label: 'kind/stale' - any-of-labels: 'need/author-input' - exempt-issue-labels: 'need/triage,need/community-input,need/maintainer-input,need/maintainers-input,need/analysis,status/blocked,status/in-progress,status/ready,status/deferred,status/inactive' - days-before-issue-stale: 6 - days-before-issue-close: 7 - enable-statistics: true + uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3 diff --git a/.github/workflows/tagpush.yml b/.github/workflows/tagpush.yml index d84996187..5ef3fb9ed 100644 --- a/.github/workflows/tagpush.yml +++ b/.github/workflows/tagpush.yml @@ -1,12 +1,18 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. - name: Tag Push Checker + on: push: tags: - v* +permissions: + contents: read + issues: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: releaser: - uses: protocol/.github/.github/workflows/tagpush.yml@master + uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 diff --git a/README.md b/README.md index 517bc9f3a..b9b0a69b6 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ - [Install](#install) - [Usage](#usage) +- [Optimizations](#optimizations) - [Contribute](#contribute) - [Maintainers](#maintainers) - [License](#license) @@ -21,6 +22,10 @@ go get github.com/libp2p/go-libp2p-kad-dht ``` +## Optimizations + +Client-side optimizations are described in [optimizations.md](./optimizations.md) + ## Usage Go to https://godoc.org/github.com/libp2p/go-libp2p-kad-dht. 
diff --git a/crawler/crawler.go b/crawler/crawler.go index 965f1df2f..e055c5414 100644 --- a/crawler/crawler.go +++ b/crawler/crawler.go @@ -10,7 +10,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" //lint:ignore SA1019 TODO migrate away from gogo pb "github.com/libp2p/go-msgio/protoio" diff --git a/dht.go b/dht.go index 2a660c21d..43b9689f3 100644 --- a/dht.go +++ b/dht.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/libp2p/go-libp2p-routing-helpers/tracing" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -32,16 +33,18 @@ import ( "github.com/gogo/protobuf/proto" ds "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log" - "github.com/jbenet/goprocess" - goprocessctx "github.com/jbenet/goprocess/context" + logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-base32" ma "github.com/multiformats/go-multiaddr" multihash "github.com/multiformats/go-multihash" "go.opencensus.io/tag" + "go.uber.org/multierr" "go.uber.org/zap" ) +const tracer = tracing.Tracer("go-libp2p-kad-dht") +const dhtName = "IpfsDHT" + var ( logger = logging.Logger("dht") baseLogger = logger.Desugar() @@ -100,13 +103,12 @@ type IpfsDHT struct { Validator record.Validator - ctx context.Context - proc goprocess.Process + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup protoMessenger *pb.ProtocolMessenger - msgSender pb.MessageSender - - plk sync.Mutex + msgSender pb.MessageSenderWithDisconnect stripedPutLocks [256]sync.Mutex @@ -134,6 +136,12 @@ type IpfsDHT struct { autoRefresh bool + // timeout for the lookupCheck operation + lookupCheckTimeout time.Duration + // number of concurrent lookupCheck operations + lookupCheckCapacity int + lookupChecksLk sync.Mutex + // A function returning a set of bootstrap peers to fallback on if all other attempts to fix 
// the routing table fail (or, e.g., this is the first time this node is // connecting to the network). @@ -149,7 +157,7 @@ type IpfsDHT struct { disableFixLowPeers bool fixLowPeersChan chan struct{} - addPeerToRTChan chan addPeerRTReq + addPeerToRTChan chan peer.ID refreshFinishedCh chan struct{} rtFreezeTimeout time.Duration @@ -163,6 +171,10 @@ type IpfsDHT struct { // configuration variables for tests testAddressUpdateProcessing bool + + // addrFilter is used to filter the addresses we put into the peer store. + // Mostly used to filter out localhost and local addresses. + addrFilter func([]ma.Multiaddr) []ma.Multiaddr } // Assert that IPFS assumptions about interfaces aren't broken. These aren't a @@ -192,7 +204,7 @@ func New(ctx context.Context, h host.Host, options ...Option) (*IpfsDHT, error) return nil, err } - dht, err := makeDHT(ctx, h, cfg) + dht, err := makeDHT(h, cfg) if err != nil { return nil, fmt.Errorf("failed to create DHT, err=%s", err) } @@ -243,30 +255,27 @@ func New(ctx context.Context, h host.Host, options ...Option) (*IpfsDHT, error) } // register for event bus and network notifications - sn, err := newSubscriberNotifiee(dht) - if err != nil { + if err := dht.startNetworkSubscriber(); err != nil { return nil, err } - dht.proc.Go(sn.subscribe) - // handle providers - if mgr, ok := dht.providerStore.(interface{ Process() goprocess.Process }); ok { - dht.proc.AddChild(mgr.Process()) - } // go-routine to make sure we ALWAYS have RT peer addresses in the peerstore // since RT membership is decoupled from connectivity go dht.persistRTPeersInPeerStore() - dht.proc.Go(dht.rtPeerLoop) + dht.rtPeerLoop() // Fill routing table with currently connected peers that are DHT servers - dht.plk.Lock() for _, p := range dht.host.Network().Peers() { - dht.peerFound(p, false) + dht.peerFound(p) } - dht.plk.Unlock() - dht.proc.Go(dht.populatePeers) + dht.rtRefreshManager.Start() + + // listens to the fix low peers chan and tries to fix the Routing Table + if 
!dht.disableFixLowPeers { + dht.runFixLowPeersLoop() + } return dht, nil } @@ -293,7 +302,7 @@ func NewDHTClient(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT return dht } -func makeDHT(ctx context.Context, h host.Host, cfg dhtcfg.Config) (*IpfsDHT, error) { +func makeDHT(h host.Host, cfg dhtcfg.Config) (*IpfsDHT, error) { var protocols, serverProtocols []protocol.ID v1proto := cfg.ProtocolPrefix + kad1 @@ -317,13 +326,15 @@ func makeDHT(ctx context.Context, h host.Host, cfg dhtcfg.Config) (*IpfsDHT, err bucketSize: cfg.BucketSize, alpha: cfg.Concurrency, beta: cfg.Resiliency, + lookupCheckCapacity: cfg.LookupCheckConcurrency, queryPeerFilter: cfg.QueryPeerFilter, routingTablePeerFilter: cfg.RoutingTable.PeerFilter, rtPeerDiversityFilter: cfg.RoutingTable.DiversityFilter, + addrFilter: cfg.AddressFilter, fixLowPeersChan: make(chan struct{}, 1), - addPeerToRTChan: make(chan addPeerRTReq), + addPeerToRTChan: make(chan peer.ID), refreshFinishedCh: make(chan struct{}), enableOptProv: cfg.EnableOptimisticProvide, @@ -353,6 +364,8 @@ func makeDHT(ctx context.Context, h host.Host, cfg dhtcfg.Config) (*IpfsDHT, err dht.routingTable = rt dht.bootstrapPeers = cfg.BootstrapPeers + dht.lookupCheckTimeout = cfg.RoutingTable.RefreshQueryTimeout + // init network size estimator dht.nsEstimator = netsize.NewEstimator(h.ID(), rt, cfg.BucketSize) @@ -361,26 +374,19 @@ func makeDHT(ctx context.Context, h host.Host, cfg dhtcfg.Config) (*IpfsDHT, err } // rt refresh manager - rtRefresh, err := makeRtRefreshManager(dht, cfg, maxLastSuccessfulOutboundThreshold) + dht.rtRefreshManager, err = makeRtRefreshManager(dht, cfg, maxLastSuccessfulOutboundThreshold) if err != nil { return nil, fmt.Errorf("failed to construct RT Refresh Manager,err=%s", err) } - dht.rtRefreshManager = rtRefresh - - // create a DHT proc with the given context - dht.proc = goprocessctx.WithContextAndTeardown(ctx, func() error { - return rtRefresh.Close() - }) // create a tagged context derived from 
the original context - ctxTags := dht.newContextWithLocalTags(ctx) // the DHT context should be done when the process is closed - dht.ctx = goprocessctx.WithProcessClosing(ctxTags, dht.proc) + dht.ctx, dht.cancel = context.WithCancel(dht.newContextWithLocalTags(context.Background())) if cfg.ProviderStore != nil { dht.providerStore = cfg.ProviderStore } else { - dht.providerStore, err = providers.NewProviderManager(dht.ctx, h.ID(), dht.peerstore, cfg.Datastore) + dht.providerStore, err = providers.NewProviderManager(h.ID(), dht.peerstore, cfg.Datastore) if err != nil { return nil, fmt.Errorf("initializing default provider manager (%v)", err) } @@ -391,6 +397,20 @@ func makeDHT(ctx context.Context, h host.Host, cfg dhtcfg.Config) (*IpfsDHT, err return dht, nil } +// lookupCheck performs a lookup request to a remote peer.ID, verifying that it is able to +// answer it correctly +func (dht *IpfsDHT) lookupCheck(ctx context.Context, p peer.ID) error { + // lookup request to p requesting for its own peer.ID + peerids, err := dht.protoMessenger.GetClosestPeers(ctx, p, p) + // p is expected to return at least 1 peer id, unless our routing table has + // less than bucketSize peers, in which case we aren't picky about who we + // add to the routing table. 
+ if err == nil && len(peerids) == 0 && dht.routingTable.Size() >= dht.bucketSize { + return fmt.Errorf("peer %s failed to return its closest peers, got %d", p, len(peerids)) + } + return err +} + func makeRtRefreshManager(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*rtrefresh.RtRefreshManager, error) { keyGenFnc := func(cpl uint) (string, error) { @@ -403,16 +423,11 @@ func makeRtRefreshManager(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutb return err } - pingFnc := func(ctx context.Context, p peer.ID) error { - _, err := dht.protoMessenger.GetClosestPeers(ctx, p, p) // don't use the PING message type as it's deprecated - return err - } - r, err := rtrefresh.NewRtRefreshManager( dht.host, dht.routingTable, cfg.RoutingTable.AutoRefresh, keyGenFnc, queryFnc, - pingFnc, + dht.lookupCheck, cfg.RoutingTable.RefreshQueryTimeout, cfg.RoutingTable.RefreshInterval, maxLastSuccessfulOutboundThreshold, @@ -477,42 +492,32 @@ func (dht *IpfsDHT) Mode() ModeOpt { return dht.auto } -func (dht *IpfsDHT) populatePeers(_ goprocess.Process) { - if !dht.disableFixLowPeers { - dht.fixLowPeers(dht.ctx) - } +// runFixLowPeersLoop manages simultaneous requests to fixLowPeers +func (dht *IpfsDHT) runFixLowPeersLoop() { + dht.wg.Add(1) + go func() { + defer dht.wg.Done() - if err := dht.rtRefreshManager.Start(); err != nil { - logger.Error(err) - } + dht.fixLowPeers() - // listens to the fix low peers chan and tries to fix the Routing Table - if !dht.disableFixLowPeers { - dht.proc.Go(dht.fixLowPeersRoutine) - } - -} + ticker := time.NewTicker(periodicBootstrapInterval) + defer ticker.Stop() -// fixLowPeersRouting manages simultaneous requests to fixLowPeers -func (dht *IpfsDHT) fixLowPeersRoutine(proc goprocess.Process) { - ticker := time.NewTicker(periodicBootstrapInterval) - defer ticker.Stop() + for { + select { + case <-dht.fixLowPeersChan: + case <-ticker.C: + case <-dht.ctx.Done(): + return + } - for { - select { - case 
<-dht.fixLowPeersChan: - case <-ticker.C: - case <-proc.Closing(): - return + dht.fixLowPeers() } - - dht.fixLowPeers(dht.Context()) - } - + }() } // fixLowPeers tries to get more peers into the routing table if we're below the threshold -func (dht *IpfsDHT) fixLowPeers(ctx context.Context) { +func (dht *IpfsDHT) fixLowPeers() { if dht.routingTable.Size() > minRTRefreshThreshold { return } @@ -520,7 +525,7 @@ func (dht *IpfsDHT) fixLowPeers(ctx context.Context) { // we try to add all peers we are connected to to the Routing Table // in case they aren't already there. for _, p := range dht.host.Network().Peers() { - dht.peerFound(p, false) + dht.peerFound(p) } // TODO Active Bootstrapping @@ -537,7 +542,7 @@ func (dht *IpfsDHT) fixLowPeers(ctx context.Context) { found := 0 for _, i := range rand.Perm(len(bootstrapPeers)) { ai := bootstrapPeers[i] - err := dht.Host().Connect(ctx, ai) + err := dht.Host().Connect(dht.ctx, ai) if err == nil { found++ } else { @@ -622,84 +627,118 @@ func (dht *IpfsDHT) putLocal(ctx context.Context, key string, rec *recpb.Record) return dht.datastore.Put(ctx, mkDsKey(key), data) } -func (dht *IpfsDHT) rtPeerLoop(proc goprocess.Process) { - bootstrapCount := 0 - isBootsrapping := false - var timerCh <-chan time.Time +func (dht *IpfsDHT) rtPeerLoop() { + dht.wg.Add(1) + go func() { + defer dht.wg.Done() + + var bootstrapCount uint + var isBootsrapping bool + var timerCh <-chan time.Time + + for { + select { + case <-timerCh: + dht.routingTable.MarkAllPeersIrreplaceable() + case p := <-dht.addPeerToRTChan: + if dht.routingTable.Size() == 0 { + isBootsrapping = true + bootstrapCount = 0 + timerCh = nil + } + // queryPeer set to true as we only try to add queried peers to the RT + newlyAdded, err := dht.routingTable.TryAddPeer(p, true, isBootsrapping) + if err != nil { + // peer not added. + continue + } + if newlyAdded { + // peer was added to the RT, it can now be fixed if needed. 
+ dht.fixRTIfNeeded() + } else { + // the peer is already in our RT, but we just successfully queried it and so let's give it a + // bump on the query time so we don't ping it too soon for a liveliness check. + dht.routingTable.UpdateLastSuccessfulOutboundQueryAt(p, time.Now()) + } + case <-dht.refreshFinishedCh: + bootstrapCount = bootstrapCount + 1 + if bootstrapCount == 2 { + timerCh = time.NewTimer(dht.rtFreezeTimeout).C + } - for { - select { - case <-timerCh: - dht.routingTable.MarkAllPeersIrreplaceable() - case addReq := <-dht.addPeerToRTChan: - prevSize := dht.routingTable.Size() - if prevSize == 0 { - isBootsrapping = true - bootstrapCount = 0 - timerCh = nil - } - newlyAdded, err := dht.routingTable.TryAddPeer(addReq.p, addReq.queryPeer, isBootsrapping) - if err != nil { - // peer not added. - continue - } - if !newlyAdded && addReq.queryPeer { - // the peer is already in our RT, but we just successfully queried it and so let's give it a - // bump on the query time so we don't ping it too soon for a liveliness check. - dht.routingTable.UpdateLastSuccessfulOutboundQueryAt(addReq.p, time.Now()) - } - case <-dht.refreshFinishedCh: - bootstrapCount = bootstrapCount + 1 - if bootstrapCount == 2 { - timerCh = time.NewTimer(dht.rtFreezeTimeout).C - } + old := isBootsrapping + isBootsrapping = false + if old { + dht.rtRefreshManager.RefreshNoWait() + } - old := isBootsrapping - isBootsrapping = false - if old { - dht.rtRefreshManager.RefreshNoWait() + case <-dht.ctx.Done(): + return } - - case <-proc.Closing(): - return } - } + }() } -// peerFound signals the routingTable that we've found a peer that -// might support the DHT protocol. 
-// If we have a connection a peer but no exchange of a query RPC -> -// -// LastQueriedAt=time.Now (so we don't ping it for some time for a liveliness check) -// LastUsefulAt=0 -// -// If we connect to a peer and then exchange a query RPC -> -// -// LastQueriedAt=time.Now (same reason as above) -// LastUsefulAt=time.Now (so we give it some life in the RT without immediately evicting it) -// -// If we query a peer we already have in our Routing Table -> -// -// LastQueriedAt=time.Now() -// LastUsefulAt remains unchanged -// -// If we connect to a peer we already have in the RT but do not exchange a query (rare) -// -// Do Nothing. -func (dht *IpfsDHT) peerFound(p peer.ID, queryPeer bool) { - - if c := baseLogger.Check(zap.DebugLevel, "peer found"); c != nil { - c.Write(zap.String("peer", p.String())) +// peerFound verifies whether the found peer advertises DHT protocols +// and probe it to make sure it answers DHT queries as expected. If +// it fails to answer, it isn't added to the routingTable. 
+func (dht *IpfsDHT) peerFound(p peer.ID) { + // if the peer is already in the routing table or the appropriate bucket is + // already full, don't try to add the new peer.ID + if !dht.routingTable.UsefulNewPeer(p) { + return } + + // verify whether the remote peer advertises the right dht protocol b, err := dht.validRTPeer(p) if err != nil { logger.Errorw("failed to validate if peer is a DHT peer", "peer", p, "error", err) } else if b { - select { - case dht.addPeerToRTChan <- addPeerRTReq{p, queryPeer}: - case <-dht.ctx.Done(): + + // check if the maximal number of concurrent lookup checks is reached + dht.lookupChecksLk.Lock() + if dht.lookupCheckCapacity == 0 { + dht.lookupChecksLk.Unlock() + // drop the new peer.ID if the maximal number of concurrent lookup + // checks is reached return } + dht.lookupCheckCapacity-- + dht.lookupChecksLk.Unlock() + + go func() { + livelinessCtx, cancel := context.WithTimeout(dht.ctx, dht.lookupCheckTimeout) + defer cancel() + + // performing a FIND_NODE query + err := dht.lookupCheck(livelinessCtx, p) + + dht.lookupChecksLk.Lock() + dht.lookupCheckCapacity++ + dht.lookupChecksLk.Unlock() + + if err != nil { + logger.Debugw("connected peer not answering DHT request as expected", "peer", p, "error", err) + return + } + + // if the FIND_NODE succeeded, the peer is considered as valid + dht.validPeerFound(p) + }() + } +} + +// validPeerFound signals the routingTable that we've found a peer that +// supports the DHT protocol, and just answered correctly to a DHT FindPeers +func (dht *IpfsDHT) validPeerFound(p peer.ID) { + if c := baseLogger.Check(zap.DebugLevel, "peer found"); c != nil { + c.Write(zap.String("peer", p.String())) + } + + select { + case dht.addPeerToRTChan <- p: + case <-dht.ctx.Done(): + return } } @@ -835,11 +874,6 @@ func (dht *IpfsDHT) Context() context.Context { return dht.ctx } -// Process returns the DHT's process. 
-func (dht *IpfsDHT) Process() goprocess.Process { - return dht.proc -} - // RoutingTable returns the DHT's routingTable. func (dht *IpfsDHT) RoutingTable() *kb.RoutingTable { return dht.routingTable @@ -847,7 +881,25 @@ func (dht *IpfsDHT) RoutingTable() *kb.RoutingTable { // Close calls Process Close. func (dht *IpfsDHT) Close() error { - return dht.proc.Close() + dht.cancel() + dht.wg.Wait() + + var wg sync.WaitGroup + closes := [...]func() error{ + dht.rtRefreshManager.Close, + dht.providerStore.Close, + } + var errors [len(closes)]error + wg.Add(len(errors)) + for i, c := range closes { + go func(i int, c func() error) { + defer wg.Done() + errors[i] = c() + }(i, c) + } + wg.Wait() + + return multierr.Combine(errors[:]...) } func mkDsKey(s string) ds.Key { @@ -904,7 +956,14 @@ func (dht *IpfsDHT) maybeAddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Dura if p == dht.self || dht.host.Network().Connectedness(p) == network.Connected { return } - dht.peerstore.AddAddrs(p, addrs, ttl) + dht.peerstore.AddAddrs(p, dht.filterAddrs(addrs), ttl) +} + +func (dht *IpfsDHT) filterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { + if f := dht.addrFilter; f != nil { + return f(addrs) + } + return addrs } // GetProvidersFromPeer allows you to request the provider records of a single peer. diff --git a/dht_bootstrap.go b/dht_bootstrap.go index dc5749bad..7e2171b45 100644 --- a/dht_bootstrap.go +++ b/dht_bootstrap.go @@ -57,7 +57,10 @@ func GetDefaultBootstrapPeerAddrInfos() []*peer.AddrInfo { // Bootstrap tells the DHT to get into a bootstrapped state satisfying the // IpfsRouter interface. 
-func (dht *IpfsDHT) Bootstrap(ctx context.Context) error { +func (dht *IpfsDHT) Bootstrap(ctx context.Context) (err error) { + _, end := tracer.Bootstrap(dhtName, ctx) + defer func() { end(err) }() + dht.fixRTIfNeeded() dht.rtRefreshManager.RefreshNoWait() return nil diff --git a/dht_bootstrap_test.go b/dht_bootstrap_test.go index 9b1deb8b9..e2236f5a1 100644 --- a/dht_bootstrap_test.go +++ b/dht_bootstrap_test.go @@ -191,8 +191,8 @@ func TestBootstrappersReplacable(t *testing.T) { require.NoError(t, d.host.Network().ClosePeer(d5.self)) connectNoSync(t, ctx, d, d1) connectNoSync(t, ctx, d, d5) - d.peerFound(d5.self, true) - d.peerFound(d1.self, true) + d.peerFound(d5.self) + d.peerFound(d1.self) time.Sleep(1 * time.Second) require.Len(t, d.routingTable.ListPeers(), 2) diff --git a/dht_filters_test.go b/dht_filters_test.go index 3273011d8..7714b8d9e 100644 --- a/dht_filters_test.go +++ b/dht_filters_test.go @@ -3,6 +3,7 @@ package dht import ( "context" "net" + "sync/atomic" "testing" ic "github.com/libp2p/go-libp2p/core/crypto" @@ -31,12 +32,17 @@ func TestIsRelay(t *testing.T) { type mockConn struct { local peer.AddrInfo remote peer.AddrInfo + + isClosed atomic.Bool } var _ network.Conn = (*mockConn)(nil) -func (m *mockConn) ID() string { return "0" } -func (m *mockConn) Close() error { return nil } +func (m *mockConn) ID() string { return "0" } +func (m *mockConn) Close() error { + m.isClosed.Store(true) + return nil +} func (m *mockConn) NewStream(context.Context) (network.Stream, error) { return nil, nil } func (m *mockConn) GetStreams() []network.Stream { return []network.Stream{} } func (m *mockConn) Stat() network.ConnStats { @@ -50,6 +56,7 @@ func (m *mockConn) LocalPrivateKey() ic.PrivKey { return nil } func (m *mockConn) RemotePeer() peer.ID { return m.remote.ID } func (m *mockConn) RemotePublicKey() ic.PubKey { return nil } func (m *mockConn) ConnState() network.ConnectionState { return network.ConnectionState{} } +func (m *mockConn) IsClosed() bool { 
return m.isClosed.Load() } func TestFilterCaching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/dht_net.go b/dht_net.go index 025715cd2..48c171024 100644 --- a/dht_net.go +++ b/dht_net.go @@ -44,7 +44,7 @@ func (dht *IpfsDHT) handleNewMessage(s network.Stream) bool { for { if dht.getMode() != modeServer { - logger.Errorf("ignoring incoming dht message while not in server mode") + logger.Debugf("ignoring incoming dht message while not in server mode") return false } @@ -110,9 +110,6 @@ func (dht *IpfsDHT) handleNewMessage(s network.Stream) bool { return false } - // a peer has queried us, let's add it to RT - dht.peerFound(mPeer, true) - if c := baseLogger.Check(zap.DebugLevel, "handling message"); c != nil { c.Write(zap.String("from", mPeer.String()), zap.Int32("type", int32(req.GetType())), diff --git a/dht_options.go b/dht_options.go index 5fd850924..f18a12958 100644 --- a/dht_options.go +++ b/dht_options.go @@ -8,14 +8,14 @@ import ( dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config" pb "github.com/libp2p/go-libp2p-kad-dht/pb" "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p-kbucket/peerdiversity" + record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p-kbucket/peerdiversity" - record "github.com/libp2p/go-libp2p-record" - ds "github.com/ipfs/go-datastore" + ma "github.com/multiformats/go-multiaddr" ) // ModeOpt describes what mode the dht should operate in @@ -196,6 +196,15 @@ func Resiliency(beta int) Option { } } +// LookupInterval configures maximal number of go routines that can be used to +// perform a lookup check operation, before adding a new node to the routing table. 
+func LookupCheckConcurrency(n int) Option { + return func(c *dhtcfg.Config) error { + c.LookupCheckConcurrency = n + return nil + } +} + // MaxRecordAge specifies the maximum time that any node will hold onto a record ("PutValue record") // from the time its received. This does not apply to any other forms of validity that // the record may contain. @@ -359,3 +368,13 @@ func WithCustomMessageSender(initFunc func(h host.Host, protos []protocol.ID) pb return nil } } + +// AddressFilter allows to configure the address filtering function. +// This function is run before addresses are added to the peerstore. +// It is most useful to avoid adding localhost / local addresses. +func AddressFilter(f func([]ma.Multiaddr) []ma.Multiaddr) Option { + return func(c *dhtcfg.Config) error { + c.AddressFilter = f + return nil + } +} diff --git a/dht_test.go b/dht_test.go index 7b21e517f..1c89958b1 100644 --- a/dht_test.go +++ b/dht_test.go @@ -14,13 +14,18 @@ import ( "testing" "time" + "github.com/libp2p/go-libp2p-kad-dht/internal/net" + "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" + "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" "github.com/multiformats/go-multihash" "github.com/multiformats/go-multistream" @@ -166,9 +171,7 @@ func connectNoSync(t *testing.T, ctx context.Context, a, b *IpfsDHT) { t.Fatal("peers setup incorrectly: no local address") } - a.peerstore.AddAddrs(idB, addrB, peerstore.TempAddrTTL) - pi := peer.AddrInfo{ID: idB} - if err := a.host.Connect(ctx, pi); err != nil { + if err := a.host.Connect(ctx, peer.AddrInfo{ID: idB, Addrs: addrB}); err != nil { t.Fatal(err) } } @@ -296,8 +299,6 @@ 
func TestValueGetSet(t *testing.T) { t.Fatalf("Expected 'world' got '%s'", string(val)) } - // late connect - connect(t, ctx, dhts[2], dhts[0]) connect(t, ctx, dhts[2], dhts[1]) @@ -563,12 +564,136 @@ func TestProvides(t *testing.T) { if prov.ID != dhts[3].self { t.Fatal("Got back wrong provider") } + if len(prov.Addrs) == 0 { + t.Fatal("Got no addresses back") + } case <-ctxT.Done(): t.Fatal("Did not get a provider back.") } } } +type testMessageSender struct { + sendRequest func(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) + sendMessage func(ctx context.Context, p peer.ID, pmes *pb.Message) error +} + +var _ pb.MessageSender = (*testMessageSender)(nil) + +func (t testMessageSender) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + return t.sendRequest(ctx, p, pmes) +} + +func (t testMessageSender) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error { + return t.sendMessage(ctx, p, pmes) +} + +func TestProvideAddressFilter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 2) + + connect(t, ctx, dhts[0], dhts[1]) + testMaddr := ma.StringCast("/ip4/99.99.99.99/tcp/9999") + + done := make(chan struct{}) + impl := net.NewMessageSenderImpl(dhts[0].host, dhts[0].protocols) + tms := &testMessageSender{ + sendMessage: func(ctx context.Context, p peer.ID, pmes *pb.Message) error { + defer close(done) + assert.Equal(t, pmes.Type, pb.Message_ADD_PROVIDER) + assert.Len(t, pmes.ProviderPeers[0].Addrs, 1) + assert.True(t, pmes.ProviderPeers[0].Addresses()[0].Equal(testMaddr)) + return impl.SendMessage(ctx, p, pmes) + }, + sendRequest: func(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + return impl.SendRequest(ctx, p, pmes) + }, + } + pm, err := pb.NewProtocolMessenger(tms) + require.NoError(t, err) + + dhts[0].protoMessenger = pm + dhts[0].addrFilter = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr { + 
return []ma.Multiaddr{testMaddr} + } + + if err := dhts[0].Provide(ctx, testCaseCids[0], true); err != nil { + t.Fatal(err) + } + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } +} + +type testProviderManager struct { + addProvider func(ctx context.Context, key []byte, prov peer.AddrInfo) error + getProviders func(ctx context.Context, key []byte) ([]peer.AddrInfo, error) + close func() error +} + +var _ providers.ProviderStore = (*testProviderManager)(nil) + +func (t *testProviderManager) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error { + return t.addProvider(ctx, key, prov) +} + +func (t *testProviderManager) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) { + return t.getProviders(ctx, key) +} + +func (t *testProviderManager) Close() error { + return t.close() +} + +func TestHandleAddProviderAddressFilter(t *testing.T) { + ctx := context.Background() + + d := setupDHT(ctx, t, false) + provider := setupDHT(ctx, t, false) + + testMaddr := ma.StringCast("/ip4/99.99.99.99/tcp/9999") + + d.addrFilter = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{testMaddr} + } + + done := make(chan struct{}) + d.providerStore = &testProviderManager{ + addProvider: func(ctx context.Context, key []byte, prov peer.AddrInfo) error { + defer close(done) + assert.True(t, prov.Addrs[0].Equal(testMaddr)) + return nil + }, + close: func() error { return nil }, + } + + pmes := &pb.Message{ + Type: pb.Message_ADD_PROVIDER, + Key: []byte("test-key"), + ProviderPeers: pb.RawPeerInfosToPBPeers([]peer.AddrInfo{{ + ID: provider.self, + Addrs: []ma.Multiaddr{ + ma.StringCast("/ip4/55.55.55.55/tcp/5555"), + ma.StringCast("/ip4/66.66.66.66/tcp/6666"), + }, + }}), + } + + _, err := d.handleAddProvider(ctx, provider.self, pmes) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } +} + func TestLocalProvides(t *testing.T) { // 
t.Skip("skipping test to debug another") ctx, cancel := context.WithCancel(context.Background()) @@ -605,30 +730,52 @@ func TestLocalProvides(t *testing.T) { } } +func TestAddressFilterProvide(t *testing.T) { + // t.Skip("skipping test to debug another") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testMaddr := ma.StringCast("/ip4/99.99.99.99/tcp/9999") + + d := setupDHT(ctx, t, false) + provider := setupDHT(ctx, t, false) + + d.addrFilter = func(maddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{ + testMaddr, + } + } + + _, err := d.handleAddProvider(ctx, provider.self, &pb.Message{ + Type: pb.Message_ADD_PROVIDER, + Key: []byte("random-key"), + ProviderPeers: pb.PeerInfosToPBPeers(provider.host.Network(), []peer.AddrInfo{{ + ID: provider.self, + Addrs: provider.host.Addrs(), + }}), + }) + require.NoError(t, err) + + // because of the identify protocol we add all + // addresses to the peerstore, although the addresses + // will be filtered in the above handleAddProvider call + d.peerstore.AddAddrs(provider.self, provider.host.Addrs(), time.Hour) + + resp, err := d.handleGetProviders(ctx, d.self, &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: []byte("random-key"), + }) + require.NoError(t, err) + + assert.True(t, resp.ProviderPeers[0].Addresses()[0].Equal(testMaddr)) + assert.Len(t, resp.ProviderPeers[0].Addresses(), 1) +} + // if minPeers or avgPeers is 0, dont test for it. 
func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int, timeout time.Duration) { // test "well-formed-ness" (>= minPeers peers in every routing table) t.Helper() - checkTables := func() bool { - totalPeers := 0 - for _, dht := range dhts { - rtlen := dht.routingTable.Size() - totalPeers += rtlen - if minPeers > 0 && rtlen < minPeers { - // t.Logf("routing table for %s only has %d peers (should have >%d)", dht.self, rtlen, minPeers) - return false - } - } - actualAvgPeers := totalPeers / len(dhts) - t.Logf("avg rt size: %d", actualAvgPeers) - if avgPeers > 0 && actualAvgPeers < avgPeers { - t.Logf("avg rt size: %d < %d", actualAvgPeers, avgPeers) - return false - } - return true - } - timeoutA := time.After(timeout) for { select { @@ -636,7 +783,7 @@ func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers i t.Errorf("failed to reach well-formed routing tables after %s", timeout) return case <-time.After(5 * time.Millisecond): - if checkTables() { + if checkForWellFormedTablesOnce(t, dhts, minPeers, avgPeers) { // succeeded return } @@ -644,6 +791,26 @@ func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers i } } +func checkForWellFormedTablesOnce(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int) bool { + t.Helper() + totalPeers := 0 + for _, dht := range dhts { + rtlen := dht.routingTable.Size() + totalPeers += rtlen + if minPeers > 0 && rtlen < minPeers { + // t.Logf("routing table for %s only has %d peers (should have >%d)", dht.self, rtlen, minPeers) + return false + } + } + actualAvgPeers := totalPeers / len(dhts) + t.Logf("avg rt size: %d", actualAvgPeers) + if avgPeers > 0 && actualAvgPeers < avgPeers { + t.Logf("avg rt size: %d < %d", actualAvgPeers, avgPeers) + return false + } + return true +} + func printRoutingTables(dhts []*IpfsDHT) { // the routing tables should be full now. let's inspect them. 
fmt.Printf("checking routing table of %d\n", len(dhts)) @@ -679,24 +846,16 @@ func TestRefresh(t *testing.T) { <-time.After(100 * time.Millisecond) // bootstrap a few times until we get good tables. t.Logf("bootstrapping them so they find each other %d", nDHTs) - ctxT, cancelT := context.WithTimeout(ctx, 5*time.Second) - defer cancelT() - for ctxT.Err() == nil { - bootstrap(t, ctxT, dhts) + for { + bootstrap(t, ctx, dhts) - // wait a bit. - select { - case <-time.After(50 * time.Millisecond): - continue // being explicit - case <-ctxT.Done(): - return + if checkForWellFormedTablesOnce(t, dhts, 7, 10) { + break } - } - - waitForWellFormedTables(t, dhts, 7, 10, 10*time.Second) - cancelT() + time.Sleep(time.Microsecond * 50) + } if u.Debug { // the routing tables should be full now. let's inspect them. @@ -766,7 +925,8 @@ func TestRefreshBelowMinRTThreshold(t *testing.T) { connect(t, ctx, dhtA, dhtD) // and because of the above bootstrap, A also discovers E ! - waitForWellFormedTables(t, []*IpfsDHT{dhtA}, 4, 4, 20*time.Second) + waitForWellFormedTables(t, []*IpfsDHT{dhtA}, 4, 4, 10*time.Second) + time.Sleep(100 * time.Millisecond) assert.Equal(t, dhtE.self, dhtA.routingTable.Find(dhtE.self), "A's routing table should have peer E!") } @@ -1325,6 +1485,89 @@ func TestClientModeConnect(t *testing.T) { } } +func TestInvalidServer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s0 := setupDHT(ctx, t, false, BucketSize(2)) // server + s1 := setupDHT(ctx, t, false, BucketSize(2)) // server + m0 := setupDHT(ctx, t, false, BucketSize(2)) // misbehabing server + m1 := setupDHT(ctx, t, false, BucketSize(2)) // misbehabing server + + // make m0 and m1 advertise all dht server protocols, but hang on all requests + for _, proto := range s0.serverProtocols { + for _, m := range []*IpfsDHT{m0, m1} { + // Hang on every request. 
+ m.host.SetStreamHandler(proto, func(s network.Stream) { + r := msgio.NewVarintReaderSize(s, network.MessageSizeMax) + msgbytes, err := r.ReadMsg() + if err != nil { + t.Fatal(err) + } + var req pb.Message + err = req.Unmarshal(msgbytes) + if err != nil { + t.Fatal(err) + } + + // answer with an empty response message + resp := pb.NewMessage(req.GetType(), nil, req.GetClusterLevel()) + + // send out response msg + err = net.WriteMsg(s, resp) + if err != nil { + t.Fatal(err) + } + }) + } + } + + // connect s0 and m0 + connectNoSync(t, ctx, s0, m0) + + // add a provider (p) for a key (k) to s0 + k := testCaseCids[0] + p := peer.ID("TestPeer") + s0.ProviderStore().AddProvider(ctx, k.Hash(), peer.AddrInfo{ID: p}) + time.Sleep(time.Millisecond * 5) // just in case... + + // find the provider for k from m0 + provs, err := m0.FindProviders(ctx, k) + if err != nil { + t.Fatal(err) + } + if len(provs) == 0 { + t.Fatal("Expected to get a provider back") + } + if provs[0].ID != p { + t.Fatal("expected it to be our test peer") + } + + // verify that m0 and s0 contain each other in their routing tables + if s0.routingTable.Find(m0.self) == "" { + // m0 is added to s0 routing table even though it is misbehaving, because + // s0's routing table is not well populated, so s0 isn't picky about who it adds. + t.Fatal("Misbehaving DHT servers should be added to routing table if not well populated") + } + if m0.routingTable.Find(s0.self) == "" { + t.Fatal("DHT server should have been added to the misbehaving server routing table") + } + + // connect s0 to both s1 and m1 + connectNoSync(t, ctx, s0, s1) + connectNoSync(t, ctx, s0, m1) + + // s1 should be added to s0's routing table. Then, because s0's routing table + // contains more than bucketSize (2) entries, lookupCheck is enabled and m1 + // shouldn't be added, because it fails the lookupCheck (hang on all requests). 
+ if s0.routingTable.Find(s1.self) == "" { + t.Fatal("Well behaving DHT server should have been added to the server routing table") + } + if s0.routingTable.Find(m1.self) != "" { + t.Fatal("Misbehaving DHT servers should not be added to routing table if well populated") + } +} + func TestClientModeFindPeer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -1533,9 +1776,7 @@ func TestProvideDisabled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var ( - optsA, optsB []Option - ) + var optsA, optsB []Option optsA = append(optsA, ProtocolPrefix("/provMaybeDisabled")) optsB = append(optsB, ProtocolPrefix("/provMaybeDisabled")) @@ -1960,8 +2201,10 @@ func TestBootStrapWhenRTIsEmpty(t *testing.T) { // convert the bootstrap addresses to a p2p address bootstrapAddrs := make([]peer.AddrInfo, nBootStraps) for i := 0; i < nBootStraps; i++ { - b := peer.AddrInfo{ID: bootstrappers[i].self, - Addrs: bootstrappers[i].host.Addrs()} + b := peer.AddrInfo{ + ID: bootstrappers[i].self, + Addrs: bootstrappers[i].host.Addrs(), + } bootstrapAddrs[i] = b } @@ -2077,7 +2320,7 @@ func TestBootstrapPeersFunc(t *testing.T) { bootstrapPeersB = []peer.AddrInfo{addrA} lock.Unlock() - dhtB.fixLowPeers(ctx) + dhtB.fixLowPeers() require.NotEqual(t, 0, len(dhtB.host.Network().Peers())) } @@ -2124,6 +2367,8 @@ func TestPreconnectedNodes(t *testing.T) { require.NoError(t, err) defer h2.Close() + connect(t, ctx, d1, d2) + // See if it works peers, _, err := d2.GetClosestPeers(ctx, "testkey") require.NoError(t, err) @@ -2131,3 +2376,92 @@ func TestPreconnectedNodes(t *testing.T) { require.Equal(t, len(peers), 1, "why is there more than one peer?") require.Equal(t, h1.ID(), peers[0], "could not find peer") } + +func TestAddrFilter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // generate a bunch of addresses + publicAddrs := []ma.Multiaddr{ + 
ma.StringCast("/ip4/1.2.3.1/tcp/123"), + ma.StringCast("/ip4/160.160.160.160/tcp/1600"), + ma.StringCast("/ip6/2001::10/tcp/123"), + } + privAddrs := []ma.Multiaddr{ + ma.StringCast("/ip4/192.168.1.100/tcp/123"), + ma.StringCast("/ip4/172.16.10.10/tcp/123"), + ma.StringCast("/ip4/10.10.10.10/tcp/123"), + ma.StringCast("/ip6/fc00::10/tcp/123"), + } + loopbackAddrs := []ma.Multiaddr{ + ma.StringCast("/ip4/127.0.0.100/tcp/123"), + ma.StringCast("/ip6/::1/tcp/123"), + } + + allAddrs := append(publicAddrs, privAddrs...) + allAddrs = append(allAddrs, loopbackAddrs...) + + // generate different address filters + acceptAllFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return addrs + }) + rejectAllFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{} + }) + publicIpFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return ma.FilterAddrs(addrs, manet.IsPublicAddr) + }) + localIpFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) }) + }) + + // generate peerid for "remote" peer + _, pub, err := crypto.GenerateKeyPair( + crypto.Ed25519, // Select your key type. Ed25519 are nice short + -1, // Select key length when possible (i.e. RSA). 
+ ) + require.NoError(t, err) + peerid, err := peer.IDFromPublicKey(pub) + require.NoError(t, err) + + // DHT accepting all addresses + d0 := setupDHT(ctx, t, false, acceptAllFilter) + + // peerstore should only contain self + require.Equal(t, 1, d0.host.Peerstore().Peers().Len()) + + d0.maybeAddAddrs(peerid, allAddrs, time.Minute) + require.Equal(t, 2, d0.host.Peerstore().Peers().Len()) + for _, a := range allAddrs { + // check that the peerstore contains all addresses of the remote peer + require.Contains(t, d0.host.Peerstore().Addrs(peerid), a) + } + + // DHT rejecting all addresses + d1 := setupDHT(ctx, t, false, rejectAllFilter) + d1.maybeAddAddrs(peerid, allAddrs, time.Minute) + // remote peer should not be added to peerstore (all addresses rejected) + require.Equal(t, 1, d1.host.Peerstore().Peers().Len()) + + // DHT accepting only public addresses + d2 := setupDHT(ctx, t, false, publicIpFilter) + d2.maybeAddAddrs(peerid, allAddrs, time.Minute) + for _, a := range publicAddrs { + // check that the peerstore contains only public addresses of the remote peer + require.Contains(t, d2.host.Peerstore().Addrs(peerid), a) + } + require.Equal(t, len(publicAddrs), len(d2.host.Peerstore().Addrs(peerid))) + + // DHT accepting only non-loopback addresses + d3 := setupDHT(ctx, t, false, localIpFilter) + d3.maybeAddAddrs(peerid, allAddrs, time.Minute) + for _, a := range publicAddrs { + // check that the peerstore contains only non-loopback addresses of the remote peer + require.Contains(t, d3.host.Peerstore().Addrs(peerid), a) + } + for _, a := range privAddrs { + // check that the peerstore contains only non-loopback addresses of the remote peer + require.Contains(t, d3.host.Peerstore().Addrs(peerid), a) + } + require.Equal(t, len(publicAddrs)+len(privAddrs), len(d3.host.Peerstore().Addrs(peerid))) +} diff --git a/dual/dual.go b/dual/dual.go index e38967513..0f94cf728 100644 --- a/dual/dual.go +++ b/dual/dual.go @@ -1,4 +1,4 @@ -// Package dual provides an implementaiton 
of a split or "dual" dht, where two parallel instances +// Package dual provides an implementation of a split or "dual" dht, where two parallel instances // are maintained for the global internet and the local LAN respectively. package dual @@ -8,6 +8,8 @@ import ( "sync" dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/internal" + "github.com/libp2p/go-libp2p-routing-helpers/tracing" "github.com/ipfs/go-cid" kb "github.com/libp2p/go-libp2p-kbucket" @@ -19,10 +21,14 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" "github.com/hashicorp/go-multierror" ) +const tracer = tracing.Tracer("go-libp2p-kad-dht/dual") +const dualName = "Dual" + // DHT implements the routing interface to provide two concrete DHT implementationts for use // in IPFS that are used to support both global network users and disjoint LAN usecases. type DHT struct { @@ -101,6 +107,8 @@ func New(ctx context.Context, h host.Host, options ...Option) (*DHT, error) { dht.QueryFilter(dht.PublicQueryFilter), dht.RoutingTableFilter(dht.PublicRoutingTableFilter), dht.RoutingTablePeerDiversityFilter(dht.NewRTPeerDiversityFilter(h, maxPrefixCountPerCpl, maxPrefixCount)), + // filter out all private addresses + dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { return ma.FilterAddrs(addrs, manet.IsPublicAddr) }), ), ) if err != nil { @@ -111,6 +119,10 @@ func New(ctx context.Context, h host.Host, options ...Option) (*DHT, error) { dht.ProtocolExtension(LanExtension), dht.QueryFilter(dht.PrivateQueryFilter), dht.RoutingTableFilter(dht.PrivateRoutingTableFilter), + // filter out localhost IP addresses + dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) }) + }), ), ) if err != nil { @@ -151,7 +163,10 @@ func (dht *DHT) WANActive() 
bool { } // Provide adds the given cid to the content routing system. -func (dht *DHT) Provide(ctx context.Context, key cid.Cid, announce bool) error { +func (dht *DHT) Provide(ctx context.Context, key cid.Cid, announce bool) (err error) { + ctx, end := tracer.Provide(dualName, ctx, key, announce) + defer func() { end(err) }() + if dht.WANActive() { return dht.WAN.Provide(ctx, key, announce) } @@ -167,7 +182,10 @@ func (dht *DHT) GetRoutingTableDiversityStats() []peerdiversity.CplDiversityStat } // FindProvidersAsync searches for peers who are able to provide a given key -func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo { +func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) { + ctx, end := tracer.FindProvidersAsync(dualName, ctx, key, count) + defer func() { ch = end(ch, nil) }() + reqCtx, cancel := context.WithCancel(ctx) outCh := make(chan peer.AddrInfo) @@ -178,10 +196,13 @@ func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) subCtx, evtCh = routing.RegisterForQueryEvents(reqCtx) } + subCtx, span := internal.StartSpan(subCtx, "Dual.worker") wanCh := dht.WAN.FindProvidersAsync(subCtx, key, count) lanCh := dht.LAN.FindProvidersAsync(subCtx, key, count) zeroCount := (count == 0) go func() { + defer span.End() + defer cancel() defer close(outCh) @@ -200,11 +221,13 @@ func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) continue case pi, ok = <-wanCh: if !ok { + span.AddEvent("wan finished") wanCh = nil continue } case pi, ok = <-lanCh: if !ok { + span.AddEvent("lan finished") lanCh = nil continue } @@ -231,7 +254,10 @@ func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) // FindPeer searches for a peer with given ID // Note: with signed peer records, we can change this to short circuit once either DHT returns. 
-func (dht *DHT) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInfo, error) { +func (dht *DHT) FindPeer(ctx context.Context, pid peer.ID) (pi peer.AddrInfo, err error) { + ctx, end := tracer.FindPeer(dualName, ctx, pid) + defer func() { end(pi, err) }() + var wg sync.WaitGroup wg.Add(2) var wanInfo, lanInfo peer.AddrInfo @@ -297,14 +323,20 @@ func combineErrors(erra, errb error) error { // Bootstrap allows callers to hint to the routing system to get into a // Boostrapped state and remain there. -func (dht *DHT) Bootstrap(ctx context.Context) error { +func (dht *DHT) Bootstrap(ctx context.Context) (err error) { + ctx, end := tracer.Bootstrap(dualName, ctx) + defer func() { end(err) }() + erra := dht.WAN.Bootstrap(ctx) errb := dht.LAN.Bootstrap(ctx) return combineErrors(erra, errb) } // PutValue adds value corresponding to given Key. -func (dht *DHT) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) error { +func (dht *DHT) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) (err error) { + ctx, end := tracer.PutValue(dualName, ctx, key, val, opts...) + defer func() { end(err) }() + if dht.WANActive() { return dht.WAN.PutValue(ctx, key, val, opts...) } @@ -312,7 +344,10 @@ func (dht *DHT) PutValue(ctx context.Context, key string, val []byte, opts ...ro } // GetValue searches for the value corresponding to given Key. -func (d *DHT) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { +func (d *DHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) { + ctx, end := tracer.GetValue(dualName, ctx, key, opts...) 
+ defer func() { end(result, err) }() + lanCtx, cancelLan := context.WithCancel(ctx) defer cancelLan() @@ -342,7 +377,10 @@ func (d *DHT) GetValue(ctx context.Context, key string, opts ...routing.Option) } // SearchValue searches for better values from this value -func (dht *DHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { +func (dht *DHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { + ctx, end := tracer.SearchValue(dualName, ctx, key, opts...) + defer func() { ch, err = end(ch, err) }() + p := helper.Parallel{Routers: []routing.Routing{dht.WAN, dht.LAN}, Validator: dht.WAN.Validator} return p.SearchValue(ctx, key, opts...) } diff --git a/ext_test.go b/ext_test.go index e96202731..3c4c0fcc1 100644 --- a/ext_test.go +++ b/ext_test.go @@ -2,36 +2,20 @@ package dht import ( "context" - "math/rand" "testing" "time" "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/libp2p/go-libp2p/core/routing" "github.com/stretchr/testify/require" - record "github.com/libp2p/go-libp2p-record" - bhost "github.com/libp2p/go-libp2p/p2p/host/basic" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - - //lint:ignore SA1019 TODO migrate away from gogo pb - "github.com/libp2p/go-msgio/protoio" - - pb "github.com/libp2p/go-libp2p-kad-dht/pb" - - u "github.com/ipfs/boxo/util" ) -// Test that one hung request to a peer doesn't prevent another request -// using that same peer from obeying its context. 
-func TestHungRequest(t *testing.T) { +func TestInvalidRemotePeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - mn, err := mocknet.FullMeshLinked(2) + mn, err := mocknet.FullMeshLinked(5) if err != nil { t.Fatal(err) } @@ -56,6 +40,8 @@ func TestHungRequest(t *testing.T) { t.Fatal("failed to connect peers", err) } +<<<<<<< HEAD +<<<<<<< HEAD // Wait at a bit for a peer in our routing table. for i := 0; i < 100 && d.routingTable.Size() == 0; i++ { time.Sleep(10 * time.Millisecond) @@ -73,396 +59,13 @@ func TestHungRequest(t *testing.T) { done <- err }() +======= +>>>>>>> 8c9fdff (fix: don't add unresponsive DHT servers to the Routing Table (#820)) +======= +>>>>>>> d373974 (fix: don't add unresponsive DHT servers to the Routing Table (#820)) time.Sleep(100 * time.Millisecond) - ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond) - defer cancel2() - err = d.Provide(ctx2, testCaseCids[0], true) - if err != context.DeadlineExceeded { - t.Errorf("expected to fail with deadline exceeded, got: %s", ctx2.Err()) - } - select { - case err = <-done: - t.Error("GetClosestPeers should not have returned yet", err) - default: - err = <-done - if err != context.DeadlineExceeded { - t.Errorf("expected the deadline to be exceeded, got %s", err) - } - } - - if d.routingTable.Size() == 0 { - // make sure we didn't just disconnect - t.Fatal("expected peers in the routing table") - } -} - -func TestGetFailures(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - ctx := context.Background() - - host1, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) - require.NoError(t, err) - host1.Start() - host2, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) - require.NoError(t, err) - host2.Start() - - d, err := New(ctx, host1, testPrefix, DisableAutoRefresh(), Mode(ModeServer)) - require.NoError(t, err) - - // Reply with failures to every message - for 
_, proto := range d.serverProtocols { - host2.SetStreamHandler(proto, func(s network.Stream) { - time.Sleep(400 * time.Millisecond) - s.Close() - }) - } - - host1.Peerstore().AddAddrs(host2.ID(), host2.Addrs(), peerstore.ConnectedAddrTTL) - _, err = host1.Network().DialPeer(ctx, host2.ID()) - require.NoError(t, err) - time.Sleep(1 * time.Second) - - // This one should time out - ctx1, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() - if _, err := d.GetValue(ctx1, "test"); err != nil { - if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 { - err = merr[0] - } - - if err != context.DeadlineExceeded { - t.Fatal("Got different error than we expected", err) - } - } else { - t.Fatal("Did not get expected error!") - } - - t.Log("Timeout test passed.") - - for _, proto := range d.serverProtocols { - // Reply with failures to every message - host2.SetStreamHandler(proto, func(s network.Stream) { - defer s.Close() - - pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax) - pbw := protoio.NewDelimitedWriter(s) - - pmes := new(pb.Message) - if err := pbr.ReadMsg(pmes); err != nil { - // user gave up - return - } - - resp := &pb.Message{ - Type: pmes.Type, - } - _ = pbw.WriteMsg(resp) - }) - } - - // This one should fail with NotFound. - // long context timeout to ensure we dont end too early. - // the dht should be exhausting its query and returning not found. - // (was 3 seconds before which should be _plenty_ of time, but maybe - // travis machines really have a hard time...) 
- ctx2, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - _, err = d.GetValue(ctx2, "test") - if err != nil { - if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 { - err = merr[0] - } - if err != routing.ErrNotFound { - t.Fatalf("Expected ErrNotFound, got: %s", err) - } - } else { - t.Fatal("expected error, got none.") - } - - t.Log("ErrNotFound check passed!") - - // Now we test this DHT's handleGetValue failure - { - typ := pb.Message_GET_VALUE - str := "hello" - - rec := record.MakePutRecord(str, []byte("blah")) - req := pb.Message{ - Type: typ, - Key: []byte(str), - Record: rec, - } - - s, err := host2.NewStream(context.Background(), host1.ID(), d.protocols...) - if err != nil { - t.Fatal(err) - } - defer s.Close() - - pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax) - pbw := protoio.NewDelimitedWriter(s) - - if err := pbw.WriteMsg(&req); err != nil { - t.Fatal(err) - } - - pmes := new(pb.Message) - if err := pbr.ReadMsg(pmes); err != nil { - t.Fatal(err) - } - if pmes.GetRecord() != nil { - t.Fatal("shouldnt have value") - } - if pmes.GetProviderPeers() != nil { - t.Fatal("shouldnt have provider peers") - } - } - - if d.routingTable.Size() == 0 { - // make sure we didn't just disconnect - t.Fatal("expected peers in the routing table") - } -} - -func TestNotFound(t *testing.T) { - // t.Skip("skipping test to debug another") - if testing.Short() { - t.SkipNow() - } - - ctx := context.Background() - mn, err := mocknet.FullMeshConnected(16) - if err != nil { - t.Fatal(err) - } - defer mn.Close() - hosts := mn.Hosts() - - os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)} - d, err := New(ctx, hosts[0], os...) 
- if err != nil { - t.Fatal(err) - } - - // Reply with random peers to every message - for _, host := range hosts { - host := host // shadow loop var - for _, proto := range d.serverProtocols { - host.SetStreamHandler(proto, func(s network.Stream) { - defer s.Close() - - pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax) - pbw := protoio.NewDelimitedWriter(s) - pmes := new(pb.Message) - if err := pbr.ReadMsg(pmes); err != nil { - // this isn't an error, it just means the stream has died. - return - } - - switch pmes.GetType() { - case pb.Message_GET_VALUE: - resp := &pb.Message{Type: pmes.Type} - - ps := []peer.AddrInfo{} - for i := 0; i < 7; i++ { - p := hosts[rand.Intn(len(hosts))].ID() - pi := host.Peerstore().PeerInfo(p) - ps = append(ps, pi) - } - - resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps) - if err := pbw.WriteMsg(resp); err != nil { - return - } - default: - panic("Shouldnt recieve this.") - } - }) - } - for _, peer := range hosts { - if host == peer { - continue - } - _ = peer.Peerstore().AddProtocols(host.ID(), d.serverProtocols...) - } - } - - for _, p := range hosts { - d.peerFound(p.ID(), true) - } - - // long timeout to ensure timing is not at play. - ctx, cancel := context.WithTimeout(ctx, time.Second*20) - defer cancel() - v, err := d.GetValue(ctx, "hello") - logger.Debugf("get value got %v", v) - if err != nil { - if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 { - err = merr[0] - } - switch err { - case routing.ErrNotFound: - if d.routingTable.Size() == 0 { - // make sure we didn't just disconnect - t.Fatal("expected peers in the routing table") - } - // Success! 
- return - case u.ErrTimeout: - t.Fatal("Should not have gotten timeout!") - default: - t.Fatalf("Got unexpected error: %s", err) - } - } - t.Fatal("Expected to recieve an error.") -} - -// If less than K nodes are in the entire network, it should fail when we make -// a GET rpc and nobody has the value -func TestLessThanKResponses(t *testing.T) { - // t.Skip("skipping test to debug another") - // t.Skip("skipping test because it makes a lot of output") - - ctx := context.Background() - mn, err := mocknet.FullMeshConnected(6) - if err != nil { - t.Fatal(err) - } - defer mn.Close() - hosts := mn.Hosts() - - os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)} - d, err := New(ctx, hosts[0], os...) - if err != nil { - t.Fatal(err) - } - - for i := 1; i < 5; i++ { - d.peerFound(hosts[i].ID(), true) - } - - // Reply with random peers to every message - for _, host := range hosts { - host := host // shadow loop var - for _, proto := range d.serverProtocols { - host.SetStreamHandler(proto, func(s network.Stream) { - defer s.Close() - - pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax) - pbw := protoio.NewDelimitedWriter(s) - - pmes := new(pb.Message) - if err := pbr.ReadMsg(pmes); err != nil { - panic(err) - } - - switch pmes.GetType() { - case pb.Message_GET_VALUE: - pi := host.Peerstore().PeerInfo(hosts[1].ID()) - resp := &pb.Message{ - Type: pmes.Type, - CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}), - } - - if err := pbw.WriteMsg(resp); err != nil { - panic(err) - } - default: - panic("Shouldnt recieve this.") - } - - }) - } - } - - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - if _, err := d.GetValue(ctx, "hello"); err != nil { - switch err { - case routing.ErrNotFound: - // Success! 
- return - case u.ErrTimeout: - t.Fatal("Should not have gotten timeout!") - default: - t.Fatalf("Got unexpected error: %s", err) - } - } - t.Fatal("Expected to recieve an error.") -} - -// Test multiple queries against a node that closes its stream after every query. -func TestMultipleQueries(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - ctx := context.Background() - mn, err := mocknet.FullMeshConnected(2) - if err != nil { - t.Fatal(err) - } - defer mn.Close() - hosts := mn.Hosts() - os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)} - d, err := New(ctx, hosts[0], os...) - if err != nil { - t.Fatal(err) - } - - d.peerFound(hosts[1].ID(), true) - - for _, proto := range d.serverProtocols { - // It would be nice to be able to just get a value and succeed but then - // we'd need to deal with selectors and validators... - hosts[1].SetStreamHandler(proto, func(s network.Stream) { - defer s.Close() - - pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax) - pbw := protoio.NewDelimitedWriter(s) - - pmes := new(pb.Message) - if err := pbr.ReadMsg(pmes); err != nil { - panic(err) - } - - switch pmes.GetType() { - case pb.Message_GET_VALUE: - pi := hosts[1].Peerstore().PeerInfo(hosts[0].ID()) - resp := &pb.Message{ - Type: pmes.Type, - CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}), - } - - if err := pbw.WriteMsg(resp); err != nil { - panic(err) - } - default: - panic("Shouldnt recieve this.") - } - }) - } - - // long timeout to ensure timing is not at play. - ctx, cancel := context.WithTimeout(ctx, time.Second*20) - defer cancel() - for i := 0; i < 10; i++ { - if _, err := d.GetValue(ctx, "hello"); err != nil { - switch err { - case routing.ErrNotFound: - // Success! 
- continue - case u.ErrTimeout: - t.Fatal("Should not have gotten timeout!") - default: - t.Fatalf("Got unexpected error: %s", err) - } - } - t.Fatal("Expected to recieve an error.") - } + // hosts[1] isn't added to the routing table because it isn't responding to + // the DHT request + require.Equal(t, 0, d.routingTable.Size()) } diff --git a/fullrt/dht.go b/fullrt/dht.go index 13f594995..13d2c9268 100644 --- a/fullrt/dht.go +++ b/fullrt/dht.go @@ -14,6 +14,7 @@ import ( "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multihash" + "github.com/libp2p/go-libp2p-routing-helpers/tracing" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -27,7 +28,7 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" kaddht "github.com/libp2p/go-libp2p-kad-dht" "github.com/libp2p/go-libp2p-kad-dht/crawler" @@ -45,12 +46,14 @@ import ( kadkey "github.com/libp2p/go-libp2p-xor/key" "github.com/libp2p/go-libp2p-xor/trie" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" ) var logger = logging.Logger("fullrtdht") +const tracer = tracing.Tracer("go-libp2p-kad-dht/fullrt") +const dhtName = "FullRT" + const rtRefreshLimitsMsg = `Accelerated DHT client was unable to fully refresh its routing table due to Resource Manager limits, which may degrade content routing. Consider increasing resource limits. See debug logs for the "dht-crawler" subsystem for details.` // FullRT is an experimental DHT client that is under development. 
Expect breaking changes to occur in this client @@ -135,7 +138,7 @@ func NewFullRT(h host.Host, protocolPrefix protocol.ID, options ...Option) (*Ful return nil, err } - ms := net.NewMessageSenderImpl(h, []protocol.ID{dhtcfg.ProtocolPrefix + "/kad/1.0.0"}) + ms := net.NewMessageSenderImpl(h, []protocol.ID{dhtcfg.ProtocolPrefix + "/kad/1.0.0"}, "") protoMessenger, err := dht_pb.NewProtocolMessenger(ms) if err != nil { return nil, err @@ -151,7 +154,7 @@ func NewFullRT(h host.Host, protocolPrefix protocol.ID, options ...Option) (*Ful ctx, cancel := context.WithCancel(context.Background()) self := h.ID() - pm, err := providers.NewProviderManager(ctx, self, h.Peerstore(), dhtcfg.Datastore, fullrtcfg.pmOpts...) + pm, err := providers.NewProviderManager(self, h.Peerstore(), dhtcfg.Datastore, fullrtcfg.pmOpts...) if err != nil { cancel() return nil, err @@ -355,12 +358,16 @@ func (dht *FullRT) runCrawler(ctx context.Context) { func (dht *FullRT) Close() error { dht.cancel() - err := dht.ProviderManager.Process().Close() dht.wg.Wait() - return err + return dht.ProviderManager.Close() } -func (dht *FullRT) Bootstrap(ctx context.Context) error { +func (dht *FullRT) Bootstrap(ctx context.Context) (err error) { + _, end := tracer.Bootstrap(dhtName, ctx) + defer func() { end(err) }() + + // TODO: This should block until the first crawl finish. + return nil } @@ -426,7 +433,7 @@ func workers(numWorkers int, fn func(interface{}), inputs <-chan interface{}) { } func (dht *FullRT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) { - _, span := internal.StartSpan(ctx, "FullRT.GetClosestPeers", trace.WithAttributes(attribute.String("Key", key))) + _, span := internal.StartSpan(ctx, "FullRT.GetClosestPeers", trace.WithAttributes(internal.KeyAsAttribute("Key", key))) defer span.End() kbID := kb.ConvertKey(key) @@ -456,6 +463,9 @@ func (dht *FullRT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, // PutValue adds value corresponding to given Key. 
// This is the top level "Store" operation of the DHT func (dht *FullRT) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) { + ctx, end := tracer.PutValue(dhtName, ctx, key, value, opts...) + defer func() { end(err) }() + if !dht.enableValues { return routing.ErrNotSupported } @@ -520,7 +530,10 @@ type RecvdVal struct { } // GetValue searches for the value corresponding to given Key. -func (dht *FullRT) GetValue(ctx context.Context, key string, opts ...routing.Option) (_ []byte, err error) { +func (dht *FullRT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) { + ctx, end := tracer.GetValue(dhtName, ctx, key, opts...) + defer func() { end(result, err) }() + if !dht.enableValues { return nil, routing.ErrNotSupported } @@ -554,14 +567,9 @@ func (dht *FullRT) GetValue(ctx context.Context, key string, opts ...routing.Opt } // SearchValue searches for the value corresponding to given Key and streams the results. -func (dht *FullRT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { - ctx, span := internal.StartSpan(ctx, "FullRT.SearchValue", trace.WithAttributes(attribute.String("Key", key))) - var good bool - defer func() { - if !good { - span.End() - } - }() +func (dht *FullRT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { + ctx, end := tracer.SearchValue(dhtName, ctx, key, opts...) + defer func() { ch, err = end(ch, err) }() if !dht.enableValues { return nil, routing.ErrNotSupported @@ -581,9 +589,7 @@ func (dht *FullRT) SearchValue(ctx context.Context, key string, opts ...routing. 
valCh, lookupRes := dht.getValues(ctx, key, stopCh) out := make(chan []byte) - good = true go func() { - defer span.End() defer close(out) best, peersWithBest, aborted := dht.searchValueQuorum(ctx, key, valCh, stopCh, out, responsesNeeded) @@ -791,8 +797,8 @@ func (dht *FullRT) getValues(ctx context.Context, key string, stopQuery chan str // Provide makes this node announce that it can provide a value for the given key func (dht *FullRT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) { - ctx, span := internal.StartSpan(ctx, "FullRT.Provide", trace.WithAttributes(attribute.Stringer("Key", key), attribute.Bool("Broadcast", brdcst))) - defer span.End() + ctx, end := tracer.Provide(dhtName, ctx, key, brdcst) + defer func() { end(err) }() if !dht.enableProviders { return routing.ErrNotSupported @@ -846,7 +852,10 @@ func (dht *FullRT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err e } successes := dht.execOnMany(ctx, func(ctx context.Context, p peer.ID) error { - err := dht.protoMessenger.PutProvider(ctx, p, keyMH, dht.h) + err := dht.protoMessenger.PutProviderAddrs(ctx, p, keyMH, peer.AddrInfo{ + ID: dht.self, + Addrs: dht.h.Addrs(), + }) return err }, peers, true) @@ -931,9 +940,9 @@ func (dht *FullRT) execOnMany(ctx context.Context, fn func(context.Context, peer return numSuccess } -func (dht *FullRT) ProvideMany(ctx context.Context, keys []multihash.Multihash) error { - ctx, span := internal.StartSpan(ctx, "FullRT.ProvideMany", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) - defer span.End() +func (dht *FullRT) ProvideMany(ctx context.Context, keys []multihash.Multihash) (err error) { + ctx, end := tracer.ProvideMany(dhtName, ctx, keys) + defer func() { end(err) }() if !dht.enableProviders { return routing.ErrNotSupported @@ -1219,18 +1228,17 @@ func (dht *FullRT) FindProviders(ctx context.Context, c cid.Cid) ([]peer.AddrInf // the search query completes. If count is zero then the query will run until it // completes. 
Note: not reading from the returned channel may block the query -// from progressing. -func (dht *FullRT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo { +func (dht *FullRT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) { + ctx, end := tracer.FindProvidersAsync(dhtName, ctx, key, count) + defer func() { ch = end(ch, nil) }() + if !dht.enableProviders || !key.Defined() { peerOut := make(chan peer.AddrInfo) close(peerOut) return peerOut } - chSize := count - if count == 0 { - chSize = 1 - } - peerOut := make(chan peer.AddrInfo, chSize) + peerOut := make(chan peer.AddrInfo) keyMH := key.Hash() @@ -1240,7 +1248,8 @@ func (dht *FullRT) FindProvidersAsync(ctx context.Context, key cid.Cid, count in } func (dht *FullRT) findProvidersAsyncRoutine(ctx context.Context, key multihash.Multihash, count int, peerOut chan peer.AddrInfo) { - ctx, span := internal.StartSpan(ctx, "FullRT.FindProvidersAsyncRoutine", trace.WithAttributes(attribute.Stringer("Key", key))) + // use a span here because unlike tracer.FindProvidersAsync we know who told us about it and that's interesting to log. + ctx, span := internal.StartSpan(ctx, "FullRT.FindProvidersAsyncRoutine") defer span.End() defer close(peerOut) @@ -1304,16 +1313,10 @@ func (dht *FullRT) findProvidersAsyncRoutine(ctx context.Context, key multihash. ID: p, }) - mctx, mspan := internal.StartSpan(ctx, "protoMessenger.GetProviders", trace.WithAttributes(attribute.Stringer("peer", p))) - provs, closest, err := dht.protoMessenger.GetProviders(mctx, p, key) + provs, closest, err := dht.protoMessenger.GetProviders(ctx, p, key) if err != nil { - if mspan.IsRecording() { - mspan.SetStatus(codes.Error, err.Error()) - } - mspan.End() return err } - mspan.End() logger.Debugf("%d provider entries", len(provs)) @@ -1356,9 +1359,9 @@ func (dht *FullRT) findProvidersAsyncRoutine(ctx context.Context, key multihash. } // FindPeer searches for a peer with given ID. 
-func (dht *FullRT) FindPeer(ctx context.Context, id peer.ID) (_ peer.AddrInfo, err error) { - ctx, span := internal.StartSpan(ctx, "FullRT.FindPeer", trace.WithAttributes(attribute.Stringer("PeerID", id))) - defer span.End() +func (dht *FullRT) FindPeer(ctx context.Context, id peer.ID) (pi peer.AddrInfo, err error) { + ctx, end := tracer.FindPeer(dhtName, ctx, id) + defer func() { end(pi, err) }() if err := id.Validate(); err != nil { return peer.AddrInfo{}, err diff --git a/go.mod b/go.mod index c05e214f7..52084fce2 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/libp2p/go-libp2p-kad-dht -go 1.19 +go 1.21 -replace github.com/libp2p/go-libp2p-kad-dht => ./ +retract v0.24.3 // this includes a breaking change and should have been released as v0.25.0 require ( github.com/gogo/protobuf v1.3.2 @@ -10,37 +10,38 @@ require ( github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.4 - github.com/ipfs/boxo v0.8.0 + github.com/ipfs/boxo v0.10.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-detect-race v0.0.1 - github.com/ipfs/go-log v1.0.5 - github.com/jbenet/goprocess v0.1.4 - github.com/libp2p/go-libp2p v0.27.3 - github.com/libp2p/go-libp2p-kbucket v0.5.0 + github.com/ipfs/go-log/v2 v2.5.1 + github.com/libp2p/go-libp2p v0.30.0 + github.com/libp2p/go-libp2p-kbucket v0.6.3 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/libp2p/go-libp2p-routing-helpers v0.4.0 + github.com/libp2p/go-libp2p-routing-helpers v0.7.2 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-libp2p-xor v0.1.0 github.com/libp2p/go-msgio v0.3.0 github.com/libp2p/go-netroute v0.2.1 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.9.0 + github.com/multiformats/go-multiaddr v0.11.0 github.com/multiformats/go-multibase v0.2.0 - github.com/multiformats/go-multihash v0.2.1 + 
github.com/multiformats/go-multihash v0.2.3 github.com/multiformats/go-multistream v0.4.1 github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.15.1 - go.opentelemetry.io/otel/trace v1.15.1 - go.uber.org/zap v1.24.0 - gonum.org/v1/gonum v0.11.0 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/trace v1.16.0 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.25.0 + gonum.org/v1/gonum v0.13.0 ) require ( - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect @@ -58,70 +59,67 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect + github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/huin/goupnp v1.1.0 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/huin/goupnp v1.2.0 // indirect + github.com/ipfs/go-log v1.0.5 // indirect github.com/ipld/go-ipld-prime v0.20.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.16.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect 
github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.53 // indirect + github.com/miekg/dns v1.1.55 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.9.2 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.11.0 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.42.0 // 
indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.3.2 // indirect - github.com/quic-go/qtls-go1-20 v0.2.2 // indirect - github.com/quic-go/quic-go v0.33.0 // indirect - github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/quic-go/qtls-go1-20 v0.3.2 // indirect + github.com/quic-go/quic-go v0.38.0 // indirect + github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/dig v1.16.1 // indirect - go.uber.org/fx v1.19.2 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.9.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.uber.org/dig v1.17.0 // indirect + go.uber.org/fx v1.20.0 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect - nhooyr.io/websocket v1.8.7 // 
indirect ) diff --git a/go.sum b/go.sum index 4bf7409b1..c5e306730 100644 --- a/go.sum +++ b/go.sum @@ -9,12 +9,15 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -56,6 +59,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto 
v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= @@ -77,12 +81,9 @@ github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -91,22 +92,9 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 
v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -121,6 +109,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -129,8 +118,6 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -152,16 +139,15 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= -github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f h1:pDhu5sgp8yJlEF/g6osliIIpF9K4F5jvkULXa4daRDQ= +github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -171,7 +157,7 @@ github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -188,13 +174,11 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= -github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs= -github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA= +github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY= +github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM= 
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= @@ -232,8 +216,6 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -243,25 +225,22 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 
v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ 
-272,20 +251,20 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.27.3 h1:tkV/zm3KCZ4R5er9Xcs2pt0YNB4JH0iBfGAtHJdLHRs= -github.com/libp2p/go-libp2p v0.27.3/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE= +github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= +github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.4.0 h1:b7y4aixQ7AwbqYfcOQ6wTw8DQvuRZeTAA0Od3YYN5yc= 
-github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= @@ -293,18 +272,16 @@ github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQ github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= -github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= -github.com/libp2p/go-sockaddr v0.0.2/go.mod 
h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= -github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -315,7 +292,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -325,8 +301,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod 
h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= -github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -337,16 +313,12 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -360,8 +332,8 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= -github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= +github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= +github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -376,8 +348,8 @@ github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKT github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= 
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -388,12 +360,14 @@ github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJE github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= -github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/opencontainers/runtime-spec v1.0.2/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -409,32 +383,31 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod 
h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.3.2 h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc86Z5U= -github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E= -github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= -github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= -github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= -github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= -github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= +github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.38.0 
h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc= +github.com/quic-go/quic-go v0.38.0/go.mod h1:MPCuRq7KBK2hNcfKj/1iD1BGuN3eAYMeNxp3T42LRUg= +github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= +github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -494,15 +467,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial 
v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= @@ -523,20 +492,23 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= -go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= -go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= -go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod 
h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= -go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= -go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= -go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= +go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -544,8 +516,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.24.0 
h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -561,11 +533,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -577,12 +549,11 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -598,8 +569,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -613,8 +584,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -624,14 +595,11 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -640,20 +608,19 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -674,14 +641,14 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -717,8 +684,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -733,6 +700,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -745,7 +713,5 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod 
h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/handlers.go b/handlers.go index ada7338a4..c2d496728 100644 --- a/handlers.go +++ b/handlers.go @@ -262,30 +262,26 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.M // if looking for self... special case where we send it on CloserPeers. targetPid := peer.ID(pmes.GetKey()) - if targetPid == dht.self { - closest = []peer.ID{dht.self} - } else { - closest = dht.betterPeersToQuery(pmes, from, dht.bucketSize) - - // Never tell a peer about itself. - if targetPid != from { - // Add the target peer to the set of closest peers if - // not already present in our routing table. - // - // Later, when we lookup known addresses for all peers - // in this set, we'll prune this peer if we don't - // _actually_ know where it is. - found := false - for _, p := range closest { - if targetPid == p { - found = true - break - } - } - if !found { - closest = append(closest, targetPid) + closest = dht.betterPeersToQuery(pmes, from, dht.bucketSize) + + // Never tell a peer about itself. + if targetPid != from { + // Add the target peer to the set of closest peers if + // not already present in our routing table. + // + // Later, when we lookup known addresses for all peers + // in this set, we'll prune this peer if we don't + // _actually_ know where it is. + found := false + for _, p := range closest { + if targetPid == p { + found = true + break } } + if !found { + closest = append(closest, targetPid) + } } if closest == nil { @@ -321,7 +317,16 @@ func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb. 
if err != nil { return nil, err } - resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), providers) + + filtered := make([]peer.AddrInfo, len(providers)) + for i, provider := range providers { + filtered[i] = peer.AddrInfo{ + ID: provider.ID, + Addrs: dht.filterAddrs(provider.Addrs), + } + } + + resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), filtered) // Also send closer peers. closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize) @@ -359,7 +364,10 @@ func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.M continue } - dht.providerStore.AddProvider(ctx, key, peer.AddrInfo{ID: p}) + // We run the addrs filter after checking for the length, + // this allows transient nodes with varying /p2p-circuit addresses to still have their anouncement go through. + addrs := dht.filterAddrs(pi.Addrs) + dht.providerStore.AddProvider(ctx, key, peer.AddrInfo{ID: pi.ID, Addrs: addrs}) } return nil, nil diff --git a/handlers_test.go b/handlers_test.go index 327cc93d5..35959df62 100644 --- a/handlers_test.go +++ b/handlers_test.go @@ -111,7 +111,7 @@ func BenchmarkHandleFindPeer(b *testing.B) { panic(err) } - d.peerFound(id, true) + d.peerFound(id) peers = append(peers, id) a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) diff --git a/internal/config/config.go b/internal/config/config.go index 24ebdd9b0..d10b9823f 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -14,6 +14,7 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + ma "github.com/multiformats/go-multiaddr" ) // DefaultPrefix is the application specific prefix attached to all DHT protocols by default. @@ -33,22 +34,23 @@ type RouteTableFilterFunc func(dht interface{}, p peer.ID) bool // Config is a structure containing all the options that can be used when constructing a DHT. 
type Config struct { - Datastore ds.Batching - Validator record.Validator - ValidatorChanged bool // if true implies that the validator has been changed and that Defaults should not be used - Mode ModeOpt - ProtocolPrefix protocol.ID - V1ProtocolOverride protocol.ID - BucketSize int - Concurrency int - Resiliency int - MaxRecordAge time.Duration - EnableProviders bool - EnableValues bool - ProviderStore providers.ProviderStore - QueryPeerFilter QueryFilterFunc - BlacklistPeers map[peer.ID]struct{} - MessageSenderFunc func(h host.Host, protos []protocol.ID) pb.MessageSender + Datastore ds.Batching + Validator record.Validator + ValidatorChanged bool // if true implies that the validator has been changed and that Defaults should not be used + Mode ModeOpt + ProtocolPrefix protocol.ID + V1ProtocolOverride protocol.ID + BucketSize int + Concurrency int + Resiliency int + MaxRecordAge time.Duration + EnableProviders bool + EnableValues bool + ProviderStore providers.ProviderStore + QueryPeerFilter QueryFilterFunc + LookupCheckConcurrency int + BlacklistPeers map[peer.ID]struct{} + MessageSenderFunc func(h host.Host, protos []protocol.ID) pb.MessageSender RoutingTable struct { RefreshQueryTimeout time.Duration @@ -61,6 +63,7 @@ type Config struct { } BootstrapPeers func() []peer.AddrInfo + AddressFilter func([]ma.Multiaddr) []ma.Multiaddr // test specific Config options DisableFixLowPeers bool @@ -115,16 +118,18 @@ var Defaults = func(o *Config) error { o.EnableValues = true o.QueryPeerFilter = EmptyQueryFilter - o.RoutingTable.LatencyTolerance = time.Minute - o.RoutingTable.RefreshQueryTimeout = 1 * time.Minute + o.RoutingTable.LatencyTolerance = 10 * time.Second + o.RoutingTable.RefreshQueryTimeout = 10 * time.Second o.RoutingTable.RefreshInterval = 10 * time.Minute o.RoutingTable.AutoRefresh = true o.RoutingTable.PeerFilter = EmptyRTFilter + o.MaxRecordAge = providers.ProvideValidity o.BucketSize = defaultBucketSize o.Concurrency = 10 o.Resiliency = 3 + 
o.LookupCheckConcurrency = 256 // MAGIC: It makes sense to set it to a multiple of OptProvReturnRatio * BucketSize. We chose a multiple of 4. o.OptimisticProvideJobsPoolSize = 60 diff --git a/internal/tracing.go b/internal/tracing.go index 2a2f18647..6b707f9cf 100644 --- a/internal/tracing.go +++ b/internal/tracing.go @@ -3,11 +3,30 @@ package internal import ( "context" "fmt" + "unicode/utf8" + "github.com/multiformats/go-multibase" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { return otel.Tracer("go-libp2p-kad-dht").Start(ctx, fmt.Sprintf("KademliaDHT.%s", name), opts...) } + +// KeyAsAttribute format a DHT key into a suitable tracing attribute. +// DHT keys can be either valid utf-8 or binary, when they are derived from, for example, a multihash. +// Tracing (and notably OpenTelemetry+grpc exporter) requires valid utf-8 for string attributes. 
+func KeyAsAttribute(name string, key string) attribute.KeyValue { + b := []byte(key) + if utf8.Valid(b) { + return attribute.String(name, key) + } + encoded, err := multibase.Encode(multibase.Base58BTC, b) + if err != nil { + // should be unreachable + panic(err) + } + return attribute.String(name, encoded) +} diff --git a/lookup.go b/lookup.go index 3f52914ba..c4e3792a6 100644 --- a/lookup.go +++ b/lookup.go @@ -11,8 +11,6 @@ import ( kb "github.com/libp2p/go-libp2p-kbucket" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" ) @@ -68,13 +66,8 @@ func (dht *IpfsDHT) pmGetClosestPeers(key string) queryFn { ID: p, }) - mctx, mspan := internal.StartSpan(ctx, "protoMessenger.GetClosestPeers", trace.WithAttributes(attribute.Stringer("peer", p))) - peers, err := dht.protoMessenger.GetClosestPeers(mctx, p, peer.ID(key)) + peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, peer.ID(key)) if err != nil { - if mspan.IsRecording() { - mspan.SetStatus(codes.Error, err.Error()) - } - mspan.End() logger.Debugf("error getting closer peers: %s", err) routing.PublishQueryEvent(ctx, &routing.QueryEvent{ Type: routing.QueryError, @@ -83,7 +76,6 @@ func (dht *IpfsDHT) pmGetClosestPeers(key string) queryFn { }) return nil, err } - mspan.End() // For DHT query command routing.PublishQueryEvent(ctx, &routing.QueryEvent{ diff --git a/lookup_optim.go b/lookup_optim.go index 0f49757b8..144b011e2 100644 --- a/lookup_optim.go +++ b/lookup_optim.go @@ -242,7 +242,10 @@ func (os *optimisticState) stopFn(qps *qpeerset.QueryPeerset) bool { } func (os *optimisticState) putProviderRecord(pid peer.ID) { - err := os.dht.protoMessenger.PutProvider(os.putCtx, pid, []byte(os.key), os.dht.host) + err := os.dht.protoMessenger.PutProviderAddrs(os.putCtx, pid, []byte(os.key), peer.AddrInfo{ + ID: os.dht.self, + Addrs: os.dht.filterAddrs(os.dht.host.Addrs()), + }) 
 os.peerStatesLk.Lock() if err != nil { os.peerStates[pid] = failure diff --git a/net/message_manager.go b/net/message_manager.go index 629ad5afe..9e731e2c2 100644 --- a/net/message_manager.go +++ b/net/message_manager.go @@ -14,7 +14,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-msgio" //lint:ignore SA1019 TODO migrate away from gogo pb @@ -49,7 +49,7 @@ type messageSenderImpl struct { blacklistUA string // piece of user-agent that is desired to be blacklistable (no-blacklist if empty) } -func NewMessageSenderImpl(h host.Host, protos []protocol.ID, blacklistUA string) pb.MessageSender { +func NewMessageSenderImpl(h host.Host, protos []protocol.ID, blacklistUA string) pb.MessageSenderWithDisconnect { return &messageSenderImpl{ host: h, strmap: make(map[peer.ID]*peerMessageSender), @@ -325,7 +329,10 @@ func (ms *peerMessageSender) SendRequest(ctx context.Context, pmes *pb.Message) if err := ms.ctxReadMsg(ctx, mes); err != nil { _ = ms.s.Reset() ms.s = nil - + if err == context.Canceled { + // retry would be same error + return nil, err + } if retry { logger.Debugw("error reading message", "error", err) return nil, err diff --git a/net/message_manager_test.go b/net/message_manager_test.go index 5c61ec2de..a58db1144 100644 --- a/net/message_manager_test.go +++ b/net/message_manager_test.go @@ -24,7 +24,7 @@ func TestInvalidMessageSenderTracking(t *testing.T) { h.Start() defer h.Close() - msgSender := NewMessageSenderImpl(h, []protocol.ID{"/test/kad/1.0.0"}).(*messageSenderImpl) + msgSender := NewMessageSenderImpl(h, []protocol.ID{"/test/kad/1.0.0"}, "").(*messageSenderImpl) _, err = msgSender.messageSenderForPeer(ctx, foo) require.Error(t, err, "should have failed to find message sender") diff --git a/netsize/netsize.go 
b/netsize/netsize.go index 669081e8c..02a0e6789 100644 --- a/netsize/netsize.go +++ b/netsize/netsize.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "time" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" kbucket "github.com/libp2p/go-libp2p-kbucket" "github.com/libp2p/go-libp2p/core/peer" ks "github.com/whyrusleeping/go-keyspace" diff --git a/optimizations.md b/optimizations.md new file mode 100644 index 000000000..214c1796f --- /dev/null +++ b/optimizations.md @@ -0,0 +1,7 @@ +# Client-side optimizations + +This document reflects client-side optimizations that are implemented in this repository. Client-side optimizations are not part of the [Kademlia spec](https://github.com/libp2p/specs/tree/master/kad-dht), and are not required to be implemented on all clients. + +## Checking before Adding + +A Kademlia server should try to add remote peers querying it to its routing table. However, the Kademlia server has no guarantee that remote peers issuing requests are able to answer Kademlia requests correctly, even though they advertise speaking the Kademlia server protocol. It is important that only server nodes able to answer Kademlia requests end up in other peers' routing tables. Hence, before adding a remote peer to the Kademlia server's routing table, the Kademlia server will send a trivial `FIND_NODE` request to the remote peer, and add it to its routing table only if it is able to provide a valid response. 
\ No newline at end of file diff --git a/pb/message.go b/pb/message.go index 49f52ade6..2e4f3d665 100644 --- a/pb/message.go +++ b/pb/message.go @@ -4,7 +4,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ma "github.com/multiformats/go-multiaddr" ) diff --git a/pb/protocol_messenger.go b/pb/protocol_messenger.go index e175dde10..7971db68e 100644 --- a/pb/protocol_messenger.go +++ b/pb/protocol_messenger.go @@ -6,11 +6,13 @@ import ( "errors" "fmt" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multihash" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" "github.com/libp2p/go-libp2p-kad-dht/internal" ) @@ -45,6 +47,12 @@ func NewProtocolMessenger(msgSender MessageSender, opts ...ProtocolMessengerOpti return pm, nil } +type MessageSenderWithDisconnect interface { + MessageSender + + OnDisconnect(context.Context, peer.ID) +} + // MessageSender handles sending wire protocol messages to a given peer type MessageSender interface { // SendRequest sends a peer a message and waits for its response @@ -54,7 +62,18 @@ type MessageSender interface { } // PutValue asks a peer to store the given key/value pair. 
-func (pm *ProtocolMessenger) PutValue(ctx context.Context, p peer.ID, rec *recpb.Record) error { +func (pm *ProtocolMessenger) PutValue(ctx context.Context, p peer.ID, rec *recpb.Record) (err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutValue") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("record", rec)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + }() + } + pmes := NewMessage(Message_PUT_VALUE, rec.Key, 0) pmes.Record = rec rpmes, err := pm.m.SendRequest(ctx, p, pmes) @@ -74,7 +93,27 @@ func (pm *ProtocolMessenger) PutValue(ctx context.Context, p peer.ID, rec *recpb // GetValue asks a peer for the value corresponding to the given key. Also returns the K closest peers to the key // as described in GetClosestPeers. -func (pm *ProtocolMessenger) GetValue(ctx context.Context, p peer.ID, key string) (*recpb.Record, []*peer.AddrInfo, error) { +func (pm *ProtocolMessenger) GetValue(ctx context.Context, p peer.ID, key string) (record *recpb.Record, closerPeers []*peer.AddrInfo, err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetValue") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), internal.KeyAsAttribute("key", key)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } else { + peers := make([]string, len(closerPeers)) + for i, v := range closerPeers { + peers[i] = v.String() + } + span.SetAttributes( + attribute.Stringer("record", record), + attribute.StringSlice("closestPeers", peers), + ) + } + }() + } + pmes := NewMessage(Message_GET_VALUE, []byte(key), 0) respMsg, err := pm.m.SendRequest(ctx, p, pmes) if err != nil { @@ -103,7 +142,24 @@ func (pm *ProtocolMessenger) GetValue(ctx context.Context, p peer.ID, key string // GetClosestPeers asks a peer to return the K (a DHT-wide parameter) DHT server peers closest in XOR space to the id // 
Note: If the peer happens to know another peer whose peerID exactly matches the given id it will return that peer // even if that peer is not a DHT server node. -func (pm *ProtocolMessenger) GetClosestPeers(ctx context.Context, p peer.ID, id peer.ID) ([]*peer.AddrInfo, error) { +func (pm *ProtocolMessenger) GetClosestPeers(ctx context.Context, p peer.ID, id peer.ID) (closerPeers []*peer.AddrInfo, err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetClosestPeers") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", id)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } else { + peers := make([]string, len(closerPeers)) + for i, v := range closerPeers { + peers[i] = v.String() + } + span.SetAttributes(attribute.StringSlice("peers", peers)) + } + }() + } + pmes := NewMessage(Message_FIND_NODE, []byte(id), 0) respMsg, err := pm.m.SendRequest(ctx, p, pmes) if err != nil { @@ -113,40 +169,86 @@ func (pm *ProtocolMessenger) GetClosestPeers(ctx context.Context, p peer.ID, id return peers, nil } -// PutProvider asks a peer to store that we are a provider for the given key. -func (pm *ProtocolMessenger) PutProvider(ctx context.Context, p peer.ID, key multihash.Multihash, host host.Host) error { - pi := peer.AddrInfo{ - ID: host.ID(), - Addrs: host.Addrs(), +// PutProvider is deprecated please use [ProtocolMessenger.PutProviderAddrs]. +func (pm *ProtocolMessenger) PutProvider(ctx context.Context, p peer.ID, key multihash.Multihash, h host.Host) error { + return pm.PutProviderAddrs(ctx, p, key, peer.AddrInfo{ + ID: h.ID(), + Addrs: h.Addrs(), + }) +} + +// PutProviderAddrs asks a peer to store that we are a provider for the given key. 
+func (pm *ProtocolMessenger) PutProviderAddrs(ctx context.Context, p peer.ID, key multihash.Multihash, self peer.AddrInfo) (err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutProvider") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + }() } // TODO: We may want to limit the type of addresses in our provider records // For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100) - if len(pi.Addrs) < 1 { + if len(self.Addrs) < 1 { return fmt.Errorf("no known addresses for self, cannot put provider") } pmes := NewMessage(Message_ADD_PROVIDER, key, 0) - pmes.ProviderPeers = RawPeerInfosToPBPeers([]peer.AddrInfo{pi}) + pmes.ProviderPeers = RawPeerInfosToPBPeers([]peer.AddrInfo{self}) return pm.m.SendMessage(ctx, p, pmes) } // GetProviders asks a peer for the providers it knows of for a given key. Also returns the K closest peers to the key // as described in GetClosestPeers. 
-func (pm *ProtocolMessenger) GetProviders(ctx context.Context, p peer.ID, key multihash.Multihash) ([]*peer.AddrInfo, []*peer.AddrInfo, error) { +func (pm *ProtocolMessenger) GetProviders(ctx context.Context, p peer.ID, key multihash.Multihash) (provs []*peer.AddrInfo, closerPeers []*peer.AddrInfo, err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetProviders") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } else { + provsStr := make([]string, len(provs)) + for i, v := range provs { + provsStr[i] = v.String() + } + closerPeersStr := make([]string, len(provs)) + for i, v := range provs { + closerPeersStr[i] = v.String() + } + span.SetAttributes(attribute.StringSlice("provs", provsStr), attribute.StringSlice("closestPeers", closerPeersStr)) + } + }() + } + pmes := NewMessage(Message_GET_PROVIDERS, key, 0) respMsg, err := pm.m.SendRequest(ctx, p, pmes) if err != nil { return nil, nil, err } - provs := PBPeersToPeerInfos(respMsg.GetProviderPeers()) - closerPeers := PBPeersToPeerInfos(respMsg.GetCloserPeers()) + provs = PBPeersToPeerInfos(respMsg.GetProviderPeers()) + closerPeers = PBPeersToPeerInfos(respMsg.GetCloserPeers()) return provs, closerPeers, nil } // Ping sends a ping message to the passed peer and waits for a response. 
-func (pm *ProtocolMessenger) Ping(ctx context.Context, p peer.ID) error { +func (pm *ProtocolMessenger) Ping(ctx context.Context, p peer.ID) (err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.Ping") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + }() + } + req := NewMessage(Message_PING, nil, 0) resp, err := pm.m.SendRequest(ctx, p, req) if err != nil { diff --git a/providers/providers_manager.go b/providers/providers_manager.go index f2a7ad17c..1400dc76c 100644 --- a/providers/providers_manager.go +++ b/providers/providers_manager.go @@ -4,16 +4,16 @@ import ( "context" "encoding/binary" "fmt" + "io" "strings" + "sync" "time" lru "github.com/hashicorp/golang-lru/simplelru" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/autobatch" dsq "github.com/ipfs/go-datastore/query" - logging "github.com/ipfs/go-log" - "github.com/jbenet/goprocess" - goprocessctx "github.com/jbenet/goprocess/context" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-kad-dht/internal" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" @@ -45,6 +45,7 @@ var log = logging.Logger("providers") type ProviderStore interface { AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) + io.Closer } // ProviderManager adds and pulls providers out of the datastore, @@ -59,9 +60,12 @@ type ProviderManager struct { newprovs chan *addProv getprovs chan *getProv - proc goprocess.Process cleanupInterval time.Duration + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup } var _ ProviderStore = (*ProviderManager)(nil) @@ -109,7 +113,7 @@ type getProv struct { } // NewProviderManager constructor -func NewProviderManager(ctx context.Context, local peer.ID, ps peerstore.Peerstore, dstore 
ds.Batching, opts ...Option) (*ProviderManager, error) { +func NewProviderManager(local peer.ID, ps peerstore.Peerstore, dstore ds.Batching, opts ...Option) (*ProviderManager, error) { pm := new(ProviderManager) pm.self = local pm.getprovs = make(chan *getProv) @@ -125,117 +129,121 @@ func NewProviderManager(ctx context.Context, local peer.ID, ps peerstore.Peersto if err := pm.applyOptions(opts...); err != nil { return nil, err } - pm.proc = goprocessctx.WithContext(ctx) - pm.proc.Go(func(proc goprocess.Process) { pm.run(ctx, proc) }) + pm.ctx, pm.cancel = context.WithCancel(context.Background()) + pm.run() return pm, nil } -// Process returns the ProviderManager process -func (pm *ProviderManager) Process() goprocess.Process { - return pm.proc -} +func (pm *ProviderManager) run() { + pm.wg.Add(1) + go func() { + defer pm.wg.Done() -func (pm *ProviderManager) run(ctx context.Context, proc goprocess.Process) { - var ( - gcQuery dsq.Results - gcQueryRes <-chan dsq.Result - gcSkip map[string]struct{} - gcTime time.Time - gcTimer = time.NewTimer(pm.cleanupInterval) - ) - - defer func() { - gcTimer.Stop() - if gcQuery != nil { - // don't really care if this fails. - _ = gcQuery.Close() - } - if err := pm.dstore.Flush(ctx); err != nil { - log.Error("failed to flush datastore: ", err) - } - }() + var gcQuery dsq.Results + gcTimer := time.NewTimer(pm.cleanupInterval) - for { - select { - case np := <-pm.newprovs: - err := pm.addProv(np.ctx, np.key, np.val) - if err != nil { - log.Error("error adding new providers: ", err) - continue + defer func() { + gcTimer.Stop() + if gcQuery != nil { + // don't really care if this fails. + _ = gcQuery.Close() } - if gcSkip != nil { - // we have an gc, tell it to skip this provider - // as we've updated it since the GC started. 
- gcSkip[mkProvKeyFor(np.key, np.val)] = struct{}{} + if err := pm.dstore.Flush(context.Background()); err != nil { + log.Error("failed to flush datastore: ", err) } - case gp := <-pm.getprovs: - provs, err := pm.getProvidersForKey(gp.ctx, gp.key) - if err != nil && err != ds.ErrNotFound { - log.Error("error reading providers: ", err) - } - - // set the cap so the user can't append to this. - gp.resp <- provs[0:len(provs):len(provs)] - case res, ok := <-gcQueryRes: - if !ok { - if err := gcQuery.Close(); err != nil { - log.Error("failed to close provider GC query: ", err) + }() + + var gcQueryRes <-chan dsq.Result + var gcSkip map[string]struct{} + var gcTime time.Time + for { + select { + case np := <-pm.newprovs: + err := pm.addProv(np.ctx, np.key, np.val) + if err != nil { + log.Error("error adding new providers: ", err) + continue + } + if gcSkip != nil { + // we have an gc, tell it to skip this provider + // as we've updated it since the GC started. + gcSkip[mkProvKeyFor(np.key, np.val)] = struct{}{} + } + case gp := <-pm.getprovs: + provs, err := pm.getProvidersForKey(gp.ctx, gp.key) + if err != nil && err != ds.ErrNotFound { + log.Error("error reading providers: ", err) } - gcTimer.Reset(pm.cleanupInterval) - // cleanup GC round - gcQueryRes = nil - gcSkip = nil - gcQuery = nil - continue - } - if res.Error != nil { - log.Error("got error from GC query: ", res.Error) - continue - } - if _, ok := gcSkip[res.Key]; ok { - // We've updated this record since starting the - // GC round, skip it. - continue - } + // set the cap so the user can't append to this. 
+ gp.resp <- provs[0:len(provs):len(provs)] + case res, ok := <-gcQueryRes: + if !ok { + if err := gcQuery.Close(); err != nil { + log.Error("failed to close provider GC query: ", err) + } + gcTimer.Reset(pm.cleanupInterval) + + // cleanup GC round + gcQueryRes = nil + gcSkip = nil + gcQuery = nil + continue + } + if res.Error != nil { + log.Error("got error from GC query: ", res.Error) + continue + } + if _, ok := gcSkip[res.Key]; ok { + // We've updated this record since starting the + // GC round, skip it. + continue + } - // check expiration time - t, err := readTimeValue(res.Value) - switch { - case err != nil: - // couldn't parse the time - log.Error("parsing providers record from disk: ", err) - fallthrough - case gcTime.Sub(t) > ProvideValidity: - // or expired - err = pm.dstore.Delete(ctx, ds.RawKey(res.Key)) - if err != nil && err != ds.ErrNotFound { - log.Error("failed to remove provider record from disk: ", err) + // check expiration time + t, err := readTimeValue(res.Value) + switch { + case err != nil: + // couldn't parse the time + log.Error("parsing providers record from disk: ", err) + fallthrough + case gcTime.Sub(t) > ProvideValidity: + // or expired + err = pm.dstore.Delete(pm.ctx, ds.RawKey(res.Key)) + if err != nil && err != ds.ErrNotFound { + log.Error("failed to remove provider record from disk: ", err) + } } - } - case gcTime = <-gcTimer.C: - // You know the wonderful thing about caches? You can - // drop them. - // - // Much faster than GCing. - pm.cache.Purge() - - // Now, kick off a GC of the datastore. - q, err := pm.dstore.Query(ctx, dsq.Query{ - Prefix: ProvidersKeyPrefix, - }) - if err != nil { - log.Error("provider record GC query failed: ", err) - continue + case gcTime = <-gcTimer.C: + // You know the wonderful thing about caches? You can + // drop them. + // + // Much faster than GCing. + pm.cache.Purge() + + // Now, kick off a GC of the datastore. 
+ q, err := pm.dstore.Query(pm.ctx, dsq.Query{ + Prefix: ProvidersKeyPrefix, + }) + if err != nil { + log.Error("provider record GC query failed: ", err) + continue + } + gcQuery = q + gcQueryRes = q.Next() + gcSkip = make(map[string]struct{}) + case <-pm.ctx.Done(): + return } - gcQuery = q - gcQueryRes = q.Next() - gcSkip = make(map[string]struct{}) - case <-proc.Closing(): - return } - } + }() +} + +func (pm *ProviderManager) Close() error { + pm.cancel() + pm.wg.Wait() + return nil } // AddProvider adds a provider diff --git a/providers/providers_manager_test.go b/providers/providers_manager_test.go index ba238a59e..e830929ef 100644 --- a/providers/providers_manager_test.go +++ b/providers/providers_manager_test.go @@ -31,7 +31,7 @@ func TestProviderManager(t *testing.T) { if err != nil { t.Fatal(err) } - p, err := NewProviderManager(ctx, mid, ps, dssync.MutexWrap(ds.NewMapDatastore())) + p, err := NewProviderManager(mid, ps, dssync.MutexWrap(ds.NewMapDatastore())) if err != nil { t.Fatal(err) } @@ -60,7 +60,7 @@ func TestProviderManager(t *testing.T) { t.Fatalf("Should have got 3 providers, got %d", len(resp)) } - p.proc.Close() + p.Close() } func TestProvidersDatastore(t *testing.T) { @@ -77,11 +77,11 @@ func TestProvidersDatastore(t *testing.T) { t.Fatal(err) } - p, err := NewProviderManager(ctx, mid, ps, dssync.MutexWrap(ds.NewMapDatastore())) + p, err := NewProviderManager(mid, ps, dssync.MutexWrap(ds.NewMapDatastore())) if err != nil { t.Fatal(err) } - defer p.proc.Close() + defer p.Close() friend := peer.ID("friend") var mhs []mh.Multihash @@ -166,7 +166,7 @@ func TestProvidesExpire(t *testing.T) { if err != nil { t.Fatal(err) } - p, err := NewProviderManager(ctx, mid, ps, ds) + p, err := NewProviderManager(mid, ps, ds) if err != nil { t.Fatal(err) } @@ -216,7 +216,7 @@ func TestProvidesExpire(t *testing.T) { time.Sleep(time.Second / 2) // Stop to prevent data races - p.Process().Close() + p.Close() if p.cache.Len() != 0 { t.Fatal("providers map not 
cleaned up") @@ -278,11 +278,11 @@ func TestLargeProvidersSet(t *testing.T) { t.Fatal(err) } - p, err := NewProviderManager(ctx, mid, ps, dstore) + p, err := NewProviderManager(mid, ps, dstore) if err != nil { t.Fatal(err) } - defer p.proc.Close() + defer p.Close() var mhs []mh.Multihash for i := 0; i < 1000; i++ { @@ -318,7 +318,7 @@ func TestUponCacheMissProvidersAreReadFromDatastore(t *testing.T) { t.Fatal(err) } - pm, err := NewProviderManager(ctx, p1, ps, dssync.MutexWrap(ds.NewMapDatastore())) + pm, err := NewProviderManager(p1, ps, dssync.MutexWrap(ds.NewMapDatastore())) if err != nil { t.Fatal(err) } @@ -347,7 +347,7 @@ func TestWriteUpdatesCache(t *testing.T) { t.Fatal(err) } - pm, err := NewProviderManager(ctx, p1, ps, dssync.MutexWrap(ds.NewMapDatastore())) + pm, err := NewProviderManager(p1, ps, dssync.MutexWrap(ds.NewMapDatastore())) if err != nil { t.Fatal(err) } diff --git a/query.go b/query.go index d0d5d3f24..47ef11dfc 100644 --- a/query.go +++ b/query.go @@ -85,7 +85,7 @@ type lookupWithFollowupResult struct { // After the lookup is complete the query function is run (unless stopped) against all of the top K peers from the // lookup that have not already been successfully queried. 
func (dht *IpfsDHT) runLookupWithFollowup(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, error) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunLookupWithFollowup", trace.WithAttributes(attribute.String("Target", target))) + ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunLookupWithFollowup", trace.WithAttributes(internal.KeyAsAttribute("Target", target))) defer span.End() // run the query @@ -470,7 +470,7 @@ func (q *query) queryPeer(ctx context.Context, ch chan<- *queryUpdate, p peer.ID queryDuration := time.Since(startQuery) // query successful, try to add to RT - q.dht.peerFound(p, true) + q.dht.validPeerFound(p) // process new peers saw := []peer.ID{} diff --git a/query_test.go b/query_test.go index 2f57743a0..f8d398612 100644 --- a/query_test.go +++ b/query_test.go @@ -34,7 +34,7 @@ func TestRTEvictionOnFailedQuery(t *testing.T) { // peers should be in the RT because of fixLowPeers require.NoError(t, tu.WaitFor(ctx, func() error { if !checkRoutingTable(d1, d2) { - return fmt.Errorf("should have routes") + return fmt.Errorf("should have routes") } return nil })) @@ -45,7 +45,7 @@ func TestRTEvictionOnFailedQuery(t *testing.T) { // peers will still be in the RT because we have decoupled membership from connectivity require.NoError(t, tu.WaitFor(ctx, func() error { if !checkRoutingTable(d1, d2) { - return fmt.Errorf("should have routes") + return fmt.Errorf("should have routes") } return nil })) diff --git a/routing.go b/routing.go index bcc062e10..06ea5dec6 100644 --- a/routing.go +++ b/routing.go @@ -13,7 +13,6 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" u "github.com/ipfs/boxo/util" @@ -35,8 +34,8 @@ import ( // PutValue adds value corresponding to given Key. 
// This is the top level "Store" operation of the DHT func (dht *IpfsDHT) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.PutValue", trace.WithAttributes(attribute.String("Key", key))) - defer span.End() + ctx, end := tracer.PutValue(dhtName, ctx, key, value, opts...) + defer func() { end(err) }() if !dht.enableValues { return routing.ErrNotSupported @@ -109,9 +108,9 @@ type recvdVal struct { } // GetValue searches for the value corresponding to given Key. -func (dht *IpfsDHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (_ []byte, err error) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetValue", trace.WithAttributes(attribute.String("Key", key))) - defer span.End() +func (dht *IpfsDHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) { + ctx, end := tracer.GetValue(dhtName, ctx, key, opts...) + defer func() { end(result, err) }() if !dht.enableValues { return nil, routing.ErrNotSupported @@ -146,14 +145,9 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string, opts ...routing.Op } // SearchValue searches for the value corresponding to given Key and streams the results. -func (dht *IpfsDHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.SearchValue", trace.WithAttributes(attribute.String("Key", key))) - var good bool - defer func() { - if !good { - span.End() - } - }() +func (dht *IpfsDHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { + ctx, end := tracer.SearchValue(dhtName, ctx, key, opts...) 
+ defer func() { ch, err = end(ch, err) }() if !dht.enableValues { return nil, routing.ErrNotSupported @@ -174,9 +168,7 @@ func (dht *IpfsDHT) SearchValue(ctx context.Context, key string, opts ...routing valCh, lookupRes := dht.getValues(ctx, key, stopCh) out := make(chan []byte) - good = true go func() { - defer span.End() defer close(out) best, peersWithBest, aborted := dht.searchValueQuorum(ctx, key, valCh, stopCh, out, responsesNeeded) if best == nil || aborted { @@ -319,17 +311,11 @@ func (dht *IpfsDHT) getValues(ctx context.Context, key string, stopQuery chan st ID: p, }) - mctx, mspan := internal.StartSpan(ctx, "protoMessenger.GetValue", trace.WithAttributes(attribute.Stringer("peer", p))) - rec, peers, err := dht.protoMessenger.GetValue(mctx, p, key) + rec, peers, err := dht.protoMessenger.GetValue(ctx, p, key) if err != nil { - if mspan.IsRecording() { - mspan.SetStatus(codes.Error, err.Error()) - } - mspan.End() logger.Debugf("error getting closer peers: %s", err) return nil, err } - mspan.End() // For DHT query command routing.PublishQueryEvent(ctx, &routing.QueryEvent{ @@ -401,8 +387,8 @@ func (dht *IpfsDHT) refreshRTIfNoShortcut(key kb.ID, lookupRes *lookupWithFollow // Provide makes this node announce that it can provide a value for the given key func (dht *IpfsDHT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.Provide", trace.WithAttributes(attribute.String("Key", key.String()), attribute.Bool("Broadcast", brdcst))) - defer span.End() + ctx, end := tracer.Provide(dhtName, ctx, key, brdcst) + defer func() { end(err) }() if !dht.enableProviders { return routing.ErrNotSupported @@ -505,7 +491,10 @@ func (dht *IpfsDHT) classicProvide(ctx context.Context, keyMH multihash.Multihas go func(p peer.ID) { defer wg.Done() logger.Debugf("putProvider(%s, %s)", internal.LoggableProviderRecordBytes(keyMH), p) - err := dht.protoMessenger.PutProvider(ctx, p, keyMH, dht.host) + err := 
dht.protoMessenger.PutProviderAddrs(ctx, p, keyMH, peer.AddrInfo{ + ID: dht.self, + Addrs: dht.filterAddrs(dht.host.Addrs()), + }) if err != nil { logger.Debug(err) } @@ -754,18 +743,17 @@ func (dht *IpfsDHT) detailedLookupForProvidersAsync(ctx context.Context, key mul // the search query completes. If count is zero then the query will run until it // completes. Note: not reading from the returned channel may block the query // from progressing. -func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo { +func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) { + ctx, end := tracer.FindProvidersAsync(dhtName, ctx, key, count) + defer func() { ch = end(ch, nil) }() + if !dht.enableProviders || !key.Defined() { peerOut := make(chan peer.AddrInfo) close(peerOut) return peerOut } - chSize := count - if count == 0 { - chSize = 1 - } - peerOut := make(chan peer.AddrInfo, chSize) + peerOut := make(chan peer.AddrInfo) keyMH := key.Hash() @@ -775,7 +763,8 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key multihash.Multihash, count int, peerOut chan peer.AddrInfo) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.FindProvidersAsyncRoutine", trace.WithAttributes(attribute.Stringer("Key", key))) + // use a span here because unlike tracer.FindProvidersAsync we know who told us about it and that is interesting to log.
+ ctx, span := internal.StartSpan(ctx, "IpfsDHT.FindProvidersAsyncRoutine") defer span.End() defer close(peerOut) @@ -834,16 +823,10 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key multihash ID: p, }) - mctx, mspan := internal.StartSpan(ctx, "protoMessenger.GetProviders", trace.WithAttributes(attribute.Stringer("peer", p))) - provs, closest, err := dht.protoMessenger.GetProviders(mctx, p, key) + provs, closest, err := dht.protoMessenger.GetProviders(ctx, p, key) if err != nil { - if mspan.IsRecording() { - mspan.SetStatus(codes.Error, err.Error()) - } - mspan.End() return nil, err } - mspan.End() logger.Debugf("%d provider entries", len(provs)) @@ -893,9 +876,9 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key multihash } // FindPeer searches for a peer with given ID. -func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (_ peer.AddrInfo, err error) { - ctx, span := internal.StartSpan(ctx, "IpfsDHT.FindPeer", trace.WithAttributes(attribute.Stringer("PeerID", id))) - defer span.End() +func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (pi peer.AddrInfo, err error) { + ctx, end := tracer.FindPeer(dhtName, ctx, id) + defer func() { end(pi, err) }() if err := id.Validate(); err != nil { return peer.AddrInfo{}, err @@ -916,17 +899,11 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (_ peer.AddrInfo, ID: p, }) - mctx, mspan := internal.StartSpan(ctx, "protoMessenger.GetClosestPeers", trace.WithAttributes(attribute.Stringer("peer", p))) - peers, err := dht.protoMessenger.GetClosestPeers(mctx, p, id) + peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, id) if err != nil { - if mspan.IsRecording() { - mspan.SetStatus(codes.Error, err.Error()) - } - mspan.End() logger.Debugf("error getting closer peers: %s", err) return nil, err } - mspan.End() // For DHT query command routing.PublishQueryEvent(ctx, &routing.QueryEvent{ diff --git a/rtrefresh/rt_refresh_manager.go 
b/rtrefresh/rt_refresh_manager.go index d08983702..c81e9e6ed 100644 --- a/rtrefresh/rt_refresh_manager.go +++ b/rtrefresh/rt_refresh_manager.go @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/go-multierror" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-kad-dht/internal" kbucket "github.com/libp2p/go-libp2p-kbucket" "github.com/libp2p/go-libp2p/core/host" @@ -31,10 +31,9 @@ type triggerRefreshReq struct { } type RtRefreshManager struct { - ctx context.Context - cancel context.CancelFunc - refcount sync.WaitGroup - closeOnce sync.Once + ctx context.Context + cancel context.CancelFunc + refcount sync.WaitGroup // peerId of this DHT peer i.e. self peerId. h host.Host @@ -89,17 +88,14 @@ func NewRtRefreshManager(h host.Host, rt *kbucket.RoutingTable, autoRefresh bool }, nil } -func (r *RtRefreshManager) Start() error { +func (r *RtRefreshManager) Start() { r.refcount.Add(1) go r.loop() - return nil } func (r *RtRefreshManager) Close() error { - r.closeOnce.Do(func() { - r.cancel() - r.refcount.Wait() - }) + r.cancel() + r.refcount.Wait() return nil } @@ -117,6 +113,7 @@ func (r *RtRefreshManager) Refresh(force bool) <-chan error { case r.triggerRefresh <- &triggerRefreshReq{respCh: resp, forceCplRefresh: force}: case <-r.ctx.Done(): resp <- r.ctx.Err() + close(resp) } }() diff --git a/subscriber_notifee.go b/subscriber_notifee.go index eb20e7fcb..c1eb69387 100644 --- a/subscriber_notifee.go +++ b/subscriber_notifee.go @@ -1,27 +1,15 @@ package dht import ( - "context" "fmt" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/host/eventbus" - - "github.com/jbenet/goprocess" - ma "github.com/multiformats/go-multiaddr" ) -// subscriberNotifee implements network.Notifee and also manages the subscriber to the event bus. 
We consume peer -// identification events to trigger inclusion in the routing table, and we consume Disconnected events to eject peers -// from it. -type subscriberNotifee struct { - dht *IpfsDHT - subs event.Subscription -} - -func newSubscriberNotifiee(dht *IpfsDHT) (*subscriberNotifee, error) { +func (dht *IpfsDHT) startNetworkSubscriber() error { bufSize := eventbus.BufSize(256) evts := []interface{}{ @@ -35,6 +23,9 @@ func newSubscriberNotifiee(dht *IpfsDHT) (*subscriberNotifee, error) { // register for event bus notifications for when our local address/addresses change so we can // advertise those to the network new(event.EvtLocalAddressesUpdated), + + // we want to know when we are disconnecting from other peers. + new(event.EvtPeerConnectednessChanged), } // register for event bus local routability changes in order to trigger switching between client and server modes @@ -45,61 +36,57 @@ func newSubscriberNotifiee(dht *IpfsDHT) (*subscriberNotifee, error) { subs, err := dht.host.EventBus().Subscribe(evts, bufSize) if err != nil { - return nil, fmt.Errorf("dht could not subscribe to eventbus events; err: %s", err) + return fmt.Errorf("dht could not subscribe to eventbus events: %w", err) } - nn := &subscriberNotifee{ - dht: dht, - subs: subs, - } - - // register for network notifications - dht.host.Network().Notify(nn) - - return nn, nil -} - -func (nn *subscriberNotifee) subscribe(proc goprocess.Process) { - dht := nn.dht - defer dht.host.Network().StopNotify(nn) - defer nn.subs.Close() - - for { - select { - case e, more := <-nn.subs.Out(): - if !more { - return - } + dht.wg.Add(1) + go func() { + defer dht.wg.Done() + defer subs.Close() - switch evt := e.(type) { - case event.EvtLocalAddressesUpdated: - // when our address changes, we should proactively tell our closest peers about it so - // we become discoverable quickly. The Identify protocol will push a signed peer record - // with our new address to all peers we are connected to. 
However, we might not necessarily be connected - to our closet peers & so in the true spirit of Zen, searching for ourself in the network really is the best way - to to forge connections with those matter. - if dht.autoRefresh || dht.testAddressUpdateProcessing { - dht.rtRefreshManager.RefreshNoWait() + for { + select { + case e, more := <-subs.Out(): + if !more { + return } - case event.EvtPeerProtocolsUpdated: - handlePeerChangeEvent(dht, evt.Peer) - case event.EvtPeerIdentificationCompleted: - handlePeerChangeEvent(dht, evt.Peer) - case event.EvtLocalReachabilityChanged: - if dht.auto == ModeAuto || dht.auto == ModeAutoServer { - handleLocalReachabilityChangedEvent(dht, evt) - } else { - // something has gone really wrong if we get an event we did not subscribe to - logger.Errorf("received LocalReachabilityChanged event that was not subscribed to") + + switch evt := e.(type) { + case event.EvtLocalAddressesUpdated: + // when our address changes, we should proactively tell our closest peers about it so + // we become discoverable quickly. The Identify protocol will push a signed peer record + // with our new address to all peers we are connected to. However, we might not necessarily be connected + // to our closest peers & so in the true spirit of Zen, searching for ourselves in the network really is the best way + // to forge connections with those that matter.
+ if dht.autoRefresh || dht.testAddressUpdateProcessing { + dht.rtRefreshManager.RefreshNoWait() + } + case event.EvtPeerProtocolsUpdated: + handlePeerChangeEvent(dht, evt.Peer) + case event.EvtPeerIdentificationCompleted: + handlePeerChangeEvent(dht, evt.Peer) + case event.EvtPeerConnectednessChanged: + if evt.Connectedness != network.Connected { + dht.msgSender.OnDisconnect(dht.ctx, evt.Peer) + } + case event.EvtLocalReachabilityChanged: + if dht.auto == ModeAuto || dht.auto == ModeAutoServer { + handleLocalReachabilityChangedEvent(dht, evt) + } else { + // something has gone really wrong if we get an event we did not subscribe to + logger.Errorf("received LocalReachabilityChanged event that was not subscribed to") + } + default: + // something has gone really wrong if we get an event for another type + logger.Errorf("got wrong type from subscription: %T", e) } - default: - // something has gone really wrong if we get an event for another type - logger.Errorf("got wrong type from subscription: %T", e) + case <-dht.ctx.Done(): + return } - case <-proc.Closing(): - return } - } + }() + + return nil } func handlePeerChangeEvent(dht *IpfsDHT, p peer.ID) { @@ -108,8 +95,7 @@ func handlePeerChangeEvent(dht *IpfsDHT, p peer.ID) { logger.Errorf("could not check peerstore for protocol support: err: %s", err) return } else if valid { - dht.peerFound(p, false) - dht.fixRTIfNeeded() + dht.peerFound(p) } else { dht.peerStoppedDHT(p) } @@ -153,41 +139,3 @@ func (dht *IpfsDHT) validRTPeer(p peer.ID) (bool, error) { return dht.routingTablePeerFilter == nil || dht.routingTablePeerFilter(dht, p), nil } - -type disconnector interface { - OnDisconnect(ctx context.Context, p peer.ID) -} - -func (nn *subscriberNotifee) Disconnected(n network.Network, v network.Conn) { - dht := nn.dht - - ms, ok := dht.msgSender.(disconnector) - if !ok { - return - } - - select { - case <-dht.Process().Closing(): - return - default: - } - - p := v.RemotePeer() - - // Lock and check to see if we're 
still connected. We lock to make sure - // we don't concurrently process a connect event. - dht.plk.Lock() - defer dht.plk.Unlock() - if dht.host.Network().Connectedness(p) == network.Connected { - // We're still connected. - return - } - - ms.OnDisconnect(dht.Context(), p) -} - -func (nn *subscriberNotifee) Connected(network.Network, network.Conn) {} -func (nn *subscriberNotifee) OpenedStream(network.Network, network.Stream) {} -func (nn *subscriberNotifee) ClosedStream(network.Network, network.Stream) {} -func (nn *subscriberNotifee) Listen(network.Network, ma.Multiaddr) {} -func (nn *subscriberNotifee) ListenClose(network.Network, ma.Multiaddr) {} diff --git a/version.json b/version.json index 93d6ca712..86718ea8a 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.23.0" + "version": "v0.25.2" }