diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 0000000..4278659
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	logging "github.com/ipfs/go-log/v2"
+	"github.com/storacha/go-ucanto/did"
+	ed25519 "github.com/storacha/go-ucanto/principal/ed25519/signer"
+	"github.com/storacha/go-ucanto/principal/signer"
+	"github.com/storacha/indexing-service/pkg/server"
+	"github.com/storacha/indexing-service/pkg/service"
+	"github.com/urfave/cli/v2"
+)
+
+var log = logging.Logger("cmd")
+
+func main() {
+	logging.SetLogLevel("*", "info")
+
+	app := &cli.App{
+		Name:  "indexing-service",
+		Usage: "Manage running the indexing service.",
+		Commands: []*cli.Command{
+			{
+				Name:  "server",
+				Usage: "HTTP server interface to the indexing service",
+				Subcommands: []*cli.Command{
+					{
+						Name:  "start",
+						Usage: "start an indexing service HTTP server",
+						Flags: []cli.Flag{
+							&cli.IntFlag{
+								Name:    "port",
+								Aliases: []string{"p"},
+								Value:   9000,
+								Usage:   "port to bind the server to",
+							},
+							&cli.StringFlag{
+								Name:    "private-key",
+								Aliases: []string{"pk"},
+								Usage:   "base64-encoded private key identity for the server",
+							},
+							&cli.StringFlag{
+								Name:  "did",
+								Usage: "DID of the server (only needs to be set if different from what is derived from the private key, i.e. a did:web DID)",
+							},
+							&cli.StringFlag{
+								Name:    "redis-url",
+								Aliases: []string{"redis"},
+								EnvVars: []string{"REDIS_URL"},
+								Usage:   "URL for a running Redis database",
+							},
+							&cli.StringFlag{
+								Name:    "redis-passwd",
+								Aliases: []string{"rp"},
+								EnvVars: []string{"REDIS_PASSWD"},
+								Usage:   "password for Redis",
+							},
+							&cli.IntFlag{
+								Name:    "providers-redis-db",
+								Aliases: []string{"prd"},
+								Usage:   "database number for providers index",
+								Value:   0,
+							},
+							&cli.IntFlag{
+								Name:    "claims-redis-db",
+								Aliases: []string{"c"},
+								Usage:   "database number for claims",
+								Value:   1,
+							},
+							&cli.IntFlag{
+								Name:    "indexes-redis-db",
+								Aliases: []string{"ird"},
+								Usage:   "database number for indexes cache",
+								Value:   2,
+							},
+							&cli.StringFlag{
+								Name:        "ipni-endpoint",
+								Aliases:     []string{"ipni"},
+								DefaultText: "Defaults to https://cid.contact",
+								Value:       "https://cid.contact",
+								Usage:       "HTTP endpoint of the IPNI instance used to discover providers.",
+							},
+						},
+						Action: func(cCtx *cli.Context) error {
+							addr := fmt.Sprintf(":%d", cCtx.Int("port"))
+							var opts []server.Option
+							if cCtx.String("private-key") != "" {
+								id, err := ed25519.Parse(cCtx.String("private-key"))
+								if err != nil {
+									return fmt.Errorf("parsing server private key: %w", err)
+								}
+								if cCtx.String("did") != "" {
+									did, err := did.Parse(cCtx.String("did"))
+									if err != nil {
+										return fmt.Errorf("parsing server DID: %w", err)
+									}
+									id, err = signer.Wrap(id, did)
+									if err != nil {
+										return fmt.Errorf("wrapping server DID: %w", err)
+									}
+								}
+								opts = append(opts, server.WithIdentity(id))
+							}
+							var sc service.ServiceConfig
+							sc.RedisURL = cCtx.String("redis-url")
+							sc.RedisPasswd = cCtx.String("redis-passwd")
+							sc.ProvidersDB = cCtx.Int("providers-redis-db")
+							sc.ClaimsDB = cCtx.Int("claims-redis-db")
+							sc.IndexesDB = cCtx.Int("indexes-redis-db")
+							sc.IndexerURL = cCtx.String("ipni-endpoint")
+							indexingService, shutdown, err := service.Construct(sc)
+							if err != nil {
+								return err
+							}
+							defer func() {
+								shutdown(cCtx.Context)
+							}()
+							opts = append(opts, server.WithService(indexingService))
+							return server.ListenAndServe(addr, opts...)
+ }, + }, + }, + }, + }, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff --git a/go.mod b/go.mod index cc77ae1..db029dd 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/storacha-network/indexing-service +module github.com/storacha/indexing-service go 1.23 @@ -14,10 +14,20 @@ require ( github.com/multiformats/go-multihash v0.2.3 github.com/multiformats/go-varint v0.0.7 github.com/redis/go-redis/v9 v9.6.1 - github.com/storacha-network/go-ucanto v0.1.1-0.20240916072230-3bed7025597b + github.com/storacha/go-ucanto v0.1.1-0.20241003110856-f3261cb2a702 github.com/stretchr/testify v1.9.0 ) +require ( + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/pion/ice/v2 v2.3.35 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/ucan-wg/go-ucan v0.0.0-20240916120445-37f52863156c // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect +) + require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -25,7 +35,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/gammazero/channelqueue v0.2.2 // indirect github.com/gammazero/deque v0.2.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -34,24 +44,24 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-block-format v0.2.0 // indirect - github.com/ipfs/go-blockservice v0.5.0 // indirect + github.com/ipfs/go-blockservice v0.5.2 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect - github.com/ipfs/go-ipfs-blockstore v1.3.0 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect + github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-ipld-cbor v0.1.0 // indirect github.com/ipfs/go-ipld-format v0.6.0 // indirect github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-merkledag v0.11.0 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-verifcid v0.0.2 // indirect + github.com/ipfs/go-verifcid v0.0.3 // indirect github.com/ipld/go-car v0.6.2 // indirect github.com/ipld/go-codec-dagpb v1.6.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect @@ -72,16 +82,17 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/whyrusleeping/cbor-gen v0.1.1 // indirect - go.opentelemetry.io/otel v1.13.0 // 
indirect - go.opentelemetry.io/otel/trace v1.13.0 // indirect - go.uber.org/atomic v1.10.0 // indirect + github.com/urfave/cli/v2 v2.27.4 + github.com/whyrusleeping/cbor-gen v0.1.2 // indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.3.0 // indirect diff --git a/go.sum b/go.sum index 32f54dd..f6f4aa2 100644 --- a/go.sum +++ b/go.sum @@ -72,6 +72,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -115,8 +117,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -194,8 +196,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -231,8 +233,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -251,35 +253,32 @@ github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1Hy github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= -github.com/ipfs/go-blockservice v0.5.0 h1:B2mwhhhVQl2ntW2EIpaWPwSCxSuqr5fFA93Ms4bYLEY= -github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w= -github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-blockservice v0.5.2 h1:in9Bc+QcXwd1apOVM7Un9t8tixPKdaHQFdLSUM1Xgk8= +github.com/ipfs/go-blockservice v0.5.2/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ipfs-blockstore v1.3.0 h1:m2EXaWgwTzAfsmt5UdJ7Is6l4gJcaM/A12XwJyvYvMM= -github.com/ipfs/go-ipfs-blockstore v1.3.0/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= +github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= +github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= 
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= -github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= +github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= +github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= +github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= +github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= -github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= @@ -299,14 +298,12 @@ github.com/ipfs/go-peertaskqueue v0.8.0 h1:JyNO144tfu9bx6Hpo119zvbEL9iQ760FHOiJY github.com/ipfs/go-peertaskqueue v0.8.0/go.mod h1:cz8hEnnARq4Du5TGqiWKgMr/BOSQ5XOgMOh1K5YYKKM= github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= -github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= -github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= +github.com/ipfs/go-verifcid v0.0.3 h1:gmRKccqhWDocCRkC+a59g5QW7uJw5bpX9HWBevXa0zs= +github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJDzQJRvOUw= github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= -github.com/ipld/go-ipld-prime v0.21.1-0.20240914151128-f75d95ee166a h1:+aNUuMaSXRhQ48i9zUzBik/rkEJeVXvu+POuh+yOt9Y= -github.com/ipld/go-ipld-prime v0.21.1-0.20240914151128-f75d95ee166a/go.mod h1:LN+1Tx6867lbDCmf8bErp1TNw3Kh9eY2n0eJ+whRx38= github.com/ipld/go-ipld-prime v0.21.1-0.20240917223228-6148356a4c2e h1:0Anxx6pMS8U/qjTLVxPhpTYuuDMssHDtUEvzIz2Skw4= github.com/ipld/go-ipld-prime v0.21.1-0.20240917223228-6148356a4c2e/go.mod 
h1:LN+1Tx6867lbDCmf8bErp1TNw3Kh9eY2n0eJ+whRx38= github.com/ipni/go-libipni v0.6.13 h1:6fQU6ZFu8fi0DZIs4VXZrIFbT9r97dNmNl7flWMVblE= @@ -333,7 +330,6 @@ github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -396,12 +392,9 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -413,18 +406,15 @@ github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2 github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= -github.com/multiformats/go-varint 
v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -441,12 +431,13 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= -github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= +github.com/pion/datachannel v1.5.9/go.mod h1:kDUuk4CU4Uxp82NH4LQZbISULkX/HtzKa4P7ldf9izE= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= -github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/ice/v2 v2.3.35 h1:KrmahHoP3VXv40Sd12usQzjKKFNFY0pidsRup7It+RI= +github.com/pion/ice/v2 v2.3.35/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -467,12 +458,15 @@ github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= -github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pion/webrtc/v3 v3.3.3 h1:Qnh7O8CGvYfxjSZK10N0eFy8u5tzfwaNnEL0ltc/ZcU= +github.com/pion/webrtc/v3 v3.3.3/go.mod h1:9ssmnlmII7ZZtExYe7QXwh1xl6SiynZ9O4ABq+7YXwk= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -482,8 +476,8 @@ 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a h1:cgqrm0F3zwf9IPzca7xN4w+Zy6MC9ZkPvAC8QEWa/iQ= github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a/go.mod h1:ocZfO/tLSHqfScRDNTJbAJR1by4D1lewauX9OwTaPuY= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= @@ -491,10 +485,10 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= -github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.47.0 h1:yXs3v7r2bm1wmPTYNLKAAJTHMYkPEsfYJmTazXrCZ7Y= +github.com/quic-go/quic-go v0.47.0/go.mod h1:3bCapYsJvXGZcipOHuu7plYtaV6tnF+z7wIFsU0WK9E= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -503,9 +497,11 @@ github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0 github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -527,37 +523,40 @@ github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t6 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/storacha-network/go-ucanto v0.1.1-0.20240916072230-3bed7025597b h1:/YMRDfuwR70fIADnYRTLFS2phkki1rl/I0fQC3mVHiw= -github.com/storacha-network/go-ucanto v0.1.1-0.20240916072230-3bed7025597b/go.mod h1:q5QQt6aCAOvsHiMq1k3ji2wOC/EufDJdEW6XSsqXgqA= +github.com/storacha/go-ucanto v0.1.1-0.20241003110856-f3261cb2a702 h1:YghuzmQK0XJj2Rl+TTbL8N21QLrE2xAs5y53ruoR/Mg= +github.com/storacha/go-ucanto v0.1.1-0.20241003110856-f3261cb2a702/go.mod h1:Bi7DFuo0nj9/QmkqbLNLWf41xnOoJSFGg21G+UtzWoY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= -github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= -github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= -github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/ucan-wg/go-ucan v0.0.0-20240916120445-37f52863156c h1:A1pMNIlHPnJ6KROqNc6SKg7QlSiQA6umiEoy89Os4cM= +github.com/ucan-wg/go-ucan v0.0.0-20240916120445-37f52863156c/go.mod h1:IiRc1OKWUk7FziOTWmOo7iwbcEMr7ch0lgs3UrF13pU= github.com/urfave/cli v1.22.10/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/wI2L/jsondiff v0.6.0 h1:zrsH3FbfVa3JO9llxrcDy/XLkYPLgoMX6Mz3T2PP2AI= -github.com/wI2L/jsondiff v0.6.0/go.mod h1:D6aQ5gKgPF9g17j+E9N7aasmU1O+XvfmWm1y8UMmNpw= +github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= +github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.1.1 h1:eKfcJIoxivjMtwfCfmJAqSF56MHcWqyIScXwaC1VBgw= -github.com/whyrusleeping/cbor-gen v0.1.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= +github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -574,14 +573,16 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= -go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= -go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= -go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= 
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= @@ -611,8 +612,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -623,8 +627,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -653,8 +657,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -694,8 +698,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -771,12 +779,21 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -787,8 +804,11 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -849,14 +869,14 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -963,8 +983,9 @@ google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6h google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/blobindex/multihashmap.go b/pkg/blobindex/multihashmap.go index 7ef70f8..6c1629b 100644 --- a/pkg/blobindex/multihashmap.go +++ b/pkg/blobindex/multihashmap.go @@ -1,52 +1,11 @@ package blobindex import ( - "iter" - "maps" - mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/go-ucanto/core/iterable" + "github.com/storacha/indexing-service/pkg/internal/bytemap" ) -type multihashMap[T any] struct { - data map[string]T -} - // NewMultihashMap returns a new map of multihash to a data type func NewMultihashMap[T any](sizeHint int) MultihashMap[T] { - var stringMap map[string]T - if sizeHint == -1 { - stringMap = make(map[string]T) - } else { - stringMap = make(map[string]T, sizeHint) - } - return &multihashMap[T]{stringMap} -} - -func (mhm *multihashMap[T]) Get(mh mh.Multihash) T { - return mhm.data[string(mh)] -} - -func (mhm *multihashMap[T]) Has(mh mh.Multihash) bool { - _, ok := mhm.data[string(mh)] - return ok -} - -func (mhm *multihashMap[T]) Set(mh mh.Multihash, t T) { - mhm.data[string(mh)] = t -} - -func (mhm *multihashMap[T]) Delete(mh mh.Multihash) bool { - _, ok := mhm.data[string(mh)] - delete(mhm.data, string(mh)) - return ok -} - -func (mhm *multihashMap[T]) Size() int { - return len(mhm.data) -} -func (mhm *multihashMap[T]) Iterator() iter.Seq2[mh.Multihash, T] { - return iterable.Map2(func(str string, t T) (mh.Multihash, T) { - return mh.Multihash(str), t - }, maps.All(mhm.data)) + return bytemap.NewByteMap[mh.Multihash, T](sizeHint) } diff --git a/pkg/blobindex/shardeddagindex.go b/pkg/blobindex/shardeddagindex.go index d25d7fb..f2f8134 100644 --- a/pkg/blobindex/shardeddagindex.go +++ b/pkg/blobindex/shardeddagindex.go @@ -10,13 +10,13 @@ import ( "github.com/ipfs/go-cid" cidlink "github.com/ipld/go-ipld-prime/linking/cid" mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/go-ucanto/core/car" - "github.com/storacha-network/go-ucanto/core/ipld" - "github.com/storacha-network/go-ucanto/core/ipld/block" - "github.com/storacha-network/go-ucanto/core/ipld/codec/cbor" - "github.com/storacha-network/go-ucanto/core/ipld/hash/sha256" - "github.com/storacha-network/go-ucanto/core/result/failure" - dm "github.com/storacha-network/indexing-service/pkg/blobindex/datamodel" + "github.com/storacha/go-ucanto/core/car" + "github.com/storacha/go-ucanto/core/ipld" + "github.com/storacha/go-ucanto/core/ipld/block" + "github.com/storacha/go-ucanto/core/ipld/codec/cbor" + "github.com/storacha/go-ucanto/core/ipld/hash/sha256" + "github.com/storacha/go-ucanto/core/result/failure" + dm "github.com/storacha/indexing-service/pkg/blobindex/datamodel" ) // ExtractError is a union type of UnknownFormatError and DecodeFailureErorr diff --git a/pkg/blobindex/shardeddagindex_test.go b/pkg/blobindex/shardeddagindex_test.go index 06aa2c0..b0773c4 100644 --- 
a/pkg/blobindex/shardeddagindex_test.go
+++ b/pkg/blobindex/shardeddagindex_test.go
@@ -9,9 +9,9 @@ import (
 	"github.com/ipld/go-ipld-prime/datamodel"
 	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
 	mh "github.com/multiformats/go-multihash"
-	"github.com/storacha-network/go-ucanto/core/car"
-	"github.com/storacha-network/go-ucanto/core/ipld/block"
-	"github.com/storacha-network/indexing-service/pkg/blobindex"
+	"github.com/storacha/go-ucanto/core/car"
+	"github.com/storacha/go-ucanto/core/ipld/block"
+	"github.com/storacha/indexing-service/pkg/blobindex"
 	"github.com/stretchr/testify/require"
 )
diff --git a/pkg/blobindex/types.go b/pkg/blobindex/types.go
index 6f7b84c..333a1d5 100644
--- a/pkg/blobindex/types.go
+++ b/pkg/blobindex/types.go
@@ -5,8 +5,8 @@ import (
 	"iter"
 
 	mh "github.com/multiformats/go-multihash"
-	"github.com/storacha-network/go-ucanto/core/ipld"
-	dm "github.com/storacha-network/indexing-service/pkg/blobindex/datamodel"
+	"github.com/storacha/go-ucanto/core/ipld"
+	dm "github.com/storacha/indexing-service/pkg/blobindex/datamodel"
 )
 
 // MultihashMap is a generic for mapping multihashes to arbitrary data types
diff --git a/pkg/blobindex/util.go b/pkg/blobindex/util.go
index 9b1d310..bb64204 100644
--- a/pkg/blobindex/util.go
+++ b/pkg/blobindex/util.go
@@ -5,8 +5,8 @@ import (
 	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
 	mh "github.com/multiformats/go-multihash"
-	"github.com/storacha-network/go-ucanto/core/car"
-	"github.com/storacha-network/go-ucanto/core/ipld"
+	"github.com/storacha/go-ucanto/core/car"
+	"github.com/storacha/go-ucanto/core/ipld"
 )
 
 // FromShardArchives creates a sharded DAG index by indexing blocks in the passed CAR shards.
diff --git a/pkg/capability/assert/assert.go b/pkg/capability/assert/assert.go
index 8be727b..8c5d307 100644
--- a/pkg/capability/assert/assert.go
+++ b/pkg/capability/assert/assert.go
@@ -8,11 +8,11 @@ import (
 	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
 	basicnode "github.com/ipld/go-ipld-prime/node/basic"
 	mh "github.com/multiformats/go-multihash"
-	"github.com/storacha-network/go-ucanto/core/ipld"
-	"github.com/storacha-network/go-ucanto/core/result/failure"
-	"github.com/storacha-network/go-ucanto/core/schema"
-	"github.com/storacha-network/go-ucanto/validator"
-	adm "github.com/storacha-network/indexing-service/pkg/capability/assert/datamodel"
+	"github.com/storacha/go-ucanto/core/ipld"
+	"github.com/storacha/go-ucanto/core/result/failure"
+	"github.com/storacha/go-ucanto/core/schema"
+	"github.com/storacha/go-ucanto/validator"
+	adm "github.com/storacha/indexing-service/pkg/capability/assert/datamodel"
 )
 
 // export const assert = capability({
@@ -62,6 +62,10 @@ func Digest(d adm.DigestModel) (HasMultihash, failure.Failure) {
 	return digest(d.Digest), nil
 }
 
+func FromHash(mh mh.Multihash) HasMultihash {
+	return digest(mh)
+}
+
 var linkOrDigest = schema.Or(schema.Mapped(schema.Link(), Link), schema.Mapped(schema.Struct[adm.DigestModel](adm.DigestType(), nil), Digest))
 
 type LocationCaveats struct {
@@ -70,7 +74,7 @@ type LocationCaveats struct {
 	Range *adm.Range
 }
 
-func (lc LocationCaveats) Build() (datamodel.Node, error) {
+func (lc LocationCaveats) ToIPLD() (datamodel.Node, error) {
 	cn, err := lc.Content.ToIPLD()
 	if err != nil {
 		return nil, err
 	}
@@ -122,7 +126,7 @@ type InclusionCaveats struct {
 	Proof *ipld.Link
 }
 
-func (ic InclusionCaveats) Build() (datamodel.Node, error) {
+func (ic InclusionCaveats) ToIPLD() (datamodel.Node, error) {
 	cn, err := ic.Content.ToIPLD()
 	if err != nil {
 		return nil, err
 	}
@@ -172,7 +176,7 @@ type IndexCaveats struct {
 	Index ipld.Link
 }
 
-func (ic IndexCaveats) Build() (datamodel.Node, error) {
+func (ic IndexCaveats) ToIPLD() (datamodel.Node, error) {
 	md := &adm.IndexCaveatsModel{
 		Content: ic.Content,
@@ -210,7 +214,7 @@ type PartitionCaveats struct {
 	Parts []ipld.Link
 }
 
-func (pc PartitionCaveats) Build() (datamodel.Node, error) {
+func (pc PartitionCaveats) ToIPLD() (datamodel.Node, error) {
 	cn, err := pc.Content.ToIPLD()
 	if err != nil {
 		return nil, err
 	}
@@ -277,7 +281,7 @@ type RelationCaveats struct {
 	Parts []RelationPart
 }
 
-func (rc RelationCaveats) Build() (datamodel.Node, error) {
+func (rc RelationCaveats) ToIPLD() (datamodel.Node, error) {
 	cn, err := rc.Content.ToIPLD()
 	if err != nil {
 		return nil, err
 	}
@@ -366,7 +370,7 @@ type EqualsCaveats struct {
 	Equals ipld.Link
 }
 
-func (ec EqualsCaveats) Build() (datamodel.Node, error) {
+func (ec EqualsCaveats) ToIPLD() (datamodel.Node, error) {
 	content, err := ec.Content.ToIPLD()
 	if err != nil {
 		return nil, err
 	}
@@ -396,3 +400,20 @@ var Equals = validator.NewCapability(
 	}),
 	nil,
 )
+
+// Unit is a success type that can be used when there is no data to return from
+// a capability handler.
+type Unit struct{}
+
+func (u Unit) ToIPLD() (datamodel.Node, error) {
+	np := basicnode.Prototype.Any
+	nb := np.NewBuilder()
+	ma, err := nb.BeginMap(0)
+	if err != nil {
+		return nil, err
+	}
+	if err := ma.Finish(); err != nil {
+		return nil, err
+	}
+	return nb.Build(), nil
+}
diff --git a/pkg/internal/bytemap/bytemap.go b/pkg/internal/bytemap/bytemap.go
new file mode 100644
index 0000000..f4cea90
--- /dev/null
+++ b/pkg/internal/bytemap/bytemap.go
@@ -0,0 +1,62 @@
+package bytemap
+
+import (
+	"iter"
+	"maps"
+
+	"github.com/storacha/go-ucanto/core/iterable"
+)
+
+type byteMap[K ~[]byte, T any] struct {
+	data map[string]T
+}
+
+// ByteMap is a generic interface for mapping byte-array-like types to arbitrary data types
+type ByteMap[K ~[]byte, T any] interface {
+	Get(K) T
+	Has(K) bool
+	Set(K, T)
+	Delete(K) bool
+	Size() int
+	Iterator() iter.Seq2[K, T]
+}
+
+// NewByteMap returns a new map of a byte-array-like key type to a data type
+func NewByteMap[K ~[]byte, T any](sizeHint int) ByteMap[K, T] {
+	var stringMap map[string]T
+	if sizeHint == -1 {
+		stringMap = make(map[string]T)
+	} else {
+		stringMap = make(map[string]T, sizeHint)
+	}
+	return &byteMap[K, T]{stringMap}
+}
+
+func (bm *byteMap[K, T]) Get(b K) T {
+	return bm.data[string(b)]
+}
+
+func (bm *byteMap[K, T]) Has(b K) bool {
+	_, ok := bm.data[string(b)]
+	return ok
+}
+
+func (bm *byteMap[K, T]) Set(b K, t T) {
+	bm.data[string(b)] = t
+}
+
+func (bm *byteMap[K, T]) Delete(b K) bool {
+	_, ok := bm.data[string(b)]
+	delete(bm.data, string(b))
+	return ok
+}
+
+func (bm *byteMap[K, T]) Size() int {
+	return len(bm.data)
+}
+
+func (bm *byteMap[K, T]) Iterator() iter.Seq2[K, T] {
+	return iterable.Map2(func(str string, t T) (K, T) {
+		return K(str), t
+	}, maps.All(bm.data))
+}
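The bytemap package exists because Go maps cannot be keyed by []byte directly (slices are not comparable), so byteMap stores each key as a string and converts back to K when iterating. A minimal usage sketch, not part of the patch itself — the digest and count values are illustrative assumptions:

package main

import (
	"fmt"

	mh "github.com/multiformats/go-multihash"
	"github.com/storacha/indexing-service/pkg/internal/bytemap"
)

func main() {
	// mh.Multihash has underlying type []byte, so it satisfies the ~[]byte constraint.
	digest, err := mh.Sum([]byte("hello"), mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}

	counts := bytemap.NewByteMap[mh.Multihash, int](-1) // -1 means no size hint
	counts.Set(digest, 1)
	fmt.Println(counts.Has(digest), counts.Get(digest), counts.Size())

	// Iterator returns an iter.Seq2, usable with Go 1.23 range-over-func.
	for key, n := range counts.Iterator() {
		fmt.Printf("%s -> %d\n", key.B58String(), n)
	}
}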
diff --git a/pkg/internal/jobqueue/jobqueue.go b/pkg/internal/jobqueue/jobqueue.go
new file mode 100644
index 0000000..f009729
--- /dev/null
+++ b/pkg/internal/jobqueue/jobqueue.go
@@ -0,0 +1,214 @@
+package jobqueue
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+)
+
+// ErrQueueShutdown means the queue is shutdown so the job could not be queued
+var ErrQueueShutdown = errors.New("queue is shutdown")
+
+type (
+	// Option modifies the config of a JobQueue
+	Option func(*config)
+
+	// Handler handles jobs of the given type
+	Handler[Job any] func(ctx context.Context, j Job) error
+
+	// JobQueue is an asynchronous queue for jobs, which can be processed in parallel
+	// by the job queue's handler
+	JobQueue[Job any] struct {
+		*config
+		handler  Handler[Job]
+		incoming chan quitOrJob
+		closed   chan struct{}
+		closing  chan struct{}
+	}
+
+	config struct {
+		jobTimeout      time.Duration
+		shutdownTimeout time.Duration
+		errorHandler    func(error)
+		buffer          int
+		concurrency     int
+	}
+
+	quitOrJob interface {
+		isQuitOrJob()
+	}
+
+	job[Job any] struct {
+		j Job
+	}
+
+	quit struct{}
+)
+
+// WithBuffer allows a set amount of jobs to be buffered even if all workers are busy
+func WithBuffer(buffer int) Option {
+	return func(c *config) {
+		c.buffer = buffer
+	}
+}
+
+// WithConcurrency sets the number of workers that will process jobs in
+// parallel
+func WithConcurrency(concurrency int) Option {
+	return func(c *config) {
+		c.concurrency = concurrency
+	}
+}
+
+// WithErrorHandler uses the given error handler whenever a job errors while processing
+func WithErrorHandler(errorHandler func(error)) Option {
+	return func(c *config) {
+		c.errorHandler = errorHandler
+	}
+}
+
+// WithJobTimeout cancels the context passed to the job handler after the
+// specified timeout
+func WithJobTimeout(jobTimeout time.Duration) Option {
+	return func(c *config) {
+		c.jobTimeout = jobTimeout
+	}
+}
+
+// WithShutdownTimeout sets the shutdown timeout. When the queue is shut down, the
+// context passed to all job handlers will cancel after the specified timeout
+func WithShutdownTimeout(shutdownTimeout time.Duration) Option {
+	return func(c *config) {
+		c.shutdownTimeout = shutdownTimeout
+	}
+}
+
+// NewJobQueue returns a new job queue that processes with the given handler
+func NewJobQueue[Job any](handler Handler[Job], opts ...Option) *JobQueue[Job] {
+	c := &config{
+		concurrency: 1,
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	return &JobQueue[Job]{
+		config:   c,
+		handler:  handler,
+		incoming: make(chan quitOrJob),
+		closing:  make(chan struct{}),
+		closed:   make(chan struct{}),
+	}
+}
+// Queue attempts to queue the job. It will fail if the queue is shutdown, or
+// the passed context cancels before the job can be queued
+func (p *JobQueue[Job]) Queue(ctx context.Context, j Job) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-p.closing:
+		return ErrQueueShutdown
+	case p.incoming <- job[Job]{j}:
+		return nil
+	}
+}
+
+// Startup starts the queue in the background (returns immediately)
+func (p *JobQueue[Job]) Startup() {
+	go p.run()
+}
+
+// Shutdown shuts down the queue, returning when the whole queue is shutdown or
+// the passed context cancels
+func (p *JobQueue[Job]) Shutdown(ctx context.Context) error {
+	// signal the queue is closing -- this will cause anyone awaiting a queue
+	// to abort
+	close(p.closing)
+	// now send a quit message into the incoming queue. This will be the last
+	// message written to the queue. We don't simply close incoming, because
+	// that could cause a panic for a concurrent writer
+	p.incoming <- quit{}
+	// now wait for the goroutines to complete
+	select {
+	case <-p.closed:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func (p *JobQueue[Job]) run() {
+	// the queue is fully closed when this function completes
+	defer close(p.closed)
+	// outgoing will be used to consume jobs by the workers
+	outgoing := make(chan Job, p.buffer)
+
+	// set up a cancellable context so that we can shut down all job
+	// executions when ready
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var wg sync.WaitGroup
+	// spin up all workers
+	for range p.concurrency {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			p.worker(ctx, outgoing)
+		}()
+	}
+
+	for {
+		// read the next message from the incoming queue
+		queued := <-p.incoming
+		switch typed := queued.(type) {
+		case job[Job]:
+			// if it's a job, just send it to the workers
+			outgoing <- typed.j
+		case quit:
+			// if it's a quit message, this is the last message we will receive
+			// so start the shutdown process
+
+			// tell all the workers they're done processing jobs
+			close(outgoing)
+			// if there is a shutdown timeout, queue a background routine to cancel
+			// the context (i.e.
accelerate workers shutting down by the handler getting + // a shutdown context) + if p.shutdownTimeout != 0 { + timer := time.NewTimer(p.shutdownTimeout) + go func() { + <-timer.C + cancel() + }() + } + // wait for the workers to shutdown + wg.Wait() + return + } + } +} + +func (p *JobQueue[Job]) jobCtx(ctx context.Context) (context.Context, context.CancelFunc) { + if p.jobTimeout != 0 { + return context.WithTimeout(ctx, p.jobTimeout) + } + return context.WithCancel(ctx) +} + +func (p *JobQueue[Job]) handleJob(ctx context.Context, job Job) { + ctx, cancel := p.jobCtx(ctx) + defer cancel() + err := p.handler(ctx, job) + if err != nil && p.errorHandler != nil { + p.errorHandler(err) + } +} + +func (p *JobQueue[Job]) worker(ctx context.Context, jobs <-chan Job) { + for job := range jobs { + p.handleJob(ctx, job) + } +} + +func (job[Job]) isQuitOrJob() {} +func (quit) isQuitOrJob() {} diff --git a/pkg/internal/jobwalker/jobwalker.go b/pkg/internal/jobwalker/jobwalker.go new file mode 100644 index 0000000..6bf1d85 --- /dev/null +++ b/pkg/internal/jobwalker/jobwalker.go @@ -0,0 +1,19 @@ +package jobwalker + +import "context" + +// WrappedState is a wrapper around any state to enable atomic access and modification +type WrappedState[State any] interface { + Access() State + Modify(func(State) State) + // CmpSwap calls the "willModify" function (potentially multiple times) and calls modify if it returns true + CmpSwap(willModify func(State) bool, modify func(State) State) bool +} + +// JobHandler handles the specified job and uses the passed in function to spawn more +// jobs. +// The handler should stop processing if spawn errors, returning the error from spawn +type JobHandler[Job any, State any] func(ctx context.Context, j Job, spawn func(Job) error, state WrappedState[State]) error + +// JobWalker processes a set of jobs that spawn other jobs, all while modifying a final state +type JobWalker[Job, State any] func(ctx context.Context, initial []Job, initialState State, handler JobHandler[Job, State]) (State, error) diff --git a/pkg/internal/jobwalker/parallelwalk/parallelwalk.go b/pkg/internal/jobwalker/parallelwalk/parallelwalk.go new file mode 100644 index 0000000..f111f63 --- /dev/null +++ b/pkg/internal/jobwalker/parallelwalk/parallelwalk.go @@ -0,0 +1,141 @@ +package parallelwalk + +import ( + "context" + "errors" + "sync" + + "github.com/storacha/indexing-service/pkg/internal/jobwalker" +) + +type threadSafeState[State any] struct { + state State + lk sync.RWMutex +} + +func (ts *threadSafeState[State]) Access() State { + ts.lk.RLock() + defer ts.lk.RUnlock() + return ts.state +} + +func (ts *threadSafeState[State]) Modify(modify func(State) State) { + ts.lk.Lock() + defer ts.lk.Unlock() + ts.modify(modify) +} + +func (ts *threadSafeState[State]) modify(modify func(State) State) { + ts.state = modify(ts.state) +} + +func (ts *threadSafeState[State]) CmpSwap(willModify func(State) bool, modify func(State) State) bool { + if !willModify(ts.Access()) { + return false + } + ts.lk.Lock() + defer ts.lk.Unlock() + if !willModify(ts.state) { + return false + } + ts.modify(modify) + return true +} + +// NewParallelWalk generates a function to handle a series of jobs that may spawn more jobs +// It will execute jobs in parallel, with the specified concurrency until all initial jobs +// and all spawned jobs (recursively) are handled, or a job errors +// This code is adapted from https://github.com/ipfs/go-merkledag/blob/master/merkledag.go#L464C6-L584 +func NewParallelWalk[Job, State 
any](concurrency int) jobwalker.JobWalker[Job, State] { + return func(ctx context.Context, initial []Job, initialState State, handler jobwalker.JobHandler[Job, State]) (State, error) { + if len(initial) == 0 { + return initialState, errors.New("must provide at least one initial job") + } + jobFeed := make(chan Job) + spawnedJobs := make(chan Job) + jobFinishes := make(chan struct{}) + + state := &threadSafeState[State]{ + state: initialState, + } + var wg sync.WaitGroup + + errChan := make(chan error) + + jobFeedCtx, cancel := context.WithCancel(ctx) + + defer wg.Wait() + defer cancel() + for i := 0; i < concurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for job := range jobFeed { + + err := handler(jobFeedCtx, job, func(next Job) error { + select { + case spawnedJobs <- next: + return nil + case <-jobFeedCtx.Done(): + return jobFeedCtx.Err() + } + }, state) + + if err != nil { + select { + case errChan <- err: + case <-jobFeedCtx.Done(): + } + return + } + + select { + case jobFinishes <- struct{}{}: + case <-jobFeedCtx.Done(): + } + } + }() + } + defer close(jobFeed) + + jobProcessor := jobFeed + nextJob, queuedJobs := initial[0], initial[1:] + + var inProgress int + + hasNextJob := func() bool { + return jobProcessor != nil + } + + for { + select { + case jobProcessor <- nextJob: + inProgress++ + if len(queuedJobs) > 0 { + nextJob = queuedJobs[0] + queuedJobs = queuedJobs[1:] + } else { + var empty Job + nextJob = empty + jobProcessor = nil + } + case <-jobFinishes: + inProgress-- + if inProgress == 0 && !hasNextJob() { + return state.Access(), nil + } + case queued := <-spawnedJobs: + if !hasNextJob() { + nextJob = queued + jobProcessor = jobFeed + } else { + queuedJobs = append(queuedJobs, queued) + } + case err := <-errChan: + return state.Access(), err + case <-ctx.Done(): + return state.Access(), ctx.Err() + } + } + } +} diff --git a/pkg/internal/jobwalker/singlewalk/singlewalk.go b/pkg/internal/jobwalker/singlewalk/singlewalk.go new file mode 100644 index 0000000..f5160a1 --- /dev/null +++ b/pkg/internal/jobwalker/singlewalk/singlewalk.go @@ -0,0 +1,52 @@ +package singlewalk + +import ( + "context" + + "github.com/storacha/indexing-service/pkg/internal/jobwalker" +) + +type singleState[State any] struct { + m State +} + +// Access implements jobwalker.WrappedState. +func (s *singleState[State]) Access() State { + return s.m +} + +// CmpSwap implements jobwalker.WrappedState. +func (s *singleState[State]) CmpSwap(willModify func(State) bool, modify func(State) State) bool { + if !willModify(s.m) { + return false + } + s.m = modify(s.m) + return true +} + +// Modify implements jobwalker.WrappedState. 
+func (s *singleState[State]) Modify(modify func(State) State) {
+	s.m = modify(s.m)
+}
+
+var _ jobwalker.WrappedState[any] = &singleState[any]{}
+
+// SingleWalker processes jobs that spawn more jobs, sequentially, depth-first, in a single thread
+func SingleWalker[Job, State any](ctx context.Context, initial []Job, initialState State, handler jobwalker.JobHandler[Job, State]) (State, error) {
+	stack := initial
+	state := &singleState[State]{initialState}
+	for len(stack) > 0 {
+		select {
+		case <-ctx.Done():
+			return state.Access(), ctx.Err()
+		default:
+		}
+		next := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+		if err := handler(ctx, next, func(j Job) error {
+			stack = append(stack, j)
+			return nil
+		}, state); err != nil {
+			return state.Access(), err
+		}
+	}
+	return state.Access(), nil
+}
diff --git a/pkg/internal/testutil/fixtures.go b/pkg/internal/testutil/fixtures.go
index d2642c2..03e7fa4 100644
--- a/pkg/internal/testutil/fixtures.go
+++ b/pkg/internal/testutil/fixtures.go
@@ -1,6 +1,10 @@
 package testutil
 
-import "github.com/storacha-network/go-ucanto/principal/ed25519/signer"
+import (
+	"net/url"
+
+	"github.com/storacha/go-ucanto/principal/ed25519/signer"
+)
 
 // did:key:z6Mkk89bC3JrVqKie71YEcc5M1SMVxuCgNx6zLZ8SYJsxALi
 var Alice, _ = signer.Parse("MgCZT5vOnYZoVAeyjnzuJIVY9J4LNtJ+f8Js0cTPuKUpFne0BVEDJjEu6quFIU8yp91/TY/+MYK8GvlKoTDnqOCovCVM=")
@@ -13,3 +17,5 @@ var Mallory, _ = signer.Parse("MgCYtH0AvYxiQwBG6+ZXcwlXywq9tI50G2mCAUJbwrrahkO0B
 // did:key:z6MkrZ1r5XBFZjBU34qyD8fueMbMRkKw17BZaq2ivKFjnz2z
 var Service, _ = signer.Parse("MgCYKXoHVy7Vk4/QjcEGi+MCqjntUiasxXJ8uJKY0qh11e+0Bs8WsdqGK7xothgrDzzWD0ME7ynPjz2okXDh8537lId8=")
+
+var TestURL, _ = url.Parse("https://storacha.network")
diff --git a/pkg/internal/testutil/gen.go b/pkg/internal/testutil/gen.go
index fae9ddd..adfad5c 100644
--- a/pkg/internal/testutil/gen.go
+++ b/pkg/internal/testutil/gen.go
@@ -5,18 +5,25 @@ import (
 	"io"
 	"math/rand"
 	"net"
+	"net/url"
 	"strconv"
 
 	"github.com/ipfs/go-cid"
 	"github.com/ipld/go-ipld-prime/datamodel"
 	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
+	"github.com/ipni/go-libipni/find/model"
 	crypto "github.com/libp2p/go-libp2p/core/crypto"
 	peer "github.com/libp2p/go-libp2p/core/peer"
 	"github.com/multiformats/go-multiaddr"
 	manet "github.com/multiformats/go-multiaddr/net"
 	mh "github.com/multiformats/go-multihash"
-	"github.com/storacha-network/go-ucanto/core/car"
-	"github.com/storacha-network/go-ucanto/core/ipld/block"
+	"github.com/storacha/go-ucanto/core/car"
+	"github.com/storacha/go-ucanto/core/delegation"
+	"github.com/storacha/go-ucanto/core/ipld/block"
+	"github.com/storacha/go-ucanto/principal/ed25519/signer"
+	"github.com/storacha/go-ucanto/ucan"
+	"github.com/storacha/indexing-service/pkg/blobindex"
+	"github.com/storacha/indexing-service/pkg/capability/assert"
 )
 
 func RandomBytes(size int) []byte {
@@ -80,3 +87,85 @@ func RandomCID() datamodel.Link {
 func RandomMultihash() mh.Multihash {
 	return RandomCID().(cidlink.Link).Hash()
 }
+
+func RandomMultihashes(count int) []mh.Multihash {
+	mhs := make([]mh.Multihash, 0, count)
+	for range count {
+		mhs = append(mhs, RandomMultihash())
+	}
+	return mhs
+}
+
+func RandomLocationClaim() ucan.Capability[assert.LocationCaveats] {
+	return assert.Location.New(Service.DID().String(), assert.LocationCaveats{
+		Content:  assert.FromHash(RandomMultihash()),
+		Location: []url.URL{*TestURL},
+	})
+}
+
+func RandomLocationDelegation() delegation.Delegation {
+	did, err := signer.Generate()
+	if err != nil {
+		panic(err)
+	}
+	delegation, err := delegation.Delegate(Service, did,
[]ucan.Capability[assert.LocationCaveats]{RandomLocationClaim()}) + if err != nil { + panic(err) + } + return delegation +} + +func RandomIndexClaim() ucan.Capability[assert.IndexCaveats] { + return assert.Index.New(Service.DID().String(), assert.IndexCaveats{ + Content: RandomCID(), + Index: RandomCID(), + }) +} + +func RandomIndexDelegation() delegation.Delegation { + delegation, err := delegation.Delegate(Service, Service, []ucan.Capability[assert.IndexCaveats]{RandomIndexClaim()}) + if err != nil { + panic(err) + } + return delegation +} + +func RandomProviderResult() model.ProviderResult { + return model.ProviderResult{ + ContextID: RandomBytes(10), + Metadata: RandomBytes(10), + Provider: &peer.AddrInfo{ + ID: RandomPeer(), + Addrs: []multiaddr.Multiaddr{ + RandomMultiaddr(), + RandomMultiaddr(), + }, + }, + } +} + +func RandomShardedDagIndexView(size int) (cid.Cid, blobindex.ShardedDagIndexView) { + roots, contentCar := RandomCAR(size) + contentCarBytes, err := io.ReadAll(contentCar) + if err != nil { + panic(err) + } + + root, err := cid.Prefix{ + Version: 1, + Codec: cid.Raw, + MhType: mh.SHA2_256, + MhLength: -1, + }.Sum(contentCarBytes) + + if err != nil { + panic(err) + } + + shard, err := blobindex.FromShardArchives(roots[0], [][]byte{contentCarBytes}) + if err != nil { + panic(err) + } + + return root, shard +} diff --git a/pkg/internal/testutil/helpers.go b/pkg/internal/testutil/helpers.go index 8b5903b..eec3202 100644 --- a/pkg/internal/testutil/helpers.go +++ b/pkg/internal/testutil/helpers.go @@ -3,8 +3,8 @@ package testutil import ( "testing" - "github.com/storacha-network/go-ucanto/core/delegation" - "github.com/storacha-network/indexing-service/pkg/blobindex" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/indexing-service/pkg/blobindex" "github.com/stretchr/testify/require" ) @@ -28,6 +28,10 @@ func Must2[T, U any](val1 T, val2 U, err error) func(*testing.T) (T, U) { // RequireEqualIndex compares two sharded dag indexes to verify their equality func RequireEqualIndex(t *testing.T, expectedIndex blobindex.ShardedDagIndexView, actualIndex blobindex.ShardedDagIndexView) { + if expectedIndex == nil { + require.Nil(t, actualIndex) + return + } require.NotZero(t, actualIndex.Shards().Size()) require.Equal(t, expectedIndex.Shards().Size(), actualIndex.Shards().Size()) for key, shard := range actualIndex.Shards().Iterator() { @@ -43,6 +47,10 @@ func RequireEqualIndex(t *testing.T, expectedIndex blobindex.ShardedDagIndexView // RequireEqualDelegation compares two delegations to verify their equality func RequireEqualDelegation(t *testing.T, expectedDelegation delegation.Delegation, actualDelegation delegation.Delegation) { + if expectedDelegation == nil { + require.Nil(t, actualDelegation) + return + } require.Equal(t, expectedDelegation.Issuer(), actualDelegation.Issuer()) require.Equal(t, expectedDelegation.Audience(), actualDelegation.Audience()) require.Equal(t, expectedDelegation.Capabilities(), actualDelegation.Capabilities()) diff --git a/pkg/metadata/metadata.go b/pkg/metadata/metadata.go index 9031be7..025920a 100644 --- a/pkg/metadata/metadata.go +++ b/pkg/metadata/metadata.go @@ -1,7 +1,7 @@ /* - Package metadata implements the metadata protocol for publishing content claims on IPNI + Package metadata implements protocols for publishing content claims on IPNI - The goal of the content claims transport protocol is to provide a way to index content claims on IPNI + The goal is to enable partial publishing of content claims to IPNI The rules 
for publishing content claims records to IPNI are as follows:
@@ -18,13 +18,19 @@
 	The metadata for the claim is structured to maximize utility of the record while minimizing size
 
-	To generally respect the 100 byte maximum size for IPNI records, we encode the claim type as an integer, rather than as string
+	To generally respect the 100 byte maximum size for IPNI records, we do not encode the claim itself, but rather its CID.
 
-	The claim itself, being too large to fit in metadata, is referenced only by its CID. The full claim must be retrievable by
-	combining the http multiaddr of the provider + the claim CID
+	The full claim must be retrievable through an http multiaddr of the provider which contains path segments of the form `{claim}`.
+	To retrieve the claim, replace every `{claim}` with the string encoding of the claim CID
 
-	However, in order to enable faster chaining of requests and general processing, we add a shortcut bytes field,
-	which encodes specific information from the full claim and is interpreted based on the claim type.
+	For a location commitment, the content must be retrievable through an http multiaddr of the provider
+	which contains path segments of the form `{shard}`. To retrieve the content, replace every `{shard}` with the string encoding
+	of the shard cid in the metadata, or, if not present, the CIDv1 encoding (RAW codec) of the multihash used to look up the record.
+	Additionally, if a Range parameter is present in the metadata, it should be translated into a Range HTTP header when retrieving
+	content
+
+	However, in order to enable faster chaining of requests and general processing, we add additional fields to encode
+	specific information from the full claim.
 	This enables a client to quickly read the record and take action based on information in the claim before it has retrieved the full claim
 */
@@ -32,11 +38,10 @@
 package metadata
 
 import (
 	"bytes"
+	// imported for embedding
 	_ "embed"
-	"errors"
 	"fmt"
 	"io"
-	"net/url"
 
 	"github.com/ipfs/go-cid"
 	"github.com/ipld/go-ipld-prime"
@@ -46,15 +51,16 @@
 	ipnimd "github.com/ipni/go-libipni/metadata"
 	"github.com/multiformats/go-multicodec"
 	"github.com/multiformats/go-varint"
-	"github.com/storacha-network/indexing-service/pkg/capability/assert"
 )
 
 var (
-	_ ipnimd.Protocol = (*ContentClaimMetadata)(nil)
+	_ ipnimd.Protocol = (*IndexClaimMetadata)(nil)
 
 	//go:embed metadata.ipldsch
-	schemaBytes          []byte
-	contentClaimMetadata schema.TypedPrototype
+	schemaBytes                []byte
+	indexClaimMetadata         schema.TypedPrototype
+	equalsClaimMetadata        schema.TypedPrototype
+	locationCommitmentMetadata schema.TypedPrototype
 )
 
 func init() {
@@ -62,142 +68,159 @@
 	if err != nil {
 		panic(fmt.Errorf("failed to load schema: %w", err))
 	}
-	t := typeSystem.TypeByName("ContentClaimMetadata")
-	contentClaimMetadata = bindnode.Prototype((*ContentClaimMetadata)(nil), t)
+	indexClaimMetadata = bindnode.Prototype((*IndexClaimMetadata)(nil), typeSystem.TypeByName("IndexClaimMetadata"))
+	equalsClaimMetadata = bindnode.Prototype((*EqualsClaimMetadata)(nil), typeSystem.TypeByName("EqualsClaimMetadata"))
+	locationCommitmentMetadata = bindnode.Prototype((*LocationCommitmentMetadata)(nil), typeSystem.TypeByName("LocationCommitmentMetadata"))
 }
 
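To make the `{claim}` template in the package doc concrete, here is a hedged sketch of the substitution a client might perform; `resolveClaimURL` and the template URL are hypothetical, not part of this package:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// resolveClaimURL replaces every `{claim}` path segment in the template
// (taken from the provider's http multiaddr) with the claim CID's string form.
func resolveClaimURL(template string, claim cid.Cid) string {
	return strings.ReplaceAll(template, "{claim}", claim.String())
}

func main() {
	// derive an example claim CID rather than hard-coding one
	digest, _ := mh.Sum([]byte("example claim"), mh.SHA2_256, -1)
	claim := cid.NewCidV1(cid.DagCBOR, digest)
	fmt.Println(resolveClaimURL("https://provider.example/claims/{claim}", claim))
}
```

The same substitution applies to `{shard}` for location commitments, using the shard CID from the metadata (or the RAW-codec CIDv1 of the looked-up multihash when absent).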
+// metadata identifiers
+// currently we just use experimental codecs for now
+
-const TransportContentClaim = 0x3E0000
-type ClaimType uint64
+// IndexClaimID is the multicodec for index claims
+const IndexClaimID = 0x3E0000
 
-const (
-	LocationCommitment ClaimType = iota
-	IndexClaim
-	EqualsClaim
-)
+// EqualsClaimID is the multicodec for equals claims
+const EqualsClaimID = 0x3E0001
+
+// LocationCommitmentID is the multicodec for location commitments
+const LocationCommitmentID = 0x3E0002
+
+var nodePrototypes = map[multicodec.Code]schema.TypedPrototype{
+	IndexClaimID:         indexClaimMetadata,
+	EqualsClaimID:        equalsClaimMetadata,
+	LocationCommitmentID: locationCommitmentMetadata,
+}
+
+var MetadataContext ipnimd.MetadataContext
+
+func init() {
+	mdctx := ipnimd.Default
+	mdctx = mdctx.WithProtocol(IndexClaimID, func() ipnimd.Protocol { return &IndexClaimMetadata{} })
+	mdctx = mdctx.WithProtocol(EqualsClaimID, func() ipnimd.Protocol { return &EqualsClaimMetadata{} })
+	mdctx = mdctx.WithProtocol(LocationCommitmentID, func() ipnimd.Protocol { return &LocationCommitmentMetadata{} })
+	MetadataContext = mdctx
+}
+
+type HasClaim interface {
+	GetClaim() cid.Cid
+}
+
+/*
+	IndexClaimMetadata represents metadata for an index claim
+	Index claim metadata
+*/
+type IndexClaimMetadata struct {
+	// Index represents the cid of the index for this claim
+	Index cid.Cid
+	// Expiration as unix epoch in seconds
+	Expiration int64
+	// Claim indicates the cid of the claim - the claim should be fetchable by combining the http multiaddr of the provider with the claim cid
+	Claim cid.Cid
+}
+
+func (i *IndexClaimMetadata) ID() multicodec.Code {
+	return IndexClaimID
+}
+func (i *IndexClaimMetadata) MarshalBinary() ([]byte, error)            { return marshalBinary(i) }
+func (i *IndexClaimMetadata) UnmarshalBinary(data []byte) error         { return unmarshalBinary(i, data) }
+func (i *IndexClaimMetadata) ReadFrom(r io.Reader) (n int64, err error) { return readFrom(i, r) }
+func (i *IndexClaimMetadata) GetClaim() cid.Cid {
+	return i.Claim
+}
+
+// EqualsClaimMetadata represents metadata for an equals claim
+type EqualsClaimMetadata struct {
+	// Equals represents an equivalent cid to the content cid that was used for lookup
+	Equals cid.Cid
+	// Expiration as unix epoch in seconds
+	Expiration int64
+	// Claim indicates the cid of the claim - the claim should be fetchable by combining the http multiaddr of the provider with the claim cid
+	Claim cid.Cid
+}
-var ClaimNames = map[ClaimType]string{
-	LocationCommitment: assert.LocationAbility,
-	IndexClaim:         assert.IndexAbility,
-	EqualsClaim:        assert.EqualsAbility,
-}
+func (e *EqualsClaimMetadata) ID() multicodec.Code {
+	return EqualsClaimID
+}
+func (e *EqualsClaimMetadata) MarshalBinary() ([]byte, error)            { return marshalBinary(e) }
+func (e *EqualsClaimMetadata) UnmarshalBinary(data []byte) error         { return unmarshalBinary(e, data) }
+func (e *EqualsClaimMetadata) ReadFrom(r io.Reader) (n int64, err error) { return readFrom(e, r) }
+func (e *EqualsClaimMetadata) GetClaim() cid.Cid {
+	return e.Claim
+}
+
+type Range struct {
+	Offset uint64
+	Length *uint64
+}
-func (a ClaimType) String() string {
-	return ClaimNames[a]
-}
+// LocationCommitmentMetadata represents metadata for a location commitment
+type LocationCommitmentMetadata struct {
+	// Shard is an optional alternate cid to use to look up this location -- if the looked up shard is part of a larger shard
+	Shard *cid.Cid
+	// Range is an optional byte range within a shard
+	Range *Range
+	// Expiration as unix epoch in seconds
+	Expiration int64
+	// Claim indicates the cid of the claim - the claim should be fetchable by combining the http multiaddr of the provider with the claim cid
+	Claim cid.Cid
+}
-// ContentClaimMetadata represents metadata for a content claim
-type ContentClaimMetadata struct {
-	// kind of claim
-	ClaimType ClaimType
-	// based on the claim type, this can be used to access the key information in the claim without fetching the whole claim
-	EmbeddedData []byte
-	// ClaimCID indicates the cid of the claim - the claim should be fetchable by combining the http multiaddr of the provider with the claim cid
-	ClaimCID cid.Cid
-}
+func (l *LocationCommitmentMetadata) ID() multicodec.Code {
+	return LocationCommitmentID
+}
+func (l *LocationCommitmentMetadata) MarshalBinary() ([]byte, error) { return marshalBinary(l) }
+func (l *LocationCommitmentMetadata) UnmarshalBinary(data []byte) error {
+	return unmarshalBinary(l, data)
+}
+func (l *LocationCommitmentMetadata) ReadFrom(r io.Reader) (n int64, err error) {
+	return readFrom(l, r)
+}
+func (l *LocationCommitmentMetadata) GetClaim() cid.Cid {
+	return l.Claim
+}
-func (ccm *ContentClaimMetadata) ID() multicodec.Code {
-	return TransportContentClaim
-}
+type hasID[T any] interface {
+	*T
+	ID() multicodec.Code
+}
 
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (ccm *ContentClaimMetadata) MarshalBinary() ([]byte, error) {
-	buf := bytes.NewBuffer(varint.ToUvarint(uint64(ccm.ID())))
-	nd := bindnode.Wrap(ccm, contentClaimMetadata.Type())
+func marshalBinary(metadata ipnimd.Protocol) ([]byte, error) {
+	buf := bytes.NewBuffer(varint.ToUvarint(uint64(metadata.ID())))
+	nd := bindnode.Wrap(metadata, nodePrototypes[metadata.ID()].Type())
 	if err := dagcbor.Encode(nd, buf); err != nil {
 		return nil, err
 	}
 	return buf.Bytes(), nil
 }
 
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (ccm *ContentClaimMetadata) UnmarshalBinary(data []byte) error {
+func unmarshalBinary[PT hasID[T], T any](val PT, data []byte) error {
 	r := bytes.NewReader(data)
-	_, err := ccm.ReadFrom(r)
+	_, err := readFrom(val, r)
 	return err
 }
 
-func (ccm *ContentClaimMetadata) ReadFrom(r io.Reader) (n int64, err error) {
+func readFrom[PT hasID[T], T any](val PT, r io.Reader) (int64, error) {
 	cr := &countingReader{r: r}
 	v, err := varint.ReadUvarint(cr)
 	if err != nil {
 		return cr.readCount, err
 	}
 	id := multicodec.Code(v)
-	if id != TransportContentClaim {
-		return cr.readCount, fmt.Errorf("transport id does not match %s: %s", TransportContentClaim, id)
+	if id != val.ID() {
+		return cr.readCount, fmt.Errorf("transport id does not match %s: %s", val.ID(), id)
 	}
-	nb := contentClaimMetadata.NewBuilder()
+	nb := nodePrototypes[val.ID()].NewBuilder()
 	err = dagcbor.Decode(nb, cr)
 	if err != nil {
 		return cr.readCount, err
 	}
 	nd := nb.Build()
-	read := bindnode.Unwrap(nd).(*ContentClaimMetadata)
-	ccm.ClaimType = read.ClaimType
-	ccm.EmbeddedData = read.EmbeddedData
-	ccm.ClaimCID = read.ClaimCID
+	read := bindnode.Unwrap(nd).(PT)
+	*val = *read
 	return cr.readCount, nil
 }
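A hedged round-trip sketch of the generic codec helpers above (the CIDs are derived for the example and the expiration value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
	"github.com/storacha/indexing-service/pkg/metadata"
)

func main() {
	indexDigest, _ := mh.Sum([]byte("example index"), mh.SHA2_256, -1)
	claimDigest, _ := mh.Sum([]byte("example claim"), mh.SHA2_256, -1)

	md := &metadata.IndexClaimMetadata{
		Index:      cid.NewCidV1(cid.Raw, indexDigest),
		Expiration: 1735689600, // unix seconds
		Claim:      cid.NewCidV1(cid.DagCBOR, claimDigest),
	}

	// encodes as varint(IndexClaimID) followed by the dag-cbor body
	data, err := md.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var decoded metadata.IndexClaimMetadata
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Claim == md.Claim) // true
}
```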
-var ErrUnrecognizedAssertion = errors.New("unrecognized assertion type")
-
-type ClaimPreview interface {
-	isClaimPreview()
-}
-
-type LocationCommitmentPreview struct {
-	Location url.URL
-}
-
-func (LocationCommitmentPreview) isClaimPreview() {}
-
-type IndexClaimPreview struct {
-	Index cid.Cid
-}
-
-func (IndexClaimPreview) isClaimPreview() {}
-
-type EqualsClaimPreview struct {
-	Equals cid.Cid
-}
-
-func (EqualsClaimPreview) isClaimPreview() {}
-
-// ClaimPreview uses the claim type and short cut field to construct a preview of relevant data in the full claim
-func (ccm *ContentClaimMetadata) ClaimPreview() (ClaimPreview, error) {
-	switch ccm.ClaimType {
-	case LocationCommitment:
-		location, err := url.ParseRequestURI(string(ccm.EmbeddedData))
-		if err != nil {
-			return nil, err
-		}
-		return LocationCommitmentPreview{
-			Location: *location,
-		}, nil
-	case IndexClaim:
-		_, index, err := cid.CidFromBytes(ccm.EmbeddedData)
-		if err != nil {
-			return nil, err
-		}
-		return IndexClaimPreview{
-			Index: index,
-		}, nil
-	case EqualsClaim:
-		_, equals, err := cid.CidFromBytes(ccm.EmbeddedData)
-		if err != nil {
-			return nil, err
-		}
-		return EqualsClaimPreview{
-			Equals: equals,
-		}, nil
-	default:
-		return nil, ErrUnrecognizedAssertion
-	}
-}
-
 // copied from go-libipni
 var (
 	_ io.Reader = (*countingReader)(nil)
diff --git a/pkg/metadata/metadata.ipldsch b/pkg/metadata/metadata.ipldsch
index debd62a..a4fcc35 100644
--- a/pkg/metadata/metadata.ipldsch
+++ b/pkg/metadata/metadata.ipldsch
@@ -1,5 +1,23 @@
-type ContentClaimMetadata struct {
-	ClaimType Int
-	EmbeddedData Bytes
-	Claim Link
-} representation tuple
\ No newline at end of file
+type IndexClaimMetadata struct {
+	index Link (rename "i")
+	expiration Int (rename "e")
+	claim Link (rename "c")
+}
+
+type EqualsClaimMetadata struct {
+	equals Link (rename "e")
+	expiration Int (rename "x")
+	claim Link (rename "c")
+}
+
+type Range struct {
+	offset Int
+	length nullable Int
+} representation tuple
+
+type LocationCommitmentMetadata struct {
+	shard optional Link (rename "s")
+	range optional Range (rename "r")
+	expiration Int (rename "e")
+	claim Link (rename "c")
+}
\ No newline at end of file
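The renamed providerresults package in the next file exposes its CBOR codec and equality check directly; a hedged round-trip sketch (it leans on the module-internal testutil fixtures, so it only compiles inside this module):

```go
package main

import (
	"fmt"

	"github.com/ipni/go-libipni/find/model"
	"github.com/storacha/indexing-service/pkg/internal/testutil"
	"github.com/storacha/indexing-service/pkg/providerresults"
)

func main() {
	// a random provider result fixture stands in for an IPNI find response
	results := []model.ProviderResult{testutil.RandomProviderResult()}

	data, err := providerresults.MarshalCBOR(results)
	if err != nil {
		panic(err)
	}
	decoded, err := providerresults.UnmarshalCBOR(data)
	if err != nil {
		panic(err)
	}

	// Equals compares context ID, metadata bytes, and provider addr info
	fmt.Println(providerresults.Equals(results[0], decoded[0])) // true
}
```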
diff --git a/pkg/redis/ipnistore.go b/pkg/providerresults/providerresults.go
similarity index 58%
rename from pkg/redis/ipnistore.go
rename to pkg/providerresults/providerresults.go
index a430546..ca4e049 100644
--- a/pkg/redis/ipnistore.go
+++ b/pkg/providerresults/providerresults.go
@@ -1,7 +1,9 @@
-package redis
+// Package providerresults implements utilities for the IPNI provider result type
+package providerresults
 
 import (
-	// imported for embedding
+	"bytes"
+	// for importing schema
 	_ "embed"
 	"fmt"
 
@@ -10,19 +12,16 @@ import (
 	"github.com/ipld/go-ipld-prime/node/bindnode"
 	"github.com/ipld/go-ipld-prime/schema"
 	"github.com/ipni/go-libipni/find/model"
-	"github.com/libp2p/go-libp2p/core/peer"
+	peer "github.com/libp2p/go-libp2p/core/peer"
 	"github.com/multiformats/go-multiaddr"
-	multihash "github.com/multiformats/go-multihash"
-	"github.com/storacha-network/indexing-service/pkg/types"
 )
 
 var (
-	//go:embed providerresult.ipldsch
+	//go:embed providerresults.ipldsch
 	providerResultsBytes []byte
 	peerIDConverter      = bindnode.NamedBytesConverter("PeerID", bytesToPeerID, peerIDtoBytes)
 	multiaddrConverter   = bindnode.NamedBytesConverter("Multiaddr", bytesToMultiaddr, multiaddrToBytes)
 	providerResultsType  schema.Type
-	_ types.IPNIStore = (*IPNIStore)(nil)
 )
 
 func init() {
@@ -33,14 +32,6 @@ func init() {
 	providerResultsType = typeSystem.TypeByName("ProviderResults")
 }
 
-// IPNIStore is a RedisStore for storing IPNI data that implements types.IPNIStore
-type IPNIStore = Store[multihash.Multihash, []model.ProviderResult]
-
-// NewIPNIStore returns a new instance of an IPNI store using the given redis client
-func NewIPNIStore(client Client) *IPNIStore {
-	return NewStore(providerResultsFromRedis, providerResultsToRedis, multihashKeyString, client)
-}
-
 func bytesToPeerID(data []byte) (interface{}, error) {
 	id, err := peer.IDFromBytes(data)
 	return &id, err
@@ -59,7 +50,8 @@ func multiaddrToBytes(ma interface{}) ([]byte, error) {
 	return (*ma.(*multiaddr.Multiaddr)).Bytes(), nil
 }
 
-func providerResultsFromRedis(data string) ([]model.ProviderResult, error) {
+// UnmarshalCBOR decodes a list of provider results from CBOR-encoded bytes
+func UnmarshalCBOR(data []byte) ([]model.ProviderResult, error) {
 	var records []model.ProviderResult
 	_, err := ipld.Unmarshal([]byte(data), dagcbor.Decode, &records, providerResultsType, peerIDConverter, multiaddrConverter)
 	if err != nil {
@@ -68,11 +60,21 @@
 	return records, nil
 }
 
-func providerResultsToRedis(records []model.ProviderResult) (string, error) {
-	data, err := ipld.Marshal(dagcbor.Encode, &records, providerResultsType, peerIDConverter, multiaddrConverter)
-	return string(data), err
+// MarshalCBOR encodes a list of provider results as CBOR
+func MarshalCBOR(records []model.ProviderResult) ([]byte, error) {
+	return ipld.Marshal(dagcbor.Encode, &records, providerResultsType, peerIDConverter, multiaddrConverter)
+}
+
+func equalProvider(a, b *peer.AddrInfo) bool {
+	if a == nil {
+		return b == nil
+	}
+	return b != nil && a.String() == b.String()
+}
 
-func multihashKeyString(k multihash.Multihash) string {
-	return string(k)
+// Equals compares two ProviderResults
+func Equals(a, b model.ProviderResult) bool {
+	return bytes.Equal(a.ContextID, b.ContextID) &&
+		bytes.Equal(a.Metadata, b.Metadata) &&
+		equalProvider(a.Provider, b.Provider)
+}
diff --git a/pkg/redis/providerresult.ipldsch b/pkg/providerresults/providerresults.ipldsch
similarity index 100%
rename from pkg/redis/providerresult.ipldsch
rename to pkg/providerresults/providerresults.ipldsch
diff --git a/pkg/providerresults/providerresults_test.go b/pkg/providerresults/providerresults_test.go
new file mode 100644
index 0000000..2c1b05f
--- /dev/null
+++ b/pkg/providerresults/providerresults_test.go
@@ -0,0 +1,58 @@
+package providerresults_test
+
+import (
+	"testing"
+
+	"github.com/ipni/go-libipni/find/model"
+	peer "github.com/libp2p/go-libp2p/core/peer"
+	"github.com/multiformats/go-multiaddr"
+	"github.com/storacha/indexing-service/pkg/internal/testutil"
+	"github.com/storacha/indexing-service/pkg/providerresults"
+	"github.com/stretchr/testify/require"
+)
+
+func TestProviderResults__Equals(t *testing.T) {
+	testProvider := testutil.RandomProviderResult()
+	testProvider2 := testutil.RandomProviderResult()
+
+	// Create slightly modified versions of the testProvider
+	contextIDChanged := testProvider
+	contextIDChanged.ContextID = testutil.RandomBytes(10)
+
+	metadataChanged := testProvider
+	metadataChanged.Metadata = testutil.RandomBytes(10)
+
+	nullProvider := testProvider
+	nullProvider.Provider = nil
+
+	providerPeerIDChanged := testProvider
+	providerPeerIDChanged.Provider = &peer.AddrInfo{
+		ID:    testutil.RandomPeer(),
+		Addrs: testProvider.Provider.Addrs,
+	}
+
+	providerAddrsChanged := testProvider
+	providerAddrsChanged.Provider = &peer.AddrInfo{
+		ID:    testProvider.Provider.ID,
+		Addrs: []multiaddr.Multiaddr{testutil.RandomMultiaddr(), testutil.RandomMultiaddr()},
+	}
+
+	testCases := []struct {
+		name     string
+		provider model.ProviderResult
+		assert   require.BoolAssertionFunc
+	}{
+		{"same provider", testProvider, require.True},
+		{"full alternate", testProvider2, require.False},
+		{"context ID changed", contextIDChanged, require.False},
+		{"metadata changed", metadataChanged, require.False},
+		{"provider changed to null", nullProvider, require.False},
+		{"provider peer ID changed", providerPeerIDChanged, require.False},
+		{"provider addrs changed", providerAddrsChanged, require.False},
+	}
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			testCase.assert(t, providerresults.Equals(testProvider, testCase.provider))
+		})
+	}
+}
diff
--git a/pkg/redis/contentclaimsstore.go b/pkg/redis/contentclaimsstore.go index 6ea61aa..b142269 100644 --- a/pkg/redis/contentclaimsstore.go +++ b/pkg/redis/contentclaimsstore.go @@ -4,8 +4,8 @@ import ( "io" cid "github.com/ipfs/go-cid" - "github.com/storacha-network/go-ucanto/core/delegation" - "github.com/storacha-network/indexing-service/pkg/types" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/indexing-service/pkg/types" ) var ( diff --git a/pkg/redis/contentclaimsstore_test.go b/pkg/redis/contentclaimsstore_test.go index b7cfa0b..2bcf542 100644 --- a/pkg/redis/contentclaimsstore_test.go +++ b/pkg/redis/contentclaimsstore_test.go @@ -8,12 +8,12 @@ import ( cid "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/go-ucanto/core/delegation" - "github.com/storacha-network/go-ucanto/ucan" - "github.com/storacha-network/indexing-service/pkg/capability/assert" - adm "github.com/storacha-network/indexing-service/pkg/capability/assert/datamodel" - "github.com/storacha-network/indexing-service/pkg/internal/testutil" - "github.com/storacha-network/indexing-service/pkg/redis" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/go-ucanto/ucan" + "github.com/storacha/indexing-service/pkg/capability/assert" + adm "github.com/storacha/indexing-service/pkg/capability/assert/datamodel" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/redis" "github.com/stretchr/testify/require" ) diff --git a/pkg/redis/ipnistore_test.go b/pkg/redis/ipnistore_test.go deleted file mode 100644 index eac2215..0000000 --- a/pkg/redis/ipnistore_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package redis_test - -import ( - "context" - "testing" - - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/ipni/go-libipni/find/model" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-multihash" - "github.com/storacha-network/indexing-service/pkg/internal/testutil" - "github.com/storacha-network/indexing-service/pkg/metadata" - "github.com/storacha-network/indexing-service/pkg/redis" - "github.com/storacha-network/indexing-service/pkg/types" - "github.com/stretchr/testify/require" -) - -func TestIPNIStore(t *testing.T) { - mockRedis := NewMockRedis() - ipniStore := redis.NewIPNIStore(mockRedis) - mh1, results1 := testutil.Must2(randomProviderResults(4))(t) - mh2, results2 := testutil.Must2(randomProviderResults(4))(t) - - ctx := context.Background() - require.NoError(t, ipniStore.Set(ctx, mh1, results1, false)) - require.NoError(t, ipniStore.Set(ctx, mh2, results2, true)) - - returnedResults1 := testutil.Must(ipniStore.Get(ctx, mh1))(t) - returnedResults2 := testutil.Must(ipniStore.Get(ctx, mh2))(t) - require.Equal(t, results1, returnedResults1) - require.Equal(t, results2, returnedResults2) -} - -func randomProviderResults(num int) (multihash.Multihash, []model.ProviderResult, error) { - randomHash := testutil.RandomCID().(cidlink.Link).Cid.Hash() - aliceDid := testutil.Alice.DID() - encodedContextID, err := types.ContextID{Space: &aliceDid, Hash: randomHash}.ToEncoded() - if err != nil { - return nil, nil, err - } - metadata, err := (&metadata.ContentClaimMetadata{ - ClaimType: metadata.LocationCommitment, - EmbeddedData: testutil.RandomBytes(30), - ClaimCID: testutil.RandomCID().(cidlink.Link).Cid, - }).MarshalBinary() - if err != nil { - return nil, nil, err - } - providerResults := make([]model.ProviderResult, 
0, num) - for i := 0; i < num; i++ { - providerResults = append(providerResults, model.ProviderResult{ - ContextID: encodedContextID, - Metadata: metadata, - Provider: &peer.AddrInfo{ - ID: testutil.RandomPeer(), - Addrs: []multiaddr.Multiaddr{testutil.RandomMultiaddr()}, - }, - }) - } - - return randomHash, providerResults, nil -} diff --git a/pkg/redis/providerstore.go b/pkg/redis/providerstore.go new file mode 100644 index 0000000..02c30d5 --- /dev/null +++ b/pkg/redis/providerstore.go @@ -0,0 +1,36 @@ +package redis + +import ( + // imported for embedding + _ "embed" + + "github.com/ipni/go-libipni/find/model" + multihash "github.com/multiformats/go-multihash" + "github.com/storacha/indexing-service/pkg/providerresults" + "github.com/storacha/indexing-service/pkg/types" +) + +var ( + _ types.ProviderStore = (*ProviderStore)(nil) +) + +// ProviderStore is a RedisStore for storing IPNI data that implements types.ProviderStore +type ProviderStore = Store[multihash.Multihash, []model.ProviderResult] + +// NewProviderStore returns a new instance of an IPNI store using the given redis client +func NewProviderStore(client Client) *ProviderStore { + return NewStore(providerResultsFromRedis, providerResultsToRedis, multihashKeyString, client) +} + +func providerResultsFromRedis(data string) ([]model.ProviderResult, error) { + return providerresults.UnmarshalCBOR([]byte(data)) +} + +func providerResultsToRedis(records []model.ProviderResult) (string, error) { + data, err := providerresults.MarshalCBOR(records) + return string(data), err +} + +func multihashKeyString(k multihash.Multihash) string { + return string(k) +} diff --git a/pkg/redis/providerstore_test.go b/pkg/redis/providerstore_test.go new file mode 100644 index 0000000..717efa6 --- /dev/null +++ b/pkg/redis/providerstore_test.go @@ -0,0 +1,39 @@ +package redis_test + +import ( + "context" + "testing" + + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipni/go-libipni/find/model" + "github.com/multiformats/go-multihash" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/redis" + "github.com/stretchr/testify/require" +) + +func TestProviderStore(t *testing.T) { + mockRedis := NewMockRedis() + providerStore := redis.NewProviderStore(mockRedis) + mh1, results1 := testutil.Must2(randomProviderResults(4))(t) + mh2, results2 := testutil.Must2(randomProviderResults(4))(t) + + ctx := context.Background() + require.NoError(t, providerStore.Set(ctx, mh1, results1, false)) + require.NoError(t, providerStore.Set(ctx, mh2, results2, true)) + + returnedResults1 := testutil.Must(providerStore.Get(ctx, mh1))(t) + returnedResults2 := testutil.Must(providerStore.Get(ctx, mh2))(t) + require.Equal(t, results1, returnedResults1) + require.Equal(t, results2, returnedResults2) +} + +func randomProviderResults(num int) (multihash.Multihash, []model.ProviderResult, error) { + randomHash := testutil.RandomCID().(cidlink.Link).Cid.Hash() + providerResults := make([]model.ProviderResult, 0, num) + for i := 0; i < num; i++ { + providerResults = append(providerResults, testutil.RandomProviderResult()) + } + + return randomHash, providerResults, nil +} diff --git a/pkg/redis/redisstore.go b/pkg/redis/redisstore.go index 479ba1a..5b9e4fc 100644 --- a/pkg/redis/redisstore.go +++ b/pkg/redis/redisstore.go @@ -6,7 +6,7 @@ import ( "time" "github.com/redis/go-redis/v9" - "github.com/storacha-network/indexing-service/pkg/types" + "github.com/storacha/indexing-service/pkg/types" ) // DefaultExpire 
is the expire time we set on Redis when Set/SetExpiration are called with expire=true diff --git a/pkg/redis/redisstore_test.go b/pkg/redis/redisstore_test.go index 204983f..90b7de8 100644 --- a/pkg/redis/redisstore_test.go +++ b/pkg/redis/redisstore_test.go @@ -7,9 +7,9 @@ import ( "time" goredis "github.com/redis/go-redis/v9" - "github.com/storacha-network/indexing-service/pkg/internal/testutil" - "github.com/storacha-network/indexing-service/pkg/redis" - "github.com/storacha-network/indexing-service/pkg/types" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/redis" + "github.com/storacha/indexing-service/pkg/types" "github.com/stretchr/testify/require" ) diff --git a/pkg/redis/shardeddagindexstore_test.go b/pkg/redis/shardeddagindexstore_test.go index 11ce476..b109d91 100644 --- a/pkg/redis/shardeddagindexstore_test.go +++ b/pkg/redis/shardeddagindexstore_test.go @@ -2,23 +2,19 @@ package redis_test import ( "context" - "io" "testing" - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/indexing-service/pkg/blobindex" - "github.com/storacha-network/indexing-service/pkg/internal/testutil" - "github.com/storacha-network/indexing-service/pkg/redis" - "github.com/storacha-network/indexing-service/pkg/types" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/redis" + "github.com/storacha/indexing-service/pkg/types" "github.com/stretchr/testify/require" ) func TestShardedDagIndexStore(t *testing.T) { mockRedis := NewMockRedis() shardedDagIndexStore := redis.NewShardedDagIndexStore(mockRedis) - root1, index1 := testutil.Must2(randomShardedDagIndexView(32))(t) - root2, index2 := testutil.Must2(randomShardedDagIndexView(32))(t) + root1, index1 := testutil.RandomShardedDagIndexView(32) + root2, index2 := testutil.RandomShardedDagIndexView(32) aliceDid := testutil.Alice.DID() encodedID1 := testutil.Must(types.ContextID{ @@ -38,25 +34,3 @@ func TestShardedDagIndexStore(t *testing.T) { testutil.RequireEqualIndex(t, index1, returnedIndex1) testutil.RequireEqualIndex(t, index2, returnedIndex2) } - -func randomShardedDagIndexView(size int) (cid.Cid, blobindex.ShardedDagIndexView, error) { - roots, contentCar := testutil.RandomCAR(size) - contentCarBytes, err := io.ReadAll(contentCar) - if err != nil { - return cid.Undef, nil, err - } - - root, err := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, - }.Sum(contentCarBytes) - - if err != nil { - return cid.Undef, nil, err - } - - shard, err := blobindex.FromShardArchives(roots[0], [][]byte{contentCarBytes}) - return root, shard, err -} diff --git a/pkg/redis/shareddagindexstore.go b/pkg/redis/shareddagindexstore.go index 10a1916..6a1fe0c 100644 --- a/pkg/redis/shareddagindexstore.go +++ b/pkg/redis/shareddagindexstore.go @@ -4,8 +4,8 @@ import ( "bytes" "io" - "github.com/storacha-network/indexing-service/pkg/blobindex" - "github.com/storacha-network/indexing-service/pkg/types" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/types" ) var ( diff --git a/pkg/server/server.go b/pkg/server/server.go new file mode 100644 index 0000000..aaff888 --- /dev/null +++ b/pkg/server/server.go @@ -0,0 +1,174 @@ +package server + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/multiformats/go-multibase" + 
"github.com/multiformats/go-multihash" + "github.com/storacha/go-ucanto/core/car" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/go-ucanto/did" + "github.com/storacha/go-ucanto/principal" + ed25519 "github.com/storacha/go-ucanto/principal/ed25519/signer" + "github.com/storacha/go-ucanto/principal/signer" + ucanhttp "github.com/storacha/go-ucanto/transport/http" + "github.com/storacha/indexing-service/pkg/service" + "github.com/storacha/indexing-service/pkg/service/contentclaims" + "github.com/storacha/indexing-service/pkg/service/queryresult" +) + +var log = logging.Logger("server") + +type Service interface { + CacheClaim(ctx context.Context, claim delegation.Delegation) error + PublishClaim(ctx context.Context, claim delegation.Delegation) error + Query(ctx context.Context, q service.Query) (queryresult.QueryResult, error) +} + +type config struct { + id principal.Signer + service Service +} + +type Option func(*config) + +// WithIdentity specifies the server DID. +func WithIdentity(s principal.Signer) Option { + return func(c *config) { + c.id = s + } +} + +func WithService(service Service) Option { + return func(c *config) { + c.service = service + } +} + +// ListenAndServe creates a new indexing service HTTP server, and starts it up. +func ListenAndServe(addr string, opts ...Option) error { + srv := &http.Server{ + Addr: addr, + Handler: NewServer(opts...), + } + log.Infof("Listening on %s", addr) + err := srv.ListenAndServe() + if err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + return nil +} + +// NewServer creates a new indexing service HTTP server. +func NewServer(opts ...Option) *http.ServeMux { + c := &config{} + for _, opt := range opts { + opt(c) + } + + if c.id == nil { + log.Warn("Generating a server identity as one has not been set!") + id, err := ed25519.Generate() + if err != nil { + panic(err) + } + c.id = id + } + + if s, ok := c.id.(signer.WrappedSigner); ok { + log.Infof("Server ID: %s (%s)", s.DID(), s.Unwrap().DID()) + } else { + log.Infof("Server ID: %s", c.id.DID()) + } + + mux := http.NewServeMux() + mux.HandleFunc("GET /", getRootHandler(c.id)) + mux.HandleFunc("POST /claims", postClaimsHandler(c.id)) + mux.HandleFunc("GET /claims", getClaimsHandler(c.service)) + return mux +} + +// getRootHandler displays version info when a GET request is sent to "/". +func getRootHandler(id principal.Signer) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("🔥 indexing-service v0.0.0\n")) + w.Write([]byte("- https://github.com/storacha/indexing-service\n")) + if s, ok := id.(signer.WrappedSigner); ok { + w.Write([]byte(fmt.Sprintf("- %s (%s)", s.DID(), s.Unwrap().DID()))) + } else { + w.Write([]byte(fmt.Sprintf("- %s", id.DID()))) + } + } +} + +// postClaimsHandler invokes the ucanto service when a POST request is sent to +// "/claims". +func postClaimsHandler(id principal.Signer) func(http.ResponseWriter, *http.Request) { + server, err := contentclaims.NewServer(id) + if err != nil { + log.Fatalf("creating ucanto server: %s", err) + } + + return func(w http.ResponseWriter, r *http.Request) { + res, _ := server.Request(ucanhttp.NewHTTPRequest(r.Body, r.Header)) + + for key, vals := range res.Headers() { + for _, v := range vals { + w.Header().Add(key, v) + } + } + + if res.Status() != 0 { + w.WriteHeader(res.Status()) + } + + io.Copy(w, res.Body()) + } +} + +// getClaimsHandler retrieves content claims when a GET request is sent to +// "/claims/{multihash}". 
+func getClaimsHandler(s Service) func(http.ResponseWriter, *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		mhStrings := r.URL.Query()["multihash"]
+		hashes := make([]multihash.Multihash, 0, len(mhStrings))
+		for _, mhString := range mhStrings {
+			_, bytes, err := multibase.Decode(mhString)
+			if err != nil {
+				http.Error(w, fmt.Sprintf("invalid multibase encoding: %s", err.Error()), 400)
+				return
+			}
+			hashes = append(hashes, bytes)
+		}
+		spaceStrings := r.URL.Query()["spaces"]
+		spaces := make([]did.DID, 0, len(spaceStrings))
+		for _, spaceString := range spaceStrings {
+			space, err := did.Parse(spaceString)
+			if err != nil {
+				http.Error(w, fmt.Sprintf("invalid did: %s", err.Error()), 400)
+				return
+			}
+			spaces = append(spaces, space)
+		}
+
+		qr, err := s.Query(r.Context(), service.Query{
+			Hashes: hashes,
+			Match: service.Match{
+				Subject: spaces,
+			},
+		})
+		if err != nil {
+			http.Error(w, fmt.Sprintf("processing query: %s", err.Error()), 400)
+			return
+		}
+
+		body := car.Encode([]datamodel.Link{qr.Root().Link()}, qr.Blocks())
+		w.WriteHeader(http.StatusOK)
+		io.Copy(w, body)
+	}
+}
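For reference, a hedged client-side sketch of the GET /claims query this handler serves; the localhost address is an assumption, and the space DID is the Alice test fixture from this repository:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/multiformats/go-multibase"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	digest, _ := mh.Sum([]byte("hello"), mh.SHA2_256, -1)
	// the handler accepts any multibase; base58btc is used here
	encoded, _ := multibase.Encode(multibase.Base58BTC, digest)

	q := url.Values{}
	q.Add("multihash", encoded)
	q.Add("spaces", "did:key:z6Mkk89bC3JrVqKie71YEcc5M1SMVxuCgNx6zLZ8SYJsxALi") // optional space filter

	resp, err := http.Get("http://localhost:9000/claims?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// the response body is a CAR of query result blocks
	car, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(car))
}
```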
diff --git a/pkg/service/blobindexlookup/blobindexlookup.go b/pkg/service/blobindexlookup/blobindexlookup.go
deleted file mode 100644
index 56d2698..0000000
--- a/pkg/service/blobindexlookup/blobindexlookup.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package blobindexlookup
-
-import (
-	"net/url"
-
-	"github.com/ipni/go-libipni/find/model"
-	"github.com/storacha-network/indexing-service/pkg/blobindex"
-	"github.com/storacha-network/indexing-service/pkg/types"
-)
-
-// BlobIndexLookup is a read through cache for fetching blob indexes
-type BlobIndexLookup interface {
-	// Find should:
-	// 1. attempt to read the sharded dag index from the cache from the encoded contextID in the provided ProviderResult
-	// 2. if not found, attempt to fetch the index from the provided URL. Store the result in cache
-	// 3. return the index
-	// 4. asyncronously, add records to the IPNICache from the parsed blob index so that we can avoid future queries to IPNI for
-	// other multihashes in the index
-	Find(indexRecord model.ProviderResult, fetchURL url.URL) (blobindex.ShardedDagIndexView, error)
-}
-
-func NewBlobIndex(blobCache types.ShardedDagIndexStore, ipniCache types.IPNIStore) BlobIndexLookup {
-	return nil
-}
diff --git a/pkg/service/blobindexlookup/cachinglookup.go b/pkg/service/blobindexlookup/cachinglookup.go
new file mode 100644
index 0000000..2c203db
--- /dev/null
+++ b/pkg/service/blobindexlookup/cachinglookup.go
@@ -0,0 +1,64 @@
+package blobindexlookup
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/ipni/go-libipni/find/model"
+	"github.com/storacha/indexing-service/pkg/blobindex"
+	"github.com/storacha/indexing-service/pkg/metadata"
+	"github.com/storacha/indexing-service/pkg/types"
+)
+
+// CachingQueue can queue a provider record to be cached for all CIDs in an index
+type CachingQueue interface {
+	QueueProviderCaching(ctx context.Context, provider model.ProviderResult, index blobindex.ShardedDagIndexView) error
+}
+
+type cachingLookup struct {
+	blobIndexLookup    BlobIndexLookup
+	shardDagIndexCache types.ShardedDagIndexStore
+	cachingQueue       CachingQueue
+}
+
+// WithCache returns a blobIndexLookup that attempts to read blobs from the cache, and also caches providers associated with index cids
+func WithCache(blobIndexLookup BlobIndexLookup, shardedDagIndexCache types.ShardedDagIndexStore, cachingQueue CachingQueue) BlobIndexLookup {
+	return &cachingLookup{
+		blobIndexLookup:    blobIndexLookup,
+		shardDagIndexCache: shardedDagIndexCache,
+		cachingQueue:       cachingQueue,
+	}
+}
+
+func (b *cachingLookup) Find(ctx context.Context, contextID types.EncodedContextID, provider model.ProviderResult, fetchURL url.URL, rng *metadata.Range) (blobindex.ShardedDagIndexView, error) {
+	// attempt to read index from cache and return it if successful
+	index, err := b.shardDagIndexCache.Get(ctx, contextID)
+	if err == nil {
+		return index, nil
+	}
+
+	// if an error occurred other than the index not being in the cache, return it
+	if !errors.Is(err, types.ErrKeyNotFound) {
+		return nil, fmt.Errorf("reading from index cache: %w", err)
+	}
+
+	// attempt to fetch the index from the underlying blob index lookup
+	index, err = b.blobIndexLookup.Find(ctx, contextID, provider, fetchURL, rng)
+	if err != nil {
+		return nil, fmt.Errorf("fetching underlying index: %w", err)
+	}
+
+	// cache the index for the future
+	if err := b.shardDagIndexCache.Set(ctx, contextID, index, true); err != nil {
+		return nil, fmt.Errorf("caching fetched index: %w", err)
+	}
+
+	// queue a background cache of a provider record for all cids in the index without one
+	if err := b.cachingQueue.QueueProviderCaching(ctx, provider, index); err != nil {
+		return nil, fmt.Errorf("queueing provider caching for index failed: %w", err)
+	}
+
+	return index, nil
+}
diff --git a/pkg/service/blobindexlookup/cachinglookup_test.go b/pkg/service/blobindexlookup/cachinglookup_test.go
new file mode 100644
index 0000000..b268825
--- /dev/null
+++ b/pkg/service/blobindexlookup/cachinglookup_test.go
@@ -0,0 +1,194 @@
+package blobindexlookup_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"testing"
+
+	"github.com/ipni/go-libipni/find/model"
+	"github.com/storacha/indexing-service/pkg/blobindex"
+	"github.com/storacha/indexing-service/pkg/internal/testutil"
+	"github.com/storacha/indexing-service/pkg/metadata"
+
"github.com/storacha/indexing-service/pkg/service/blobindexlookup" + "github.com/storacha/indexing-service/pkg/types" + "github.com/stretchr/testify/require" +) + +func TestWithCache__Find(t *testing.T) { + + // Create a test CID + cachedContextID := testutil.RandomBytes(16) + notCachedContextID := testutil.RandomBytes(16) + // Create a cached index + _, cachedIndex := testutil.RandomShardedDagIndexView(32) + _, notCachedIndex := testutil.RandomShardedDagIndexView(32) + + // Create provider + provider := testutil.RandomProviderResult() + + // sample error + anError := errors.New("something went wrong") + // Define test cases + testCases := []struct { + name string + contextID types.EncodedContextID + setErr error + getErr error + expectedErr error + baseLookup *mockBlobIndexLookup + providerCacher *mockCachingQueue + expectedIndex blobindex.ShardedDagIndexView + finalState map[string]blobindex.ShardedDagIndexView + }{ + { + name: "Index cached", + contextID: cachedContextID, + expectedIndex: cachedIndex, + finalState: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + }, + }, + { + name: "Index not cached, successful fetch", + contextID: notCachedContextID, + expectedIndex: notCachedIndex, + finalState: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + string(notCachedContextID): notCachedIndex, + }, + }, + { + name: "Lookup error", + contextID: cachedContextID, + expectedIndex: nil, + getErr: anError, + expectedErr: fmt.Errorf("reading from index cache: %w", anError), + finalState: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + }, + }, + { + name: "Save cache error", + contextID: notCachedContextID, + expectedIndex: nil, + setErr: anError, + expectedErr: fmt.Errorf("caching fetched index: %w", anError), + finalState: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + }, + }, + { + name: "underlying lookup error", + contextID: notCachedContextID, + expectedIndex: nil, + baseLookup: &mockBlobIndexLookup{nil, anError}, + expectedErr: fmt.Errorf("fetching underlying index: %w", anError), + finalState: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + }, + }, + { + name: "provider cacher error", + contextID: notCachedContextID, + expectedIndex: nil, + providerCacher: &mockCachingQueue{anError}, + expectedErr: fmt.Errorf("queueing provider caching for index failed: %w", anError), + finalState: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + string(notCachedContextID): notCachedIndex, + }, + }, + } + + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockStore := &MockShardedDagIndexStore{ + setErr: tc.setErr, + getErr: tc.getErr, + indexes: map[string]blobindex.ShardedDagIndexView{ + string(cachedContextID): cachedIndex, + }, + } + lookup := tc.baseLookup + if lookup == nil { + lookup = &mockBlobIndexLookup{notCachedIndex, nil} + } + providerCacher := tc.providerCacher + if providerCacher == nil { + providerCacher = &mockCachingQueue{nil} + } + // Create ClaimLookup instance + cl := blobindexlookup.WithCache(lookup, mockStore, providerCacher) + + index, err := cl.Find(context.Background(), tc.contextID, provider, *testutil.TestURL, nil) + if tc.expectedErr != nil { + require.EqualError(t, err, tc.expectedErr.Error()) + } else { + require.NoError(t, err) + } + testutil.RequireEqualIndex(t, tc.expectedIndex, index) + finalState := tc.finalState + if 
finalState == nil { + finalState = make(map[string]blobindex.ShardedDagIndexView) + } + require.Equal(t, len(finalState), len(mockStore.indexes)) + for c, index := range mockStore.indexes { + expectedIndex := finalState[c] + testutil.RequireEqualIndex(t, expectedIndex, index) + } + }) + } +} + +// MockShardedDagIndexStore is a mock implementation of the ShardedDagIndexStore interface +type MockShardedDagIndexStore struct { + setErr, getErr error + indexes map[string]blobindex.ShardedDagIndexView +} + +var _ types.ShardedDagIndexStore = &MockShardedDagIndexStore{} + +// SetExpirable implements types.ShardedDagIndexStore. +func (m *MockShardedDagIndexStore) SetExpirable(ctx context.Context, contextID types.EncodedContextID, expires bool) error { + return nil +} + +func (m *MockShardedDagIndexStore) Get(ctx context.Context, contextID types.EncodedContextID) (blobindex.ShardedDagIndexView, error) { + if m.getErr != nil { + return nil, m.getErr + } + index, exists := m.indexes[string(contextID)] + if !exists { + return nil, types.ErrKeyNotFound + } + return index, nil +} + +func (m *MockShardedDagIndexStore) Set(ctx context.Context, contextID types.EncodedContextID, index blobindex.ShardedDagIndexView, expire bool) error { + if m.setErr != nil { + return m.setErr + } + m.indexes[string(contextID)] = index + return nil +} + +type mockBlobIndexLookup struct { + index blobindex.ShardedDagIndexView + err error +} + +func (m *mockBlobIndexLookup) Find(ctx context.Context, contextID types.EncodedContextID, provider model.ProviderResult, fetchURL url.URL, rng *metadata.Range) (blobindex.ShardedDagIndexView, error) { + return m.index, m.err +} + +type mockCachingQueue struct { + err error +} + +// QueueProviderCaching implements blobindexlookup.ProviderCacher. +func (m *mockCachingQueue) QueueProviderCaching(ctx context.Context, provider model.ProviderResult, index blobindex.ShardedDagIndexView) error { + return m.err +} diff --git a/pkg/service/blobindexlookup/interface.go b/pkg/service/blobindexlookup/interface.go new file mode 100644 index 0000000..5b6ff04 --- /dev/null +++ b/pkg/service/blobindexlookup/interface.go @@ -0,0 +1,15 @@ +package blobindexlookup + +import ( + "context" + "net/url" + + "github.com/ipni/go-libipni/find/model" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/metadata" + "github.com/storacha/indexing-service/pkg/types" +) + +type BlobIndexLookup interface { + Find(ctx context.Context, contextID types.EncodedContextID, provider model.ProviderResult, fetchURL url.URL, rng *metadata.Range) (blobindex.ShardedDagIndexView, error) +} diff --git a/pkg/service/blobindexlookup/simplelookup.go b/pkg/service/blobindexlookup/simplelookup.go new file mode 100644 index 0000000..ea13102 --- /dev/null +++ b/pkg/service/blobindexlookup/simplelookup.go @@ -0,0 +1,46 @@ +package blobindexlookup + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/ipni/go-libipni/find/model" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/metadata" + "github.com/storacha/indexing-service/pkg/types" +) + +type simpleLookup struct { + httpClient *http.Client +} + +func NewBlobIndexLookup(httpClient *http.Client) BlobIndexLookup { + return &simpleLookup{httpClient} +} + +// Find fetches the blob index from the given fetchURL +func (s *simpleLookup) Find(ctx context.Context, _ types.EncodedContextID, _ model.ProviderResult, fetchURL url.URL, rng *metadata.Range) 
(blobindex.ShardedDagIndexView, error) { + // attempt to fetch the index from the provided url + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fetchURL.String(), nil) + if err != nil { + return nil, fmt.Errorf("building index request: %w", err) + } + if rng != nil { + rangeHeader := fmt.Sprintf("bytes=%d-", rng.Offset) + if rng.Length != nil { + rangeHeader += strconv.FormatUint(rng.Offset+*rng.Length-1, 10) + } + req.Header.Set("Range", rangeHeader) + } + resp, err := s.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to fetch index: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode > 299 { + body, _ := io.ReadAll(resp.Body) + + return nil, fmt.Errorf("failure response fetching index. status: %s, message: %s", resp.Status, string(body)) + } + return blobindex.Extract(resp.Body) +} diff --git a/pkg/service/blobindexlookup/simplelookup_test.go b/pkg/service/blobindexlookup/simplelookup_test.go new file mode 100644 index 0000000..9dcf09c --- /dev/null +++ b/pkg/service/blobindexlookup/simplelookup_test.go @@ -0,0 +1,89 @@ +package blobindexlookup_test + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/metadata" + "github.com/storacha/indexing-service/pkg/service/blobindexlookup" + "github.com/stretchr/testify/require" +) + +func TestBlobIndexLookup__Find(t *testing.T) { + cid := testutil.RandomCID().(cidlink.Link).Cid + provider := testutil.RandomProviderResult() + _, index := testutil.RandomShardedDagIndexView(32) + indexBytes := testutil.Must(io.ReadAll(testutil.Must(index.Archive())(t)))(t) + indexEncodedLength := uint64(len(indexBytes)) + testCases := []struct { + name string + handler http.HandlerFunc + rngHeader *metadata.Range + expectedErr error + expectedIndex blobindex.ShardedDagIndexView + }{ + { + name: "success fetch", + handler: func(w http.ResponseWriter, r *http.Request) { + testutil.Must(w.Write(indexBytes))(t) + }, + expectedIndex: index, + }, + { + name: "failure", + handler: http.NotFound, + expectedErr: errors.New("failure response fetching index. status: 404 Not Found, message: 404 page not found\n"), + }, + { + name: "partial fetch from offset", + handler: func(w http.ResponseWriter, r *http.Request) { + randomBytes := testutil.RandomBytes(10) + allBytes := append(randomBytes, indexBytes...) + http.ServeContent(w, r, "index", time.Now(), bytes.NewReader(allBytes)) + }, + rngHeader: &metadata.Range{Offset: 10}, + expectedIndex: index, + }, + { + name: "partial fetch from offset + length", + handler: func(w http.ResponseWriter, r *http.Request) { + + randomStartBytes := testutil.RandomBytes(10) + randomEndBytes := testutil.RandomBytes(20) + + allBytes := append(append(randomStartBytes, indexBytes...), randomEndBytes...)
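+ // with Offset=10 and Length=len(indexBytes), the lookup should send + // "Range: bytes=10-<10+len(indexBytes)-1>", so ServeContent returns exactly indexBytes + // out of the padded payload served below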
+ + http.ServeContent(w, r, "index", time.Now(), bytes.NewReader(allBytes)) + }, + rngHeader: &metadata.Range{Offset: 10, Length: &indexEncodedLength}, + expectedIndex: index, + }, + } + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testServer := httptest.NewServer(tc.handler) + defer func() { testServer.Close() }() + // Create BlobIndexLookup instance + cl := blobindexlookup.NewBlobIndexLookup(testServer.Client()) + index, err := cl.Find(context.Background(), cid.Bytes(), provider, *testutil.Must(url.Parse(testServer.URL))(t), tc.rngHeader) + if tc.expectedErr != nil { + require.EqualError(t, err, tc.expectedErr.Error()) + } else { + require.NoError(t, err) + } + testutil.RequireEqualIndex(t, tc.expectedIndex, index) + }) + } +} diff --git a/pkg/service/claimlookup/cachinglookup.go b/pkg/service/claimlookup/cachinglookup.go new file mode 100644 index 0000000..f872687 --- /dev/null +++ b/pkg/service/claimlookup/cachinglookup.go @@ -0,0 +1,51 @@ +package claimlookup + +import ( + "context" + "errors" + "fmt" + "net/url" + + "github.com/ipfs/go-cid" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/indexing-service/pkg/types" +) + +type cachingLookup struct { + claimLookup ClaimLookup + claimStore types.ContentClaimsStore +} + +// WithCache augments a ClaimLookup with cached claims from a claim store +func WithCache(claimLookup ClaimLookup, claimStore types.ContentClaimsStore) ClaimLookup { + return &cachingLookup{ + claimLookup: claimLookup, + claimStore: claimStore, + } +} + +// LookupClaim attempts to fetch a claim from either the local cache or via the provided URL (caching the result if it's fetched) +func (cl *cachingLookup) LookupClaim(ctx context.Context, claimCid cid.Cid, fetchURL url.URL) (delegation.Delegation, error) { + // attempt to read claim from cache and return it if successful + claim, err := cl.claimStore.Get(ctx, claimCid) + if err == nil { + return claim, nil + } + + // if an error occurred other than the claim not being in the cache, return it + if !errors.Is(err, types.ErrKeyNotFound) { + return nil, fmt.Errorf("reading from claim cache: %w", err) + } + + // attempt to fetch the claim from the underlying claim lookup + claim, err = cl.claimLookup.LookupClaim(ctx, claimCid, fetchURL) + if err != nil { + return nil, fmt.Errorf("fetching underlying claim: %w", err) + } + + // cache the claim for the future + if err := cl.claimStore.Set(ctx, claimCid, claim, true); err != nil { + return nil, fmt.Errorf("caching fetched claim: %w", err) + } + return claim, nil +} diff --git a/pkg/service/claimlookup/cachinglookup_test.go b/pkg/service/claimlookup/cachinglookup_test.go new file mode 100644 index 0000000..cceb3d0 --- /dev/null +++ b/pkg/service/claimlookup/cachinglookup_test.go @@ -0,0 +1,167 @@ +package claimlookup_test + +import ( + "context" + "errors" + "fmt" + "net/url" + "testing" + + "github.com/ipfs/go-cid" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/service/claimlookup" + "github.com/storacha/indexing-service/pkg/types" + "github.com/stretchr/testify/require" +) + +func TestWithCache__LookupClaim(t *testing.T) { + + // Create test CIDs + cachedCid := testutil.RandomCID().(cidlink.Link).Cid + notCachedCid := testutil.RandomCID().(cidlink.Link).Cid + // Create a cached claim and a not-yet-cached claim + cachedClaim := testutil.RandomLocationDelegation() +
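// cachedClaim is pre-seeded into the mock claim store below; notCachedClaim is only + // returned by the underlying lookup, exercising the read-through cache-miss path. +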
notCachedClaim := testutil.RandomIndexDelegation() + + // sample error + anError := errors.New("something went wrong") + // Define test cases + testCases := []struct { + name string + claimCid cid.Cid + setErr error + getErr error + expectedErr error + baseLookup *mockClaimLookup + expectedClaim delegation.Delegation + finalState map[string]delegation.Delegation + }{ + { + name: "Claim cached", + claimCid: cachedCid, + expectedClaim: cachedClaim, + finalState: map[string]delegation.Delegation{ + cachedCid.String(): cachedClaim, + }, + }, + { + name: "Claim not cached, successful fetch", + claimCid: notCachedCid, + expectedClaim: notCachedClaim, + finalState: map[string]delegation.Delegation{ + cachedCid.String(): cachedClaim, + notCachedCid.String(): notCachedClaim, + }, + }, + { + name: "Lookup error", + claimCid: cachedCid, + expectedClaim: nil, + getErr: anError, + expectedErr: fmt.Errorf("reading from claim cache: %w", anError), + finalState: map[string]delegation.Delegation{ + cachedCid.String(): cachedClaim, + }, + }, + { + name: "Save cache error", + claimCid: notCachedCid, + expectedClaim: nil, + setErr: anError, + expectedErr: fmt.Errorf("caching fetched claim: %w", anError), + finalState: map[string]delegation.Delegation{ + cachedCid.String(): cachedClaim, + }, + }, + { + name: "underlying lookup error", + claimCid: notCachedCid, + expectedClaim: nil, + baseLookup: &mockClaimLookup{nil, anError}, + expectedErr: fmt.Errorf("fetching underlying claim: %w", anError), + finalState: map[string]delegation.Delegation{ + cachedCid.String(): cachedClaim, + }, + }, + } + + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockStore := &MockContentClaimsStore{ + setErr: tc.setErr, + getErr: tc.getErr, + claims: map[string]delegation.Delegation{ + cachedCid.String(): cachedClaim, + }, + } + lookup := tc.baseLookup + if lookup == nil { + lookup = &mockClaimLookup{notCachedClaim, nil} + } + // Create ClaimLookup instance + cl := claimlookup.WithCache(lookup, mockStore) + + claim, err := cl.LookupClaim(context.Background(), tc.claimCid, *testutil.TestURL) + if tc.expectedErr != nil { + require.EqualError(t, err, tc.expectedErr.Error()) + } else { + require.NoError(t, err) + } + testutil.RequireEqualDelegation(t, tc.expectedClaim, claim) + finalState := tc.finalState + if finalState == nil { + finalState = make(map[string]delegation.Delegation) + } + require.Equal(t, len(finalState), len(mockStore.claims)) + for c, claim := range mockStore.claims { + expectedClaim := finalState[c] + testutil.RequireEqualDelegation(t, expectedClaim, claim) + } + }) + } +} + +// MockContentClaimsStore is a mock implementation of the ContentClaimsStore interface +type MockContentClaimsStore struct { + setErr, getErr error + claims map[string]delegation.Delegation +} + +var _ types.ContentClaimsStore = &MockContentClaimsStore{} + +// SetExpirable implements types.ContentClaimsStore.
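+// It is a no-op in this mock; cache expiry behavior is not exercised by these tests.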
+func (m *MockContentClaimsStore) SetExpirable(ctx context.Context, key cid.Cid, expires bool) error { + return nil +} + +func (m *MockContentClaimsStore) Get(ctx context.Context, claimCid cid.Cid) (delegation.Delegation, error) { + if m.getErr != nil { + return nil, m.getErr + } + claim, exists := m.claims[claimCid.String()] + if !exists { + return nil, types.ErrKeyNotFound + } + return claim, nil +} + +func (m *MockContentClaimsStore) Set(ctx context.Context, claimCid cid.Cid, claim delegation.Delegation, overwrite bool) error { + if m.setErr != nil { + return m.setErr + } + m.claims[claimCid.String()] = claim + return nil +} + +type mockClaimLookup struct { + claim delegation.Delegation + err error +} + +func (m *mockClaimLookup) LookupClaim(ctx context.Context, claimCid cid.Cid, fetchURL url.URL) (delegation.Delegation, error) { + return m.claim, m.err +} diff --git a/pkg/service/claimlookup/claimlookup.go b/pkg/service/claimlookup/claimlookup.go deleted file mode 100644 index 20e07c5..0000000 --- a/pkg/service/claimlookup/claimlookup.go +++ /dev/null @@ -1,15 +0,0 @@ -package claimlookup - -import ( - "github.com/ipni/go-libipni/find/model" - "github.com/storacha-network/go-ucanto/core/delegation" -) - -// ClaimLookup is used to get full claims from a claim cid -// I'll be honest, I'm not exactly sure whether these claims should be stored or simply synthesized -// from the information in IPNI combined with having private keys stored in this service -// I THINK it's possible you can synthesize Index & Equals claims from the information in IPNI + a private key -// Location commitments are more complicated cause they really ought to be signed by the storer of the commitment? -type ClaimLookup interface { - LookupClaim(model.ProviderResult) (delegation.Delegation, error) -} diff --git a/pkg/service/claimlookup/interface.go b/pkg/service/claimlookup/interface.go new file mode 100644 index 0000000..2514539 --- /dev/null +++ b/pkg/service/claimlookup/interface.go @@ -0,0 +1,13 @@ +package claimlookup + +import ( + "context" + "net/url" + + "github.com/ipfs/go-cid" + "github.com/storacha/go-ucanto/core/delegation" +) + +type ClaimLookup interface { + LookupClaim(ctx context.Context, claimCid cid.Cid, fetchURL url.URL) (delegation.Delegation, error) +} diff --git a/pkg/service/claimlookup/simplelookup.go b/pkg/service/claimlookup/simplelookup.go new file mode 100644 index 0000000..69eb51a --- /dev/null +++ b/pkg/service/claimlookup/simplelookup.go @@ -0,0 +1,41 @@ +package claimlookup + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/ipfs/go-cid" + "github.com/storacha/go-ucanto/core/delegation" +) + +// simpleLookup fetches content claims over HTTP +type simpleLookup struct { + httpClient *http.Client +} + +// NewClaimLookup creates a new ClaimLookup that fetches claims with the provided HTTP client +func NewClaimLookup(httpClient *http.Client) ClaimLookup { + return &simpleLookup{ + httpClient: httpClient, + } +} + +// LookupClaim fetches a claim via the provided URL +func (sl *simpleLookup) LookupClaim(ctx context.Context, claimCid cid.Cid, fetchURL url.URL) (delegation.Delegation, error) { + // attempt to fetch the claim from provided url + resp, err := sl.httpClient.Get(fetchURL.String()) + if err != nil { + return nil, fmt.Errorf("failed to fetch claim: %w", err) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading fetched claim
body: %w", err) + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return nil, fmt.Errorf("failure response fetching claim. status: %s, message: %s", resp.Status, string(body)) + } + return delegation.Extract(body) +} diff --git a/pkg/service/claimlookup/simplelookup_test.go b/pkg/service/claimlookup/simplelookup_test.go new file mode 100644 index 0000000..5d17b27 --- /dev/null +++ b/pkg/service/claimlookup/simplelookup_test.go @@ -0,0 +1,59 @@ +package claimlookup_test + +import ( + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/service/claimlookup" + "github.com/stretchr/testify/require" +) + +func TestClaimLookup__LookupClaim(t *testing.T) { + cid := testutil.RandomCID().(cidlink.Link).Cid + claim := testutil.RandomIndexDelegation() + // sample error + testCases := []struct { + name string + handler http.HandlerFunc + expectedErr error + expectedClaim delegation.Delegation + }{ + { + name: "success fetch", + handler: func(w http.ResponseWriter, r *http.Request) { + claimBytes := testutil.Must(io.ReadAll(claim.Archive()))(t) + testutil.Must(w.Write(claimBytes))(t) + }, + expectedClaim: claim, + }, + { + name: "failure", + handler: http.NotFound, + expectedErr: errors.New("failure response fetching claim. status: 404 Not Found, message: 404 page not found\n"), + }, + } + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testServer := httptest.NewServer(tc.handler) + defer func() { testServer.Close() }() + // Create ClaimLookup instance + cl := claimlookup.NewClaimLookup(testServer.Client()) + claim, err := cl.LookupClaim(context.Background(), cid, *testutil.Must(url.Parse(testServer.URL))(t)) + if tc.expectedErr != nil { + require.EqualError(t, err, tc.expectedErr.Error()) + } else { + require.NoError(t, err) + } + testutil.RequireEqualDelegation(t, tc.expectedClaim, claim) + }) + } +} diff --git a/pkg/service/construct.go b/pkg/service/construct.go new file mode 100644 index 0000000..58a0bb9 --- /dev/null +++ b/pkg/service/construct.go @@ -0,0 +1,90 @@ +package service + +import ( + "context" + "net/http" + + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime/linking" + ipnifind "github.com/ipni/go-libipni/find/client" + goredis "github.com/redis/go-redis/v9" + "github.com/storacha/indexing-service/pkg/internal/jobqueue" + "github.com/storacha/indexing-service/pkg/redis" + "github.com/storacha/indexing-service/pkg/service/blobindexlookup" + "github.com/storacha/indexing-service/pkg/service/claimlookup" + "github.com/storacha/indexing-service/pkg/service/providercacher" + "github.com/storacha/indexing-service/pkg/service/providerindex" +) + +var log = logging.Logger("service") + +type ServiceConfig struct { + RedisURL string + RedisPasswd string + ProvidersDB int + ClaimsDB int + IndexesDB int + IndexerURL string +} + +func Construct(sc ServiceConfig) (*IndexingService, func(context.Context), error) { + + // connect to redis + providersClient := goredis.NewClient(&goredis.Options{ + Addr: sc.RedisURL, + Password: sc.RedisPasswd, + DB: sc.ProvidersDB, + }) + claimsClient := goredis.NewClient(&goredis.Options{ + Addr: sc.RedisURL, + Password: sc.RedisPasswd, + DB: sc.ClaimsDB, + }) + indexesClient := goredis.NewClient(&goredis.Options{ + Addr: sc.RedisURL, + Password: 
sc.RedisPasswd, + DB: sc.IndexesDB, + }) + + // build caches + providersCache := redis.NewProviderStore(providersClient) + claimsCache := redis.NewContentClaimsStore(claimsClient) + shardDagIndexesCache := redis.NewShardedDagIndexStore(indexesClient) + + // setup and start the provider caching queue for indexes + cachingJobHandler := providercacher.NewJobHandler(providercacher.NewSimpleProviderCacher(providersCache)) + jobQueue := jobqueue.NewJobQueue(cachingJobHandler.Handle, + jobqueue.WithBuffer(5), + jobqueue.WithConcurrency(5), + jobqueue.WithErrorHandler(func(err error) { + log.Errorw("caching provider index", "error", err) + })) + cachingQueue := providercacher.NewCachingQueue(jobQueue) + + // setup IPNI + // TODO: switch to double hashed client for reader privacy? + findClient, err := ipnifind.New(sc.IndexerURL) + if err != nil { + return nil, nil, err + } + + // build read through fetchers + // TODO: add sender / publisher / linksystem / legacy systems + providerIndex := providerindex.NewProviderIndex(providersCache, findClient, nil, nil, linking.LinkSystem{}, nil) + claimLookup := claimlookup.WithCache(claimlookup.NewClaimLookup(http.DefaultClient), claimsCache) + blobIndexLookup := blobindexlookup.WithCache( + blobindexlookup.NewBlobIndexLookup(http.DefaultClient), + shardDagIndexesCache, + cachingQueue, + ) + + // setup walker + service := NewIndexingService(blobIndexLookup, claimLookup, providerIndex, WithConcurrency(5)) + + // start the job queue + jobQueue.Startup() + + return service, func(ctx context.Context) { + jobQueue.Shutdown(ctx) + }, nil +} diff --git a/pkg/service/contentclaims/server.go b/pkg/service/contentclaims/server.go new file mode 100644 index 0000000..4220fbf --- /dev/null +++ b/pkg/service/contentclaims/server.go @@ -0,0 +1,15 @@ +package contentclaims + +import ( + "github.com/storacha/go-ucanto/principal" + "github.com/storacha/go-ucanto/server" +) + +func NewServer(id principal.Signer) (server.ServerView, error) { + service := NewService() + var opts []server.Option + for ability, method := range service { + opts = append(opts, server.WithServiceMethod(ability, method)) + } + return server.NewServer(id, opts...) 
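+ // each ability in the service map is registered as its own handler via server.WithServiceMethod, + // so the resulting UCAN server routes incoming invocations by capability name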
+} diff --git a/pkg/service/contentclaims/server_test.go b/pkg/service/contentclaims/server_test.go new file mode 100644 index 0000000..13b17db --- /dev/null +++ b/pkg/service/contentclaims/server_test.go @@ -0,0 +1,85 @@ +package contentclaims + +import ( + "fmt" + "net/url" + "testing" + + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/storacha/go-ucanto/client" + "github.com/storacha/go-ucanto/core/invocation" + "github.com/storacha/go-ucanto/core/receipt" + "github.com/storacha/go-ucanto/core/result" + "github.com/storacha/indexing-service/pkg/capability/assert" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/stretchr/testify/require" +) + +var rcptsch = []byte(` + type Result union { + | Unit "ok" + | Any "error" + } representation keyed + + type Unit struct {} +`) + +func TestServer(t *testing.T) { + server, err := NewServer(testutil.Service) + require.NoError(t, err) + + conn, err := client.NewConnection(testutil.Service, server) + require.NoError(t, err) + + invs := []invocation.Invocation{ + testutil.Must(assert.Equals.Invoke( + testutil.Service, + testutil.Service, + testutil.Service.DID().String(), + assert.EqualsCaveats{ + Content: assert.FromHash(testutil.RandomMultihash()), + Equals: testutil.RandomCID(), + }, + ))(t), + testutil.Must(assert.Index.Invoke( + testutil.Service, + testutil.Service, + testutil.Service.DID().String(), + assert.IndexCaveats{ + Content: testutil.RandomCID(), + Index: testutil.RandomCID(), + }, + ))(t), + testutil.Must(assert.Location.Invoke( + testutil.Service, + testutil.Service, + testutil.Service.DID().String(), + assert.LocationCaveats{ + Content: assert.FromHash(testutil.RandomMultihash()), + Location: []url.URL{}, + }, + ))(t), + } + + for _, inv := range invs { + t.Run(inv.Capabilities()[0].Can(), func(t *testing.T) { + resp, err := client.Execute([]invocation.Invocation{inv}, conn) + require.NoError(t, err) + + rcptlnk, ok := resp.Get(inv.Link()) + require.True(t, ok, "missing receipt for invocation: %s", inv.Link()) + + reader, err := receipt.NewReceiptReader[assert.Unit, datamodel.Node](rcptsch) + require.NoError(t, err) + + rcpt, err := reader.Read(rcptlnk, resp.Blocks()) + require.NoError(t, err) + + result.MatchResultR0(rcpt.Out(), func(ok assert.Unit) { + fmt.Printf("%+v\n", ok) + }, func(x datamodel.Node) { + require.Fail(t, "unexpected failure") + }) + }) + } +} diff --git a/pkg/service/contentclaims/service.go b/pkg/service/contentclaims/service.go new file mode 100644 index 0000000..eb480bc --- /dev/null +++ b/pkg/service/contentclaims/service.go @@ -0,0 +1,38 @@ +package contentclaims + +import ( + logging "github.com/ipfs/go-log/v2" + "github.com/storacha/go-ucanto/core/invocation" + "github.com/storacha/go-ucanto/core/receipt" + "github.com/storacha/go-ucanto/server" + "github.com/storacha/go-ucanto/ucan" + "github.com/storacha/indexing-service/pkg/capability/assert" +) + +var log = logging.Logger("contentclaims") + +func NewService() map[ucan.Ability]server.ServiceMethod[assert.Unit] { + return map[ucan.Ability]server.ServiceMethod[assert.Unit]{ + assert.Equals.Can(): server.Provide( + assert.Equals, + func(cap ucan.Capability[assert.EqualsCaveats], inv invocation.Invocation, ctx server.InvocationContext) (assert.Unit, receipt.Effects, error) { + log.Errorf("TODO: implement me") + return assert.Unit{}, nil, nil + }, + ), + assert.Index.Can(): server.Provide( + assert.Index, + func(cap ucan.Capability[assert.IndexCaveats], inv invocation.Invocation, ctx server.InvocationContext) (assert.Unit, 
receipt.Effects, error) { + log.Errorf("TODO: implement me") + return assert.Unit{}, nil, nil + }, + ), + assert.Location.Can(): server.Provide( + assert.Location, + func(cap ucan.Capability[assert.LocationCaveats], inv invocation.Invocation, ctx server.InvocationContext) (assert.Unit, receipt.Effects, error) { + log.Errorf("TODO: implement me") + return assert.Unit{}, nil, nil + }, + ), + } +} diff --git a/pkg/service/ipnindex/providerindex.go b/pkg/service/ipnindex/providerindex.go deleted file mode 100644 index 4da545c..0000000 --- a/pkg/service/ipnindex/providerindex.go +++ /dev/null @@ -1,43 +0,0 @@ -package provider - -import ( - "github.com/ipld/go-ipld-prime" - "github.com/ipni/go-libipni/announce" - "github.com/ipni/go-libipni/dagsync" - ipnifind "github.com/ipni/go-libipni/find/client" - "github.com/ipni/go-libipni/find/model" - mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/go-ucanto/did" - "github.com/storacha-network/indexing-service/pkg/metadata" - "github.com/storacha-network/indexing-service/pkg/types" -) - -type QueryKey struct { - Spaces []did.DID - Hash mh.Multihash - ClaimType metadata.ClaimType -} - -// IPNIIndex is a read/write interface to a local cache that falls back to IPNI, exclusively publishing claims data -type IPNIIndex interface { - // Find should do the following - // 1. Read from the IPNI Storage cache to get a list of providers - // a. If there is no record in cache, query IPNI, filter out any non-content claims metadata, and store - // the resulting records in the cache - // b. the are no records in the cache or IPNI, it can attempt to read from legacy systems -- Dynamo tables & content claims storage, synthetically constructing provider results - // 2. With returned provider results, filter additionally for claim type. If space dids are set, calculate an encodedcontextid's by hashing space DID and Hash, and filter for a matching context id - // Future TODO: kick off a conversion task to update the recrds - Find(QueryKey) []model.ProviderResult - // Publish should do the following: - // 1. Write the entries to the cache with no expiration until publishing is complete - // 2. Generate an advertisement for the advertised hashes and publish/announce it - Publish([]mh.Multihash, model.ProviderResult) -} - -// TBD access to legacy systems -type LegacySystems interface{} - -// TODO: This assumes using low level primitives for publishing from IPNI but maybe we want to go ahead and use index-provider? 
-func NewIPNIIndex(ipniCache types.IPNIStore, findClient ipnifind.Client, sender announce.Sender, publisher dagsync.Publisher, advertisementsLsys ipld.LinkSystem, legacySystems LegacySystems) IPNIIndex { - return nil -} diff --git a/pkg/service/providercacher/cachingqueue.go b/pkg/service/providercacher/cachingqueue.go new file mode 100644 index 0000000..d73e8c0 --- /dev/null +++ b/pkg/service/providercacher/cachingqueue.go @@ -0,0 +1,48 @@ +package providercacher + +import ( + "context" + + "github.com/ipni/go-libipni/find/model" + "github.com/storacha/indexing-service/pkg/blobindex" +) + +type ( + ProviderCachingJob struct { + provider model.ProviderResult + index blobindex.ShardedDagIndexView + } + + JobQueue interface { + Queue(ctx context.Context, j ProviderCachingJob) error + } + + JobHandler struct { + providerCacher ProviderCacher + } + + CachingQueue struct { + jobQueue JobQueue + } +) + +func NewJobHandler(providerCacher ProviderCacher) *JobHandler { + return &JobHandler{ + providerCacher: providerCacher, + } +} + +func (j *JobHandler) Handle(ctx context.Context, job ProviderCachingJob) error { + _, err := j.providerCacher.CacheProviderForIndexRecords(ctx, job.provider, job.index) + return err +} + +func NewCachingQueue(jobQueue JobQueue) *CachingQueue { + return &CachingQueue{ + jobQueue: jobQueue, + } +} + +func (q *CachingQueue) QueueProviderCaching(ctx context.Context, provider model.ProviderResult, index blobindex.ShardedDagIndexView) error { + return q.jobQueue.Queue(ctx, ProviderCachingJob{provider: provider, index: index}) +} diff --git a/pkg/service/providercacher/interface.go b/pkg/service/providercacher/interface.go new file mode 100644 index 0000000..a3d602e --- /dev/null +++ b/pkg/service/providercacher/interface.go @@ -0,0 +1,12 @@ +package providercacher + +import ( + "context" + + "github.com/ipni/go-libipni/find/model" + "github.com/storacha/indexing-service/pkg/blobindex" +) + +type ProviderCacher interface { + CacheProviderForIndexRecords(ctx context.Context, provider model.ProviderResult, index blobindex.ShardedDagIndexView) (written uint64, err error) +} diff --git a/pkg/service/providercacher/simpleprovidercacher.go b/pkg/service/providercacher/simpleprovidercacher.go new file mode 100644 index 0000000..d0f686e --- /dev/null +++ b/pkg/service/providercacher/simpleprovidercacher.go @@ -0,0 +1,47 @@ +package providercacher + +import ( + "context" + "errors" + "slices" + + "github.com/ipni/go-libipni/find/model" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/providerresults" + "github.com/storacha/indexing-service/pkg/types" +) + +type simpleProviderCacher struct { + providerStore types.ProviderStore +} + +func NewSimpleProviderCacher(providerStore types.ProviderStore) ProviderCacher { + return &simpleProviderCacher{providerStore: providerStore} +} + +func (s *simpleProviderCacher) CacheProviderForIndexRecords(ctx context.Context, provider model.ProviderResult, index blobindex.ShardedDagIndexView) (uint64, error) { + written := uint64(0) + for _, shardIndex := range index.Shards().Iterator() { + for hash := range shardIndex.Iterator() { + existing, err := s.providerStore.Get(ctx, hash) + if err != nil && !errors.Is(err, types.ErrKeyNotFound) { + return written, err + } + + inList := slices.ContainsFunc(existing, func(matchProvider model.ProviderResult) bool { + return providerresults.Equals(provider, matchProvider) + }) + if !inList { + newResults :=
append(existing, provider) + err = s.providerStore.Set(ctx, hash, newResults, true) + if err != nil { + return written, err + } + written++ + } + } + } + return written, nil +} diff --git a/pkg/service/providercacher/simpleprovidercacher_test.go b/pkg/service/providercacher/simpleprovidercacher_test.go new file mode 100644 index 0000000..86110f9 --- /dev/null +++ b/pkg/service/providercacher/simpleprovidercacher_test.go @@ -0,0 +1,177 @@ +package providercacher_test + +import ( + "context" + "testing" + + "github.com/ipni/go-libipni/find/model" + "github.com/multiformats/go-multihash" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/internal/testutil" + "github.com/storacha/indexing-service/pkg/service/providercacher" + "github.com/storacha/indexing-service/pkg/types" + "github.com/stretchr/testify/require" +) + +func TestSimpleProviderCacher_CacheProviderForIndexRecords(t *testing.T) { + + // Create a test context + ctx := context.Background() + + // Create test providers + testProvider := testutil.RandomProviderResult() + testProvider2 := testutil.RandomProviderResult() + + // Create a test index with random CIDs + testCid1 := testutil.RandomCID() + shardIndex := blobindex.NewShardedDagIndexView(testCid1, 2) + + shardMhs := testutil.RandomMultihashes(2) + sliceMhs := testutil.RandomMultihashes(6) + for i := range 2 { + for j := range 3 { + shardIndex.SetSlice(shardMhs[i], sliceMhs[i*3+j], blobindex.Position{}) + } + } + + shardIndex2 := blobindex.NewShardedDagIndexView(testutil.RandomCID(), 2) + for j := range 2 { + shardIndex2.SetSlice(shardMhs[0], sliceMhs[j], blobindex.Position{}) + } + + evensFilled := func() map[string][]model.ProviderResult { + starter := make(map[string][]model.ProviderResult) + for i, sliceMh := range sliceMhs { + if i%2 == 0 { + starter[sliceMh.String()] = []model.ProviderResult{testProvider} + } + } + return starter + } + + // Define test cases + testCases := []struct { + name string + provider model.ProviderResult + index blobindex.ShardedDagIndexView + getErr error + setErr error + initialStore map[string][]model.ProviderResult + expectedCount uint64 + expectedErr error + testStore func(t *testing.T, store map[string][]model.ProviderResult) + }{ + { + name: "Cache new provider", + provider: testProvider, + index: shardIndex, + expectedCount: 6, + expectedErr: nil, + testStore: func(t *testing.T, store map[string][]model.ProviderResult) { + require.Len(t, store, 6) + for _, sliceMh := range sliceMhs { + require.Equal(t, store[sliceMh.String()], []model.ProviderResult{testProvider}) + } + }, + }, + { + name: "Cache provider already present", + provider: testProvider, + index: shardIndex, + initialStore: evensFilled(), + expectedCount: 3, + expectedErr: nil, + testStore: func(t *testing.T, store map[string][]model.ProviderResult) { + require.Len(t, store, 6) + for _, sliceMh := range sliceMhs { + require.Equal(t, store[sliceMh.String()], []model.ProviderResult{testProvider}) + } + }, + }, + { + name: "Cache another provider on top", + provider: testProvider2, + index: shardIndex, + initialStore: evensFilled(), + expectedCount: 6, + expectedErr: nil, + testStore: func(t *testing.T, store map[string][]model.ProviderResult) { + require.Len(t, store, 6) + for i, sliceMh := range sliceMhs { + expected := []model.ProviderResult{testProvider2} + if i%2 == 0 { + expected = []model.ProviderResult{testProvider, testProvider2} + } + require.Equal(t, store[sliceMh.String()], expected) + } + }, + }, + } + + // Run test cases 
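+ // each case seeds the mock provider store, runs CacheProviderForIndexRecords, and + // checks both the returned write count and the final contents of the store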
+ for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Initialize mock store + initialStore := tc.initialStore + if initialStore == nil { + initialStore = make(map[string][]model.ProviderResult) + } + mockStore := &MockProviderStore{ + setErr: tc.setErr, + getErr: tc.getErr, + store: initialStore, + } + + // Create SimpleProviderCacher instance + cacher := providercacher.NewSimpleProviderCacher(mockStore) + + count, err := cacher.CacheProviderForIndexRecords(ctx, tc.provider, tc.index) + if tc.expectedErr == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, tc.expectedErr.Error()) + } + require.Equal(t, tc.expectedCount, count) + if tc.testStore != nil { + tc.testStore(t, mockStore.store) + } + }) + } +} + +// MockProviderStore is a mock implementation of the ProviderStore interface +type MockProviderStore struct { + setErr, getErr error + store map[string][]model.ProviderResult +} + +var _ types.ProviderStore = &MockProviderStore{} + +func (m *MockProviderStore) Get(ctx context.Context, hash multihash.Multihash) ([]model.ProviderResult, error) { + if m.getErr != nil { + return nil, m.getErr + } + results, exists := m.store[hash.String()] + if !exists { + return nil, types.ErrKeyNotFound + } + return results, nil +} + +func (m *MockProviderStore) Set(ctx context.Context, hash multihash.Multihash, providers []model.ProviderResult, expires bool) error { + if m.setErr != nil { + return m.setErr + } + m.store[hash.String()] = providers + return nil +} + +// SetExpirable implements types.ProviderStore. +func (m *MockProviderStore) SetExpirable(ctx context.Context, key multihash.Multihash, expires bool) error { + panic("unimplemented") +} + +type alternateProviderSubTest struct { + name string + provider model.ProviderResult +} diff --git a/pkg/service/providerindex/providerindex.go b/pkg/service/providerindex/providerindex.go new file mode 100644 index 0000000..b917d3a --- /dev/null +++ b/pkg/service/providerindex/providerindex.go @@ -0,0 +1,154 @@ +package providerindex + +import ( + "bytes" + "context" + "slices" + + "github.com/ipld/go-ipld-prime" + "github.com/ipni/go-libipni/announce" + "github.com/ipni/go-libipni/dagsync" + ipnifind "github.com/ipni/go-libipni/find/client" + "github.com/ipni/go-libipni/find/model" + "github.com/multiformats/go-multicodec" + mh "github.com/multiformats/go-multihash" + "github.com/storacha/go-ucanto/did" + "github.com/storacha/indexing-service/pkg/metadata" + "github.com/storacha/indexing-service/pkg/types" +) + +type QueryKey struct { + Spaces []did.DID + Hash mh.Multihash + TargetClaims []multicodec.Code +} + +// ProviderIndex is a read/write interface to a local cache of providers that falls back to IPNI +type ProviderIndex struct { + providerStore types.ProviderStore + findClient ipnifind.Finder +} + +// TBD access to legacy systems +type LegacySystems interface{} + +// TODO: This assumes using low level primitives for publishing from IPNI but maybe we want to go ahead and use index-provider? +func NewProviderIndex(providerStore types.ProviderStore, findClient ipnifind.Finder, sender announce.Sender, publisher dagsync.Publisher, advertisementsLsys ipld.LinkSystem, legacySystems LegacySystems) *ProviderIndex { + return &ProviderIndex{ + providerStore: providerStore, + findClient: findClient, + } +} + +// Find should do the following +// 1. Read from the IPNI Storage cache to get a list of providers +// a. 
If there is no record in cache, query IPNI, filter out any non-content claims metadata, and store + // the resulting records in the cache + // b. if there are no records in the cache or IPNI, it can attempt to read from legacy systems -- Dynamo tables & content claims storage, synthetically constructing provider results + // 2. With returned provider results, filter additionally for claim type. If space dids are set, calculate encoded context IDs by hashing space DID and Hash, and filter for a matching context id + // Future TODO: kick off a conversion task to update the records +func (pi *ProviderIndex) Find(ctx context.Context, qk QueryKey) ([]model.ProviderResult, error) { + results, err := pi.getProviderResults(ctx, qk.Hash) + if err != nil { + return nil, err + } + results, err = pi.filteredCodecs(results, qk.TargetClaims) + if err != nil { + return nil, err + } + return pi.filterBySpace(results, qk.Hash, qk.Spaces) +} + +func (pi *ProviderIndex) getProviderResults(ctx context.Context, mh mh.Multihash) ([]model.ProviderResult, error) { + res, err := pi.providerStore.Get(ctx, mh) + if err == nil { + return res, nil + } + if err != types.ErrKeyNotFound { + return nil, err + } + + findRes, err := pi.findClient.Find(ctx, mh) + if err != nil { + return nil, err + } + var results []model.ProviderResult + for _, mhres := range findRes.MultihashResults { + results = append(results, mhres.ProviderResults...) + } + err = pi.providerStore.Set(ctx, mh, results, true) + if err != nil { + return nil, err + } + return results, nil +} + +func (pi *ProviderIndex) filteredCodecs(results []model.ProviderResult, codecs []multicodec.Code) ([]model.ProviderResult, error) { + if len(codecs) == 0 { + return results, nil + } + return filter(results, func(result model.ProviderResult) (bool, error) { + md := metadata.MetadataContext.New() + err := md.UnmarshalBinary(result.Metadata) + if err != nil { + return false, err + } + return slices.ContainsFunc(codecs, func(code multicodec.Code) bool { + return slices.ContainsFunc(md.Protocols(), func(mdCode multicodec.Code) bool { + return mdCode == code + }) + }), nil + }) +} + +func (pi *ProviderIndex) filterBySpace(results []model.ProviderResult, mh mh.Multihash, spaces []did.DID) ([]model.ProviderResult, error) { + if len(spaces) == 0 { + return results, nil + } + encryptedIds := make([]types.EncodedContextID, 0, len(spaces)) + for _, space := range spaces { + encryptedId, err := types.ContextID{ + Space: &space, + Hash: mh, + }.ToEncoded() + if err != nil { + return nil, err + } + encryptedIds = append(encryptedIds, encryptedId) + } + + filtered, err := filter(results, func(result model.ProviderResult) (bool, error) { + return slices.ContainsFunc(encryptedIds, func(encryptedID types.EncodedContextID) bool { + return bytes.Equal(result.ContextID, encryptedID) + }), nil + }) + if err != nil { + return nil, err + } + if len(filtered) > 0 { + return filtered, nil + } + return results, nil +} + +// Publish should do the following: +// 1. Write the entries to the cache with no expiration until publishing is complete +// 2. Generate an advertisement for the advertised hashes and publish/announce it +func (pi *ProviderIndex) Publish(context.Context, []mh.Multihash, model.ProviderResult) { + +} + +func filter(results []model.ProviderResult, filterFunc func(model.ProviderResult) (bool, error)) ([]model.ProviderResult, error) { + + filtered := make([]model.ProviderResult, 0, len(results)) + for _, result := range results { + include, err := filterFunc(result) + if err != nil { + return nil, err + } + if include { + filtered = append(filtered, result) + } + } + return filtered, nil +} diff --git a/pkg/service/queryresult/datamodel/queryresult.go b/pkg/service/queryresult/datamodel/queryresult.go new file mode 100644 index 0000000..8c5f71f --- /dev/null +++ b/pkg/service/queryresult/datamodel/queryresult.go @@ -0,0 +1,46 @@ +package datamodel + +import ( + // for schema import + _ "embed" + "fmt" + + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/schema" +) + +var ( + //go:embed queryresult.ipldsch + queryResultBytes []byte + queryResultType schema.Type +) + +func init() { + typeSystem, err := ipld.LoadSchemaBytes(queryResultBytes) + if err != nil { + panic(fmt.Errorf("failed to load schema: %w", err)) + } + queryResultType = typeSystem.TypeByName("QueryResult") +} + +// QueryResultType is the schema for a QueryResult +func QueryResultType() schema.Type { + return queryResultType +} + +// QueryResultModel is the golang structure for encoding query results +type QueryResultModel struct { + Result0_1 *QueryResultModel0_1 +} + +// QueryResultModel0_1 describes the found claims and indexes for a given query +type QueryResultModel0_1 struct { + Claims []ipld.Link + Indexes *IndexesModel +} + +// IndexesModel maps encoded context IDs to index links +type IndexesModel struct { + Keys []string + Values map[string]ipld.Link +} diff --git a/pkg/service/queryresult/datamodel/queryresult.ipldsch b/pkg/service/queryresult/datamodel/queryresult.ipldsch new file mode 100644 index 0000000..eced1e4 --- /dev/null +++ b/pkg/service/queryresult/datamodel/queryresult.ipldsch @@ -0,0 +1,8 @@ +type QueryResult union { + | QueryResult0_1 "index/query/result@0.1" +} + +type QueryResult0_1 struct { + claims optional [Link] + indexes optional {String:Link} +} diff --git a/pkg/service/queryresult/queryresult.go b/pkg/service/queryresult/queryresult.go new file mode 100644 index 0000000..12aff74 --- /dev/null +++ b/pkg/service/queryresult/queryresult.go @@ -0,0 +1,139 @@ +package queryresult + +import ( + "io" + "iter" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime/datamodel" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + multihash "github.com/multiformats/go-multihash/core" + "github.com/storacha/go-ucanto/core/dag/blockstore" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/go-ucanto/core/ipld" + "github.com/storacha/go-ucanto/core/ipld/block" + "github.com/storacha/go-ucanto/core/ipld/codec/cbor" + "github.com/storacha/go-ucanto/core/ipld/hash/sha256" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/internal/bytemap" + qdm "github.com/storacha/indexing-service/pkg/service/queryresult/datamodel" + "github.com/storacha/indexing-service/pkg/types" +) + +// QueryResult is an encodable result of a query +type QueryResult interface { + ipld.View + // Claims is a list of links to the root block of claims that can be found in this message + Claims() []ipld.Link + // Indexes is a list of links to the CID hashes of the archived sharded dag indexes that can be found in this + // message + Indexes() []ipld.Link +} + +type queryResult struct { + root ipld.Block + data *qdm.QueryResultModel0_1 + blks blockstore.BlockReader +} + +var _ QueryResult = (*queryResult)(nil) + +func (q *queryResult) Blocks() iter.Seq2[block.Block, error] { + return q.blks.Iterator() +} + +func (q *queryResult) Claims() []datamodel.Link { + return q.data.Claims +} + +func (q *queryResult) Indexes() []datamodel.Link { + if q.data.Indexes == nil { + return nil + } + var indexes []ipld.Link + for _, k := range q.data.Indexes.Keys { + l, ok := q.data.Indexes.Values[k] + if ok { + indexes = append(indexes, l) + } + } + return indexes +} + +func (q *queryResult) Root() block.Block { + return q.root +} + +// Build generates a new encodable QueryResult +func Build(claims map[cid.Cid]delegation.Delegation, indexes bytemap.ByteMap[types.EncodedContextID, blobindex.ShardedDagIndexView]) (QueryResult, error) { + bs, err := blockstore.NewBlockStore() + if err != nil { + return nil, err + } + + cls := []ipld.Link{} + for _, claim := range claims { + cls = append(cls, claim.Link()) + + err := blockstore.WriteInto(claim, bs) + if err != nil { + return nil, err + } + } + + var indexesModel *qdm.IndexesModel + if indexes.Size() > 0 { + indexesModel = &qdm.IndexesModel{ + Keys: make([]string, 0, indexes.Size()), + Values: make(map[string]ipld.Link, indexes.Size()), + } + for contextID, index := range indexes.Iterator() { + reader, err := index.Archive() + if err != nil { + return nil, err + } + bytes, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + indexCid, err := cid.Prefix{ + Version: 1, + Codec: cid.Raw, + MhType: multihash.SHA2_256, + MhLength: -1, + }.Sum(bytes) + if err != nil { + return nil, err + } + + lnk := cidlink.Link{Cid: indexCid} + err = bs.Put(block.NewBlock(lnk, bytes)) + if err != nil { + return nil, err + } + indexesModel.Keys = append(indexesModel.Keys, string(contextID)) + indexesModel.Values[string(contextID)] = lnk + } + } + + queryResultModel := qdm.QueryResultModel{ + Result0_1: &qdm.QueryResultModel0_1{ + Claims: cls, + Indexes: indexesModel, + }, + } + + rt, err := block.Encode( + &queryResultModel, + qdm.QueryResultType(), + cbor.Codec, + sha256.Hasher, + ) + if err != nil { + return nil, err + } + + err = bs.Put(rt) + if err != nil { + return nil, err + } + + return &queryResult{root: rt, data: queryResultModel.Result0_1, blks: bs}, nil +} diff --git a/pkg/service/service.go b/pkg/service/service.go index 9c5b814..81cce12 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -1,46 +1,348 @@ package service import ( - mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/go-ucanto/core/delegation" - "github.com/storacha-network/go-ucanto/did" - "github.com/storacha-network/indexing-service/pkg/blobindex" + "context" + "errors" + "net/url" + "strings" + + "github.com/ipfs/go-cid" + "github.com/ipni/go-libipni/find/model" + "github.com/ipni/go-libipni/maurl" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/go-ucanto/did" + "github.com/storacha/indexing-service/pkg/blobindex" + "github.com/storacha/indexing-service/pkg/internal/bytemap" + "github.com/storacha/indexing-service/pkg/internal/jobwalker" + "github.com/storacha/indexing-service/pkg/internal/jobwalker/parallelwalk" + "github.com/storacha/indexing-service/pkg/internal/jobwalker/singlewalk" +
"github.com/storacha/indexing-service/pkg/metadata" + "github.com/storacha/indexing-service/pkg/service/providerindex" + "github.com/storacha/indexing-service/pkg/service/queryresult" + "github.com/storacha/indexing-service/pkg/types" ) +const defaultConcurrency = 5 + +// Match narrows parameters for locating providers/claims for a set of multihashes type Match struct { Subject []did.DID } +// Query is a query for several multihashes type Query struct { - Hashes []mh.Multihash + Hashes []multihash.Multihash Match Match } -type QueryResult struct { - Claims []delegation.Delegation - Indexes []blobindex.ShardedDagIndexView -} - -type IndexingService interface { - // Query returns back relevant content claims for the given query using the following steps - // 1. Query the IPNIIndex for all matching records - // 2. For any index records, query the IPNIIndex for any location claims for that index cid - // 3. For any index claims, query the IPNIIndex for location claims for the index cid - // 4. Query the BlobIndexLookup to get the full ShardedDagIndex for any index claims - // 5. Query IPNIIndex for any location claims for any shards that contain the multihash based on the ShardedDagIndex - // 6. Read the requisite claims from the ClaimLookup - // 7. Return all discovered claims and sharded dag indexes - Query(Query) (QueryResult, error) - // I imagine publish claim to work as follows - // For all claims except index, just use the publish API on IPNIIndex - // For index claims, let's assume they fail if a location claim for the index car cid is not already published - // The service should lookup the index cid location claim, and fetch the ShardedDagIndexView, then use the hashes inside - // to assemble all the multihashes in the index advertisement - // still not 100% sure how location claim publishing works - // generally I would expect the call to publish a location commitment to come from the storage provider on blob/accept (a delegation for a location commitment is already - // generated on blob/accept) - // the publishing should happen throught his service so that the location commitment is read-on-write - // ideally however, IPNI would support UCAN chains for publishing so that the claim could be published on the storage provider's PeerAddr - // it doesn't for now, so either we publish on the services address, or we develop some kind of signing scheme for advertisements (seems complicated) - PublishClaim(delegation.Delegation) error +// ProviderIndex is a read/write interface to a local cache of providers that falls back to IPNI +type ProviderIndex interface { + // Find should do the following + // 1. Read from the IPNI Storage cache to get a list of providers + // a. If there is no record in cache, query IPNI, filter out any non-content claims metadata, and store + // the resulting records in the cache + // b. the are no records in the cache or IPNI, it can attempt to read from legacy systems -- Dynamo tables & content claims storage, synthetically constructing provider results + // 2. With returned provider results, filter additionally for claim type. If space dids are set, calculate an encodedcontextid's by hashing space DID and Hash, and filter for a matching context id + // Future TODO: kick off a conversion task to update the recrds + Find(context.Context, providerindex.QueryKey) ([]model.ProviderResult, error) + // Publish should do the following: + // 1. Write the entries to the cache with no expiration until publishing is complete + // 2. 
Generate an advertisement for the advertised hashes and publish/announce it + Publish(context.Context, []multihash.Multihash, model.ProviderResult) +} + +// ClaimLookup is used to get full claims from a claim cid +type ClaimLookup interface { + // LookupClaim should: + // 1. attempt to read the claim from the cache from the encoded contextID + // 2. if not found, attempt to fetch the claim from the provided URL. Store the result in cache + // 3. return the claim + LookupClaim(ctx context.Context, claimCid cid.Cid, fetchURL url.URL) (delegation.Delegation, error) +} + +// BlobIndexLookup is a read through cache for fetching blob indexes +type BlobIndexLookup interface { + // Find should: + // 1. attempt to read the sharded dag index from the cache from the encoded contextID + // 2. if not found, attempt to fetch the index from the provided URL. Store the result in cache + // 3. return the index + // 4. asyncronously, add records to the ProviderStore from the parsed blob index so that we can avoid future queries to IPNI for + // other multihashes in the index + Find(ctx context.Context, contextID types.EncodedContextID, provider model.ProviderResult, fetchURL url.URL, rng *metadata.Range) (blobindex.ShardedDagIndexView, error) +} + +// IndexingService implements read/write logic for indexing data with IPNI, content claims, sharded dag indexes, and a cache layer +type IndexingService struct { + blobIndexLookup BlobIndexLookup + claimLookup ClaimLookup + providerIndex ProviderIndex + jobWalker jobwalker.JobWalker[job, queryState] +} + +type job struct { + mh multihash.Multihash + indexForMh *multihash.Multihash + indexProviderRecord *model.ProviderResult + jobType jobType +} + +type jobKey string + +func (j job) key() jobKey { + k := jobKey(j.mh) + jobKey(j.jobType) + if j.indexForMh != nil { + k += jobKey(*j.indexForMh) + } + return k +} + +type jobType string + +const standardJobType jobType = "standard" +const locationJobType jobType = "location" +const equalsOrLocationJobType jobType = "equals_or_location" + +var targetClaims = map[jobType][]multicodec.Code{ + standardJobType: {metadata.EqualsClaimID, metadata.IndexClaimID, metadata.LocationCommitmentID}, + locationJobType: {metadata.LocationCommitmentID}, + equalsOrLocationJobType: {metadata.IndexClaimID, metadata.LocationCommitmentID}, +} + +type queryResult struct { + Claims map[cid.Cid]delegation.Delegation + Indexes bytemap.ByteMap[types.EncodedContextID, blobindex.ShardedDagIndexView] +} + +type queryState struct { + q *Query + qr *queryResult + visits map[jobKey]struct{} +} + +func (is *IndexingService) jobHandler(mhCtx context.Context, j job, spawn func(job) error, state jobwalker.WrappedState[queryState]) error { + + // check if node has already been visited and ignore if that is the case + if !state.CmpSwap(func(qs queryState) bool { + _, ok := qs.visits[j.key()] + return !ok + }, func(qs queryState) queryState { + qs.visits[j.key()] = struct{}{} + return qs + }) { + return nil + } + + // find provider records related to this multihash + results, err := is.providerIndex.Find(mhCtx, providerindex.QueryKey{ + Hash: j.mh, + Spaces: state.Access().q.Match.Subject, + TargetClaims: targetClaims[j.jobType], + }) + if err != nil { + return err + } + for _, result := range results { + // unmarshall metadata for this provider + md := metadata.MetadataContext.New() + err = md.UnmarshalBinary(result.Metadata) + if err != nil { + return err + } + // the provider may list one or more protocols for this CID + // in our case, the protocols are 
just different types of content claims + for _, code := range md.Protocols() { + protocol := md.Get(code) + // make sure this is some kind of claim protocol, ignore if not + hasClaimCid, ok := protocol.(metadata.HasClaim) + if !ok { + continue + } + // fetch (from cache or url) the actual content claim + claimCid := hasClaimCid.GetClaim() + url, err := is.fetchClaimURL(*result.Provider, claimCid) + if err != nil { + return err + } + claim, err := is.claimLookup.LookupClaim(mhCtx, claimCid, *url) + if err != nil { + return err + } + // add the fetched claim to the results, if we don't already have it + state.CmpSwap( + func(qs queryState) bool { + _, ok := qs.qr.Claims[claimCid] + return !ok + }, + func(qs queryState) queryState { + qs.qr.Claims[claimCid] = claim + return qs + }) + + // handle each type of protocol + switch typedProtocol := protocol.(type) { + case *metadata.EqualsClaimMetadata: + // for an equals claim, it's published on both the content and equals multihashes + // we follow with a query for location claim on the OTHER side of the multihash + if string(typedProtocol.Equals.Hash()) != string(j.mh) { + // lookup was the content hash, queue the equals hash + if err := spawn(job{typedProtocol.Equals.Hash(), nil, nil, locationJobType}); err != nil { + return err + } + } else { + // lookup was the equals hash, queue the content hash + if err := spawn(job{multihash.Multihash(result.ContextID), nil, nil, locationJobType}); err != nil { + return err + } + } + case *metadata.IndexClaimMetadata: + // for an index claim, we follow by looking for a location claim for the index, and fetching the index + mh := j.mh + if err := spawn(job{typedProtocol.Index.Hash(), &mh, &result, equalsOrLocationJobType}); err != nil { + return err + } + case *metadata.LocationCommitmentMetadata: + // for a location claim, we just store it, unless it's for an index CID, in which case we fetch the full index + if j.indexForMh != nil { + // fetch (from URL or cache) the full index + shard := typedProtocol.Shard + if shard == nil { + c := cid.NewCidV1(cid.Raw, j.mh) + shard = &c + } + url, err := is.fetchRetrievalURL(*result.Provider, *shard) + if err != nil { + return err + } + index, err := is.blobIndexLookup.Find(mhCtx, result.ContextID, *j.indexProviderRecord, *url, typedProtocol.Range) + if err != nil { + return err + } + // Add the index to the query results, if we don't already have it + state.CmpSwap( + func(qs queryState) bool { + return !qs.qr.Indexes.Has(result.ContextID) + }, + func(qs queryState) queryState { + qs.qr.Indexes.Set(result.ContextID, index) + return qs + }) + + // add location queries for all shards containing the original CID we're seeing an index for + shards := index.Shards().Iterator() + for shard, index := range shards { + if index.Has(*j.indexForMh) { + if err := spawn(job{shard, nil, nil, equalsOrLocationJobType}); err != nil { + return err + } + } + } + } + } + } + } + return nil +} + +// Query returns relevant content claims for the given query using the following steps +// 1. Query the IPNIIndex for all matching records +// 2. For any index records, query the IPNIIndex for any location claims for that index cid +// 3. For any index claims, query the IPNIIndex for location claims for the index cid +// 4. Query the BlobIndexLookup to get the full ShardedDagIndex for any index claims +// 5. Query IPNIIndex for any location claims for any shards that contain the multihash based on the ShardedDagIndex +// 6. Read the requisite claims from the ClaimLookup +// 7. Return all discovered claims and sharded dag indexes +func (is *IndexingService) Query(ctx context.Context, q Query) (queryresult.QueryResult, error) { + initialJobs := make([]job, 0, len(q.Hashes)) + for _, mh := range q.Hashes { + initialJobs = append(initialJobs, job{mh, nil, nil, standardJobType}) + } + qs, err := is.jobWalker(ctx, initialJobs, queryState{ + q: &q, + qr: &queryResult{ + Claims: make(map[cid.Cid]delegation.Delegation), + Indexes: bytemap.NewByteMap[types.EncodedContextID, blobindex.ShardedDagIndexView](-1), + }, + visits: map[jobKey]struct{}{}, + }, is.jobHandler) + if err != nil { + return nil, err + } + return queryresult.Build(qs.qr.Claims, qs.qr.Indexes) +} + +func (is *IndexingService) urlForResource(provider peer.AddrInfo, resourceType string, resourceID string) (*url.URL, error) { + for _, addr := range provider.Addrs { + // first, attempt to convert the addr to a url scheme + url, err := maurl.ToURL(addr) + // if it can't be converted, skip + if err != nil { + continue + } + // must be an http url + if !(url.Scheme == "http" || url.Scheme == "https") { + continue + } + // the path must contain a placeholder to substitute the resource ID into + if !strings.Contains(url.Path, resourceType) { + continue + } + // ok we have a matching URL, return with all resource type components replaced with the id + url.Path = strings.ReplaceAll(url.Path, resourceType, resourceID) + return url, nil + } + return nil, errors.New("no matching endpoint found") +} + +func (is *IndexingService) fetchClaimURL(provider peer.AddrInfo, claimCid cid.Cid) (*url.URL, error) { + return is.urlForResource(provider, "{claim}", claimCid.String()) +} + +func (is *IndexingService) fetchRetrievalURL(provider peer.AddrInfo, shard cid.Cid) (*url.URL, error) { + return is.urlForResource(provider, "{shard}", shard.String()) +} + +// CacheClaim is used to cache a claim without publishing it to IPNI +// this is used to cache a location commitment that comes from a storage provider on blob/accept, without publishing, since the SP will publish it themselves +// (a delegation for a location commitment is already generated on blob/accept) +// ideally however, IPNI would enable UCAN chains for publishing so that we could publish it directly from the storage service +// it doesn't for now, so we let SPs publish the advertisement themselves and cache the claim directly with us +func (is *IndexingService) CacheClaim(ctx context.Context, claim delegation.Delegation) error { + return errors.New("not implemented") +} + +// PublishClaim caches and publishes a content claim +// I imagine publish claim to work as follows +// For all claims except index, just use the publish API on IPNIIndex +// For index claims, let's assume they fail if a location claim for the index car cid is not already published +// The service should look up the index cid location claim, and fetch the ShardedDagIndexView, then use the hashes inside +// to assemble all the multihashes in the index advertisement +func (is *IndexingService) PublishClaim(ctx context.Context, claim delegation.Delegation) error { + return errors.New("not implemented") +} + +// Option configures an IndexingService +type Option func(is *IndexingService) + +// WithConcurrency causes the indexing service to process find queries in parallel, with the given concurrency +func WithConcurrency(concurrency int) Option { + return func(is *IndexingService) { + is.jobWalker = parallelwalk.NewParallelWalk[job, queryState](concurrency) + } +} + +// NewIndexingService returns a new indexing service +func NewIndexingService(blobIndexLookup
BlobIndexLookup, claimLookup ClaimLookup, providerIndex ProviderIndex, options ...Option) *IndexingService { + is := &IndexingService{ + blobIndexLookup: blobIndexLookup, + claimLookup: claimLookup, + providerIndex: providerIndex, + jobWalker: singlewalk.SingleWalker[job, queryState], + } + for _, option := range options { + option(is) + } + return is } diff --git a/pkg/types/types.go b/pkg/types/types.go index 495a749..b2cc448 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -8,9 +8,9 @@ import ( "github.com/ipfs/go-cid" "github.com/ipni/go-libipni/find/model" mh "github.com/multiformats/go-multihash" - "github.com/storacha-network/go-ucanto/core/delegation" - "github.com/storacha-network/go-ucanto/did" - "github.com/storacha-network/indexing-service/pkg/blobindex" + "github.com/storacha/go-ucanto/core/delegation" + "github.com/storacha/go-ucanto/did" + "github.com/storacha/indexing-service/pkg/blobindex" ) // ContextID describes the data used to calculate a context id for IPNI @@ -42,8 +42,8 @@ type Cache[Key, Value any] interface { Get(ctx context.Context, key Key) (Value, error) } -// IPNIStore caches queries to IPNI -type IPNIStore Cache[mh.Multihash, []model.ProviderResult] +// ProviderStore caches queries to IPNI +type ProviderStore Cache[mh.Multihash, []model.ProviderResult] // ContentClaimsStore caches fetched content claims type ContentClaimsStore Cache[cid.Cid, delegation.Delegation]
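For reference, a minimal usage sketch of the service introduced in this change (assumptions: a local Redis at localhost:6379 with no password, the public https://cid.contact IPNI endpoint, and a hypothetical content CID; error handling reduced to panics):

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
	"github.com/storacha/indexing-service/pkg/service"
)

func main() {
	ctx := context.Background()
	// Construct wires the redis-backed caches, the IPNI find client, the provider
	// caching job queue, and the job walker (see pkg/service/construct.go)
	svc, shutdown, err := service.Construct(service.ServiceConfig{
		RedisURL:    "localhost:6379", // assumed local redis
		ProvidersDB: 0,
		ClaimsDB:    1,
		IndexesDB:   2,
		IndexerURL:  "https://cid.contact",
	})
	if err != nil {
		panic(err)
	}
	defer shutdown(ctx)

	// query for claims and indexes about a (hypothetical) content CID
	c, err := cid.Parse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
	if err != nil {
		panic(err)
	}
	result, err := svc.Query(ctx, service.Query{Hashes: []multihash.Multihash{c.Hash()}})
	if err != nil {
		panic(err)
	}
	fmt.Println("claims:", len(result.Claims()), "indexes:", len(result.Indexes()))
}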