diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 1d6619f270..45874fb45f 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -70,5 +70,8 @@ jobs: key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }} restore-keys: ${{ runner.os }}-go-mod + - name: Generate shared library for erasure + run: make compile-erasure + - name: Run integration tests run: go test -timeout=45m -tags integration ${{ matrix.packages }} diff --git a/Dockerfile b/Dockerfile index 98337d9650..6d5ad3a8ee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,6 +17,12 @@ RUN wget -qO- https://deb.nodesource.com/setup_14.x | bash - && \ RUN wget -O /usr/local/bin/subkey https://chainbridge.ams3.digitaloceanspaces.com/subkey-v2.0.0 && \ chmod +x /usr/local/bin/subkey +# Get Rust; NOTE: using sh for better compatibility with other base images +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y + +# Add .cargo/bin to PATH +ENV PATH="/root/.cargo/bin:${PATH}" + WORKDIR /go/src/github.com/ChainSafe/gossamer # Go dependencies @@ -26,6 +32,9 @@ RUN go mod download # Copy gossamer sources COPY . . +# Build erasure library +RUN cargo build --release --manifest-path=./lib/erasure/rustlib/Cargo.toml + # Build ARG GO_BUILD_FLAGS RUN go build \ @@ -43,5 +52,8 @@ EXPOSE 7001 8546 8540 ENTRYPOINT [ "/gossamer/bin/gossamer" ] +ENV LD_LIBRARY_PATH=/usr/local/lib + COPY chain /gossamer/chain +COPY --from=builder /go/src/github.com/ChainSafe/gossamer/lib/erasure/rustlib/target/release/liberasure.so /usr/local/lib/liberasure.so COPY --from=builder /go/src/github.com/ChainSafe/gossamer/bin/gossamer /gossamer/bin/gossamer diff --git a/Makefile b/Makefile index ddae5fb5bc..eaef052ec2 100644 --- a/Makefile +++ b/Makefile @@ -75,7 +75,7 @@ deps: go mod download ## build: Builds application binary and stores it in `./bin/gossamer` -build: +build: compile-erasure @echo " > \033[32mBuilding binary...\033[0m " go build -trimpath -o ./bin/gossamer -ldflags="-s -w" ./cmd/gossamer diff --git a/dot/parachain/availability-store/availability_store.go b/dot/parachain/availability-store/availability_store.go index 4717cdd7cf..f519f6a094 100644 --- a/dot/parachain/availability-store/availability_store.go +++ b/dot/parachain/availability-store/availability_store.go @@ -6,27 +6,70 @@ package availabilitystore import ( "context" "encoding/binary" - "encoding/json" "errors" "fmt" "sync" + "time" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/erasure" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/ChainSafe/gossamer/pkg/scale" ) var logger = log.NewFromGlobal(log.AddContext("pkg", "parachain-availability-store")) const ( - avaliableDataPrefix = "available" + availableDataPrefix = "available" chunkPrefix = "chunk" metaPrefix = "meta" unfinalizedPrefix = "unfinalized" pruneByTimePrefix = "prune_by_time" + + // Unavailable blocks are kept for 1 hour. + keepUnavailableFor = time.Hour + + // Finalized data is kept for 25 hours. + keepFinalizedFor = time.Hour * 25 + + // The pruning interval. 
+ pruningInterval = time.Minute * 5 ) +// BETimestamp is a unix time wrapper with big-endian encoding +type BETimestamp uint64 + +// ToBigEndianBytes returns the big-endian encoding of the timestamp +func (b BETimestamp) ToBigEndianBytes() []byte { + res := make([]byte, 8) + binary.BigEndian.PutUint64(res, uint64(b)) + return res +} + +type subsystemClock struct{} + +func (sc *subsystemClock) Now() BETimestamp { + return BETimestamp(time.Now().Unix()) +} + +// pruningConfig holds the pruning timing configuration. +// The only purpose of this structure is to use different timing +// configurations in production and in testing. +type pruningConfig struct { + keepUnavailableFor time.Duration + keepFinalizedFor time.Duration + pruningInterval time.Duration +} + +var defaultPruningConfig = pruningConfig{ + keepUnavailableFor: keepUnavailableFor, + keepFinalizedFor: keepFinalizedFor, + pruningInterval: pruningInterval, +} + // AvailabilityStoreSubsystem is the struct that holds subsystem data for the availability store type AvailabilityStoreSubsystem struct { ctx context.Context @@ -35,143 +78,319 @@ type AvailabilityStoreSubsystem struct { SubSystemToOverseer chan<- any OverseerToSubSystem <-chan any - availabilityStore AvailabilityStore + availabilityStore availabilityStore + pruningConfig pruningConfig + clock subsystemClock //TODO: pruningConfig PruningConfig //TODO: clock Clock //TODO: metrics Metrics } -// AvailabilityStore is the struct that holds data for the availability store -type AvailabilityStore struct { - availableTable database.Table - chunkTable database.Table - metaTable database.Table - //TODO: unfinalizedTable database.Table - //TODO: pruneByTimeTable database.Table +// availabilityStore is the struct that holds data for the availability store +type availabilityStore struct { + available database.Table + chunk database.Table + meta database.Table + unfinalized database.Table + pruneByTime database.Table +} + +type availabilityStoreBatch struct { + available database.Batch + chunk database.Batch + meta database.Batch + unfinalized database.Batch + pruneByTime database.Batch +} + +func newAvailabilityStoreBatch(as *availabilityStore) *availabilityStoreBatch { + return &availabilityStoreBatch{ + available: as.available.NewBatch(), + chunk: as.chunk.NewBatch(), + meta: as.meta.NewBatch(), + unfinalized: as.unfinalized.NewBatch(), + pruneByTime: as.pruneByTime.NewBatch(), + } +} + +// flush flushes all batches, resetting them if an error occurs during flushing +func (asb *availabilityStoreBatch) flush() error { + err := asb.flushAll() + if err != nil { + asb.reset() + } + return err +} + +// flushAll flushes every batch and returns the first error encountered +func (asb *availabilityStoreBatch) flushAll() error { + err := asb.available.Flush() + if err != nil { + return fmt.Errorf("writing available batch: %w", err) + } + err = asb.chunk.Flush() + if err != nil { + return fmt.Errorf("writing chunk batch: %w", err) + } + err = asb.meta.Flush() + if err != nil { + return fmt.Errorf("writing meta batch: %w", err) + } + err = asb.unfinalized.Flush() + if err != nil { + return fmt.Errorf("writing unfinalized batch: %w", err) + } + err = asb.pruneByTime.Flush() + if err != nil { + return fmt.Errorf("writing prune by time batch: %w", err) + } + return nil +} + +// reset resets all batches +func (asb *availabilityStoreBatch) reset() { + asb.available.Reset() + asb.chunk.Reset() + asb.meta.Reset() + asb.unfinalized.Reset() + asb.pruneByTime.Reset() } // NewAvailabilityStore creates a 
new instance of AvailabilityStore -func NewAvailabilityStore(db database.Database) *AvailabilityStore { - return &AvailabilityStore{ - availableTable: database.NewTable(db, avaliableDataPrefix), - chunkTable: database.NewTable(db, chunkPrefix), - metaTable: database.NewTable(db, metaPrefix), +func NewAvailabilityStore(db database.Database) *availabilityStore { + return &availabilityStore{ + available: database.NewTable(db, availableDataPrefix), + chunk: database.NewTable(db, chunkPrefix), + meta: database.NewTable(db, metaPrefix), + unfinalized: database.NewTable(db, unfinalizedPrefix), + pruneByTime: database.NewTable(db, pruneByTimePrefix), } } // loadAvailableData loads available data from the availability store -func (as *AvailabilityStore) loadAvailableData(candidate common.Hash) (*AvailableData, error) { - resultBytes, err := as.availableTable.Get(candidate[:]) +func (as *availabilityStore) loadAvailableData(candidate parachaintypes.CandidateHash) (*AvailableData, error) { + resultBytes, err := as.available.Get(candidate.Value[:]) if err != nil { - return nil, fmt.Errorf("getting candidate %v from available table: %w", candidate, err) + return nil, fmt.Errorf("getting candidate %v from available table: %w", candidate.Value, err) } result := AvailableData{} - err = json.Unmarshal(resultBytes, &result) + err = scale.Unmarshal(resultBytes, &result) if err != nil { return nil, fmt.Errorf("unmarshalling available data: %w", err) } return &result, nil } -// loadMetaData loads metadata from the availability store -func (as *AvailabilityStore) loadMetaData(candidate common.Hash) (*CandidateMeta, error) { - resultBytes, err := as.metaTable.Get(candidate[:]) +// loadMeta loads metadata from the availability store +func (as *availabilityStore) loadMeta(candidate parachaintypes.CandidateHash) (*CandidateMeta, error) { + resultBytes, err := as.meta.Get(candidate.Value[:]) if err != nil { - return nil, fmt.Errorf("getting candidate %v from available table: %w", candidate, err) + return nil, fmt.Errorf("getting candidate %v from meta table: %w", candidate.Value, err) } result := CandidateMeta{} - err = json.Unmarshal(resultBytes, &result) + err = scale.Unmarshal(resultBytes, &result) if err != nil { return nil, fmt.Errorf("unmarshalling candidate meta: %w", err) } return &result, nil } -// storeMetaData stores metadata in the availability store -func (as *AvailabilityStore) storeMetaData(candidate common.Hash, meta CandidateMeta) error { - dataBytes, err := json.Marshal(meta) - if err != nil { - return fmt.Errorf("marshalling meta for candidate: %w", err) - } - err = as.metaTable.Put(candidate[:], dataBytes) - if err != nil { - return fmt.Errorf("storing metadata for candidate %v: %w", candidate, err) - } - return nil -} - // loadChunk loads a chunk from the availability store -func (as *AvailabilityStore) loadChunk(candidate common.Hash, validatorIndex uint32) (*ErasureChunk, error) { - resultBytes, err := as.chunkTable.Get(append(candidate[:], uint32ToBytes(validatorIndex)...)) +func (as *availabilityStore) loadChunk(candidate parachaintypes.CandidateHash, validatorIndex uint32) (*ErasureChunk, + error) { + resultBytes, err := as.chunk.Get(append(candidate.Value[:], uint32ToBytes(validatorIndex)...)) if err != nil { - return nil, fmt.Errorf("getting candidate %v, index %d from chunk table: %w", candidate, validatorIndex, err) + return nil, fmt.Errorf("getting candidate %v, index %d from chunk table: %w", candidate.Value, validatorIndex, err) } result := ErasureChunk{} - err = 
json.Unmarshal(resultBytes, &result) + err = scale.Unmarshal(resultBytes, &result) if err != nil { return nil, fmt.Errorf("unmarshalling chunk: %w", err) } return &result, nil } -// storeChunk stores a chunk in the availability store -func (as *AvailabilityStore) storeChunk(candidate common.Hash, chunk ErasureChunk) error { - meta, err := as.loadMetaData(candidate) - if err != nil { +// storeChunk stores a chunk in the availability store; it returns true if the chunk was stored +// (or already present), false if the candidate is unknown to the store, and an error on internal failure. +func (as *availabilityStore) storeChunk(candidate parachaintypes.CandidateHash, chunk ErasureChunk) (bool, + error) { + batch := newAvailabilityStoreBatch(as) + meta, err := as.loadMeta(candidate) + if err != nil { if errors.Is(err, database.ErrNotFound) { - // TODO: were creating metadata here, but we should be doing it in the parachain block import? - // TODO: also we need to determine how many chunks we need to store - meta = &CandidateMeta{ - ChunksStored: make([]bool, 16), - } + // we weren't informed of this candidate by import events + return false, nil } else { - return fmt.Errorf("load metadata: %w", err) + return false, fmt.Errorf("load metadata: %w", err) } } if meta.ChunksStored[chunk.Index] { logger.Debugf("Chunk %d already stored", chunk.Index) - return nil // already stored - } else { - dataBytes, err := json.Marshal(chunk) - if err != nil { - return fmt.Errorf("marshalling chunk: %w", err) + return true, nil // already stored + } + + dataBytes, err := scale.Marshal(chunk) + if err != nil { + return false, fmt.Errorf("marshalling chunk for candidate %v, index %d: %w", candidate, chunk.Index, err) + } + err = batch.chunk.Put(append(candidate.Value[:], uint32ToBytes(chunk.Index)...), dataBytes) + if err != nil { + return false, fmt.Errorf("writing chunk for candidate %v, index %d: %w", candidate, chunk.Index, err) + } + + meta.ChunksStored[chunk.Index] = true + + dataBytes, err = scale.Marshal(*meta) + if err != nil { + return false, fmt.Errorf("marshalling meta for candidate: %w", err) + } + err = batch.meta.Put(candidate.Value[:], dataBytes) + if err != nil { + return false, fmt.Errorf("storing metadata for candidate %v: %w", candidate, err) + } + + err = batch.flush() + if err != nil { + return false, fmt.Errorf("writing batch: %w", err) + } + + logger.Debugf("stored chunk %d for %v", chunk.Index, candidate) + return true, nil +} + +func (as *availabilityStore) storeAvailableData(subsystem *AvailabilityStoreSubsystem, + candidate parachaintypes.CandidateHash, nValidators uint, data AvailableData, + expectedErasureRoot common.Hash) (bool, error) { + batch := newAvailabilityStoreBatch(as) + meta, err := as.loadMeta(candidate) + if err != nil && !errors.Is(err, database.ErrNotFound) { + return false, fmt.Errorf("load metadata: %w", err) + } + if meta != nil && meta.DataAvailable { + return true, nil // already stored + } + + meta = &CandidateMeta{} + + now := subsystem.clock.Now() + pruneAt := now + BETimestamp(subsystem.pruningConfig.keepUnavailableFor.Seconds()) + + pruneKey := append(pruneAt.ToBigEndianBytes(), candidate.Value[:]...) 
+ err = batch.pruneByTime.Put(pruneKey, nil) + if err != nil { + return false, fmt.Errorf("writing pruning key: %w", err) + } + + meta.State = NewStateVDT() + err = meta.State.Set(Unavailable{Timestamp: now}) + if err != nil { + return false, fmt.Errorf("setting state to unavailable: %w", err) + } + meta.DataAvailable = false + meta.ChunksStored = make([]bool, nValidators) + + dataEncoded, err := scale.Marshal(data) + if err != nil { + return false, fmt.Errorf("encoding data: %w", err) + } + + chunks, err := erasure.ObtainChunks(nValidators, dataEncoded) + if err != nil { + return false, fmt.Errorf("obtaining chunks: %w", err) + } + + branches, err := branchesFromChunks(chunks) + if err != nil { + return false, fmt.Errorf("creating branches from chunks: %w", err) + } + if branches.root != expectedErasureRoot { + return false, errInvalidErasureRoot + } + + for i, chunk := range chunks { + erasureChunk := ErasureChunk{ + Index: uint32(i), + Chunk: chunk, } - err = as.chunkTable.Put(append(candidate[:], uint32ToBytes(chunk.Index)...), dataBytes) + + dataBytes, err := scale.Marshal(erasureChunk) if err != nil { - return fmt.Errorf("storing chunk for candidate %v, index %d: %w", candidate, chunk.Index, err) + return false, fmt.Errorf("marshalling chunk for candidate %v, index %d: %w", candidate, erasureChunk.Index, err) } - - meta.ChunksStored[chunk.Index] = true - err = as.storeMetaData(candidate, *meta) + err = batch.chunk.Put(append(candidate.Value[:], uint32ToBytes(erasureChunk.Index)...), dataBytes) if err != nil { - return fmt.Errorf("storing metadata for candidate %v: %w", candidate, err) + return false, fmt.Errorf("writing chunk for candidate %v, index %d: %w", candidate, erasureChunk.Index, err) } + + meta.ChunksStored[i] = true + } + + meta.DataAvailable = true + meta.ChunksStored = make([]bool, nValidators) + for i := range meta.ChunksStored { + meta.ChunksStored[i] = true } - logger.Debugf("stored chuck %d for %v", chunk.Index, candidate) - return nil -} -// storeAvailableData stores available data in the availability store -func (as *AvailabilityStore) storeAvailableData(candidate common.Hash, data AvailableData) error { - dataBytes, err := json.Marshal(data) + dataBytes, err := scale.Marshal(meta) if err != nil { - return fmt.Errorf("marshalling available data: %w", err) + return false, fmt.Errorf("marshalling meta for candidate: %w", err) } - err = as.availableTable.Put(candidate[:], dataBytes) + err = batch.meta.Put(candidate.Value[:], dataBytes) if err != nil { - return fmt.Errorf("storing available data for candidate %v: %w", candidate, err) + return false, fmt.Errorf("storing metadata for candidate %v: %w", candidate, err) } - return nil + + dataBytes, err = scale.Marshal(data) + if err != nil { + return false, fmt.Errorf("marshalling available data: %w", err) + } + err = batch.available.Put(candidate.Value[:], dataBytes) + if err != nil { + return false, fmt.Errorf("storing available data for candidate %v: %w", candidate, err) + } + + err = batch.flush() + if err != nil { + return false, fmt.Errorf("writing batch: %w", err) + } + + logger.Debugf("stored data and chunks for %v", candidate.Value) + return true, nil } +// todo(ed) determine if this should be LittleEndian or BigEndian func uint32ToBytes(value uint32) []byte { result := make([]byte, 4) binary.LittleEndian.PutUint32(result, value) return result } +func uint32ToBytesBigEndian(value uint32) []byte { + result := make([]byte, 4) + binary.BigEndian.PutUint32(result, value) + return result +} + +func 
branchesFromChunks(chunks [][]byte) (branches, error) { + tr := trie.NewEmptyTrie() + + for i, chunk := range chunks { + err := tr.Put(uint32ToBytes(uint32(i)), common.MustBlake2bHash(chunk).ToBytes()) + if err != nil { + return branches{}, fmt.Errorf("putting chunk %d in trie: %w", i, err) + } + } + b := branches{ + trieStorage: tr, + root: tr.MustHash(), + chunks: chunks, + currentPos: 0, + } + return b, nil +} + // Run runs the availability store subsystem func (av *AvailabilityStoreSubsystem) Run(ctx context.Context, OverseerToSubsystem chan any, SubsystemToOverseer chan any) { @@ -272,7 +491,7 @@ func (av *AvailabilityStoreSubsystem) handleQueryAvailableData(msg QueryAvailabl } func (av *AvailabilityStoreSubsystem) handleQueryDataAvailability(msg QueryDataAvailability) error { - _, err := av.availabilityStore.loadMetaData(msg.CandidateHash) + _, err := av.availabilityStore.loadMeta(msg.CandidateHash) if err != nil { if errors.Is(err, database.ErrNotFound) { msg.Sender <- false @@ -296,7 +515,7 @@ func (av *AvailabilityStoreSubsystem) handleQueryChunk(msg QueryChunk) error { } func (av *AvailabilityStoreSubsystem) handleQueryChunkSize(msg QueryChunkSize) error { - meta, err := av.availabilityStore.loadMetaData(msg.CandidateHash) + meta, err := av.availabilityStore.loadMeta(msg.CandidateHash) if err != nil { return fmt.Errorf("load metadata: %w", err) } @@ -317,7 +536,7 @@ func (av *AvailabilityStoreSubsystem) handleQueryChunkSize(msg QueryChunkSize) e } func (av *AvailabilityStoreSubsystem) handleQueryAllChunks(msg QueryAllChunks) error { - meta, err := av.availabilityStore.loadMetaData(msg.CandidateHash) + meta, err := av.availabilityStore.loadMeta(msg.CandidateHash) if err != nil { msg.Sender <- []ErasureChunk{} return fmt.Errorf("load metadata: %w", err) } @@ -339,7 +558,7 @@ func (av *AvailabilityStoreSubsystem) handleQueryAllChunks(msg QueryAllChunks) e } func (av *AvailabilityStoreSubsystem) handleQueryChunkAvailability(msg QueryChunkAvailability) error { - meta, err := av.availabilityStore.loadMetaData(msg.CandidateHash) + meta, err := av.availabilityStore.loadMeta(msg.CandidateHash) if err != nil { msg.Sender <- false return fmt.Errorf("load metadata: %w", err) } @@ -349,7 +568,7 @@ func (av *AvailabilityStoreSubsystem) handleQueryChun } func (av *AvailabilityStoreSubsystem) handleStoreChunk(msg StoreChunk) error { - err := av.availabilityStore.storeChunk(msg.CandidateHash, msg.Chunk) + _, err := av.availabilityStore.storeChunk(msg.CandidateHash, msg.Chunk) if err != nil { msg.Sender <- err return fmt.Errorf("store chunk: %w", err) @@ -359,12 +578,27 @@ } func (av *AvailabilityStoreSubsystem) handleStoreAvailableData(msg StoreAvailableData) error { - err := av.availabilityStore.storeAvailableData(msg.CandidateHash.Value, msg.AvailableData) - if err != nil { + // TODO: add to metric on_chunks_received + + res, err := av.availabilityStore.storeAvailableData(av, msg.CandidateHash, uint(msg.NumValidators), + msg.AvailableData, + msg.ExpectedErasureRoot) + if res { + msg.Sender <- nil + return nil + } + if err != nil && errors.Is(err, errInvalidErasureRoot) { msg.Sender <- err return fmt.Errorf("store available data: %w", err) } - msg.Sender <- err // TODO: determine how to replicate Rust's Result type + if err != nil { + // We do not bubble up internal errors to caller subsystems; instead the + // tx channel is dropped and that error is caught by the caller subsystem. 
+ // + // We bubble up the specific error here so `av-store` logs still tell what + // happened. + return fmt.Errorf("store available data: %w", err) + } return nil } diff --git a/dot/parachain/availability-store/availability_store_test.go b/dot/parachain/availability-store/availability_store_test.go index a0bc7017a3..04ec82e328 100644 --- a/dot/parachain/availability-store/availability_store_test.go +++ b/dot/parachain/availability-store/availability_store_test.go @@ -4,6 +4,7 @@ package availabilitystore import ( + "bytes" "errors" "testing" @@ -11,6 +12,7 @@ import ( "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/pkg/scale" "github.com/stretchr/testify/require" ) @@ -31,52 +33,370 @@ var ( ParentHead: parachaintypes.HeadData{Data: []byte("parentHead")}, }, } + + testCandidateHash = parachaintypes.CandidateHash{Value: common.Hash{0x01}} ) func setupTestDB(t *testing.T) database.Database { inmemoryDB := state.NewInMemoryDB(t) as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + metaState := NewStateVDT() + err := metaState.Set(Unavailable{}) + require.NoError(t, err) + meta := CandidateMeta{ + State: metaState, + DataAvailable: false, + ChunksStored: []bool{false, false, false}, + } + + dataBytes, err := scale.Marshal(meta) + require.NoError(t, err) + err = batch.meta.Put(testCandidateHash.Value[:], dataBytes) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) - err := as.storeChunk(common.Hash{0x01}, testChunk1) + stored, err := as.storeChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, testChunk1) require.NoError(t, err) - err = as.storeChunk(common.Hash{0x01}, testChunk2) + require.Equal(t, true, stored) + stored, err = as.storeChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, testChunk2) require.NoError(t, err) + require.Equal(t, true, stored) - err = as.storeAvailableData(common.Hash{0x01}, testavailableData1) + batch = newAvailabilityStoreBatch(as) + dataBytes, err = scale.Marshal(testavailableData1) + require.NoError(t, err) + err = batch.available.Put(testCandidateHash.Value[:], dataBytes) + require.NoError(t, err) + + err = batch.flush() require.NoError(t, err) return inmemoryDB } -func TestAvailabilityStore_StoreLoadAvailableData(t *testing.T) { + +func TestAvailabilityStore_WriteLoadDeleteAvailableData(t *testing.T) { inmemoryDB := state.NewInMemoryDB(t) as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + + dataBytes, err := scale.Marshal(testavailableData1) + require.NoError(t, err) + err = batch.available.Put(testCandidateHash.Value[:], dataBytes) + require.NoError(t, err) - err := as.storeAvailableData(common.Hash{0x01}, testavailableData1) + err = batch.flush() require.NoError(t, err) - got, err := as.loadAvailableData(common.Hash{0x01}) + got, err := as.loadAvailableData(parachaintypes.CandidateHash{Value: common.Hash{0x01}}) require.NoError(t, err) require.Equal(t, &testavailableData1, got) - got, err = as.loadAvailableData(common.Hash{0x02}) + got, err = as.loadAvailableData(parachaintypes.CandidateHash{Value: common.Hash{0x02}}) require.EqualError(t, err, "getting candidate 0x0200000000000000000000000000000000000000000000000000000000000000"+ " from available table: pebble: not found") - var ExpectedAvailableData *AvailableData = nil - require.Equal(t, ExpectedAvailableData, got) + require.Equal(t, (*AvailableData)(nil), got) + + batch = 
newAvailabilityStoreBatch(as) + + err = batch.available.Del(testCandidateHash.Value[:]) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + got, err = as.loadAvailableData(parachaintypes.CandidateHash{Value: common.Hash{0x01}}) + require.EqualError(t, err, "getting candidate 0x0100000000000000000000000000000000000000000000000000000000000000"+ + " from available table: pebble: not found") + require.Equal(t, (*AvailableData)(nil), got) } -func TestAvailabilityStore_StoreLoadChuckData(t *testing.T) { +func TestAvailabilityStore_WriteLoadDeleteChunkData(t *testing.T) { inmemoryDB := state.NewInMemoryDB(t) as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + metaState := NewStateVDT() + err := metaState.Set(Unavailable{}) + require.NoError(t, err) + meta := CandidateMeta{ + State: metaState, + DataAvailable: false, + ChunksStored: []bool{false, false}, + } + dataBytes, err := scale.Marshal(meta) + require.NoError(t, err) + err = batch.meta.Put(parachaintypes.CandidateHash{Value: common.Hash{0x01}}.Value.ToBytes(), dataBytes) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) - err := as.storeChunk(common.Hash{0x01}, testChunk1) + got, err := as.storeChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, testChunk1) require.NoError(t, err) - err = as.storeChunk(common.Hash{0x01}, testChunk2) + require.Equal(t, true, got) + got, err = as.storeChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, testChunk2) require.NoError(t, err) + require.Equal(t, true, got) - resultChunk, err := as.loadChunk(common.Hash{0x01}, 0) + resultChunk, err := as.loadChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, 0) require.NoError(t, err) require.Equal(t, &testChunk1, resultChunk) + + resultChunk, err = as.loadChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, 1) + require.NoError(t, err) + require.Equal(t, &testChunk2, resultChunk) + + batch = newAvailabilityStoreBatch(as) + err = batch.chunk.Del(append(testCandidateHash.Value[:], uint32ToBytes(0)...)) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + resultChunk, err = as.loadChunk(parachaintypes.CandidateHash{Value: common.Hash{0x01}}, 0) + require.EqualError(t, err, "getting candidate 0x0100000000000000000000000000000000000000000000000000000000000000,"+ + " index 0 from chunk table: pebble: not found") + require.Equal(t, (*ErasureChunk)(nil), resultChunk) +} + +func TestAvailabilityStore_WriteLoadDeleteMeta(t *testing.T) { + inmemoryDB := state.NewInMemoryDB(t) + as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + metaState := NewStateVDT() + err := metaState.Set(Unavailable{}) + require.NoError(t, err) + meta := &CandidateMeta{ + State: metaState, + } + + dataBytes, err := scale.Marshal(*meta) + require.NoError(t, err) + err = batch.meta.Put(testCandidateHash.Value[:], dataBytes) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + got, err := as.loadMeta(parachaintypes.CandidateHash{Value: common.Hash{0x01}}) + require.NoError(t, err) + require.Equal(t, meta, got) + + batch = newAvailabilityStoreBatch(as) + + err = batch.meta.Del(testCandidateHash.Value[:]) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + got, err = as.loadMeta(parachaintypes.CandidateHash{Value: common.Hash{0x01}}) + require.EqualError(t, err, "getting candidate 0x0100000000000000000000000000000000000000000000000000000000000000"+ + " from meta table: pebble: 
not found") + require.Equal(t, (*CandidateMeta)(nil), got) +} + +func TestAvailabilityStore_WriteLoadDeleteUnfinalizedHeight(t *testing.T) { + inmemoryDB := state.NewInMemoryDB(t) + as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + blockNumber := parachaintypes.BlockNumber(1) + hash := common.Hash{0x02} + hash6 := common.Hash{0x06} + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{0x03}} + + key := append(uint32ToBytesBigEndian(uint32(blockNumber)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err := batch.unfinalized.Put(key, nil) + require.NoError(t, err) + + key = append(uint32ToBytesBigEndian(uint32(blockNumber)), hash6[:]...) + key = append(key, candidateHash.Value[:]...) + err = batch.unfinalized.Put(key, nil) + require.NoError(t, err) + key = append(uint32ToBytesBigEndian(uint32(0)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err = batch.unfinalized.Put(key, nil) + require.NoError(t, err) + key = append(uint32ToBytesBigEndian(uint32(2)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err = batch.unfinalized.Put(key, nil) + + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + // check that the key is written + key12 := append(uint32ToBytesBigEndian(uint32(blockNumber)), hash[:]...) + key12 = append(key12, candidateHash.Value[:]...) + + got, err := as.unfinalized.Get(key12) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + key16 := append(uint32ToBytesBigEndian(uint32(blockNumber)), hash6[:]...) + key16 = append(key16, candidateHash.Value[:]...) + + got, err = as.unfinalized.Get(key16) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + // delete height, (block 1) + batch = newAvailabilityStoreBatch(as) + keyPrefix := append([]byte(unfinalizedPrefix), uint32ToBytesBigEndian(uint32(blockNumber))...) + itr := as.unfinalized.NewIterator() + defer itr.Release() + + for itr.First(); itr.Valid(); itr.Next() { + comp := bytes.Compare(itr.Key()[0:len(keyPrefix)], keyPrefix) + if comp < 0 { + continue + } else if comp > 0 { + break + } + err := batch.unfinalized.Del(itr.Key()[len(unfinalizedPrefix):]) + require.NoError(t, err) + } + err = batch.flush() + require.NoError(t, err) + + // check that the key is deleted + got, err = as.unfinalized.Get(key12) + require.EqualError(t, err, "pebble: not found") + require.Equal(t, []byte(nil), got) + + got, err = as.unfinalized.Get(key16) + require.EqualError(t, err, "pebble: not found") + require.Equal(t, []byte(nil), got) + + // check that the other keys are not deleted + key = append(uint32ToBytesBigEndian(uint32(0)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + got, err = as.unfinalized.Get(key) + require.NoError(t, err) + require.Equal(t, []byte{}, got) +} + +func TestAvailabilityStore_WriteLoadDeleteUnfinalizedInclusion(t *testing.T) { + inmemoryDB := state.NewInMemoryDB(t) + as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + blockNumber := parachaintypes.BlockNumber(1) + hash := common.Hash{0x02} + hash6 := common.Hash{0x06} + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{0x03}} + key := append(uint32ToBytesBigEndian(uint32(blockNumber)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err := batch.unfinalized.Put(key, nil) + require.NoError(t, err) + key = append(uint32ToBytesBigEndian(uint32(blockNumber)), hash6[:]...) + key = append(key, candidateHash.Value[:]...) 
+ err = batch.unfinalized.Put(key, nil) + require.NoError(t, err) + key = append(uint32ToBytesBigEndian(uint32(0)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err = batch.unfinalized.Put(key, nil) + require.NoError(t, err) + key = append(uint32ToBytesBigEndian(uint32(2)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err = batch.unfinalized.Put(key, nil) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + // check that the key is written + key12 := append(uint32ToBytesBigEndian(uint32(blockNumber)), hash[:]...) + key12 = append(key12, candidateHash.Value[:]...) + + got, err := as.unfinalized.Get(key12) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + key16 := append(uint32ToBytesBigEndian(uint32(blockNumber)), hash6[:]...) + key16 = append(key16, candidateHash.Value[:]...) + + got, err = as.unfinalized.Get(key16) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + // delete inclusion, (block 1, hash 2) + batch = newAvailabilityStoreBatch(as) + key = append(uint32ToBytesBigEndian(uint32(blockNumber)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + err = batch.unfinalized.Del(key) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + // check that the key is deleted + got, err = as.unfinalized.Get(key12) + require.EqualError(t, err, "pebble: not found") + require.Equal(t, []byte(nil), got) + + // check that the other keys are not deleted + got, err = as.unfinalized.Get(key16) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + key = append(uint32ToBytesBigEndian(uint32(0)), hash[:]...) + key = append(key, candidateHash.Value[:]...) + got, err = as.unfinalized.Get(key) + require.NoError(t, err) + require.Equal(t, []byte{}, got) +} + +func TestAvailabilityStore_WriteDeletePruningKey(t *testing.T) { + inmemoryDB := state.NewInMemoryDB(t) + as := NewAvailabilityStore(inmemoryDB) + batch := newAvailabilityStoreBatch(as) + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{0x03}} + + pruneKey := append(BETimestamp(1).ToBigEndianBytes(), candidateHash.Value[:]...) + err := batch.pruneByTime.Put(pruneKey, nil) + require.NoError(t, err) + + pruneKey = append(BETimestamp(2).ToBigEndianBytes(), candidateHash.Value[:]...) + err = batch.pruneByTime.Put(pruneKey, nil) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + // check that the key is written + key1 := append(BETimestamp(1).ToBigEndianBytes(), candidateHash.Value[:]...) + + got, err := as.pruneByTime.Get(key1) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + key2 := append(BETimestamp(2).ToBigEndianBytes(), candidateHash.Value[:]...) + got, err = as.pruneByTime.Get(key2) + require.NoError(t, err) + require.Equal(t, []byte{}, got) + + // delete pruning key, timestamp 1 + batch = newAvailabilityStoreBatch(as) + pruneKey = append(BETimestamp(1).ToBigEndianBytes(), candidateHash.Value[:]...) 
+ err = batch.pruneByTime.Del(pruneKey) + require.NoError(t, err) + + err = batch.flush() + require.NoError(t, err) + + // check that the key is deleted + got, err = as.pruneByTime.Get(key1) + require.EqualError(t, err, "pebble: not found") + require.Equal(t, []byte(nil), got) + + // check that the other keys are not deleted + got, err = as.pruneByTime.Get(key2) + require.NoError(t, err) + require.Equal(t, []byte{}, got) } func TestAvailabilityStoreSubsystem_handleQueryAvailableData(t *testing.T) { @@ -95,7 +415,7 @@ func TestAvailabilityStoreSubsystem_handleQueryAvailableData(t *testing.T) { }{ "available_data_found": { msg: QueryAvailableData{ - CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, }, msgSenderChan: make(chan AvailableData), expectedResult: testavailableData1, @@ -103,7 +423,7 @@ func TestAvailabilityStoreSubsystem_handleQueryAvailableData(t *testing.T) { }, "available_data_not_found": { msg: QueryAvailableData{ - CandidateHash: common.Hash{0x07}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x07}}, }, msgSenderChan: make(chan AvailableData), expectedResult: AvailableData{}, @@ -150,7 +470,7 @@ func TestAvailabilityStoreSubsystem_handleQueryDataAvailability(t *testing.T) { }{ "data_available_true": { msg: QueryDataAvailability{ - CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, }, msgSenderChan: make(chan bool), expectedResult: true, @@ -158,7 +478,7 @@ func TestAvailabilityStoreSubsystem_handleQueryDataAvailability(t *testing.T) { }, "data_available_false": { msg: QueryDataAvailability{ - CandidateHash: common.Hash{0x07}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x07}}, }, msgSenderChan: make(chan bool), expectedResult: false, @@ -200,7 +520,7 @@ func TestAvailabilityStoreSubsystem_handleQueryChunk(t *testing.T) { }{ "chunk_found": { msg: QueryChunk{ - CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, }, msgSenderChan: make(chan ErasureChunk), expectedResult: testChunk1, @@ -208,7 +528,7 @@ func TestAvailabilityStoreSubsystem_handleQueryChunk(t *testing.T) { }, "query_chunk_not_found": { msg: QueryChunk{ - CandidateHash: common.Hash{0x07}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x07}}, }, msgSenderChan: make(chan ErasureChunk), expectedResult: ErasureChunk{}, @@ -255,7 +575,7 @@ func TestAvailabilityStoreSubsystem_handleQueryAllChunks(t *testing.T) { }{ "chunks_found": { msg: QueryAllChunks{ - CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, }, msgSenderChan: make(chan []ErasureChunk), expectedResult: []ErasureChunk{testChunk1, testChunk2}, @@ -263,13 +583,13 @@ func TestAvailabilityStoreSubsystem_handleQueryAllChunks(t *testing.T) { }, "query_chunks_not_found": { msg: QueryAllChunks{ - CandidateHash: common.Hash{0x07}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x07}}, }, msgSenderChan: make(chan []ErasureChunk), expectedResult: []ErasureChunk{}, err: errors.New( "load metadata: getting candidate 0x0700000000000000000000000000000000000000000000000000000000000000" + - " from available table: pebble: not found"), + " from meta table: pebble: not found"), }, } for name, tt := range tests { @@ -310,7 +630,7 @@ func TestAvailabilityStoreSubsystem_handleQueryChunkAvailability(t *testing.T) { }{ "query_chuck_availability_true": { msg: QueryChunkAvailability{ - 
CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, ValidatorIndex: 0, }, msgSenderChan: make(chan bool), @@ -318,7 +638,7 @@ func TestAvailabilityStoreSubsystem_handleQueryChunkAvailability(t *testing.T) { }, "query_chuck_availability_false": { msg: QueryChunkAvailability{ - CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, ValidatorIndex: 2, }, msgSenderChan: make(chan bool), @@ -326,14 +646,14 @@ func TestAvailabilityStoreSubsystem_handleQueryChunkAvailability(t *testing.T) { }, "query_chuck_availability_candidate_not_found_false": { msg: QueryChunkAvailability{ - CandidateHash: common.Hash{0x07}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x07}}, ValidatorIndex: 0, }, msgSenderChan: make(chan bool), expectedResult: false, err: errors.New( "load metadata: getting candidate 0x0700000000000000000000000000000000000000000000000000000000000000" + - " from available table: pebble: not found"), + " from meta table: pebble: not found"), }, } for name, tt := range tests { @@ -366,7 +686,7 @@ func TestAvailabilityStore_handleStoreChunk(t *testing.T) { } msgSenderChan := make(chan any) msg := StoreChunk{ - CandidateHash: common.Hash{0x01}, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, Chunk: testChunk1, Sender: msgSenderChan, } @@ -384,16 +704,86 @@ func TestAvailabilityStore_handleStoreAvailableData(t *testing.T) { } msgSenderChan := make(chan error) msg := StoreAvailableData{ - CandidateHash: parachaintypes.CandidateHash{ - Value: common.Hash{0x01}, - }, - NumValidators: 0, + CandidateHash: parachaintypes.CandidateHash{Value: common.Hash{0x01}}, + NumValidators: 2, AvailableData: AvailableData{}, - ExpectedErasureRoot: common.Hash{}, + ExpectedErasureRoot: common.MustHexToHash("0xc3d486f444a752cbf49857ceb2fce0a235268fb8b63e9e019eab619d192650bc"), Sender: msgSenderChan, } - go asSub.handleStoreAvailableData(msg) + go func() { + err := asSub.handleStoreAvailableData(msg) + require.NoError(t, err) + }() msgSenderChanResult := <-msg.Sender require.Equal(t, nil, msgSenderChanResult) } + +func TestAvailabilityStore_storeAvailableData(t *testing.T) { + t.Parallel() + type args struct { + candidate parachaintypes.CandidateHash + nValidators uint + data AvailableData + expectedErasureRoot common.Hash + } + tests := map[string]struct { + args args + want bool + err error + }{ + "empty_availableData": { + args: args{ + candidate: parachaintypes.CandidateHash{}, + nValidators: 0, + data: AvailableData{}, + expectedErasureRoot: common.Hash{}, + }, + want: false, + err: errors.New("obtaining chunks: expected at least 2 validators"), + }, + "2_validators": { + args: args{ + candidate: parachaintypes.CandidateHash{}, + nValidators: 2, + data: AvailableData{ + PoV: parachaintypes.PoV{BlockData: []byte{2}}, + }, + expectedErasureRoot: common.MustHexToHash("0x513489282098e960bfd57ed52d62838ce9395f3f59257f1f40fadd02261a7991"), + }, + want: true, + err: nil, + }, + "2_validators_error_erasure_root": { + args: args{ + candidate: parachaintypes.CandidateHash{}, + nValidators: 2, + data: AvailableData{ + PoV: parachaintypes.PoV{BlockData: []byte{2}}, + }, + expectedErasureRoot: common.Hash{}, + }, + want: false, + err: errInvalidErasureRoot, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + inmemoryDB := setupTestDB(t) + as := NewAvailabilityStore(inmemoryDB) + asSub := &AvailabilityStoreSubsystem{ + 
availabilityStore: *as, + } + got, err := as.storeAvailableData(asSub, tt.args.candidate, tt.args.nValidators, + tt.args.data, tt.args.expectedErasureRoot) + if tt.err == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, tt.err.Error()) + } + require.Equal(t, tt.want, got) + }) + } +} diff --git a/dot/parachain/availability-store/messages.go b/dot/parachain/availability-store/messages.go index 1486f4fc37..1435f6862f 100644 --- a/dot/parachain/availability-store/messages.go +++ b/dot/parachain/availability-store/messages.go @@ -4,16 +4,21 @@ package availabilitystore import ( + "errors" + "fmt" "time" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/pkg/scale" ) +var errInvalidErasureRoot = errors.New("invalid erasure root") + // QueryAvailableData query a AvailableData from the AV store type QueryAvailableData struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash Sender chan AvailableData } @@ -23,7 +28,7 @@ type QueryAvailableData struct { // matters, but we don't want to necessarily pass around multiple // megabytes of data to get a single bit of information. type QueryDataAvailability struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash Sender chan bool } @@ -36,20 +41,20 @@ type ErasureChunk struct { // QueryChunk query an `ErasureChunk` from the AV store by candidate hash and validator index type QueryChunk struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash ValidatorIndex uint32 Sender chan ErasureChunk } // QueryChunkSize get the size of an `ErasureChunk` from the AV store by candidate hash type QueryChunkSize struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash Sender chan uint32 } // QueryAllChunks query all chunks that we have for the given candidate hash type QueryAllChunks struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash Sender chan []ErasureChunk } @@ -59,14 +64,14 @@ type QueryAllChunks struct { // matters, but we don't want to necessarily pass around multiple // megabytes of data to get a single bit of information. type QueryChunkAvailability struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash ValidatorIndex uint32 Sender chan bool } // StoreChunk store an `ErasureChunk` in the AV store type StoreChunk struct { - CandidateHash common.Hash + CandidateHash parachaintypes.CandidateHash Chunk ErasureChunk Sender chan any } @@ -74,13 +79,9 @@ type StoreChunk struct { // StoreAvailableData computes and checks the erasure root of `AvailableData` // before storing its chunks in the AV store. type StoreAvailableData struct { - // A hash of the candidate this `ASMStoreAvailableData` belongs to. - CandidateHash parachaintypes.CandidateHash - // The number of validators in the session. - NumValidators uint32 - // The `AvailableData` itself. - AvailableData AvailableData - // Erasure root we expect to get after chunking. + CandidateHash parachaintypes.CandidateHash + NumValidators uint32 + AvailableData AvailableData ExpectedErasureRoot common.Hash // channel to send result to. 
Sender chan error @@ -102,15 +103,31 @@ type CandidateMeta struct { // State is the state of candidate data type State scale.VaryingDataType +// New will enable scale to create a new instance when needed +func (State) New() State { + return NewStateVDT() +} + -// NewState creates a new State -func NewState() State { +// NewStateVDT creates a new State +func NewStateVDT() State { vdt := scale.MustNewVaryingDataType(Unavailable{}, Unfinalized{}, Finalized{}) return State(vdt) } +// Set will set VaryingDataTypeValue using underlying VaryingDataType +func (s *State) Set(val scale.VaryingDataTypeValue) (err error) { + vdt := scale.VaryingDataType(*s) + err = vdt.Set(val) + if err != nil { + return fmt.Errorf("setting value to varying data type: %w", err) + } + *s = State(vdt) + return nil +} + -// Unavailable candidate data was first observed at the given time but in not available in any black +// Unavailable candidate data was first observed at the given time but is not available in any block type Unavailable struct { - Timestamp time.Time + Timestamp BETimestamp } // Index returns the index of the varying data type @@ -148,3 +165,10 @@ type BlockNumberHash struct { blockNumber parachaintypes.BlockNumber //nolint:unused,structcheck blockHash common.Hash //nolint:unused,structcheck } + +type branches struct { + trieStorage *trie.Trie + root common.Hash + chunks [][]byte + currentPos uint +} diff --git a/dot/parachain/availability-store/register.go b/dot/parachain/availability-store/register.go index 932945b405..b77e81df4b 100644 --- a/dot/parachain/availability-store/register.go +++ b/dot/parachain/availability-store/register.go @@ -9,6 +9,7 @@ func Register(overseerChan chan<- any, st *state.Service) (*AvailabilityStoreSub availabilityStore := NewAvailabilityStore(st.DB()) availabilityStoreSubsystem := AvailabilityStoreSubsystem{ + pruningConfig: defaultPruningConfig, SubSystemToOverseer: overseerChan, availabilityStore: *availabilityStore, }
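Reviewer note on the intended call pattern: handleStoreAvailableData replies on Sender only with nil (success) or errInvalidErasureRoot (root mismatch); internal failures are logged by the subsystem and the channel is never written to. A minimal caller-side sketch of the round trip, assuming an overseer channel wired into Run; the function name storeAndVerify and the timeout are illustrative, not part of this PR:

package example

import (
	"errors"
	"time"

	availabilitystore "github.com/ChainSafe/gossamer/dot/parachain/availability-store"
	parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types"
	"github.com/ChainSafe/gossamer/lib/common"
)

// storeAndVerify sends StoreAvailableData to a running availability-store
// subsystem and waits for the outcome. A nil reply means the data and all
// chunks were persisted; an invalid-erasure-root error comes back on a root
// mismatch. Internal errors are never sent, so a real caller needs its own
// timeout rather than a bare receive.
func storeAndVerify(overseerToSubsystem chan<- any, candidate parachaintypes.CandidateHash,
	nValidators uint32, data availabilitystore.AvailableData, expectedRoot common.Hash) error {
	resultChan := make(chan error)
	overseerToSubsystem <- availabilitystore.StoreAvailableData{
		CandidateHash:       candidate,
		NumValidators:       nValidators,
		AvailableData:       data,
		ExpectedErasureRoot: expectedRoot,
		Sender:              resultChan,
	}
	select {
	case err := <-resultChan:
		return err
	case <-time.After(30 * time.Second): // arbitrary illustrative timeout
		return errors.New("availability store did not reply")
	}
}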
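For reference, the store-and-verify path in storeAvailableData boils down to: SCALE-encode the data, split it into one erasure chunk per validator, hash each chunk into a trie keyed by its little-endian index, and require the trie root to equal the erasure root from the candidate receipt. A condensed sketch of just the verification half, using the same lib/erasure, lib/trie and pkg/scale calls as the diff; verifyErasureRoot is a hypothetical helper, not code in this PR:

package example

import (
	"encoding/binary"
	"fmt"

	"github.com/ChainSafe/gossamer/lib/common"
	"github.com/ChainSafe/gossamer/lib/erasure"
	"github.com/ChainSafe/gossamer/lib/trie"
	"github.com/ChainSafe/gossamer/pkg/scale"
)

// verifyErasureRoot encodes the data, obtains nValidators erasure chunks,
// builds a trie of Blake2b chunk hashes keyed by chunk index, and compares
// the trie root against the expected erasure root.
func verifyErasureRoot(data any, nValidators uint, expected common.Hash) error {
	encoded, err := scale.Marshal(data)
	if err != nil {
		return fmt.Errorf("encoding data: %w", err)
	}
	chunks, err := erasure.ObtainChunks(nValidators, encoded)
	if err != nil {
		return fmt.Errorf("obtaining chunks: %w", err)
	}
	tr := trie.NewEmptyTrie()
	for i, chunk := range chunks {
		key := make([]byte, 4)
		binary.LittleEndian.PutUint32(key, uint32(i))
		if err := tr.Put(key, common.MustBlake2bHash(chunk).ToBytes()); err != nil {
			return fmt.Errorf("putting chunk %d in trie: %w", i, err)
		}
	}
	if root := tr.MustHash(); root != expected {
		return fmt.Errorf("invalid erasure root: got %s, want %s", root, expected)
	}
	return nil
}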
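One design choice worth spelling out, since the diff leaves a todo about endianness: pruning keys are the big-endian BETimestamp followed by the candidate hash, and big-endian integers sort lexicographically in numeric order, so an iterator over pruneByTime visits entries oldest-first and a pruner can stop at the first key whose timestamp prefix is beyond "now". Chunk keys, by contrast, only need uniqueness, which is why little-endian is harmless there. A self-contained sketch (the BETimestamp type is copied from this diff so the snippet runs standalone):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// BETimestamp mirrors the wrapper added in this diff: a unix timestamp
// serialised big-endian so that byte order equals numeric order.
type BETimestamp uint64

// ToBigEndianBytes returns the big-endian encoding of the timestamp
func (b BETimestamp) ToBigEndianBytes() []byte {
	res := make([]byte, 8)
	binary.BigEndian.PutUint64(res, uint64(b))
	return res
}

func main() {
	older := BETimestamp(1_000_000).ToBigEndianBytes()
	newer := BETimestamp(2_000_000).ToBigEndianBytes()
	// bytes.Compare reflects the order a key-sorted store iterates in;
	// big-endian encoding guarantees the older timestamp sorts first.
	fmt.Println(bytes.Compare(older, newer)) // prints -1
}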