From d6f3daaf306d298f4267faa917201146b5352b62 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Thu, 14 Sep 2023 11:47:32 -0500
Subject: [PATCH 001/126] Inline child array/map data slab into parent slab

Currently, every array or map is stored in its own slab, and a parent
slab refers to a child array or map by SlabID.

The current approach can lead to many small slabs, especially for
Cadence data structures with multiple nested levels.

This commit inlines a child array/map in its parent slab when:
- the child array/map fits in one slab (its root slab is a data slab)
- the encoded size of the inlined child array/map is less than the max
  inline size limit enforced by the parent

This commit optimizes encoding size by:
- reusing inlined array types
- reusing seed, digests, and field names of inlined composite types

Also update debugging code to handle inlined array/map elements.
---
 array.go               |   657 ++-
 array_debug.go         |   236 +-
 array_test.go          |  4959 ++++++++++++-----
 basicarray.go          |    15 +-
 cmd/main/main.go       |    10 +-
 cmd/stress/storable.go |     2 +-
 cmd/stress/typeinfo.go |    10 +
 encode.go              |    33 +-
 map.go                 |   885 ++-
 map_debug.go           |   103 +-
 map_test.go            | 11229 +++++++++++++++++++++++++++++----------
 storable.go            |    17 +
 storable_test.go       |    69 +-
 storage.go             |    20 +
 storage_test.go        |     2 -
 typeinfo.go            |   343 ++
 utils_test.go          |   133 +-
 value.go               |     9 +
 18 files changed, 14336 insertions(+), 4396 deletions(-)

diff --git a/array.go b/array.go
index f661e9c4..03d35617 100644
--- a/array.go
+++ b/array.go
@@ -19,6 +19,7 @@ package atree

 import (
+	"bytes"
 	"encoding/binary"
 	"fmt"
 	"math"
@@ -57,6 +58,15 @@ const (

 	// 32 is faster than 24 and 40.
 	linearScanThreshold = 32
+
+	// inlined array data slab prefix size:
+	// tag number (2 bytes) +
+	// 3-element array head (1 byte) +
+	// extra data ref index (2 bytes) [0, 255] +
+	// value ID index head (1 byte) +
+	// value ID index (8 bytes) +
+	// element array head (3 bytes)
+	inlinedArrayDataSlabPrefixSize = 2 + 1 + 2 + 1 + 8 + arrayDataSlabElementHeadSize
 )

 type ArraySlabHeader struct {
@@ -69,6 +79,8 @@ type ArrayExtraData struct {
 	TypeInfo TypeInfo // array type
 }

+var _ ExtraData = &ArrayExtraData{}
+
 // ArrayDataSlab is leaf node, implementing ArraySlab.
 type ArrayDataSlab struct {
 	next SlabID
@@ -78,6 +90,10 @@ type ArrayDataSlab struct {
 	// extraData is data that is prepended to encoded slab data.
 	// It isn't included in slab size calculation for splitting and merging.
 	extraData *ArrayExtraData
+
+	// inlined indicates whether this slab is stored inlined in its parent slab.
+	// This flag affects Encode(), ByteSize(), etc.
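+	// It is set to true when the root data slab is inlined into its parent by
+	// Array.Storable() or decoded via DecodeInlinedArrayStorable(), and set
+	// back to false when the slab outgrows the max inline size and is stored
+	// standalone again.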
+ inlined bool } func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { @@ -85,8 +101,9 @@ func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { return nil, NewNotValueError(a.SlabID()) } return &Array{ - Storage: storage, - root: a, + Storage: storage, + root: a, + mutableElementIndex: make(map[ValueID]uint64), }, nil } @@ -113,8 +130,9 @@ func (a *ArrayMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) { return nil, NewNotValueError(a.SlabID()) } return &Array{ - Storage: storage, - root: a, + Storage: storage, + root: a, + mutableElementIndex: make(map[ValueID]uint64), }, nil } @@ -142,24 +160,35 @@ type ArraySlab interface { SetExtraData(*ArrayExtraData) PopIterate(SlabStorage, ArrayPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool } // Array is tree type Array struct { Storage SlabStorage root ArraySlab + + // parentUpdater is a callback that notifies parent container when this array is modified. + // If this callback is null, this array has no parent. Otherwise, this array has parent + // and this callback must be used when this array is changed by Append, Insert, Set, + // Remove, etc. + parentUpdater parentUpdater + + // mutableElementIndex tracks index of mutable element, such as Array and OrderedMap. + // This is needed by mutable element to properly update itself through parentUpdater. + // TODO: maybe optimize by replacing map to get faster updates. + mutableElementIndex map[ValueID]uint64 } var _ Value = &Array{} +var _ valueNotifier = &Array{} func (a *Array) Address() Address { return a.root.SlabID().address } -func (a *Array) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { - return SlabIDStorable(a.SlabID()), nil -} - const arrayExtraDataLength = 1 func newArrayExtraDataFromData( @@ -208,6 +237,10 @@ func newArrayExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) return &ArrayExtraData{TypeInfo: typeInfo}, nil } +func (a *ArrayExtraData) isExtraData() bool { + return true +} + // Encode encodes extra data as CBOR array: // // [type info] @@ -353,25 +386,26 @@ func newArrayDataSlabFromDataV0( return nil, NewDecodingError(err) } + // Compute slab size for version 1. + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable - } - - // Compute slab size for version 1. 
- slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize + slabSize += storable.ByteSize() } header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -387,21 +421,22 @@ func newArrayDataSlabFromDataV0( // // Root DataSlab Header: // -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// +-------------------------------+------------+---------------------------------+ +// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | +// +-------------------------------+------------+---------------------------------+ // -// Non-root DataSlab Header (18 bytes): +// Non-root DataSlab Header: // -// +-------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) | -// +-------------------------------+-----------------------------+ +// +-------------------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded array of elements // // See ArrayExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. func newArrayDataSlabFromDataV1( id SlabID, h head, @@ -415,6 +450,7 @@ func newArrayDataSlabFromDataV1( ) { var err error var extraData *ArrayExtraData + var inlinedExtraData []ExtraData var next SlabID // Decode extra data @@ -426,6 +462,20 @@ func newArrayDataSlabFromDataV1( } } + // Decode inlined slab extra data + if h.hasInlinedSlabs() { + inlinedExtraData, data, err = newInlinedExtraDataFromData( + data, + decMode, + decodeStorable, + decodeTypeInfo, + ) + if err != nil { + // err is categorized already by newInlinedExtraDataFromData. + return nil, err + } + } + // Decode next slab ID if h.hasNextSlabID() { next, err = NewSlabIDFromRawBytes(data) @@ -450,14 +500,20 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingError(err) } + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable + slabSize += storable.ByteSize() } // Check if data reached EOF @@ -465,15 +521,9 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingErrorf("data has %d bytes of extraneous data for array data slab", len(data)-cborDec.NumBytesDecoded()) } - // Compute slab size for version 1. - slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize - } - header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -482,30 +532,234 @@ func newArrayDataSlabFromDataV1( header: header, elements: elements, extraData: extraData, + inlined: false, // this function is only called when slab is not inlined. 
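+		// (DecodeInlinedArrayStorable below constructs the inlined
+		// counterpart with inlined set to true.)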
+	}, nil
+}
+
+// DecodeInlinedArrayStorable decodes an inlined array data slab. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedArray, and tag content
+// as a 3-element array:
+//
+// - index of inlined extra data
+// - value ID index
+// - CBOR array of elements
+//
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and the decoder only contains tag content.
+func DecodeInlinedArrayStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedArrayDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedArrayDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect %d elements, got %d elements",
+				inlinedArrayDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+
+	extraData, ok := inlinedExtraData[extraDataIndex].(*ArrayExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect *ArrayExtraData, got %T",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index [8]byte
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, index)
+
+	// Decode array elements (CBOR array)
+	elemCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	size := uint32(inlinedArrayDataSlabPrefixSize)
+
+	elements := make([]Storable, elemCount)
+	for i := 0; i < int(elemCount); i++ {
+		storable, err := decodeStorable(dec, slabID, inlinedExtraData)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback.
+			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element")
+		}
+		elements[i] = storable
+
+		size += storable.ByteSize()
+	}
+
+	header := ArraySlabHeader{
+		slabID: slabID,
+		size:   size,
+		count:  uint32(elemCount),
+	}
+
+	return &ArrayDataSlab{
+		header:    header,
+		elements:  elements,
+		extraData: extraData,
+		inlined:   true,
+	}, nil
+}
+
+// encodeAsInlined encodes inlined array data slab. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedArray,
+// and tag content as a 3-element array:
+//
+// - index of inlined extra data
+// - value ID index
+// - CBOR array of elements
+func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+	if a.extraData == nil {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode non-root array data slab as inlined"))
+	}
+
+	if !a.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode standalone array data slab as inlined"))
+	}
+
+	extraDataIndex := inlinedTypeInfo.addArrayExtraData(a.extraData)
+
+	if extraDataIndex > 255 {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit 255", extraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedArray,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed-size CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(a.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: array elements
+	err = a.encodeElements(enc, inlinedTypeInfo)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
 // Encode encodes this array data slab to the given encoder.
 //
 // Root DataSlab Header:
 //
-// +-------------------------------+------------+
-// | slab version + flag (2 bytes) | extra data |
-// +-------------------------------+------------+
+// +-------------------------------+------------+---------------------------------+
+// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) |
+// +-------------------------------+------------+---------------------------------+
 //
-// Non-root DataSlab Header (18 bytes):
+// Non-root DataSlab Header:
 //
-// +-------------------------------+-----------------------------+
-// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) |
-// +-------------------------------+-----------------------------+
+// +-------------------------------+---------------------------------+-----------------------------+
+// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) |
+// +-------------------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 //	CBOR encoded array of elements
 //
 // See ArrayExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 func (a *ArrayDataSlab) Encode(enc *Encoder) error {
+	if a.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined array data slab as standalone slab"))
+	}
+
+	// Encoding is done in two steps:
+	//
+	// 1. Encode array elements using a new buffer while collecting inlined extra data from inlined elements.
+	// 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer.
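+	//
+	// As an illustrative sketch (bytes taken from the "inlined child array of
+	// same type" test vector in array_test.go), a child array [0] inlined at
+	// extra data index 0 with slab index 2 is produced by step 1 as:
+	//
+	//	0xd8, 0xfa,       // tag number CBORTagInlinedArray
+	//	0x83,             // array head of 3 elements
+	//	0x18, 0x00,       // element 0: inlined extra data index 0
+	//	0x48,             // element 1: byte string head (8 bytes)
+	//	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // slab index
+	//	0x99, 0x00, 0x01, // element 2: array head (1 element)
+	//	0xd8, 0xa4, 0x00, // Uint64Value(0)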
+ + inlinedTypes := newInlinedExtraData() + + // TODO: maybe use a buffer pool + var elementBuf bytes.Buffer + elementEnc := NewEncoder(&elementBuf, enc.encMode) + + err := a.encodeElements(elementEnc, inlinedTypes) + if err != nil { + // err is already categorized by Array.encodeElements(). + return err + } + + err = elementEnc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + const version = 1 h, err := newArraySlabHead(version, slabArrayData) @@ -525,15 +779,18 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { h.setRoot() } + if !inlinedTypes.empty() { + h.setHasInlinedSlabs() + } + // Encode head (version + flag) _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) } - // Encode header + // Encode extra data if a.extraData != nil { - // Encode extra data err = a.extraData.Encode(enc) if err != nil { // err is already categorized by ArrayExtraData.Encode(). @@ -541,6 +798,15 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } } + // Encode inlined extra data + if !inlinedTypes.empty() { + err = inlinedTypes.Encode(enc) + if err != nil { + // err is already categorized by inlinedExtraData.Encode(). + return err + } + } + // Encode next slab ID if a.next != SlabIDUndefined { n, err := a.next.ToRawBytes(enc.Scratch[:]) @@ -555,6 +821,21 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } } + // Encode elements by copying raw bytes from previous buffer + err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes()) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { // Encode CBOR array size manually for fix-sized encoding enc.Scratch[0] = 0x80 | 25 @@ -568,14 +849,14 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { // Write scratch content to encoder totalSize := countOffset + countSize - _, err = enc.Write(enc.Scratch[:totalSize]) + err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize]) if err != nil { return NewEncodingError(err) } // Encode data slab content (array of elements) for _, e := range a.elements { - err = e.Encode(enc) + err = encodeStorableAsElement(enc, e, inlinedTypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element") @@ -590,6 +871,35 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { return nil } +func (a *ArrayDataSlab) Inlined() bool { + return a.inlined +} + +// Inlinable returns true if +// - array data slab is root slab +// - size of inlined array data slab <= maxInlineSize +func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool { + if a.extraData == nil { + // Non-root data slab is not inlinable. + return false + } + + // At this point, this data slab is either + // - inlined data slab, or + // - not inlined root data slab + + // Compute inlined size from cached slab size + inlinedSize := a.header.size + if !a.inlined { + inlinedSize = inlinedSize - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + } + + // Inlined byte size must be less than max inline size. 
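+	//
+	// For example (an illustrative calculation): with
+	// inlinedArrayDataSlabPrefixSize = 2+1+2+1+8+3 = 17, a standalone root
+	// data slab of header.size S would occupy S - arrayRootDataSlabPrefixSize
+	// + 17 bytes when inlined, since only the slab prefix differs; the encoded
+	// elements are identical in both forms.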
+ return uint64(inlinedSize) <= maxInlineSize +} + func (a *ArrayDataSlab) hasPointer() bool { for _, e := range a.elements { if hasPointer(e) { @@ -606,6 +916,9 @@ func (a *ArrayDataSlab) ChildStorables() []Storable { } func (a *ArrayDataSlab) getPrefixSize() uint32 { + if a.inlined { + return inlinedArrayDataSlabPrefixSize + } if a.extraData != nil { return arrayRootDataSlabPrefixSize } @@ -644,10 +957,12 @@ func (a *ArrayDataSlab) Set(storage SlabStorage, address Address, index uint64, a.header.size = size - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return oldElem, nil @@ -675,10 +990,12 @@ func (a *ArrayDataSlab) Insert(storage SlabStorage, address Address, index uint6 a.header.count++ a.header.size += storable.ByteSize() - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return nil @@ -705,10 +1022,12 @@ func (a *ArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, err a.header.count-- a.header.size -= v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return v, nil @@ -2201,7 +2520,15 @@ func (a *ArrayMetaDataSlab) CanLendToRight(size uint32) bool { return a.header.size-arraySlabHeaderSize*n > uint32(minThreshold) } -func (a ArrayMetaDataSlab) IsData() bool { +func (a *ArrayMetaDataSlab) Inlined() bool { + return false +} + +func (a *ArrayMetaDataSlab) Inlinable(_ uint64) bool { + return false +} + +func (a *ArrayMetaDataSlab) IsData() bool { return false } @@ -2314,8 +2641,9 @@ func NewArray(storage SlabStorage, address Address, typeInfo TypeInfo) (*Array, } return &Array{ - Storage: storage, - root: root, + Storage: storage, + root: root, + mutableElementIndex: make(map[ValueID]uint64), }, nil } @@ -2336,22 +2664,98 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) { } return &Array{ - Storage: storage, - root: root, + Storage: storage, + root: root, + mutableElementIndex: make(map[ValueID]uint64), }, nil } +// TODO: maybe optimize this +func (a *Array) incrementIndexFrom(index uint64) { + for id, i := range a.mutableElementIndex { + if i >= index { + a.mutableElementIndex[id]++ + } + } +} + +// TODO: maybe optimize this +func (a *Array) decrementIndexFrom(index uint64) { + for id, i := range a.mutableElementIndex { + if i > index { + a.mutableElementIndex[id]-- + } + } +} + +func (a *Array) getIndexByValueID(id ValueID) (uint64, bool) { + index, exist := a.mutableElementIndex[id] + return index, exist +} + +func (a *Array) setParentUpdater(f parentUpdater) { + a.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value so +// parent array a can be notified when child value is modified. +func (a *Array) setCallbackWithChild(i uint64, child Value) { + c, ok := child.(valueNotifier) + if !ok { + return + } + + vid := c.ValueID() + + // Index i will be updated with array operations, which affects element index. + a.mutableElementIndex[vid] = i + + c.setParentUpdater(func() error { + + // Get latest index by child value ID. + index, exist := a.getIndexByValueID(vid) + if !exist { + return NewFatalError(fmt.Errorf("failed to get index for child element with value id %s", vid)) + } + + // Set child value with parent array using updated index. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := a.Set(index, c) + if err != nil { + return err + } + + if existingValueStorable == nil { + return NewFatalError(fmt.Errorf("failed to reset child value in parent updater callback because previous value is nil")) + } + + return nil + }) +} + +// notifyParentIfNeeded calls parent updater if this array is a child value. +func (a *Array) notifyParentIfNeeded() error { + if a.parentUpdater == nil { + return nil + } + return a.parentUpdater() +} + func (a *Array) Get(i uint64) (Value, error) { storable, err := a.root.Get(a.Storage, i) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). return nil, err } + v, err := storable.StoredValue(a.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + a.setCallbackWithChild(i, v) + return v, nil } @@ -2382,6 +2786,11 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { } } + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return existingStorable, nil } @@ -2402,7 +2811,9 @@ func (a *Array) Insert(index uint64, value Value) error { return a.splitRoot() } - return nil + a.incrementIndexFrom(index) + + return a.notifyParentIfNeeded() } func (a *Array) Remove(index uint64) (Storable, error) { @@ -2424,6 +2835,13 @@ func (a *Array) Remove(index uint64) (Storable, error) { } } + a.decrementIndexFrom(index) + + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return storable, nil } @@ -2534,6 +2952,89 @@ func (a *Array) promoteChildAsNewRoot(childID SlabID) error { return nil } +func (a *Array) Inlined() bool { + return a.root.Inlined() +} + +// Storable returns array a as either: +// - SlabIDStorable, or +// - inlined data slab storable +func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) { + + inlined := a.root.Inlined() + inlinable := a.root.Inlinable(maxInlineSize) + + if inlinable && inlined { + // Root slab is inlinable and was inlined. + // Return root slab as storable, no size adjustment and change to storage. + return a.root, nil + } + + if !inlinable && !inlined { + // Root slab is not inlinable and was not inlined. + // Return root slab ID as storable, no size adjustment and change to storage. + return SlabIDStorable(a.SlabID()), nil + } + + if inlinable && !inlined { + // Root slab is inlinable and was NOT inlined. + + // Inline root data slab. + + // Inlineable root slab must be data slab. + rootDataSlab, ok := a.root.(*ArrayDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlinable array slab type %T", a.root)) + } + + rootID := rootDataSlab.header.slabID + + // Remove root slab from storage because it is going to be inlined. + err := a.Storage.Remove(rootID) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", rootID)) + } + + // Update root data slab size as inlined slab. + rootDataSlab.header.size = rootDataSlab.header.size - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + + // Update root data slab inlined status. + rootDataSlab.inlined = true + + return rootDataSlab, nil + } + + // here, root slab is NOT inlinable and was previously inlined. + + // Un-inline root slab. + + // Inlined root slab must be data slab. + rootDataSlab, ok := a.root.(*ArrayDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlined array slab type %T", a.root)) + } + + // Update root data slab size + rootDataSlab.header.size = rootDataSlab.header.size - + inlinedArrayDataSlabPrefixSize + + arrayRootDataSlabPrefixSize + + // Update root data slab inlined status. + rootDataSlab.inlined = false + + // Store root slab in storage + err := a.Storage.Store(rootDataSlab.header.slabID, a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.SlabID())) + } + + return SlabIDStorable(a.SlabID()), nil +} + var emptyArrayIterator = &ArrayIterator{} type ArrayIterator struct { @@ -2716,17 +3217,14 @@ func (a *Array) Count() uint64 { } func (a *Array) SlabID() SlabID { + if a.root.Inlined() { + return SlabIDUndefined + } return a.root.SlabID() } func (a *Array) ValueID() ValueID { - sid := a.SlabID() - - var id ValueID - copy(id[:], sid.address[:]) - copy(id[8:], sid.index[:]) - - return id + return slabIDToValueID(a.root.SlabID()) } func (a *Array) Type() TypeInfo { @@ -2831,20 +3329,30 @@ func (a *Array) PopIterate(fn ArrayPopIterationFunc) error { extraData := a.root.ExtraData() + inlined := a.root.Inlined() + + size := uint32(arrayRootDataSlabPrefixSize) + if inlined { + size = inlinedArrayDataSlabPrefixSize + } + // Set root to empty data slab a.root = &ArrayDataSlab{ header: ArraySlabHeader{ slabID: rootID, - size: arrayRootDataSlabPrefixSize, + size: size, }, extraData: extraData, + inlined: inlined, } // Save root slab - err = a.Storage.Store(a.root.SlabID(), a.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + if !a.Inlined() { + err = a.Storage.Store(a.root.SlabID(), a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + } } return nil @@ -3007,8 +3515,9 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn } return &Array{ - Storage: storage, - root: root, + Storage: storage, + root: root, + mutableElementIndex: make(map[ValueID]uint64), }, nil } diff --git a/array_debug.go b/array_debug.go index 64cf0a07..9c18cbb1 100644 --- a/array_debug.go +++ b/array_debug.go @@ -66,12 +66,9 @@ func GetArrayStats(a *Array) (ArrayStats, error) { if slab.IsData() { dataSlabCount++ - childStorables := slab.ChildStorables() - for _, s := range childStorables { - if _, ok := s.(SlabIDStorable); ok { - storableSlabCount++ - } - } + ids := getSlabIDFromStorable(slab, nil) + storableSlabCount += uint64(len(ids)) + } else { metaDataSlabCount++ @@ -134,12 +131,7 @@ func DumpArraySlabs(a *Array) ([]string, error) { dataSlab := slab.(*ArrayDataSlab) dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = getSlabIDFromStorable(dataSlab, overflowIDs) } else { meta := slab.(*ArrayMetaDataSlab) @@ -193,7 +185,7 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp } computedCount, dataSlabIDs, nextDataSlabIDs, err := - validArraySlab(tic, hip, a.Storage, a.root.Header().slabID, 0, nil, []SlabID{}, []SlabID{}) + validArraySlab(tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{}) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). 
return err @@ -217,7 +209,7 @@ func validArraySlab( tic TypeInfoComparator, hip HashInputProvider, storage SlabStorage, - id SlabID, + slab ArraySlab, level int, headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, @@ -229,34 +221,30 @@ func validArraySlab( err error, ) { - slab, err := getArraySlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return 0, nil, nil, err - } + id := slab.Header().slabID if level > 0 { // Verify that non-root slab doesn't have extra data if slab.ExtraData() != nil { - return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %d has extra data", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s has extra data", id)) } // Verify that non-root slab doesn't underflow if underflowSize, underflow := slab.IsUnderflow(); underflow { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d underflows by %d bytes", id, underflowSize)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s underflows by %d bytes", id, underflowSize)) } } // Verify that slab doesn't overflow if slab.IsFull() { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d overflows", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s overflows", id)) } // Verify that header is in sync with header from parent slab if headerFromParentSlab != nil { if !reflect.DeepEqual(*headerFromParentSlab, slab.Header()) { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d header %+v is different from header %+v from parent slab", + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s header %+v is different from header %+v from parent slab", id, slab.Header(), headerFromParentSlab)) } } @@ -264,25 +252,34 @@ func validArraySlab( if slab.IsData() { dataSlab, ok := slab.(*ArrayDataSlab) if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s is not ArrayDataSlab", id)) } // Verify that element count is the same as header.count if uint32(len(dataSlab.elements)) != dataSlab.header.count { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header count %d is wrong, want %d", + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header count %d is wrong, want %d", id, dataSlab.header.count, len(dataSlab.elements))) } + // Verify that only root slab can be inlined + if level > 0 && slab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + // Verify that aggregated element size + slab prefix is the same as header.size computedSize := uint32(arrayDataSlabPrefixSize) if level == 0 { computedSize = uint32(arrayRootDataSlabPrefixSize) + if slab.Inlined() { + computedSize = uint32(inlinedArrayDataSlabPrefixSize) + } } + for _, e := range dataSlab.elements { // Verify element size is <= inline size if e.ByteSize() > uint32(maxInlineArrayElementSize) { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d element %s size %d is too large, want < %d", + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", id, e, e.ByteSize(), maxInlineArrayElementSize)) } @@ -290,7 +287,7 @@ func validArraySlab( } if computedSize != dataSlab.header.size { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header size %d is wrong, want %d", + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header size %d is wrong, want %d", id, dataSlab.header.size, computedSize)) } @@ -315,7 
+312,7 @@ func validArraySlab( if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, nil, nil, fmt.Errorf( - "data slab %d element %s isn't valid: %w", + "data slab %s element %q isn't valid: %w", id, e, err, ) } @@ -351,10 +348,16 @@ func validArraySlab( for i := 0; i < len(meta.childrenHeaders); i++ { h := meta.childrenHeaders[i] + childSlab, err := getArraySlab(storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return 0, nil, nil, err + } + // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - validArraySlab(tic, hip, storage, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs) + validArraySlab(tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). return 0, nil, nil, err @@ -446,15 +449,30 @@ func validArraySlabSerialization( } // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined composite because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // composite extra data section for reuse, and only composite field + // values are encoded in non-extra data section. + // This reduces encoding size because composite values of the same + // composite type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined composite by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). 
+ return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError(fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError(fmt.Errorf("slab %s encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) + } } // Compare encoded data of original slab with encoded data of decoded slab @@ -548,6 +566,11 @@ func arrayDataSlabEqual( return err } + // Compare inlined + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -567,14 +590,14 @@ func arrayDataSlabEqual( for i := 0; i < len(expected.elements); i++ { ee := expected.elements[i] ae := actual.elements[i] - if !compare(ee, ae) { - return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) - } - // Compare nested element - if idStorable, ok := ee.(SlabIDStorable); ok { + switch ee := ee.(type) { + case SlabIDStorable: + if !compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } - ev, err := idStorable.StoredValue(storage) + ev, err := ee.StoredValue(storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err @@ -588,6 +611,27 @@ func arrayDataSlabEqual( decodeTypeInfo, compare, ) + + case *ArrayDataSlab: + ae, ok := ae.(*ArrayDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) + } + + return arrayDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + + case *MapDataSlab: + ae, ok := ae.(*MapDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) + } + + return mapDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + + default: + if !compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } } } @@ -670,7 +714,7 @@ func ValidValueSerialization( return nil } -func computeSlabSize(data []byte) (int, error) { +func computeSize(data []byte) (int, error) { if len(data) < versionAndFlagSize { return 0, NewDecodingError(fmt.Errorf("data is too short")) } @@ -680,20 +724,23 @@ func computeSlabSize(data []byte) (int, error) { return 0, NewDecodingError(err) } - slabExtraDataSize, err := getExtraDataSize(h, data[versionAndFlagSize:]) + slabExtraDataSize, inlinedSlabExtrDataSize, err := getExtraDataSizes(h, data[versionAndFlagSize:]) if err != nil { return 0, err } - // Computed slab size (slab header size): - // - excludes slab extra data size - // - adds next slab ID for non-root data slab if not encoded - size := len(data) - slabExtraDataSize - isDataSlab := h.getSlabArrayType() == slabArrayData || h.getSlabMapType() == slabMapData || h.getSlabMapType() == slabMapCollisionGroup + // computed size (slab header size): + // - excludes slab extra data size + // - excludes inlined slab extra data size + // - adds next slab ID for non-root data slab if not encoded + size := len(data) + size -= slabExtraDataSize + size -= inlinedSlabExtrDataSize + if !h.isRoot() && isDataSlab && !h.hasNextSlabID() { size += slabIDSize } @@ -701,15 +748,102 @@ func 
computeSlabSize(data []byte) (int, error) { return size, nil } -func getExtraDataSize(h head, data []byte) (int, error) { +func hasInlinedComposite(data []byte) (bool, error) { + if len(data) < versionAndFlagSize { + return false, NewDecodingError(fmt.Errorf("data is too short")) + } + + h, err := newHeadFromData(data[:versionAndFlagSize]) + if err != nil { + return false, NewDecodingError(err) + } + + if !h.hasInlinedSlabs() { + return false, nil + } + + data = data[versionAndFlagSize:] + + // Skip slab extra data if needed. if h.isRoot() { dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) b, err := dec.DecodeRawBytes() if err != nil { - return 0, NewDecodingError(err) + return false, NewDecodingError(err) + } + + data = data[len(b):] + } + + // Parse inlined extra data to find composite extra data. + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + count, err := dec.DecodeArrayHead() + if err != nil { + return false, NewDecodingError(err) + } + + for i := uint64(0); i < count; i++ { + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return false, NewDecodingError(err) + } + if tagNum == CBORTagInlinedCompositeExtraData { + return true, nil + } + err = dec.Skip() + if err != nil { + return false, NewDecodingError(err) + } + } + + return false, nil +} + +func getExtraDataSizes(h head, data []byte) (int, int, error) { + + var slabExtraDataSize, inlinedSlabExtraDataSize int + + if h.isRoot() { + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + b, err := dec.DecodeRawBytes() + if err != nil { + return 0, 0, NewDecodingError(err) + } + slabExtraDataSize = len(b) + + data = data[slabExtraDataSize:] + } + + if h.hasInlinedSlabs() { + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + b, err := dec.DecodeRawBytes() + if err != nil { + return 0, 0, NewDecodingError(err) + } + inlinedSlabExtraDataSize = len(b) + } + + return slabExtraDataSize, inlinedSlabExtraDataSize, nil +} + +// getSlabIDFromStorable appends slab IDs from storable to ids. +// This function traverses child storables. If child storable +// is inlined map or array, inlined map or array is also traversed. +func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID { + childStorables := storable.ChildStorables() + + for _, e := range childStorables { + switch e := e.(type) { + case SlabIDStorable: + ids = append(ids, SlabID(e)) + + case *ArrayDataSlab: + ids = getSlabIDFromStorable(e, ids) + + case *MapDataSlab: + ids = getSlabIDFromStorable(e, ids) } - return len(b), nil } - return 0, nil + return ids } diff --git a/array_test.go b/array_test.go index ad08ac9d..ed1f57d4 100644 --- a/array_test.go +++ b/array_test.go @@ -1374,7 +1374,7 @@ func TestArrayNestedArrayMap(t *testing.T) { storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Create a list of arrays with 2 elements. + // Create a list of arrays with 1 element. 
nestedArrays := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { nested, err := NewArray(storage, address, nestedTypeInfo) @@ -1782,7 +1782,7 @@ func TestArrayEncodeDecode(t *testing.T) { verifyEmptyArray(t, storage2, typeInfo, address, array2) }) - t.Run("dataslab as root", func(t *testing.T) { + t.Run("root dataslab", func(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1828,7 +1828,7 @@ func TestArrayEncodeDecode(t *testing.T) { verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("has pointers", func(t *testing.T) { + t.Run("root metadata slab", func(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1836,35 +1836,19 @@ func TestArrayEncodeDecode(t *testing.T) { array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - const arraySize = 20 + const arraySize = 18 values := make([]Value, arraySize) - for i := uint64(0); i < arraySize-1; i++ { + for i := uint64(0); i < arraySize; i++ { v := NewStringValue(strings.Repeat("a", 22)) values[i] = v + err := array.Append(v) require.NoError(t, err) } - typeInfo2 := testTypeInfo{43} - - nestedArray, err := NewArray(storage, address, typeInfo2) - require.NoError(t, err) - - err = nestedArray.Append(Uint64Value(0)) - require.NoError(t, err) - - values[arraySize-1] = nestedArray - - err = array.Append(nestedArray) - require.NoError(t, err) - - require.Equal(t, uint64(arraySize), array.Count()) - require.Equal(t, uint64(1), nestedArray.Count()) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ @@ -1892,8 +1876,8 @@ func TestArrayEncodeDecode(t *testing.T) { 0x00, 0xe4, // child header 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x0b, - 0x01, 0x0e, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, }, // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] @@ -1918,14 +1902,14 @@ func TestArrayEncodeDecode(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + // (data slab) data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] id3: { // version 0x10, // array data slab flag - 0x40, + 0x00, // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x0b, + 0x99, 0x00, 0x09, // CBOR encoded array elements 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, @@ -1936,27 +1920,6 @@ func TestArrayEncodeDecode(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - - // (data slab) next: 0, data: [0] - id4: { - // version - 0x10, - // extra data flag - 0x80, - - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2b, - - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, }, } @@ -1966,7 +1929,6 @@ func TestArrayEncodeDecode(t *testing.T) { require.Equal(t, expected[id1], m[id1]) require.Equal(t, expected[id2], m[id2]) require.Equal(t, expected[id3], m[id3]) - require.Equal(t, expected[id4], m[id4]) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, m) @@ -1977,1589 +1939,4197 @@ func TestArrayEncodeDecode(t *testing.T) { verifyArray(t, storage2, typeInfo, address, array2, values, false) }) -} - -func TestArrayEncodeDecodeRandomValues(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + // Same type info is reused. 
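+	// In the expected encoding below, both inlined children reference inlined
+	// extra data index 0 (bytes 0x18, 0x00), so the shared type info
+	// (testTypeInfo{43}) is encoded once in the inlined extra data section
+	// rather than once per element.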
+ t.Run("root data slab, inlined child array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const opCount = 8192 + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - r := newRand(t) + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) - array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + err = childArray.Append(v) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, array, values, false) + err = parentArray.Append(childArray) + require.NoError(t, err) - // Decode data to new storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + values[i] = childArray + } - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - verifyArray(t, storage2, typeInfo, address, array2, values, false) -} + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, -func TestEmptyArray(t *testing.T) { + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - t.Parallel() + // inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestBasicStorage(t) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - t.Run("get", func(t *testing.T) { - s, err := array.Get(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - t.Run("set", func(t *testing.T) { - s, err := array.Set(0, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) - t.Run("insert", func(t *testing.T) { - err := array.Insert(1, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var 
userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("remove", func(t *testing.T) { - s, err := array.Remove(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Different type info are encoded. + t.Run("root data slab, inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - t.Run("iterate", func(t *testing.T) { - i := uint64(0) - err := array.Iterate(func(v Value) (bool, error) { - i++ - return true, nil - }) + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Equal(t, uint64(0), i) - }) - t.Run("count", func(t *testing.T) { - count := array.Count() - require.Equal(t, uint64(0), count) - }) + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, array.Type())) - }) + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - // TestArrayEncodeDecode/empty tests empty array encoding and decoding -} + err = childArray.Append(v) + require.NoError(t, err) -func TestArrayStringElement(t *testing.T) { + err = parentArray.Append(childArray) + require.NoError(t, err) - t.Parallel() + values[i] = childArray + } - t.Run("inline", func(t *testing.T) { + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - const arraySize = 4096 + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - r := newRand(t) + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - stringSize := int(maxInlineArrayElementSize - 3) + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := randStr(r, stringSize) - values[i] = NewStringValue(s) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, } - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - array, err := NewArray(storage, address, typeInfo) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := 
NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(values[i]) - require.NoError(t, err) - } + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) - verifyArray(t, storage, typeInfo, address, array, values, false) + // Same type info is reused. + t.Run("root data slab, multiple levels of inlined array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - stats, err := GetArrayStats(array) + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Equal(t, uint64(0), stats.StorableSlabCount) - }) - t.Run("external slab", func(t *testing.T) { + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - const arraySize = 4096 + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) - r := newRand(t) + err = gchildArray.Append(v) + require.NoError(t, err) - stringSize := int(maxInlineArrayElementSize + 512) + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := randStr(r, stringSize) - values[i] = NewStringValue(s) + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + values[i] = childArray } - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - for i := uint64(0); i < arraySize; i++ { - err := array.Append(values[i]) - require.NoError(t, err) - } + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - verifyArray(t, storage, typeInfo, address, array, values, false) + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - stats, err := GetArrayStats(array) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() require.NoError(t, err) - require.Equal(t, uint64(arraySize), stats.StorableSlabCount) - }) -} + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) -func TestArrayStoredValue(t *testing.T) { + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - const arraySize = 4096 + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) - typeInfo := testTypeInfo{42} - address 
:= Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + t.Run("root data slab, multiple levels of inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + typeInfo4 := testTypeInfo{45} + typeInfo5 := testTypeInfo{46} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - } - - rootID := array.SlabID() - slabIterator, err := storage.SlabIterator() - require.NoError(t, err) + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - for { - id, slab := slabIterator() + var ti TypeInfo + if i == 0 { + ti = typeInfo2 + } else { + ti = typeInfo4 + } + gchildArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - if id == SlabIDUndefined { - break - } + err = gchildArray.Append(v) + require.NoError(t, err) - value, err := slab.StoredValue(storage) + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo5 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - if id == rootID { + err = childArray.Append(gchildArray) require.NoError(t, err) - array2, ok := value.(*Array) - require.True(t, ok) + err = parentArray.Append(childArray) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, array2, values, false) - } else { - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var notValueError *NotValueError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, ¬ValueError) - require.ErrorAs(t, fatalError, ¬ValueError) - require.Nil(t, value) + values[i] = childArray } - } -} - -func TestArrayPopIterate(t *testing.T) { - - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - i := uint64(0) - err = array.PopIterate(func(v Storable) { - i++ - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - verifyEmptyArray(t, storage, typeInfo, address, array) - }) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - t.Run("root-dataslab", func(t *testing.T) { + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - const arraySize = 10 + // inlined extra data + 0x84, + // typeInfo3 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // typeInfo2 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // typeInfo5 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2e, + // typeInfo4 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2d, - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x03, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } - array, err := NewArray(storage, address, typeInfo) + m, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - i := 0 - err = array.PopIterate(func(v Storable) { - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) - i++ - }) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - require.Equal(t, arraySize, i) - verifyEmptyArray(t, storage, typeInfo, address, array) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - - const arraySize = 4096 + t.Run("root metadata slab, inlined array of same type", func(t *testing.T) { typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + err := array.Append(v) require.NoError(t, err) + + values = append(values, v) } - i := 0 - err = array.PopIterate(func(v Storable) { - vv, err := v.StoredValue(storage) + for i := 0; i < 2; i++ { + childArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) - i++ - }) - require.NoError(t, err) - require.Equal(t, arraySize, i) - - verifyEmptyArray(t, storage, typeInfo, address, array) - }) -} -func TestArrayFromBatchData(t *testing.T) { + err = childArray.Append(Uint64Value(i)) + require.NoError(t, err) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} + err = array.Append(childArray) + require.NoError(t, err) - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) - require.Equal(t, uint64(0), array.Count()) + values = append(values, childArray) + } - iter, err := array.Iterator() - require.NoError(t, err) + require.Equal(t, uint64(arraySize), array.Count()) - // Create a new array with new storage, new address, and original array's elements. 
- address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), array.SlabID()) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - verifyEmptyArray(t, storage, typeInfo, address, copied) - }) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ - t.Run("root-dataslab", func(t *testing.T) { + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, - const arraySize = 10 + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - typeInfo := testTypeInfo{42} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, - require.Equal(t, uint64(arraySize), array.Count()) + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, + } - iter, err := array.Iterator() + m, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) - // Create a new array with new storage, new address, and original array's elements. 
- address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), array.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - - const arraySize = 4096 + t.Run("root metadata slab, inlined array of different type", func(t *testing.T) { typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + err := array.Append(v) require.NoError(t, err) + + values = append(values, v) } - require.Equal(t, uint64(arraySize), array.Count()) + for i := 0; i < 2; i++ { + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } - iter, err := array.Iterator() - require.NoError(t, err) + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + err = childArray.Append(Uint64Value(i)) + require.NoError(t, err) - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + err = array.Append(childArray) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + values = append(values, childArray) + } - t.Run("rebalance two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + require.Equal(t, uint64(arraySize), array.Count()) - typeInfo := testTypeInfo{42} + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ - var values []Value - var v Value + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, - v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) - values = append(values, v) + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - err = array.Insert(0, v) - require.NoError(t, err) + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) 
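+			// (from the bytes below, each child header is 14 bytes: 8-byte slab index + 4-byte count + 2-byte size)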
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, - for i := 0; i < 35; i++ { - v = Uint64Value(i) - values = append(values, v) + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, - err = array.Append(v) - require.NoError(t, err) + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, } - require.Equal(t, uint64(36), array.Count()) - - iter, err := array.Iterator() + m, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("merge two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - + t.Run("has pointers", func(t *testing.T) { typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - var values []Value - var v Value - for i := 0; i < 35; i++ { - v = Uint64Value(i) - values = append(values, v) - err = array.Append(v) + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) require.NoError(t, err) - } + 
+ values = append(values, v) + } + + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = childArray.Append(v) + require.NoError(t, err) + } + + err = array.Append(childArray) + require.NoError(t, err) + + values = append(values, childArray) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(5), childArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + id3: { + // version (no next slab ID, no inlined slabs) + 0x10, + // array data slab flag + 0x40, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // (data slab) next: 0, data: [bbbbbbbbbbbbbbbbbbbbbb ...] 
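+			// id4 is the child array's root data slab; the last element of id3 references it by SlabID because the 5-element child array is too large to inline.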
+ id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) + + t.Run("has pointers in inlined slab", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + values = append(values, v) + } + + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) + + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + + err = gchildArray.Append(v) + require.NoError(t, err) + } + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + values = append(values, childArray) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(5), gchildArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 5}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:287 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 
0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x1f, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[SlabID(...)]] + id3: { + // version (no next slab ID, has inlined slabs) + 0x11, + // array data slab flag (has pointer) + 0x40, + + // inlined array of extra data + 0x81, + // type info + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + }, + + // (data slab) data: [bbbbbbbbbbbbbbbbbbbbbb ...] 
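+			// id4 is the grandchild array's external slab; the inlined child array at the end of id3 references it by SlabID.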
+ id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) +} + +func TestArrayEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const opCount = 8192 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + + verifyArray(t, storage, typeInfo, address, array, values, false) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + verifyArray(t, storage2, typeInfo, address, array2, values, false) +} + +func TestEmptyArray(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestBasicStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := array.Get(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("set", func(t *testing.T) { + s, err := array.Set(0, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("insert", func(t *testing.T) { + err := array.Insert(1, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, 
&indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + }) + + t.Run("remove", func(t *testing.T) { + s, err := array.Remove(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("iterate", func(t *testing.T) { + i := uint64(0) + err := array.Iterate(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + + t.Run("count", func(t *testing.T) { + count := array.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, array.Type())) + }) + + // TestArrayEncodeDecode/empty tests empty array encoding and decoding +} + +func TestArrayStringElement(t *testing.T) { + + t.Parallel() + + t.Run("inline", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize - 3) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(0), stats.StorableSlabCount) + }) + + t.Run("external slab", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize + 512) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(arraySize), stats.StorableSlabCount) + }) +} + +func TestArrayStoredValue(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + rootID := array.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + require.NoError(t, err) + + array2, ok := value.(*Array) + require.True(t, ok) + + verifyArray(t, storage, typeInfo, address, array2, values, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError 
*FatalError + var notValueError *NotValueError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, ¬ValueError) + require.ErrorAs(t, fatalError, ¬ValueError) + require.Nil(t, value) + } + } +} + +func TestArrayPopIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + i := uint64(0) + err = array.PopIterate(func(v Storable) { + i++ + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + + verifyEmptyArray(t, storage, typeInfo, address, array) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + i := 0 + err = array.PopIterate(func(v Storable) { + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) + i++ + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + verifyEmptyArray(t, storage, typeInfo, address, array) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + i := 0 + err = array.PopIterate(func(v Storable) { + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) + i++ + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + verifyEmptyArray(t, storage, typeInfo, address, array) + }) +} + +func TestArrayFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. 
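+		// NewArrayFromBatchData drains the iterator to build the copy, so the copy gets a fresh slab ID under the new address.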
+ address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + verifyEmptyArray(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) + values = append(values, v) + + err = array.Insert(0, v) + require.NoError(t, err) + + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := 
NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + } v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) values = append(values, nil) copy(values[25+1:], values[25:]) values[25] = v - err = array.Insert(25, v) + err = array.Insert(25, v) + require.NoError(t, err) + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, int(maxInlineArrayElementSize)) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. 
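+		// Each of the three elements appended below is close to maxInlineArrayElementSize, so a single data slab holding all of them would exceed maxThreshold and must be split.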
+ + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) +} + +func TestArrayNestedStorables(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + + const arraySize = 1024 * 4 + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := strings.Repeat("a", int(i)) + v := SomeValue{Value: NewStringValue(s)} + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, array, values, true) +} + +func TestArrayMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var values []Value + for i := 0; i < 2; i++ { + // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. + v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3))) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + // Size of root data slab with two elements of max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab), and minus 1 byte + // (for rounding when computing max inline array element size). 
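+	// e.g. assuming the default 1024-byte target threshold and 16-byte slab ID, that is 1024 - 16 - 1 = 1007 bytes.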
+ require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) + + verifyArray(t, storage, typeInfo, address, array, values, false) +} + +func TestArrayString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5]` + require.Equal(t, want, array.String()) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` + require.Equal(t, want, array.String()) + }) +} + +func TestArraySlabDump(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", + } + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", + "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", + "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("overflow", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 
8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) + require.NoError(t, err) + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func errorCategorizationCount(err error) int { + var fatalError *FatalError + var userError *UserError + var externalError *ExternalError + + count := 0 + if errors.As(err, &fatalError) { + count++ + } + if errors.As(err, &userError) { + count++ + } + if errors.As(err, &externalError) { + count++ + } + return count +} + +func TestArrayLoadedValueIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, nil) + }) + + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[i+1:] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root 
data slab
+		// nested composite elements: 1 root data slab for each
+		require.Equal(t, 1+arraySize, len(storage.deltas))
+		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+
+		verifyArrayLoadedElements(t, array, values)
+
+		// Unload composite element from back to front
+		for i := len(values) - 1; i >= 0; i-- {
+			v := values[i]
+
+			nestedArray, ok := v.(*Array)
+			require.True(t, ok)
+
+			err := storage.Remove(nestedArray.SlabID())
+			require.NoError(t, err)
+
+			expectedValues := values[:i]
+			verifyArrayLoadedElements(t, array, expectedValues)
+		}
+	})
+
+	t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 3
+		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array: 1 root data slab
+		// nested composite elements: 1 root data slab for each
+		require.Equal(t, 1+arraySize, len(storage.deltas))
+		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+
+		verifyArrayLoadedElements(t, array, values)
+
+		// Unload composite element in the middle
+		unloadValueIndex := 1
+
+		v := values[unloadValueIndex]
+
+		nestedArray, ok := v.(*Array)
+		require.True(t, ok)
+
+		err := storage.Remove(nestedArray.SlabID())
+		require.NoError(t, err)
+
+		copy(values[unloadValueIndex:], values[unloadValueIndex+1:])
+		values = values[:len(values)-1]
+
+		verifyArrayLoadedElements(t, array, values)
+	})
+
+	t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 3
+		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array: 1 root data slab
+		// nested composite elements: 1 root data slab for each
+		require.Equal(t, 1+arraySize, len(storage.deltas))
+		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+
+		verifyArrayLoadedElements(t, array, values)
+
+		i := 0
+		err := array.IterateLoadedValues(func(v Value) (bool, error) {
+			// At this point, the iterator has returned the first element (v).
+
+			// Remove all other nested composite elements (except the first element) from storage.
+			for _, value := range values[1:] {
+				nestedArray, ok := value.(*Array)
+				require.True(t, ok)
+
+				err := storage.Remove(nestedArray.SlabID())
+				require.NoError(t, err)
+			}
+
+			require.Equal(t, 0, i)
+			valueEqual(t, typeInfoComparator, values[0], v)
+			i++
+			return true, nil
+		})
+
+		require.NoError(t, err)
+		require.Equal(t, 1, i) // Only the first element is iterated because the other elements are removed during iteration.
+ }) + + t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 3 + + // Create an array with nested composite value at specified index + for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { + storage := newTestPersistentStorage(t) + + array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + + // parent array: 1 root data slab + // nested composite element: 1 root data slab + require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element + v := values[nestedCompositeIndex].(*Array) + + err := storage.Remove(v.SlabID()) + require.NoError(t, err) + + copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + require.Equal(t, 3, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[i+1:] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i] + + nestedArray, ok := v.(*Array) + 
require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[:i] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + for _, index := range []int{4, 14} { + + v := values[index] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 20 + + // Create an array with composite value at specified index. + for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { + storage := newTestPersistentStorage(t) + + array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+1, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite value + v := values[nestedCompositeIndex].(*Array) + + err := storage.Remove(v.SlabID()) + require.NoError(t, err) + + copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload data slabs from front to back + for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // 
Unload data slabs from back to front
+		for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
+
+			childHeader := metaDataSlab.childrenHeaders[i]
+
+			err := storage.Remove(childHeader.slabID)
+			require.NoError(t, err)
+
+			values = values[:len(values)-int(childHeader.count)]
+
+			verifyArrayLoadedElements(t, array, values)
+		}
+	})
+
+	t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 30
+		array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array (2 levels): 1 root metadata slab, 3 data slabs
+		require.Equal(t, 4, len(storage.deltas))
+		require.Equal(t, 1, getArrayMetaDataSlabCount(storage))
+
+		verifyArrayLoadedElements(t, array, values)
+
+		metaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
+		require.True(t, ok)
+
+		require.True(t, len(metaDataSlab.childrenHeaders) > 2)
+
+		index := 1
+		childHeader := metaDataSlab.childrenHeaders[index]
+
+		err := storage.Remove(childHeader.slabID)
+		require.NoError(t, err)
+
+		copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:])
+		values = values[:array.Count()-uint64(childHeader.count)]
+
+		verifyArrayLoadedElements(t, array, values)
+	})
+
+	t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 250
+		array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs
+		require.Equal(t, 3, getArrayMetaDataSlabCount(storage))
+
+		rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
+		require.True(t, ok)
+
+		// Unload non-root metadata slabs from front to back
+		for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
+
+			childHeader := rootMetaDataSlab.childrenHeaders[i]
+
+			err := storage.Remove(childHeader.slabID)
+			require.NoError(t, err)
+
+			values = values[childHeader.count:]
+
+			verifyArrayLoadedElements(t, array, values)
+		}
+	})
+
+	t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 250
+		array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs
+		require.Equal(t, 3, getArrayMetaDataSlabCount(storage))
+
+		rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
+		require.True(t, ok)
+
+		// Unload non-root metadata slabs from back to front.
+		// Expected values are truncated from the back here, unlike the
+		// front-to-back case above, which advances the slice from the front.
+		for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
+
+			childHeader := rootMetaDataSlab.childrenHeaders[i]
+
+			err := storage.Remove(childHeader.slabID)
+			require.NoError(t, err)
+
+			values = values[:len(values)-int(childHeader.count)]
+
+			verifyArrayLoadedElements(t, array, values)
+		}
+	})
+
+	t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) {
+
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 500
+		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
+		// nested composite elements: 1 root data slab for each
+		require.True(t, len(storage.deltas) > 1+arraySize)
+		require.True(t, getArrayMetaDataSlabCount(storage) > 1)
+
+		verifyArrayLoadedElements(t, array, values)
+
+		r := 
newRand(t) + + // Unload random composite element + for len(values) > 0 { + + i := r.Intn(len(values)) + + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + verifyArrayLoadedElements(t, array, values) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + type slabInfo struct { + id SlabID + startIndex int + count int + } + + count := 0 + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { + nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + for _, h := range nonrootMetaDataSlab.childrenHeaders { + dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} + dataSlabInfos = append(dataSlabInfos, dataSlabInfo) + count += int(h.count) + } + } + + r := newRand(t) + + // Unload random data slab. + for len(dataSlabInfos) > 0 { + indexToUnload := r.Intn(len(dataSlabInfos)) + + slabInfoToUnload := dataSlabInfos[indexToUnload] + + // Update startIndex for all data slabs after indexToUnload. + for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { + dataSlabInfos[i].startIndex -= slabInfoToUnload.count + } + + // Remove slabInfo to be unloaded from dataSlabInfos. 
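The two lines that follow are the copy-and-truncate idiom this test uses wherever it deletes an element from a slice while preserving order. As a hedged aside (this generic helper is not part of the patch; the name is illustrative), the idiom could be factored as:

// removeAt deletes s[i] in place while preserving order: shift the tail
// left by one, then drop the now-duplicated last element.
func removeAt[T any](s []T, i int) []T {
	copy(s[i:], s[i+1:])
	return s[:len(s)-1]
}

With it, the bookkeeping below would read dataSlabInfos = removeAt(dataSlabInfos, indexToUnload).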
+			copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:])
+			dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
+
+			err := storage.Remove(slabInfoToUnload.id)
+			require.NoError(t, err)
+
+			copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:])
+			values = values[:len(values)-slabInfoToUnload.count]
+
+			verifyArrayLoadedElements(t, array, values)
+		}
+
+		require.Equal(t, 0, len(values))
+	})
+
+	t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) {
+
+		storage := newTestPersistentStorage(t)
+
+		const arraySize = 500
+		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+
+		// parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
+		// nested composite elements: 1 root data slab for each
+		require.True(t, len(storage.deltas) > 1+arraySize)
+		require.True(t, getArrayMetaDataSlabCount(storage) > 1)
+
+		verifyArrayLoadedElements(t, array, values)
+
+		type slabInfo struct {
+			id         SlabID
+			startIndex int
+			count      int
+			children   []*slabInfo
+		}
+
+		rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
+		require.True(t, ok)
+
+		var dataSlabCount, metadataSlabCount int
+		nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders))
+		for i, mheader := range rootMetaDataSlab.childrenHeaders {
+
+			nonrootMetadataSlabInfo := &slabInfo{
+				id:         mheader.slabID,
+				startIndex: metadataSlabCount,
+				count:      int(mheader.count),
+			}
+			metadataSlabCount += int(mheader.count)
+
+			nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab)
+			require.True(t, ok)
+
+			children := make([]*slabInfo, len(nonrootMetadataSlab.childrenHeaders))
+			for i, h := range nonrootMetadataSlab.childrenHeaders {
+				children[i] = &slabInfo{
+					id:         h.slabID,
+					startIndex: dataSlabCount,
+					count:      int(h.count),
+				}
+				dataSlabCount += int(h.count)
+			}
+
+			nonrootMetadataSlabInfo.children = children
+			nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo
+		}
+
+		r := newRand(t)
+
+		const (
+			metadataSlabType int = iota
+			dataSlabType
+			maxSlabType
+		)
+
+		for len(nonrootMetadataSlabInfos) > 0 {
+
+			var slabInfoToBeRemoved *slabInfo
+			var isLastSlab bool
+
+			// Unload random metadata or data slab.
+			switch r.Intn(maxSlabType) {
+
+			case metadataSlabType:
+				// Unload metadata slab at random index.
+				metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos))
+
+				isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1
+
+				slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex]
+
+				count := slabInfoToBeRemoved.count
+
+				// Update startIndex for subsequent metadata and data slabs.
+				for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ {
+					nonrootMetadataSlabInfos[i].startIndex -= count
+
+					for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ {
+						nonrootMetadataSlabInfos[i].children[j].startIndex -= count
+					}
+				}
+
+				copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:])
+				nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1]
+
+			case dataSlabType:
+				// Unload data slab at random index.
+				metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos))
+
+				metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex]
+
+				dataSlabIndex := r.Intn(len(metaSlabInfo.children))
+
+				slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex]
+
+				isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) &&
+					(dataSlabIndex == len(metaSlabInfo.children)-1)
+
+				count := slabInfoToBeRemoved.count
+
+				// Update startIndex for subsequent data slabs.
+				for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ {
+					metaSlabInfo.children[i].startIndex -= count
+				}
+
+				copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:])
+				metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1]
+
+				metaSlabInfo.count -= count
+
+				// Update startIndex for all subsequent metadata slabs.
+				for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ {
+					nonrootMetadataSlabInfos[i].startIndex -= count
+
+					for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ {
+						nonrootMetadataSlabInfos[i].children[j].startIndex -= count
+					}
+				}
+
+				if len(metaSlabInfo.children) == 0 {
+					copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:])
+					nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1]
+				}
+			}
+
+			err := storage.Remove(slabInfoToBeRemoved.id)
+			require.NoError(t, err)
+
+			if isLastSlab {
+				values = values[:slabInfoToBeRemoved.startIndex]
+			} else {
+				copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:])
+				values = values[:len(values)-slabInfoToBeRemoved.count]
+			}
+
+			verifyArrayLoadedElements(t, array, values)
+		}
+
+		require.Equal(t, 0, len(values))
+	})
+}
+
+func createArrayWithSimpleValues(
+	t *testing.T,
+	storage SlabStorage,
+	address Address,
+	typeInfo TypeInfo,
+	arraySize int,
+) (*Array, []Value) {
+
+	// Create parent array
+	array, err := NewArray(storage, address, typeInfo)
+	require.NoError(t, err)
+
+	values := make([]Value, arraySize)
+	r := rune('a')
+	for i := 0; i < arraySize; i++ {
+		values[i] = NewStringValue(strings.Repeat(string(r), 20))
+
+		err := array.Append(values[i])
 		require.NoError(t, err)
+	}
-	require.Equal(t, uint64(36), array.Count())
+	return array, values
+}
+
+func createArrayWithCompositeValues(
+	t *testing.T,
+	storage SlabStorage,
+	address Address,
+	typeInfo TypeInfo,
+	arraySize int,
+) (*Array, []Value) {
+
+	// Create parent array
+	array, err := NewArray(storage, address, typeInfo)
+	require.NoError(t, err)
+
+	expectedValues := make([]Value, arraySize)
+	for i := 0; i < arraySize; i++ {
+		// Create nested array
+		nested, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		for j := 0; j < 50; j++ {
+			err = nested.Append(Uint64Value(j))
+			require.NoError(t, err)
+		}
+
+		expectedValues[i] = nested
+
+		// Append nested array to parent
+		err = array.Append(nested)
+		require.NoError(t, err)
+	}
+
+	return array, expectedValues
+}
+
+func createArrayWithSimpleAndCompositeValues(
+	t *testing.T,
+	storage SlabStorage,
+	address Address,
+	typeInfo TypeInfo,
+	arraySize int,
+	compositeValueIndex int,
+) (*Array, []Value) {
+	require.True(t, compositeValueIndex < arraySize)
+
+	array, err := NewArray(storage, address, typeInfo)
+	require.NoError(t, err)
+
+	values := make([]Value, arraySize)
+	r := 'a'
+	for i := 0; i < arraySize; i++ {
+
+		if compositeValueIndex == i {
+			// Create nested array with 50 elements
+			a, err := NewArray(storage, 
address, typeInfo) + require.NoError(t, err) + + for j := 0; j < 50; j++ { + err = a.Append(Uint64Value(j)) + require.NoError(t, err) + } + + values[i] = a + } else { + values[i] = NewStringValue(strings.Repeat(string(r), 20)) + r++ + } + + err = array.Append(values[i]) + require.NoError(t, err) + } + + return array, values +} + +func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { + i := 0 + err := array.IterateLoadedValues(func(v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, typeInfoComparator, expectedValues[i], v) + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} + +func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*ArrayMetaDataSlab); ok { + counter++ + } + } + return counter +} + +func TestArrayID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + sid := array.SlabID() + id := array.ValueID() + + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} + +func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { + const ( + arraySize = 3 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]*testMutableValue, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := newTestMutableValue(initialStorableSize) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + mv := values[i] + mv.updateStorableSize(mutatedStorableSize) + + existingStorable, err := array.Set(i, mv) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) +} + +func TestChildArrayInlinabilityInParentArray(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) - iter, err := array.Iterator() - require.NoError(t, err) + t.Run("parent is root data slab, with one child array", func(t *testing.T) { + const arraySize = 1 + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + // Create an array with empty child array as element. 
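createArrayWithEmptyChildArray is used below but defined elsewhere in this file. A plausible sketch of its shape, mirroring createArrayWithCompositeValues above (an assumption for illustration, not the patch's actual helper):

func createArrayWithEmptyChildArray(
	t *testing.T,
	storage SlabStorage,
	address Address,
	typeInfo TypeInfo,
	arraySize int,
) (*Array, []Value) {
	parentArray, err := NewArray(storage, address, typeInfo)
	require.NoError(t, err)

	expectedValues := make([]Value, arraySize)
	for i := 0; i < arraySize; i++ {
		// An empty child array is small enough to be stored inline in the parent.
		childArray, err := NewArray(storage, address, typeInfo)
		require.NoError(t, err)

		expectedValues[i] = childArray

		err = parentArray.Append(childArray)
		require.NoError(t, err)
	}

	return parentArray, expectedValues
}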
+		parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize)
-
+		require.Equal(t, uint64(arraySize), parentArray.Count())
+		require.True(t, parentArray.root.IsData())
+		require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined.
-
-	t.Run("random", func(t *testing.T) {
-		SetThreshold(256)
-		defer SetThreshold(1024)
+		// Test parent slab size with 1 empty inlined child array
+		expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-
-		const arraySize = 4096
+		verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
-
-		r := newRand(t)
+		// Get inlined child array
+		e, err := parentArray.Get(0)
+		require.NoError(t, err)
+		require.Equal(t, 1, getStoredDeltas(storage))
-
-		typeInfo := testTypeInfo{42}
+		childArray, ok := e.(*Array)
+		require.True(t, ok)
+		require.True(t, childArray.Inlined())
+		require.Equal(t, SlabIDUndefined, childArray.SlabID())
-
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
+		valueID := childArray.ValueID()
+		require.Equal(t, address[:], valueID[:slabAddressSize])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
-
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			v := randomValue(r, int(maxInlineArrayElementSize))
-			values[i] = v
+		v := NewStringValue(strings.Repeat("a", 9))
+		vSize := v.size
-
-			err := array.Append(v)
+		// Appending 10 elements to child array so that inlined child array reaches max inlined size as array element.
+		for i := 0; i < 10; i++ {
+			err = childArray.Append(v)
 			require.NoError(t, err)
-		}
+			require.Equal(t, uint64(i+1), childArray.Count())
-
-		require.Equal(t, uint64(arraySize), array.Count())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 1, getStoredDeltas(storage))
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+			// Test inlined child slab size
+			expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize
+			require.Equal(t, expectedInlinedSize, childArray.root.ByteSize())
-
-		storage := newTestPersistentStorage(t)
+			// Test parent slab size
+			expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
+		// Add one more element to child array, which causes the inlined child array slab to become a standalone slab
+		err = childArray.Append(v)
 		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
-
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+		require.False(t, childArray.Inlined())
+		require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child array is no longer inlined.
-
-	t.Run("data slab too large", func(t *testing.T) {
-		// Slab size must not exceed maxThreshold.
- // We cannot make this problem happen after Atree Issue #193 - // was fixed by PR #194 & PR #197. This test is to catch regressions. + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - SetThreshold(256) - defer SetThreshold(1024) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - r := newRand(t) + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - typeInfo := testTypeInfo{42} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - var values []Value - var v Value + // Remove elements from child array which triggers standalone array slab becomes inlined slab again. + for childArray.Count() > 0 { + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - iter, err := array.Iterator() - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) + + t.Run("parent is root data slab, with two child arrays", func(t *testing.T) { + const arraySize = 2 + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) -} + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
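Two helpers that appear throughout the assertions here, getStoredDeltas and valueIDToSlabID, are not shown in this hunk. Hedged sketches of what they plausibly do (the real definitions live in the test utilities): one counts slabs still resident in storage deltas, the other re-slices a value ID into an address/index pair, which is why the comments can say the two IDs are the same bytewise.

func getStoredDeltasSketch(storage *PersistentSlabStorage) int {
	count := 0
	for _, slab := range storage.deltas {
		if slab != nil { // assumption: storage.Remove leaves a nil delta behind
			count++
		}
	}
	return count
}

func valueIDToSlabIDSketch(vid ValueID) SlabID {
	var sid SlabID
	copy(sid.address[:], vid[:slabAddressSize]) // first 8 bytes: address
	copy(sid.index[:], vid[slabAddressSize:])   // last 8 bytes: index
	return sid
}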
-func TestArrayNestedStorables(t *testing.T) { + // Test parent slab size with 2 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - t.Parallel() + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - typeInfo := testTypeInfo{42} + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) - const arraySize = 1024 * 4 + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := strings.Repeat("a", int(i)) - v := SomeValue{Value: NewStringValue(s)} - values[i] = v + children[i].array = childArray + children[i].valueID = valueID + } - err := array.Append(v) - require.NoError(t, err) - } + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - verifyArray(t, storage, typeInfo, address, array, values, true) -} + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 10; i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID -func TestArrayMaxInlineElement(t *testing.T) { - t.Parallel() + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) - r := newRand(t) + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - var values []Value - for i := 0; i < 2; i++ { - // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. 
-		v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3)))
-		values = append(values, v)
+				verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+			}
+		}
-
-		err = array.Append(v)
-		require.NoError(t, err)
-	}
+		expectedStoredDeltas := 1
-
-	require.True(t, array.root.IsData())
+		// Add one more element to each child array, which causes the inlined child array slab to become a standalone slab
+		for _, child := range children {
+			childArray := child.array
+			childValueID := child.valueID
-
-	// Size of root data slab with two elements of max inlined size is target slab size minus
-	// slab id size (next slab id is omitted in root slab), and minus 1 byte
-	// (for rounding when computing max inline array element size).
-	require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size))
+			err := childArray.Append(v)
+			require.NoError(t, err)
+			require.False(t, childArray.Inlined())
-
-	verifyArray(t, storage, typeInfo, address, array, values, false)
-}
+			expectedStoredDeltas++
+			require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) // There are more stored slabs because child array is no longer inlined.
-
-func TestArrayString(t *testing.T) {
+			expectedSlabID := valueIDToSlabID(childValueID)
+			require.Equal(t, expectedSlabID, childArray.SlabID()) // Slab ID is the same bytewise as value ID.
+			require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged
-
-	SetThreshold(256)
-	defer SetThreshold(1024)
+			expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize
+			require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize())
-
-	t.Run("small", func(t *testing.T) {
-		const arraySize = 6
+			expectedParentSize -= inlinedArrayDataSlabPrefixSize + uint32(childArray.Count()-1)*vSize
+			expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize()
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+		// Remove one element from each child array, which causes the standalone child array slab to become an inlined slab again.
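The loop below exercises the reverse transition. The decision being tested is, in effect, a size check after each mutation; a hedged sketch of that check (the real logic lives in the parent's update path, not in this hunk):

// A child array can be inlined again when its root is a data slab and
// its encoded size fits the maximum inline size enforced by the parent.
func canInlineChild(childArray *Array, maxInlineSize uint64) bool {
	return childArray.root.IsData() && childArray.root.Inlinable(maxInlineSize)
}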
+ for _, child := range children { + childArray := child.array + childValueID := child.valueID - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + existingStorable, err := childArray.Remove(0) require.NoError(t, err) - } + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - want := `[0 1 2 3 4 5]` - require.Equal(t, want, array.String()) - }) + require.True(t, childArray.Inlined()) - t.Run("large", func(t *testing.T) { - const arraySize = 120 + expectedStoredDeltas-- + require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) + expectedParentSize -= SlabIDStorable{}.ByteSize() + expectedParentSize += expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` - require.Equal(t, want, array.String()) - }) -} + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID -func TestArraySlabDump(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - t.Run("small", func(t *testing.T) { - const arraySize = 6 + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } } - want := []string{ - "level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) } - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - 
require.Equal(t, want, dumps) + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("large", func(t *testing.T) { - const arraySize = 120 + t.Run("parent is root metadata slab, with four child arrays", func(t *testing.T) { + const arraySize = 4 typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. + + // Test parent slab size with 4 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i].array = childArray + children[i].valueID = valueID } - want := []string{ - "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", - "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", - "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. 
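Why exactly 10 appends keep the child inlined? Each element contributes vSize bytes on top of the fixed inlined prefix, so the boundary can be computed directly. A back-of-envelope sketch (hedged: it assumes the inlined encoding is exactly the prefix plus n*vSize, as the ByteSize assertions in these tests do):

// maxInlinedAppends returns how many vSize-byte elements fit before the
// child's inlined encoding would exceed the maximum inline element size.
func maxInlinedAppends(vSize uint32) int {
	n := 0
	for uint64(inlinedArrayDataSlabPrefixSize)+uint64(n+1)*uint64(vSize) <= uint64(maxInlineArrayElementSize) {
		n++
	}
	return n
}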
+ for i := 0; i < 10; i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } } - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) - t.Run("overflow", func(t *testing.T) { + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for _, child := range children { + childArray := child.array + childValueID := child.valueID - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) - require.NoError(t, err) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - want := []string{ - "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Parent array has one data slab and all child arrays are not inlined. + require.Equal(t, 1+arraySize, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) + + // Remove one element from child array which triggers standalone array slab becomes inlined slab again. 
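When a child crosses the inline boundary, the parent swaps the child's inlined encoding for a SlabIDStorable reference (or back again). SlabIDStorable encodes to a fixed size, which is why the surrounding assertions can adjust expectedParentSize without knowing the concrete ID; a hedged restatement of that arithmetic:

// parentSizeAfterUninline mirrors the expectedParentSize adjustments in
// these tests: drop the inlined child bytes, add a fixed-size slab ID reference.
func parentSizeAfterUninline(parentSize, inlinedChildSize uint32) uint32 {
	return parentSize - inlinedChildSize + SlabIDStorable{}.ByteSize()
}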
+ for _, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) + + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } -func errorCategorizationCount(err error) int { - var fatalError *FatalError - var userError *UserError - var externalError *ExternalError + // Parent array has 1 data slab. + // All child arrays are inlined. + require.Equal(t, 1, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) - count := 0 - if errors.As(err, &fatalError) { - count++ - } - if errors.As(err, &userError) { - count++ - } - if errors.As(err, &externalError) { - count++ - } - return count + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) } -func TestArrayLoadedValueIterator(t *testing.T) { +func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 1 - t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create an array with empty child array as element, which has empty child array. 
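As with the one-level helper, createArrayWithEmpty2LevelChildArray is defined elsewhere in this file. A plausible sketch (an assumption, not the patch's actual code) is the one-level pattern applied twice, so each inlined child carries one empty inlined grand child:

func createArrayWithEmpty2LevelChildArray(
	t *testing.T,
	storage SlabStorage,
	address Address,
	typeInfo TypeInfo,
	arraySize int,
) (*Array, []Value) {
	parentArray, err := NewArray(storage, address, typeInfo)
	require.NoError(t, err)

	expectedValues := make([]Value, arraySize)
	for i := 0; i < arraySize; i++ {
		childArray, err := NewArray(storage, address, typeInfo)
		require.NoError(t, err)

		// The grand child is empty, so both levels start out inlined.
		gchildArray, err := NewArray(storage, address, typeInfo)
		require.NoError(t, err)

		err = childArray.Append(gchildArray)
		require.NoError(t, err)

		expectedValues[i] = childArray

		err = parentArray.Append(childArray)
		require.NoError(t, err)
	}

	return parentArray, expectedValues
}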
+ parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) - // parent array: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - verifyArrayLoadedElements(t, array, nil) - }) + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - const arraySize = 3 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - // parent array: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - verifyArrayLoadedElements(t, array, values) - }) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Get inlined grand child array + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) - verifyArrayLoadedElements(t, array, values) - }) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. 
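The loop that follows asserts sizes at all three levels after every append. As a compact restatement of the arithmetic it encodes (the constants are the package's own; the decomposition is inferred from these assertions):

// expectedNestedSizes mirrors the three ByteSize assertions below:
// each inlined level adds its prefix on top of its payload.
func expectedNestedSizes(n, vSize uint32) (gchild, child, parent uint32) {
	gchild = inlinedArrayDataSlabPrefixSize + n*vSize
	child = inlinedArrayDataSlabPrefixSize + gchild
	parent = arrayRootDataSlabPrefixSize + child
	return gchild, child, parent
}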
+		for i := 0; i < 8; i++ {
+			err = gchildArray.Append(v)
+			require.NoError(t, err)
+			require.Equal(t, uint64(i+1), gchildArray.Count())
+			require.Equal(t, uint64(1), childArray.Count())
-
-		const arraySize = 3
-		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+			require.True(t, childArray.Inlined())
+			require.True(t, gchildArray.Inlined())
+			require.Equal(t, 1, getStoredDeltas(storage))
-
-		// parent array: 1 root data slab
-		// nested composite elements: 1 root data slab for each
-		require.Equal(t, 1+arraySize, len(storage.deltas))
-		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-
-		verifyArrayLoadedElements(t, array, values)
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
-
-		// Unload composite element from front to back
-		for i := 0; i < len(values); i++ {
-			v := values[i]
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-
-			nestedArray, ok := v.(*Array)
-			require.True(t, ok)
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-
-			err := storage.Remove(nestedArray.SlabID())
-			require.NoError(t, err)
+			// Test parent slab size
+			expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-
-			expectedValues := values[i+1:]
-			verifyArrayLoadedElements(t, array, expectedValues)
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
 		}
-	})
-
-	t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+		// Add one more element to grand child array, which causes the inlined child array slab (NOT the grand child array slab) to become a standalone slab
+		err = gchildArray.Append(v)
+		require.NoError(t, err)
-
-		const arraySize = 3
-		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+		require.True(t, gchildArray.Inlined())
+		require.False(t, childArray.Inlined())
+		require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child array is no longer inlined.
-
-		// parent array: 1 root data slab
-		// nested composite elements: 1 root data slab for each
-		require.Equal(t, 1+arraySize, len(storage.deltas))
-		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+		expectedSlabID := valueIDToSlabID(valueID)
+		require.Equal(t, expectedSlabID, childArray.SlabID()) // Slab ID is the same bytewise as value ID.
+		require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-
-		verifyArrayLoadedElements(t, array, values)
+		require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+		require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
-
-		// Unload composite element from back to front
-		for i := len(values) - 1; i >= 0; i-- {
-			v := values[i]
+		// Test inlined grand child slab size
+		expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+		require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-
-			nestedArray, ok := v.(*Array)
-			require.True(t, ok)
+		expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize
+		require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize())
-
-			err := storage.Remove(nestedArray.SlabID())
+		expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize()
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+		verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+
+		// Remove elements from grand child array, which causes the standalone child array slab to become an inlined slab again.
+		for gchildArray.Count() > 0 {
+			existingStorable, err := gchildArray.Remove(0)
 			require.NoError(t, err)
+			require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable)
-
-			expectedValues := values[:i]
-			verifyArrayLoadedElements(t, array, expectedValues)
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 1, getStoredDeltas(storage))
+
+			require.Equal(t, SlabIDUndefined, childArray.SlabID())
+			require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
+
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+			require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
+
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
+
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+			// Test parent slab size
+			expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
+
+		require.Equal(t, uint64(0), gchildArray.Count())
+		require.Equal(t, uint64(1), childArray.Count())
+		require.Equal(t, uint64(arraySize), parentArray.Count())
 	})
-
-	t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) {
+	t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers grand child array slab to become standalone slab", func(t *testing.T) {
+		const arraySize = 1
+
+		typeInfo := testTypeInfo{42}
 		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-
-		const arraySize = 3
-		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+		// Create an array with empty child array as element, which has empty child array.
+ parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - verifyArrayLoadedElements(t, array, values) + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - // Unload composite element in the middle - unloadValueIndex := 1 + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - v := values[unloadValueIndex] + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - nestedArray, ok := v.(*Array) + childArray, ok := e.(*Array) require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err := storage.Remove(nestedArray.SlabID()) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + // Get inlined grand child array + e, err = childArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) - verifyArrayLoadedElements(t, array, values) - }) + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { - storage := newTestPersistentStorage(t) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. + for i := 0; i < 8; i++ { + err = gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - // At this point, iterator returned first element (v). 
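+ // Worked arithmetic behind "reaches max inlined size" (a sketch; both
+ // constants are assumptions): with an inlined-array prefix of 17 bytes
+ // and vSize = 10 bytes (1-byte CBOR text head + 9 bytes of "a"), 8
+ // appends put the inlined grand child at 17 + 8*10 = 97 bytes and the
+ // inlined child wrapping it at 17 + 97 = 114 bytes, so the next append
+ // of a larger value below tips the grand child itself over the limit.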
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- // Remove all other nested composite elements (except first element) from storage.
- for _, value := range values[1:] {
- nestedArray, ok := value.(*Array)
- require.True(t, ok)
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
- }
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
- require.Equal(t, 0, i)
- valueEqual(t, typeInfoComparator, values[0], v)
- i++
- return true, nil
- })
+ // Test parent slab size
+ expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+ require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
+ // Add one more element to grand child array, which triggers the inlined grand child array slab (NOT the child array slab) to become a standalone slab
+ largeValue := NewStringValue(strings.Repeat("b", 20))
+ largeValueSize := largeValue.ByteSize()
+ err = gchildArray.Append(largeValue)
require.NoError(t, err)
- require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration.
- })
- t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) {
- const arraySize = 3
+ require.False(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
+ require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because grand child array is no longer inlined.
- // Create an array with nested composite value at specified index
- for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ {
- storage := newTestPersistentStorage(t)
+ require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
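+ // Identity invariant exercised by these assertions: ValueID is the stable
+ // identity of a nested array and never changes, while SlabID is only
+ // defined when the array is standalone. A hypothetical helper capturing
+ // the invariant (illustrative only; not part of this test):
+ //
+ //	func requireIdentity(t *testing.T, a *Array, vid ValueID) {
+ //		require.Equal(t, vid, a.ValueID()) // stable across transitions
+ //		if a.Inlined() {
+ //			require.Equal(t, SlabIDUndefined, a.SlabID())
+ //		} else {
+ //			require.Equal(t, valueIDToSlabID(vid), a.SlabID())
+ //		}
+ //	}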
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex)
+ expectedSlabID := valueIDToSlabID(gValueID)
+ require.Equal(t, expectedSlabID, gchildArray.SlabID()) // Storage ID is the same bytewise as value ID.
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- // parent array: 1 root data slab
- // nested composite element: 1 root data slab
- require.Equal(t, 2, len(storage.deltas))
- require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+ // Test standalone grand child slab size
+ expectedStandaloneGrandChildSize := arrayRootDataSlabPrefixSize + uint32(gchildArray.Count()-1)*vSize + largeValueSize
+ require.Equal(t, expectedStandaloneGrandChildSize, gchildArray.root.ByteSize())
- verifyArrayLoadedElements(t, array, values)
+ expectedInlinedChildSlabSize := inlinedArrayDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize()
+ require.Equal(t, expectedInlinedChildSlabSize, childArray.root.ByteSize())
- // Unload composite element
- v := values[nestedCompositeIndex].(*Array)
+ expectedParentSize = arrayRootDataSlabPrefixSize + expectedInlinedChildSlabSize
+ require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
- err := storage.Remove(v.SlabID())
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+
+ // Remove elements from grand child array, which triggers the standalone grand child array slab to become an inlined slab again.
+ for gchildArray.Count() > 0 {
+ _, err := gchildArray.Remove(gchildArray.Count() - 1)
require.NoError(t, err)
- copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
- values = values[:len(values)-1]
+ require.True(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
+ require.Equal(t, 1, getStoredDeltas(storage))
- verifyArrayLoadedElements(t, array, values)
+ require.Equal(t, SlabIDUndefined, childArray.SlabID())
+ require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
+
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+ require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+ // Test parent slab size
+ expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+ require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
}
+
+ require.Equal(t, uint64(0), gchildArray.Count())
+ require.Equal(t, uint64(1), childArray.Count())
+ require.Equal(t, uint64(arraySize), parentArray.Count())
})
- t.Run("root metadata slab with simple values", func(t *testing.T) {
+ t.Run("parent is root data slab, two child arrays, one grand child array each, changes to child array triggers child array slab to become standalone slab", func(t *testing.T) {
+ const arraySize = 2
+
+ typeInfo := testTypeInfo{42}
storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- const arraySize = 20
- array, values := 
createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - // parent array: 1 root metadata slab, 2 data slabs - require.Equal(t, 3, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) - }) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - t.Run("root metadata slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Append element to grand child array + err = gchild.Append(v) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) - }) + // Append child array to parent + err = parentArray.Append(child) + require.NoError(t, err) - t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedValues[i] = child + } - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
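+ // Size expectation below, unpacked as a sketch: each of the arraySize
+ // parent elements is an inlined child that itself wraps an inlined grand
+ // child holding one string, so each element costs two inlined-slab
+ // prefixes plus one vSize:
+ //
+ //	parent = arrayRootDataSlabPrefixSize
+ //	       + arraySize*(2*inlinedArrayDataSlabPrefixSize + vSize)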
- // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + vSize*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + type arrayInfo struct { + array *Array + valueID ValueID + child *arrayInfo + } - nestedArray, ok := v.(*Array) + children := make([]arrayInfo, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err := storage.Remove(nestedArray.SlabID()) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + e, err = childArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + children[i] = arrayInfo{ + array: childArray, + valueID: valueID, + child: &arrayInfo{array: gchildArray, valueID: gValueID}, + } } - }) - t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 7 elements to child array so that inlined child array reaches max inlined size as array element. 
+ for i := 0; i < 7; i++ { + for _, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+2), childArray.Count()) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Test inlined grand child slab size (1 element, unchanged) + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(i+1) + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Add one more element to child array which triggers inlined child array slab (NOT grand child array slab) becomes standalone slab + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - err := storage.Remove(nestedArray.SlabID()) + err = childArray.Append(v) require.NoError(t, err) - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) - } - }) + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) + require.Equal(t, 2+i, getStoredDeltas(storage)) // There are >1 stored slab because child array is no longer inlined. - t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
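+ // Stored-slab accounting for the assertion above (a sketch): the parent
+ // root data slab always counts as 1 delta, and each child array that has
+ // fallen out of inlining adds exactly one standalone slab, hence 2+i
+ // after un-inlining the zero-based child i.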
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- const arraySize = 20
- array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- // parent array: 1 root metadata slab, 2 data slabs
- // nested composite value element: 1 root data slab for each
- require.Equal(t, 3+arraySize, len(storage.deltas))
- require.Equal(t, 1, getArrayMetaDataSlabCount(storage))
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- verifyArrayLoadedElements(t, array, values)
+ expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1)
+ require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize())
- // Unload composite element from back to front
- for i := len(values) - 1; i >= 0; i-- {
- v := values[i]
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because child arrays are no longer inlined.
- err := storage.Remove(nestedArray.SlabID())
+ expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(SlabID{}).ByteSize()*2
+ require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
- require.NoError(t, err)
+ // Remove one element from each child array to trigger child arrays being inlined again.
+ expectedParentSize = arrayRootDataSlabPrefixSize
- expectedValues := values[:i]
- verifyArrayLoadedElements(t, array, expectedValues)
+ for i, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
- }
- })
+ _, err = childArray.Remove(childArray.Count() - 1)
+ require.NoError(t, err)
- t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ require.True(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
+ require.Equal(t, 2-i, getStoredDeltas(storage))
- const arraySize = 20
- array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+ require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
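+ // Why removal re-inlines (a sketch of the mechanism, under assumptions):
+ // mutating a standalone child propagates to its parent, and the parent
+ // re-inlines the child once the child's root is a data slab whose
+ // encoded size fits the inline limit again, roughly:
+ //
+ //	if child.root.IsData() && uint64(child.root.ByteSize()) <= maxInlineSize {
+ //		// re-embed child's encoding in parent; child.SlabID() becomes undefined
+ //	}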
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex)
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- // parent array: 1 root metadata slab, 2 data slabs
- // nested composite value element: 1 root data slab for each
- require.Equal(t, 3+1, len(storage.deltas))
- require.Equal(t, 1, getArrayMetaDataSlabCount(storage))
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- verifyArrayLoadedElements(t, array, values)
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1)
+ require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
- // Unload composite value
- v := values[nestedCompositeIndex].(*Array)
+ expectedParentSize += expectedInlinedChildSize
- err := storage.Remove(v.SlabID())
- require.NoError(t, err)
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
- copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
- values = values[:len(values)-1]
+ require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
- verifyArrayLoadedElements(t, array, values)
- }
- })
+ // Remove elements from child array.
+ elementCount := children[0].array.Count()
- t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ for i := uint64(0); i < elementCount-1; i++ {
+ for _, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
- const arraySize = 30
- array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+ existingStorable, err := childArray.Remove(childArray.Count() - 1)
+ require.NoError(t, err)
+ require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable)
- // parent array (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getArrayMetaDataSlabCount(storage))
+ require.True(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
+ require.Equal(t, 1, getStoredDeltas(storage))
- verifyArrayLoadedElements(t, array, values)
+ require.Equal(t, SlabIDUndefined, childArray.SlabID())
+ require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
- metaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
- require.True(t, ok)
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+ require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
- // Unload data slabs from front to back
- for i := 0; i < len(metaDataSlab.childrenHeaders); i++ {
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- childHeader := metaDataSlab.childrenHeaders[i]
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1)
+ require.Equal(t, 
expectedInlinedChildSize, childArray.root.ByteSize()) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + // Test parent slab size + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - values = values[childHeader.count:] + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } - verifyArrayLoadedElements(t, array, values) + for _, child := range children { + require.Equal(t, uint64(1), child.child.array.Count()) + require.Equal(t, uint64(1), child.array.Count()) } + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) - - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + t.Run("parent is root metadata slab, with four child arrays, each child array has grand child arrays", func(t *testing.T) { + const arraySize = 4 - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - verifyArrayLoadedElements(t, array, values) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // Unload data slabs from back to front - for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - childHeader := metaDataSlab.childrenHeaders[i] + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err := storage.Remove(childHeader.slabID) + // Append grand child array to child array + err = child.Append(gchild) require.NoError(t, err) - values = values[:len(values)-int(childHeader.count)] + // Append child array to parent + err = parentArray.Append(child) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + expectedValues[i] = child } - }) - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
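+ // Context for the transitions exercised below (an assumption about the
+ // storage model, not something this test asserts directly): when a root
+ // data slab outgrows the maximum slab size it splits, and the root
+ // becomes a metadata slab pointing at the split halves -- so
+ // getStoredDeltas jumps from 1 (root data slab) to 3 (root metadata slab
+ // plus 2 data slabs) even while every child array remains inlined.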
- const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - verifyArrayLoadedElements(t, array, values) + type arrayInfo struct { + array *Array + valueID ValueID + child *arrayInfo + } - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + children := make([]arrayInfo, arraySize) - require.True(t, len(metaDataSlab.childrenHeaders) > 2) + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - index := 1 - childHeader := metaDataSlab.childrenHeaders[index] + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:]) - values = values[:array.Count()-uint64(childHeader.count)] + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) - }) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + children[i] = arrayInfo{ + array: childArray, + valueID: valueID, + child: &arrayInfo{array: gchildArray, valueID: gValueID}, + } + } - t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 6 elements to grand child array so that parent array root slab is metadata slab. 
+ for i := uint32(0); i < 6; i++ {
+ for _, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
- const arraySize = 250
- array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+ err := gchildArray.Append(v)
+ require.NoError(t, err)
+ require.Equal(t, uint64(i+1), gchildArray.Count())
- // parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs
- require.Equal(t, 3, getArrayMetaDataSlabCount(storage))
+ require.True(t, childArray.Inlined())
+ require.True(t, gchildArray.Inlined())
+ require.Equal(t, 1, getStoredDeltas(storage))
- rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
- require.True(t, ok)
+ require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- // Unload non-root metadata slabs from front to back
- for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- childHeader := rootMetaDataSlab.childrenHeaders[i]
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize*(i+1)
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- err := storage.Remove(childHeader.slabID)
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
+ }
+
+ // Add one more element to grand child array, which triggers the parent array root slab to become a metadata slab (all elements are still inlined).
+ for _, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
+
+ err = gchildArray.Append(v)
require.NoError(t, err)
- values = values[childHeader.count:]
+ require.True(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
+ require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because parent root slab is metadata.
- verifyArrayLoadedElements(t, array, values)
- }
- })
+ require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- const arraySize = 250
- array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- // parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs
- require.Equal(t, 3, getArrayMetaDataSlabCount(storage))
+ expectedInlinedChildSlabSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedInlinedChildSlabSize, childArray.root.ByteSize())
- rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
- require.True(t, ok)
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
- // Unload non-root metadata slabs from back to front
- for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
+ require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because parent root slab is a metadata slab (child arrays are still inlined).
+ require.False(t, parentArray.root.IsData())
- childHeader := rootMetaDataSlab.childrenHeaders[i]
+ // Add two more elements to each grand child array, which triggers
+ // - child arrays to become standalone slabs (grand child arrays are still inlined)
+ // - parent array root slab to become a data slab
+ for _, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ for i := 0; i < 2; i++ {
+ err = gchildArray.Append(v)
+ require.NoError(t, err)
+ }
- values = values[childHeader.count:]
+ require.True(t, gchildArray.Inlined())
+ require.False(t, childArray.Inlined())
- verifyArrayLoadedElements(t, array, values)
- }
- })
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID.
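+ // Why un-inlining can shrink the parent back to a single data slab
+ // (sketch; exact sizes are assumptions): a standalone child costs its
+ // parent only a fixed-size reference rather than the whole inlined
+ // encoding, e.g.:
+ //
+ //	refSize := SlabIDStorable(expectedSlabID).ByteSize() // small and fixed
+ //	// parent element cost: refSize, versus
+ //	// inlinedArrayDataSlabPrefixSize + payload while inlined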
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) {
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- storage := newTestPersistentStorage(t)
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- const arraySize = 500
- array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+ expectedStandaloneChildSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedStandaloneChildSlabSize, childArray.root.ByteSize())
- // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
- // nested composite elements: 1 root data slab for each
- require.True(t, len(storage.deltas) > 1+arraySize)
- require.True(t, getArrayMetaDataSlabCount(storage) > 1)
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
- verifyArrayLoadedElements(t, array, values)
+ // Parent array has one root data slab; each of the 4 child arrays has a standalone root data slab.
+ require.Equal(t, 1+arraySize, getStoredDeltas(storage))
+ require.True(t, parentArray.root.IsData())
- r := newRand(t)
+ // Remove elements from grand child arrays to trigger child arrays becoming inlined again.
+ for _, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
- // Unload random composite element
- for len(values) > 0 {
+ for i := 0; i < 2; i++ {
+ _, err = gchildArray.Remove(0)
+ require.NoError(t, err)
+ }
- i := r.Intn(len(values))
+ require.True(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
- v := values[i]
+ require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- copy(values[i:], values[i+1:])
- values = values[:len(values)-1]
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
- verifyArrayLoadedElements(t, array, values)
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
}
- })
+ // Parent array has 1 root metadata slab and 2 data slabs; all child and grand child arrays are inlined.
+ require.Equal(t, 3, getStoredDeltas(storage))
+ require.False(t, parentArray.root.IsData())
- t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Remove elements from grand child array.
+ elementCount := children[0].child.array.Count()
- const arraySize = 500
- array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+ for i := uint64(0); i < elementCount; i++ {
+ for _, child := range children {
+ childArray := child.array
+ valueID := child.valueID
+ gchildArray := child.child.array
+ gValueID := child.child.valueID
- // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
- // nested composite elements: 1 root data slab for each
- require.True(t, len(storage.deltas) > 1+arraySize)
- require.True(t, getArrayMetaDataSlabCount(storage) > 1)
+ existingStorable, err := gchildArray.Remove(0)
+ require.NoError(t, err)
+ require.Equal(t, v, existingStorable)
- verifyArrayLoadedElements(t, array, values)
+ require.True(t, gchildArray.Inlined())
+ require.True(t, childArray.Inlined())
- rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
- require.True(t, ok)
+ require.Equal(t, SlabIDUndefined, childArray.SlabID())
+ require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
- type slabInfo struct {
- id SlabID
- startIndex int
- count int
- }
+ require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+ require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
- count := 0
- var dataSlabInfos []*slabInfo
- for _, mheader := range rootMetaDataSlab.childrenHeaders {
- nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab)
- require.True(t, ok)
+ // Test inlined grand child slab size
+ expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+ require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
- for _, h := range nonrootMetaDataSlab.childrenHeaders {
- dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)}
- dataSlabInfos = append(dataSlabInfos, dataSlabInfo)
- count += int(h.count)
+ // Test inlined child slab size
+ expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+ require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+ verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
}
}
- r := newRand(t)
+ for _, child := range children {
+ require.Equal(t, uint64(0), child.child.array.Count())
+ require.Equal(t, uint64(1), child.array.Count())
+ }
+ require.Equal(t, uint64(arraySize), parentArray.Count())
+ require.Equal(t, 1, getStoredDeltas(storage))
- // Unload random data slab.
- for len(dataSlabInfos) > 0 {
- indexToUnload := r.Intn(len(dataSlabInfos))
+ expectedParentSize = uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize*2
+ require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+ })
+}
- slabInfoToUnload := dataSlabInfos[indexToUnload]
+func TestChildArrayWhenParentArrayIsModified(t *testing.T) {
- // Update startIndex for all data slabs after indexToUnload.
- for i := indexToUnload + 1; i < len(dataSlabInfos); i++ {
- dataSlabInfos[i].startIndex -= slabInfoToUnload.count
- }
+ const arraySize = 2
- // Remove slabInfo to be unloaded from dataSlabInfos.
- copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:])
- dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- err := storage.Remove(slabInfoToUnload.id)
- require.NoError(t, err)
+ // Create an array with empty child array as element.
+ parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) - values = values[:len(values)-slabInfoToUnload.count] + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - verifyArrayLoadedElements(t, array, values) - } + // Test parent slab size with empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - require.Equal(t, 0, len(values)) - }) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) - storage := newTestPersistentStorage(t) + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - verifyArrayLoadedElements(t, array, values) + children[i].array = childArray + children[i].valueID = valueID + } + + t.Run("insert elements in parent array", func(t *testing.T) { + // insert value at index 0, so all child array indexes are moved by +1 + v := Uint64Value(0) + err := parentArray.Insert(0, v) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - children []*slabInfo - } + expectedValues = append(expectedValues, nil) + copy(expectedValues[1:], expectedValues) + expectedValues[0] = v - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - var dataSlabCount, metadataSlabCount int - nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + v := Uint64Value(i) + vSize := v.ByteSize() - nonrootMetadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: metadataSlabCount, - count: int(mheader.count), - } - metadataSlabCount += int(mheader.count) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(1), childArray.Count()) - nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - children := make([]*slabInfo, 
len(nonrootMetadataSlab.childrenHeaders)) - for i, h := range nonrootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: dataSlabCount, - count: int(h.count), - } - dataSlabCount += int(h.count) - } + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - nonrootMetadataSlabInfo.children = children - nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - r := newRand(t) + // insert value at index 2, so only second child array index is moved by +1 + v = Uint64Value(2) + err = parentArray.Insert(2, v) + require.NoError(t, err) - const ( - metadataSlabType int = iota - dataSlabType - maxSlabType - ) + expectedValues = append(expectedValues, nil) + copy(expectedValues[3:], expectedValues[2:]) + expectedValues[2] = v - for len(nonrootMetadataSlabInfos) > 0 { + for i, child := range children { + childArray := child.array + childValueID := child.valueID - var slabInfoToBeRemoved *slabInfo - var isLastSlab bool + v := Uint64Value(i) + vSize := v.ByteSize() - // Unload random metadata or data slab. - switch r.Intn(maxSlabType) { + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) - case metadataSlabType: - // Unload metadata slab at random index. - metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1 + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex] + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - count := slabInfoToBeRemoved.count + // insert value at index 4, so none of child array indexes are affected. + v = Uint64Value(4) + err = parentArray.Insert(4, v) + require.NoError(t, err) - // Update startIndex for subsequence metadata and data slabs. - for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + expectedValues = append(expectedValues, nil) + expectedValues[4] = v - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + for i, child := range children { + childArray := child.array + childValueID := child.valueID - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + v := Uint64Value(i) + vSize := v.ByteSize() - case dataSlabType: - // Unload data slab at randome index. 
- metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(3), childArray.Count()) - metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex] + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - dataSlabIndex := r.Intn(len(metaSlabInfo.children)) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex] + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + }) - isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) && - (dataSlabIndex == len(metaSlabInfo.children)-1) + t.Run("remove elements from parent array", func(t *testing.T) { + // remove value at index 0, so all child array indexes are moved by -1. + existingStorable, err := parentArray.Remove(0) + require.NoError(t, err) + require.Equal(t, Uint64Value(0), existingStorable) - count := slabInfoToBeRemoved.count + copy(expectedValues, expectedValues[1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // Update startIndex for subsequence data slabs. - for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { - metaSlabInfo.children[i].startIndex -= count - } + for i, child := range children { + childArray := child.array + childValueID := child.valueID - copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) - metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + v := Uint64Value(i) + vSize := v.ByteSize() - metaSlabInfo.count -= count + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(4), childArray.Count()) - // Update startIndex for all subsequence metadata slabs. 
- for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - if len(metaSlabInfo.children) == 0 { - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] - } - } + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - err := storage.Remove(slabInfoToBeRemoved.id) + // Remove value at index 1, so only second child array index is moved by -1 + existingStorable, err = parentArray.Remove(1) + require.NoError(t, err) + require.Equal(t, Uint64Value(2), existingStorable) + + copy(expectedValues[1:], expectedValues[2:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] + + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + v := Uint64Value(i) + vSize := v.ByteSize() + + err := childArray.Append(v) require.NoError(t, err) + require.Equal(t, uint64(5), childArray.Count()) - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - verifyArrayLoadedElements(t, array, values) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - require.Equal(t, 0, len(values)) - }) -} + // Remove value at index 2 (last element), so none of child array indexes are affected. 
+ existingStorable, err = parentArray.Remove(2) + require.NoError(t, err) + require.Equal(t, Uint64Value(4), existingStorable) -func createArrayWithSimpleValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, -) (*Array, []Value) { + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // Create parent array - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - values := make([]Value, arraySize) - r := rune('a') - for i := 0; i < arraySize; i++ { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) + v := Uint64Value(i) + vSize := v.ByteSize() - err := array.Append(values[i]) - require.NoError(t, err) - } + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(6), childArray.Count()) - return array, values + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + }) } -func createArrayWithCompositeValues( +func createArrayWithEmptyChildArray( t *testing.T, storage SlabStorage, address Address, @@ -3573,143 +6143,62 @@ func createArrayWithCompositeValues( expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) + // Create child array + child, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - err = nested.Append(Uint64Value(i)) + // Append child array to parent + err = array.Append(child) require.NoError(t, err) - expectedValues[i] = nested - - // Append nested array to parent - err = array.Append(nested) - require.NoError(t, err) + expectedValues[i] = child } return array, expectedValues } -func createArrayWithSimpleAndCompositeValues( +func createArrayWithEmpty2LevelChildArray( t *testing.T, storage SlabStorage, address Address, typeInfo TypeInfo, arraySize int, - compositeValueIndex int, ) (*Array, []Value) { - require.True(t, compositeValueIndex < arraySize) + // Create parent array array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) - r := 'a' + expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - values[i] = a - } else { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) - r++ - } + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - err = array.Append(values[i]) + // Append child array to parent + err = array.Append(child) require.NoError(t, err) - } - return array, values -} + expectedValues[i] = child + } -func 
verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i], v) - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) + return array, expectedValues } -func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int +func getStoredDeltas(storage *PersistentSlabStorage) int { + count := 0 for _, slab := range storage.deltas { - if _, ok := slab.(*ArrayMetaDataSlab); ok { - counter++ + if slab != nil { + count++ } } - return counter -} - -func TestArrayID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - sid := array.SlabID() - id := array.ValueID() - - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) -} - -func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { - const ( - arraySize = 3 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) - - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - values := make([]*mutableValue, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := newMutableValue(initialStorableSize) - values[i] = v - - err := array.Append(v) - require.NoError(t, err) - } - - require.True(t, array.root.IsData()) - - expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) - - for i := uint64(0); i < arraySize; i++ { - mv := values[i] - mv.updateStorableSize(mutatedStorableSize) - - existingStorable, err := array.Set(i, mv) - require.NoError(t, err) - require.NotNil(t, existingStorable) - } - - require.True(t, array.root.IsData()) - - expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) + return count } diff --git a/basicarray.go b/basicarray.go index b5267e4c..143bec35 100644 --- a/basicarray.go +++ b/basicarray.go @@ -76,7 +76,7 @@ func newBasicArrayDataSlabFromData( ) } - cborDec := decMode.NewByteStreamDecoder(data[2:]) + cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) elemCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -85,7 +85,7 @@ func newBasicArrayDataSlabFromData( elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element")
@@ -101,10 +101,17 @@ func newBasicArrayDataSlabFromData(
 
 func (a *BasicArrayDataSlab) Encode(enc *Encoder) error {
 
-	flag := maskBasicArray | maskSlabRoot
+	const version = 1
+
+	h, err := newArraySlabHead(version, slabBasicArray)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	h.setRoot()
 
 	// Encode flag
-	_, err := enc.Write([]byte{0x0, flag})
+	_, err = enc.Write(h[:])
 	if err != nil {
 		return NewEncodingError(err)
 	}
diff --git a/cmd/main/main.go b/cmd/main/main.go
index 3e0cf470..3b2eaebd 100644
--- a/cmd/main/main.go
+++ b/cmd/main/main.go
@@ -77,6 +77,14 @@ type testTypeInfo struct{}
 
 var _ atree.TypeInfo = testTypeInfo{}
 
+func (testTypeInfo) IsComposite() bool {
+	return false
+}
+
+func (i testTypeInfo) ID() string {
+	return fmt.Sprintf("uint64(%d)", i)
+}
+
 func (testTypeInfo) Encode(e *cbor.StreamEncoder) error {
 	return e.EncodeUint8(42)
 }
@@ -86,7 +94,7 @@ func (i testTypeInfo) Equal(other atree.TypeInfo) bool {
 	return ok
 }
 
-func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) {
+func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) {
 	tagNumber, err := dec.DecodeTagNumber()
 	if err != nil {
 		return nil, err
diff --git a/cmd/stress/storable.go b/cmd/stress/storable.go
index b3fba90a..a2bdf1da 100644
--- a/cmd/stress/storable.go
+++ b/cmd/stress/storable.go
@@ -413,7 +413,7 @@ func (v StringValue) String() string {
 	return v.str
 }
 
-func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) {
+func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) {
 	t, err := dec.NextType()
 	if err != nil {
 		return nil, err
diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go
index 4618dc12..b14c212b 100644
--- a/cmd/stress/typeinfo.go
+++ b/cmd/stress/typeinfo.go
@@ -19,6 +19,8 @@ package main
 
 import (
+	"fmt"
+
 	"github.com/onflow/atree"
 
 	"github.com/fxamacker/cbor/v2"
@@ -30,6 +32,14 @@ type testTypeInfo struct {
 
 var _ atree.TypeInfo = testTypeInfo{}
 
+func (i testTypeInfo) IsComposite() bool {
+	return false
+}
+
+func (i testTypeInfo) ID() string {
+	return fmt.Sprintf("uint64(%d)", i)
+}
+
 func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error {
 	return e.EncodeUint64(i.value)
 }
diff --git a/encode.go b/encode.go
index c88fa3a8..5f46505c 100644
--- a/encode.go
+++ b/encode.go
@@ -30,19 +30,45 @@ type Encoder struct {
 	io.Writer
 	CBOR    *cbor.StreamEncoder
 	Scratch [64]byte
+	encMode cbor.EncMode
 }
 
 func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder {
 	streamEncoder := encMode.NewStreamEncoder(w)
 	return &Encoder{
-		Writer: w,
-		CBOR:   streamEncoder,
+		Writer:  w,
+		CBOR:    streamEncoder,
+		encMode: encMode,
 	}
 }
 
+// encodeStorableAsElement encodes storable as Array or OrderedMap element.
+// Storable is encoded as an inlined ArrayDataSlab or MapDataSlab if it is ArrayDataSlab or MapDataSlab.
+func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *inlinedExtraData) error {
+
+	switch storable := storable.(type) {
+
+	case *ArrayDataSlab:
+		return storable.encodeAsInlined(enc, inlinedTypeInfo)
+
+	case *MapDataSlab:
+		return storable.encodeAsInlined(enc, inlinedTypeInfo)
+
+	default:
+		err := storable.Encode(enc)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by Storable interface.
+ return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") + } + } + + return nil +} + type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, + inlinedExtraData []ExtraData, ) ( Storable, error, @@ -101,7 +127,7 @@ func DecodeSlab( case slabStorable: cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) - storable, err := decodeStorable(cborDec, id) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode slab storable") @@ -116,7 +142,6 @@ func DecodeSlab( } } -// TODO: make it inline func GetUintCBORSize(n uint64) uint32 { if n <= 23 { return 1 diff --git a/map.go b/map.go index 7d9eabf8..f9f6b596 100644 --- a/map.go +++ b/map.go @@ -19,6 +19,7 @@ package atree import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -83,6 +84,14 @@ const ( // CircleHash64fx and SipHash might use this const as part of their // 128-bit seed (when they don't use 64-bit -> 128-bit seed expansion func). typicalRandomConstant = uint64(0x1BD11BDAA9FC1A22) // DO NOT MODIFY + + // inlined map data slab prefix size: + // tag number (2 bytes) + + // 3-element array head (1 byte) + + // extra data ref index (2 bytes) [0, 255] + + // value index head (1 byte) + + // value index (8 bytes) + inlinedMapDataSlabPrefixSize = 2 + 1 + 2 + 1 + 8 ) // MaxCollisionLimitPerDigest is the noncryptographic hash collision limit @@ -134,7 +143,7 @@ type element interface { key Value, ) (MapKey, MapValue, element, error) - Encode(*Encoder) error + Encode(*Encoder, *inlinedExtraData) error hasPointer() bool @@ -174,7 +183,8 @@ type elements interface { Element(int) (element, error) - Encode(*Encoder) error + Encode(*Encoder, *inlinedExtraData) error + EncodeCompositeValues(*Encoder, []MapKey, *inlinedExtraData) error hasPointer() bool @@ -239,6 +249,8 @@ type MapExtraData struct { Seed uint64 } +var _ ExtraData = &MapExtraData{} + // MapDataSlab is leaf node, implementing MapSlab. // anySize is true for data slab that isn't restricted by size requirement. type MapDataSlab struct { @@ -253,9 +265,11 @@ type MapDataSlab struct { anySize bool collisionGroup bool + inlined bool } var _ MapSlab = &MapDataSlab{} +var _ Storable = &MapDataSlab{} // MapMetaDataSlab is internal node, implementing MapSlab. 
type MapMetaDataSlab struct { @@ -292,15 +306,20 @@ type MapSlab interface { SetExtraData(*MapExtraData) PopIterate(SlabStorage, MapPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool } type OrderedMap struct { Storage SlabStorage root MapSlab digesterBuilder DigesterBuilder + parentUpdater parentUpdater } var _ Value = &OrderedMap{} +var _ valueNotifier = &OrderedMap{} const mapExtraDataLength = 3 @@ -365,6 +384,10 @@ func newMapExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) (* }, nil } +func (m *MapExtraData) isExtraData() bool { + return true +} + // Encode encodes extra data as CBOR array: // // [type info, count, seed] @@ -399,7 +422,7 @@ func (m *MapExtraData) Encode(enc *Encoder) error { return nil } -func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (element, error) { +func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (element, error) { nt, err := cborDec.NextType() if err != nil { return nil, NewDecodingError(err) @@ -408,7 +431,7 @@ func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDeco switch nt { case cbor.ArrayType: // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). - return newSingleElementFromData(cborDec, decodeStorable) + return newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) case cbor.TagType: tagNum, err := cborDec.DecodeTagNumber() @@ -418,10 +441,10 @@ func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDeco switch tagNum { case CBORTagInlineCollisionGroup: // Don't need to wrap error as external error because err is already categorized by newInlineCollisionGroupFromData(). - return newInlineCollisionGroupFromData(cborDec, decodeStorable) + return newInlineCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) case CBORTagExternalCollisionGroup: // Don't need to wrap error as external error because err is already categorized by newExternalCollisionGroupFromData(). - return newExternalCollisionGroupFromData(cborDec, decodeStorable) + return newExternalCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) default: return nil, NewDecodingError(fmt.Errorf("failed to decode element: unrecognized tag number %d", tagNum)) } @@ -452,7 +475,7 @@ func newSingleElement(storage SlabStorage, address Address, key Value, value Val }, nil } -func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*singleElement, error) { +func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*singleElement, error) { elemCount, err := cborDec.DecodeArrayHead() if err != nil { return nil, NewDecodingError(err) @@ -462,13 +485,13 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab return nil, NewDecodingError(fmt.Errorf("failed to decode single element: expect array of 2 elements, got %d elements", elemCount)) } - key, err := decodeStorable(cborDec, SlabIDUndefined) + key, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") } - value, err := decodeStorable(cborDec, SlabIDUndefined) + value, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode value's storable") @@ -484,7 +507,7 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab // Encode encodes singleElement to the given encoder. // // CBOR encoded array of 2 elements (key, value). -func (e *singleElement) Encode(enc *Encoder) error { +func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { // Encode CBOR array head for 2 elements err := enc.CBOR.EncodeRawBytes([]byte{0x82}) @@ -500,7 +523,7 @@ func (e *singleElement) Encode(enc *Encoder) error { } // Encode value - err = e.value.Encode(enc) + err = encodeStorableAsElement(enc, e.value, inlinedTypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") @@ -648,8 +671,8 @@ func (e *singleElement) String() string { return fmt.Sprintf("%s:%s", e.key, e.value) } -func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*inlineCollisionGroup, error) { - elements, err := newElementsFromData(cborDec, decodeStorable) +func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*inlineCollisionGroup, error) { + elements, err := newElementsFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). return nil, err @@ -661,7 +684,7 @@ func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable // Encode encodes inlineCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements) -func (e *inlineCollisionGroup) Encode(enc *Encoder) error { +func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagInlineCollisionGroup @@ -671,7 +694,7 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder) error { return NewEncodingError(err) } - err = e.elements.Encode(enc) + err = e.elements.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). return err @@ -829,9 +852,9 @@ func (e *inlineCollisionGroup) String() string { return "inline[" + e.elements.String() + "]" } -func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*externalCollisionGroup, error) { +func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*externalCollisionGroup, error) { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode Storable") @@ -851,7 +874,7 @@ func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorab // Encode encodes externalCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) -func (e *externalCollisionGroup) Encode(enc *Encoder) error { +func (e *externalCollisionGroup) Encode(enc *Encoder, _ *inlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagExternalCollisionGroup 0xd8, CBORTagExternalCollisionGroup, @@ -1029,7 +1052,7 @@ func (e *externalCollisionGroup) String() string { return fmt.Sprintf("external(%s)", e.slabID) } -func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (elements, error) { +func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (elements, error) { arrayCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -1076,7 +1099,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(singleElementsPrefixSize) elems := make([]*singleElement, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newSingleElementFromData(cborDec, decodeStorable) + elem, err := newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). return nil, err @@ -1102,7 +1125,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(hkeyElementsPrefixSize) elems := make([]element, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newElementFromData(cborDec, decodeStorable) + elem, err := newElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementFromData(). return nil, err @@ -1146,7 +1169,7 @@ func newHkeyElementsWithElement(level uint, hkey Digest, elem element) *hkeyElem // 1: hkeys (byte string) // 2: elements (array) // ] -func (e *hkeyElements) Encode(enc *Encoder) error { +func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1200,7 +1223,7 @@ func (e *hkeyElements) Encode(enc *Encoder) error { // Encode each element for _, e := range e.elems { - err = e.Encode(enc) + err = e.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Encode(). return err @@ -1216,6 +1239,70 @@ func (e *hkeyElements) Encode(enc *Encoder) error { return nil } +// EncodeCompositeValues encodes hkeyElements as an array of values ordered by orderedKeys. +// Level is not encoded because it is always 0. Digests are not encoded because +// they are encoded with composite keys in the composite extra data section. 
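+//
+// For illustration (hypothetical composite and bytes, not from this codebase):
+// if the composite extra data section already stores the ordered keys
+// ["name", "id"] and their digests, a composite {name: "a", id: 1} whose
+// values encode as plain CBOR would be encoded here as just:
+//
+//	0x82,       // CBOR array head, 2 values
+//	0x61, 0x61, // "a" (value of "name")
+//	0x01,       // 1 (value of "id")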
+func (e *hkeyElements) EncodeCompositeValues(enc *Encoder, orderedKeys []MapKey, inlinedTypeInfo *inlinedExtraData) error {
+	if e.level != 0 {
+		return NewEncodingError(fmt.Errorf("hash level must be 0 to be encoded as composite, got %d", e.level))
+	}
+
+	if len(e.elems) != len(orderedKeys) {
+		return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in composite extra data %d", len(e.elems), len(orderedKeys)))
+	}
+
+	var err error
+
+	err = enc.CBOR.EncodeArrayHead(uint64(len(orderedKeys)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	keyIndexes := make([]int, len(e.elems))
+	for i := 0; i < len(e.elems); i++ {
+		keyIndexes[i] = i
+	}
+
+	// Encode values in the same order as orderedKeys.
+	for i, k := range orderedKeys {
+		key, ok := k.(EquatableStorable)
+		if !ok {
+			return NewEncodingError(fmt.Errorf("composite keys must implement EquatableStorable"))
+		}
+
+		found := false
+		for j := i; j < len(keyIndexes); j++ {
+			index := keyIndexes[j]
+			se, ok := e.elems[index].(*singleElement)
+			if !ok {
+				return NewEncodingError(fmt.Errorf("composite element must not have collision"))
+			}
+			if key.Equal(se.key) {
+				found = true
+				keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i]
+
+				err = encodeStorableAsElement(enc, se.value, inlinedTypeInfo)
+				if err != nil {
+					// Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement().
+					return err
+				}
+
+				break
+			}
+		}
+		if !found {
+			return NewEncodingError(fmt.Errorf("failed to find key %v", k))
+		}
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
 func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) {
 
 	if level >= digester.Levels() {
@@ -1797,7 +1884,7 @@ func newSingleElementsWithElement(level uint, elem *singleElement) *singleElemen
 //	1: hkeys (0 length byte string)
 //	2: elements (array)
 // ]
-func (e *singleElements) Encode(enc *Encoder) error {
+func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
 
 	if e.level > maxDigestLevel {
 		return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel))
@@ -1828,7 +1915,7 @@ func (e *singleElements) Encode(enc *Encoder) error {
 
 	// Encode each element
 	for _, e := range e.elems {
-		err = e.Encode(enc)
+		err = e.Encode(enc, inlinedTypeInfo)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by singleElement.Encode().
 			return err
@@ -1844,6 +1931,10 @@ func (e *singleElements) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (e *singleElements) EncodeCompositeValues(_ *Encoder, _ []MapKey, _ *inlinedExtraData) error {
+	return NewEncodingError(fmt.Errorf("singleElements can't be encoded as composite values"))
+}
+
 func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) {
 
 	if level != digester.Levels() {
@@ -2147,7 +2238,7 @@ func newMapDataSlabFromDataV0(
 
 	// Decode elements
 	cborDec := decMode.NewByteStreamDecoder(data)
 
-	elements, err := newElementsFromData(cborDec, decodeStorable)
+	elements, err := newElementsFromData(cborDec, decodeStorable, id, nil)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by newElementsFromDataV0().
 		return nil, err
@@ -2179,21 +2270,22 @@ func newMapDataSlabFromDataV0(
 //
 // Root DataSlab Header:
 //
-//	+-------------------------------+------------+
-//	| slab version + flag (2 bytes) | extra data |
-//	+-------------------------------+------------+
+//	+-------------------------------+------------+---------------------------------+
+//	| slab version + flag (2 bytes) | extra data | inlined extra data (if present) |
+//	+-------------------------------+------------+---------------------------------+
 //
-// Non-root DataSlab Header (18 bytes):
+// Non-root DataSlab Header:
 //
-//	+-------------------------------+-----------------------------+
-//	| slab version + flag (2 bytes) | next sib slab ID (16 bytes) |
-//	+-------------------------------+-----------------------------+
+//	+-------------------------------+---------------------------------+-----------------------------+
+//	| slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) |
+//	+-------------------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 //	CBOR encoded elements
 //
 // See MapExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 // See hkeyElements.Encode() and singleElements.Encode() for elements section format.
 func newMapDataSlabFromDataV1(
 	id SlabID,
@@ -2208,6 +2300,7 @@ func newMapDataSlabFromDataV1(
 ) {
 	var err error
 	var extraData *MapExtraData
+	var inlinedExtraData []ExtraData
 	var next SlabID
 
 	// Decode extra data
@@ -2219,7 +2312,21 @@ func newMapDataSlabFromDataV1(
 		}
 	}
 
-	// Decode next slab ID
+	// Decode inlined extra data
+	if h.hasInlinedSlabs() {
+		inlinedExtraData, data, err = newInlinedExtraDataFromData(
+			data,
+			decMode,
+			decodeStorable,
+			decodeTypeInfo,
+		)
+		if err != nil {
+			// Don't need to wrap error as external error because err is already categorized by newInlinedExtraDataFromData().
+			return nil, err
+		}
+	}
+
+	// Decode next slab ID for non-root slab
 	if h.hasNextSlabID() {
 		if len(data) < slabIDSize {
 			return nil, NewDecodingErrorf("data is too short for map data slab")
@@ -2236,7 +2343,7 @@ func newMapDataSlabFromDataV1(
 
 	// Decode elements
 	cborDec := decMode.NewByteStreamDecoder(data)
 
-	elements, err := newElementsFromData(cborDec, decodeStorable)
+	elements, err := newElementsFromData(cborDec, decodeStorable, id, inlinedExtraData)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by newElementsFromDataV1().
 		return nil, err
@@ -2264,28 +2371,285 @@ func newMapDataSlabFromDataV1(
 	}, nil
 }
 
+// DecodeInlinedCompositeStorable decodes inlined composite data. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedComposite, and tag content
+// as 3-element array:
+//
+//	- index of inlined extra data
+//	- value ID index
+//	- CBOR array of elements
+//
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and decoder only contains tag content.
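+//
+// A StorableDecoder callback is expected to dispatch here after reading the
+// tag number; a minimal sketch (assumed wiring, the real callback lives in
+// application code) is:
+//
+//	switch tagNumber {
+//	case CBORTagInlinedComposite:
+//		return DecodeInlinedCompositeStorable(dec, decodeStorable, parentSlabID, inlinedExtraData)
+//	case CBORTagInlinedMap:
+//		return DecodeInlinedMapStorable(dec, decodeStorable, parentSlabID, inlinedExtraData)
+//	}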
+func DecodeInlinedCompositeStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedMapDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedMapDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite, expect array of %d elements, got %d elements",
+				inlinedMapDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+
+	extraData, ok := inlinedExtraData[extraDataIndex].(*compositeExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite: expect *compositeExtraData, got %T",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index [8]byte
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, SlabIndex(index))
+
+	// Decode values
+	elemCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if elemCount != uint64(len(extraData.keys)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode composite values: got %d, expect %d",
+				elemCount,
+				len(extraData.keys)))
+	}
+
+	hkeys := make([]Digest, len(extraData.hkeys))
+	copy(hkeys, extraData.hkeys)
+
+	// Decode values
+	size := uint32(hkeyElementsPrefixSize)
+	elems := make([]element, elemCount)
+	for i := 0; i < int(elemCount); i++ {
+		value, err := decodeStorable(dec, parentSlabID, inlinedExtraData)
+		if err != nil {
+			return nil, err
+		}
+
+		elemSize := singleElementPrefixSize + extraData.keys[i].ByteSize() + value.ByteSize()
+		// TODO: does key need to be copied?
+		elem := &singleElement{extraData.keys[i], value, elemSize}
+
+		elems[i] = elem
+		size += digestSize + elem.Size()
+	}
+
+	// Create hkeyElements
+	elements := &hkeyElements{
+		hkeys: hkeys,
+		elems: elems,
+		level: 0,
+		size:  size,
+	}
+
+	header := MapSlabHeader{
+		slabID:   slabID,
+		size:     inlinedMapDataSlabPrefixSize + elements.Size(),
+		firstKey: elements.firstKey(),
+	}
+
+	// TODO: does extra data need to be copied?
+	copiedExtraData := &MapExtraData{
+		TypeInfo: extraData.mapExtraData.TypeInfo,
+		Count:    extraData.mapExtraData.Count,
+		Seed:     extraData.mapExtraData.Seed,
+	}
+
+	return &MapDataSlab{
+		header:         header,
+		elements:       elements,
+		extraData:      copiedExtraData,
+		anySize:        false,
+		collisionGroup: false,
+		inlined:        true,
+	}, nil
+}
+
+// DecodeInlinedMapStorable decodes inlined map data slab. 
Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedMap, and tag content
+// as 3-element array:
+//
+//	- index of inlined extra data
+//	- value ID index
+//	- CBOR array of elements
+//
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and decoder only contains tag content.
+func DecodeInlinedMapStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedMapDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedMapDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab, expect array of %d elements, got %d elements",
+				inlinedMapDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+	extraData, ok := inlinedExtraData[extraDataIndex].(*MapExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"extra data (%T) is wrong type, expect *MapExtraData",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index [8]byte
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, SlabIndex(index))
+
+	// Decode elements
+	elements, err := newElementsFromData(dec, decodeStorable, parentSlabID, inlinedExtraData)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by newElementsFromData().
+		return nil, err
+	}
+
+	header := MapSlabHeader{
+		slabID:   slabID,
+		size:     inlinedMapDataSlabPrefixSize + elements.Size(),
+		firstKey: elements.firstKey(),
+	}
+
+	// NOTE: extra data doesn't need to be copied because every inlined map has its own inlined extra data.
+
+	return &MapDataSlab{
+		header:         header,
+		elements:       elements,
+		extraData:      extraData,
+		anySize:        false,
+		collisionGroup: false,
+		inlined:        true,
+	}, nil
+}
+
 // Encode encodes this map data slab to the given encoder.
// // Root DataSlab Header: // -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// +-------------------------------+------------+---------------------------------+ +// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | +// +-------------------------------+------------+---------------------------------+ // -// Non-root DataSlab Header (18 bytes): +// Non-root DataSlab Header: // -// +-------------------------------+-------------------------+ -// | slab version + flag (2 bytes) | next slab ID (16 bytes) | -// +-------------------------------+-------------------------+ +// +-------------------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded elements // // See MapExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. // See hkeyElements.Encode() and singleElements.Encode() for elements section format. func (m *MapDataSlab) Encode(enc *Encoder) error { + if m.inlined { + return NewEncodingError( + fmt.Errorf("failed to encode inlined map data slab as standalone slab")) + } + + // Encoding is done in two steps: + // + // 1. Encode map elements using a new buffer while collecting inlined extra data from inlined elements. + // 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer. + + inlinedTypes := newInlinedExtraData() + + // TODO: maybe use a buffer pool + var buf bytes.Buffer + elemEnc := NewEncoder(&buf, enc.encMode) + + err := m.encodeElements(elemEnc, inlinedTypes) + if err != nil { + return err + } + const version = 1 slabType := slabMapData @@ -2314,7 +2678,11 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { h.setRoot() } - // Write head (version + flag) + if !inlinedTypes.empty() { + h.setHasInlinedSlabs() + } + + // Encode head _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) @@ -2329,7 +2697,15 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } } - // Encode next slab ID + // Encode inlined types + if !inlinedTypes.empty() { + err = inlinedTypes.Encode(enc) + if err != nil { + return NewEncodingError(err) + } + } + + // Encode next slab ID for non-root slab if m.next != SlabIDUndefined { n, err := m.next.ToRawBytes(enc.Scratch[:]) if err != nil { @@ -2345,7 +2721,21 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode elements - err = m.elements.Encode(enc) + err = enc.CBOR.EncodeRawBytes(buf.Bytes()) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *inlinedExtraData) error { + err := m.elements.Encode(enc, inlinedTypes) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). return err @@ -2359,6 +2749,196 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return nil } +// encodeAsInlined encodes inlined map data slab. 
Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedMap,
+// and tag content as 3-element array:
+//
+//	- index of inlined extra data
+//	- value ID index
+//	- CBOR array of elements
+func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+	if m.extraData == nil {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode non-root map data slab as inlined"))
+	}
+
+	if !m.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode standalone map data slab as inlined"))
+	}
+
+	if m.canBeEncodedAsComposite() {
+		return m.encodeAsInlinedComposite(enc, inlinedTypeInfo)
+	}
+
+	return m.encodeAsInlinedMap(enc, inlinedTypeInfo)
+}
+
+func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+
+	extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData)
+
+	if extraDataIndex > 255 {
+		return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit 255", extraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedMap,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed sized CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(m.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: map elements
+	err = m.elements.Encode(enc, inlinedTypeInfo)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by elements.Encode().
+		return err
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func (m *MapDataSlab) encodeAsInlinedComposite(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+
+	// Composite extra data is deduplicated by TypeInfo.ID() and number of fields.
+	// Composite fields can be removed but new fields can't be added, and existing field types can't be modified.
+	// Given this, composites with same type ID and same number of fields have the same fields.
+	// See https://developers.flow.com/cadence/language/contract-updatability#fields
+
+	extraDataIndex, orderedKeys, exist := inlinedTypeInfo.getCompositeTypeInfo(m.extraData.TypeInfo, int(m.extraData.Count))
+
+	if !exist {
+		elements, ok := m.elements.(*hkeyElements)
+		if !ok {
+			// This should never happen because canBeEncodedAsComposite()
+			// returns false for map containing any collision elements.
+			return NewEncodingError(fmt.Errorf("singleElements can't be encoded as composite elements"))
+		}
+
+		orderedKeys = make([]MapKey, len(elements.elems))
+		for i, e := range elements.elems {
+			e, ok := e.(*singleElement)
+			if !ok {
+				// This should never happen because canBeEncodedAsComposite()
+				// returns false for map containing any collision elements.
+				return NewEncodingError(fmt.Errorf("non-singleElement can't be encoded as composite elements"))
+			}
+			orderedKeys[i] = e.key
+		}
+
+		extraDataIndex = inlinedTypeInfo.addCompositeExtraData(m.extraData, elements.hkeys, orderedKeys)
+	}
+
+	if extraDataIndex > 255 {
+		// This should never happen because of slab size.
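+		// Each inlined extra data entry is referenced by at least one inlined
+		// child slab encoded in this slab, so the slab size limit should keep
+		// the entry count well under 256.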
+		return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit 255", extraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedComposite,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed sized CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(m.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: map elements
+	err = m.elements.EncodeCompositeValues(enc, orderedKeys, inlinedTypeInfo)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+// canBeEncodedAsComposite returns true if:
+// - map data slab is inlined
+// - map is composite type
+// - no collision elements
+// - keys are stored inline (not in a separate slab)
+func (m *MapDataSlab) canBeEncodedAsComposite() bool {
+	if !m.inlined {
+		return false
+	}
+
+	if !m.extraData.TypeInfo.IsComposite() {
+		return false
+	}
+
+	elements, ok := m.elements.(*hkeyElements)
+	if !ok {
+		return false
+	}
+
+	for _, e := range elements.elems {
+		se, ok := e.(*singleElement)
+		if !ok {
+			// Has collision element
+			return false
+		}
+		if _, ok = se.key.(SlabIDStorable); ok {
+			// Key is stored in a separate slab
+			return false
+		}
+	}
+
+	return true
+}
+
 func (m *MapDataSlab) hasPointer() bool {
 	return m.elements.hasPointer()
 }
@@ -2368,12 +2948,34 @@ func (m *MapDataSlab) ChildStorables() []Storable {
 }
 
 func (m *MapDataSlab) getPrefixSize() uint32 {
+	if m.inlined {
+		return inlinedMapDataSlabPrefixSize
+	}
 	if m.extraData != nil {
 		return mapRootDataSlabPrefixSize
 	}
 	return mapDataSlabPrefixSize
 }
 
+func (m *MapDataSlab) Inlined() bool {
+	return m.inlined
+}
+
+// Inlinable returns true if
+// - map data slab is root slab
+// - size of inlined map data slab <= maxInlineSize
+func (m *MapDataSlab) Inlinable(maxInlineSize uint64) bool {
+	if m.extraData == nil {
+		// Non-root data slab is not inlinable.
+		return false
+	}
+
+	inlinedSize := inlinedMapDataSlabPrefixSize + m.elements.Size()
+
+	// Inlined byte size must be less than or equal to max inline size.
+	return uint64(inlinedSize) <= maxInlineSize
+}
+
 func elementsStorables(elems elements, childStorables []Storable) []Storable {
 
 	switch v := elems.(type) {
@@ -2441,10 +3043,12 @@ func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Diges
 	m.header.size = m.getPrefixSize() + m.elements.Size()
 
 	// Store modified slab
-	err = storage.Store(m.header.slabID, m)
-	if err != nil {
-		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
-		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID))
+	if !m.inlined {
+		err := storage.Store(m.header.slabID, m)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + } } return existingValue, nil @@ -2465,10 +3069,12 @@ func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, m.header.size = m.getPrefixSize() + m.elements.Size() // Store modified slab - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + if !m.inlined { + err := storage.Store(m.header.slabID, m) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + } } return k, v, nil @@ -3027,6 +3633,14 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error { return nil } +func (m *MapMetaDataSlab) Inlined() bool { + return false +} + +func (m *MapMetaDataSlab) Inlinable(_ uint64) bool { + return false +} + func (m *MapMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) { if m.extraData == nil { return nil, NewNotValueError(m.SlabID()) @@ -3888,6 +4502,51 @@ func NewMapWithRootID(storage SlabStorage, rootID SlabID, digestBuilder Digester }, nil } +func (m *OrderedMap) Inlined() bool { + return m.root.Inlined() +} + +func (m *OrderedMap) setParentUpdater(f parentUpdater) { + m.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value so +// parent map m can be notified when child value is modified. +func (m *OrderedMap) setCallbackWithChild( + comparator ValueComparator, + hip HashInputProvider, + key Value, + child Value, +) { + c, ok := child.(valueNotifier) + if !ok { + return + } + + c.setParentUpdater(func() error { + // Set child value with parent map using same key. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := m.Set(comparator, hip, key, c) + if err != nil { + return err + } + + if existingValueStorable == nil { + return NewFatalError(fmt.Errorf("failed to reset child value in parent updater callback because previous value is nil")) + } + + return nil + }) +} + +// notifyParentIfNeeded calls parent updater if this map is a child value. +func (m *OrderedMap) notifyParentIfNeeded() error { + if m.parentUpdater == nil { + return nil + } + return m.parentUpdater() +} + func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key Value) (bool, error) { _, err := m.get(comparator, hip, key) if err != nil { @@ -3914,6 +4573,9 @@ func (m *OrderedMap) Get(comparator ValueComparator, hip HashInputProvider, key // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
 	}
+
+	m.setCallbackWithChild(comparator, hip, key, v)
+
 	return v, nil
 }
 
@@ -3986,6 +4648,11 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key
 		}
 	}
 
+	err = m.notifyParentIfNeeded()
+	if err != nil {
+		return nil, err
+	}
+
 	return existingValue, nil
 }
 
@@ -4035,6 +4702,11 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k
 		}
 	}
 
+	err = m.notifyParentIfNeeded()
+	if err != nil {
+		return nil, nil, err
+	}
+
 	return k, v, nil
 }
 
@@ -4142,24 +4814,88 @@ func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error {
 }
 
 func (m *OrderedMap) SlabID() SlabID {
+	if m.root.Inlined() {
+		return SlabIDUndefined
+	}
 	return m.root.SlabID()
 }
 
 func (m *OrderedMap) ValueID() ValueID {
-	sid := m.SlabID()
+	return slabIDToValueID(m.root.SlabID())
+}
 
-	var id ValueID
-	copy(id[:], sid.address[:])
-	copy(id[8:], sid.index[:])
+// Storable returns OrderedMap m as either:
+// - SlabIDStorable, or
+// - inlined data slab storable
+func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) {
 
-	return id
-}
+	inlined := m.root.Inlined()
+	inlinable := m.root.Inlinable(maxInlineSize)
 
-func (m *OrderedMap) StoredValue(_ SlabStorage) (Value, error) {
-	return m, nil
-}
+	if inlinable && inlined {
+		// Root slab is inlinable and was inlined.
+		// Return root slab as storable, no size adjustment or change to storage.
+		return m.root, nil
+	}
+
+	if !inlinable && !inlined {
+		// Root slab is not inlinable and was not inlined.
+		// Return root slab as storable, no size adjustment or change to storage.
+		return SlabIDStorable(m.SlabID()), nil
+	}
+
+	if inlinable && !inlined {
+		// Root slab is inlinable and was NOT inlined.
+
+		// Inline root data slab.
+
+		// Inlinable root slab must be data slab.
+		rootDataSlab, ok := m.root.(*MapDataSlab)
+		if !ok {
+			return nil, NewFatalError(fmt.Errorf("unexpected inlinable map slab type %T", m.root))
+		}
+
+		rootID := rootDataSlab.header.slabID
+
+		// Remove root slab from storage because it is going to be inlined.
+		err := m.Storage.Remove(rootID)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+			return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", rootID))
+		}
+
+		// Update root data slab size from not inlined to inlined
+		rootDataSlab.header.size = inlinedMapDataSlabPrefixSize + rootDataSlab.elements.Size()
+
+		// Update root data slab inlined status.
+		rootDataSlab.inlined = true
+
+		return rootDataSlab, nil
+	}
+
+	// Here, root slab is NOT inlinable and was inlined.
+
+	// Un-inline root slab.
+
+	// Inlined root slab must be data slab.
+	rootDataSlab, ok := m.root.(*MapDataSlab)
+	if !ok {
+		return nil, NewFatalError(fmt.Errorf("unexpected inlined map slab type %T", m.root))
+	}
+
+	// Update root data slab size from inlined to not inlined.
+	rootDataSlab.header.size = mapRootDataSlabPrefixSize + rootDataSlab.elements.Size()
+
+	// Update root data slab inlined status.
+	rootDataSlab.inlined = false
+
+	// Store root slab in storage
+	err := m.Storage.Store(m.SlabID(), m.root)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.SlabID())) + } -func (m *OrderedMap) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { return SlabIDStorable(m.SlabID()), nil } @@ -4564,22 +5300,33 @@ func (m *OrderedMap) PopIterate(fn MapPopIterationFunc) error { extraData := m.root.ExtraData() extraData.Count = 0 + inlined := m.root.Inlined() + + prefixSize := uint32(mapRootDataSlabPrefixSize) + if inlined { + prefixSize = uint32(inlinedMapDataSlabPrefixSize) + } + // Set root to empty data slab m.root = &MapDataSlab{ header: MapSlabHeader{ slabID: rootID, - size: mapRootDataSlabPrefixSize + hkeyElementsPrefixSize, + size: prefixSize + hkeyElementsPrefixSize, }, elements: newHkeyElements(0), extraData: extraData, + inlined: inlined, } - // Save root slab - err = m.Storage.Store(m.root.SlabID(), m.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + if !m.Inlined() { + // Save root slab + err = m.Storage.Store(m.root.SlabID(), m.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + } } + return nil } diff --git a/map_debug.go b/map_debug.go index 051b7acb..9e752325 100644 --- a/map_debug.go +++ b/map_debug.go @@ -107,6 +107,9 @@ func GetMapStats(m *OrderedMap) (MapStats, error) { if _, ok := e.value.(SlabIDStorable); ok { storableDataSlabCount++ } + // This handles use case of inlined array or map value containing SlabID + ids := getSlabIDFromStorable(e.value, nil) + storableDataSlabCount += uint64(len(ids)) } } } @@ -188,12 +191,7 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { } } - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = getSlabIDFromStorable(dataSlab, overflowIDs) } else { meta := slab.(*MapMetaDataSlab) @@ -271,7 +269,7 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash } computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab( - m.Storage, m.digesterBuilder, tic, hip, m.root.SlabID(), 0, nil, []SlabID{}, []SlabID{}, []Digest{}) + m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). return err @@ -320,7 +318,7 @@ func validMapSlab( digesterBuilder DigesterBuilder, tic TypeInfoComparator, hip HashInputProvider, - id SlabID, + slab MapSlab, level int, headerFromParentSlab *MapSlabHeader, dataSlabIDs []SlabID, @@ -334,11 +332,7 @@ func validMapSlab( err error, ) { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return 0, nil, nil, nil, err - } + id := slab.Header().slabID if level > 0 { // Verify that non-root slab doesn't have extra data. 
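
For reference, getSlabIDFromStorable (called above) is defined elsewhere in this
patch. A minimal sketch of such a helper, assuming only the Storable interface's
ChildStorables() method, could look like:

	func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID {
		// Collect the SlabID if this storable is itself a reference.
		if id, ok := storable.(SlabIDStorable); ok {
			return append(ids, SlabID(id))
		}
		// Otherwise recurse into child storables, which include elements
		// of inlined array/map data slabs.
		for _, child := range storable.ChildStorables() {
			ids = getSlabIDFromStorable(child, ids)
		}
		return ids
	}

The actual implementation may differ; this only illustrates the recursive walk
used to collect slab IDs inside inlined values.
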
@@ -388,10 +382,18 @@ func validMapSlab( id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) } + // Verify that only root slab can be inlined + if level > 0 && slab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + // Verify that aggregated element size + slab prefix is the same as header.size computedSize := uint32(mapDataSlabPrefixSize) if level == 0 { computedSize = uint32(mapRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedMapDataSlabPrefixSize) + } } computedSize += elementSize @@ -444,10 +446,16 @@ func validMapSlab( for i := 0; i < len(meta.childrenHeaders); i++ { h := meta.childrenHeaders[i] + childSlab, err := getMapSlab(storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return 0, nil, nil, nil, err + } + // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(storage, digesterBuilder, tic, hip, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + validMapSlab(storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). return 0, nil, nil, nil, err @@ -849,16 +857,31 @@ func validMapSlabSerialization( } // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined composite because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // composite extra data section for reuse, and only composite field + // values are encoded in non-extra data section. + // This reduces encoding size because composite values of the same + // composite type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined composite by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). 
+			return err
+		}
 
-		if slab.Header().size != uint32(encodedSlabSize) {
-			return NewFatalError(
-				fmt.Errorf("slab %d encoded size %d != header.size %d",
-					id, encodedSlabSize, slab.Header().size))
+		if slab.Header().size != uint32(encodedSlabSize) {
+			return NewFatalError(
+				fmt.Errorf("slab %d encoded size %d != header.size %d",
+					id, encodedSlabSize, slab.Header().size))
+		}
 	}
 
 	// Compare encoded data of original slab with encoded data of decoded slab
@@ -953,6 +976,11 @@ func mapDataSlabEqual(
 		return err
 	}
 
+	// Compare inlined
+	if expected.inlined != actual.inlined {
+		return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined))
+	}
+
 	// Compare next
 	if expected.next != actual.next {
 		return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next))
@@ -1287,14 +1315,14 @@ func mapSingleElementEqual(
 		}
 	}
 
-	if !compare(expected.value, actual.value) {
-		return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
-	}
-
-	// Compare value stored in a separate slab
-	if idStorable, ok := expected.value.(SlabIDStorable); ok {
+	// Compare nested element
+	switch ee := expected.value.(type) {
+	case SlabIDStorable:
+		if !compare(expected.value, actual.value) {
+			return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
+		}
 
-		v, err := idStorable.StoredValue(storage)
+		v, err := ee.StoredValue(storage)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue().
 			return err
@@ -1312,6 +1340,27 @@ func mapSingleElementEqual(
 			// Don't need to wrap error as external error because err is already categorized by ValidValueSerialization().
 			return err
 		}
+
+	case *ArrayDataSlab:
+		ae, ok := actual.value.(*ArrayDataSlab)
+		if !ok {
+			return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", actual.value))
+		}
+
+		return arrayDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare)
+
+	case *MapDataSlab:
+		ae, ok := actual.value.(*MapDataSlab)
+		if !ok {
+			return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", actual.value))
+		}
+
+		return mapDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare)
+
+	default:
+		if !compare(expected.value, actual.value) {
+			return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
+		}
 	}
 
 	return nil
diff --git a/map_test.go b/map_test.go
index 6ccf380b..6d4cc71f 100644
--- a/map_test.go
+++ b/map_test.go
@@ -2639,7 +2639,7 @@ func TestMapEncodeDecode(t *testing.T) {
 		verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
 	})
 
-	t.Run("has pointer no collision", func(t *testing.T) {
+	t.Run("has inlined array", func(t *testing.T) {
 
 		SetThreshold(256)
 		defer SetThreshold(1024)
@@ -2697,7 +2697,6 @@ func TestMapEncodeDecode(t *testing.T) {
 		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
 		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
 		id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}
-		id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}}
 
 		// Expected serialized slab data with slab id
 		expected := map[SlabID][]byte{
@@ -2731,7 +2730,7 @@ func TestMapEncodeDecode(t *testing.T) {
 				// child header 2
 				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
 				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
-				0x00, 
0xf2, + 0x00, 0xf3, }, // data slab @@ -2786,9 +2785,16 @@ func TestMapEncodeDecode(t *testing.T) { // data slab id3: { // version - 0x10, - // flag: has pointer + map data - 0x48, + 0x11, + // flag: has inlined slab + map data + 0x08, + + // inlined slab extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, // the following encoded data is valid CBOR @@ -2827,23 +2833,7 @@ func TestMapEncodeDecode(t *testing.T) { // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] 0x82, 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - // array data slab - id4: { - // version - 0x10, - // flag: root + array data - 0x80, - // extra data (CBOR encoded array of 1 elements) - 0x81, - // type info - 0x18, 0x2b, - - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, }, } @@ -2855,15 +2845,15 @@ func TestMapEncodeDecode(t *testing.T) { require.Equal(t, expected[id1], stored[id1]) require.Equal(t, expected[id2], stored[id2]) require.Equal(t, expected[id3], stored[id3]) - require.Equal(t, expected[id4], stored[id4]) // Verify slab size in header is correct. meta, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) require.Equal(t, 2, len(meta.childrenHeaders)) require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) - // Need to add slabIDSize to encoded data slab here because empty slab ID is omitted during encoding. 
- require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + const inlinedExtraDataSize = 6 + require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, stored) @@ -2875,180 +2865,175 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("inline collision 1 level", func(t *testing.T) { - + t.Run("root data slab, inlined child map of same type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo := testTypeInfo{43} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 8 + const mapSize = 2 keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + var k, v Value - digests := []Digest{Digest(i % 4), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map metadata slab id1: { - // version - 0x10, + // version, has inlined slab + 0x11, // flag: root + map data 0x88, - // extra data (CBOR encoded array of 3 elements) + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // the following encoded data is valid CBOR - - // elements (array of 3 elements) + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf8, 0x83, - - // level: 0 - 0x00, - - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - - // elements (array of 2 elements) - 0x99, 0x00, 0x04, - - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 
0x49, 0xaa, 0xdd,
+ // element 1
+ // inlined map extra data
+ 0xd8, 0xf8,
0x83,
-
- // level: 1
+ // type info
+ 0x18, 0x2b,
+ // count: 1
0x01,
+ // seed
+ 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
- // hkeys (byte string of length 8 * 2)
- 0x59, 0x00, 0x10,
- // hkey: 0
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // hkey: 4
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
-
- // elements (array of 2 elements)
- // each element is encoded as CBOR array of 2 elements (key, value)
- 0x99, 0x00, 0x02,
- // element: [uint64(0), uint64(0)]
- 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
- // element: [uint64(4), uint64(8)]
- 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08,
+ // the following encoded data is valid CBOR
- // inline collision group corresponding to hkey 1
- // (tag number CBORTagInlineCollisionGroup)
- 0xd8, 0xfd,
- // (tag content: array of 3 elements)
+ // elements (array of 3 elements)
0x83,
- // level: 1
- 0x01,
+ // level: 0
+ 0x00,
// hkeys (byte string of length 8 * 2)
0x59, 0x00, 0x10,
- // hkey: 1
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- // hkey: 5
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
// elements (array of 2 elements)
// each element is encoded as CBOR array of 2 elements (key, value)
0x99, 0x00, 0x02,
- // element: [uint64(1), uint64(2)]
- 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
- // element: [uint64(5), uint64(10)]
- 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
-
- // inline collision group corresponding to hkey 2
- // (tag number CBORTagInlineCollisionGroup)
- 0xd8, 0xfd,
- // (tag content: array of 3 elements)
+ // element 0:
+ 0x82,
+ // key: "a"
+ 0x61, 0x61,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
0x83,
-
- // level: 1
- 0x01,
-
- // hkeys (byte string of length 8 * 2)
- 0x59, 0x00, 0x10,
- // hkey: 2
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
- // hkey: 6
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
-
- // elements (array of 2 elements)
- // each element is encoded as CBOR array of 2 elements (key, value)
- 0x99, 0x00, 0x02,
- // element: [uint64(2), uint64(4)]
- 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
- // element: [uint64(6), uint64(12)]
- 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
-
- // inline collision group corresponding to hkey 3
- // (tag number CBORTagInlineCollisionGroup)
- 0xd8, 0xfd,
- // (tag content: array of 3 elements)
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined map elements (array of 3 elements)
0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
0x59, 0x00, 0x08,
+ 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+ // 1 element
0x99, 0x00, 0x01,
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
- // level: 1
- 0x01,
-
- // hkeys (byte string of length 8 * 2)
- 0x59, 0x00, 0x10,
- // hkey: 3
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
- // hkey: 7
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
-
- // elements (array of 2 elements)
- // each element is encoded as CBOR array of 2 elements (key, value)
- 0x99, 0x00, 0x02,
- // element: [uint64(3), uint64(6)]
- 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
- // element: [uint64(7), uint64(14)]
- 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
+ // element 1:
+ 0x82,
+ // key: "b"
+ 0x61, 0x62,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map 
slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, }, } + // Verify encoded data stored, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) require.Equal(t, expected[id1], stored[id1]) @@ -3062,58 +3047,105 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("inline collision 2 levels", func(t *testing.T) { - + t.Run("root data slab, inlined child map of different type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 8 - keyValues := make(map[Value]Value) + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + var k, v Value - digests := []Digest{Digest(i % 4), Digest(i % 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + var ti TypeInfo + if i%2 == 0 { + ti = childMapTypeInfo2 + } else { + ti = childMapTypeInfo1 + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map data slab id1: { - // version - 0x10, + // version, has inlined slab + 0x11, // flag: root + map data 0x88, - // extra data (CBOR encoded array of 3 elements) + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" + // type info 0x18, 0x2a, - // count: 8 - 0x08, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // the following encoded data is valid CBOR // elements (array of 3 
elements) @@ -3122,170 +3154,76 @@ func TestMapEncodeDecode(t *testing.T) { // level: 0 0x00, - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - - // elements (array of 4 elements) - 0x99, 0x00, 0x04, - - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level 1 - 0x01, - - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - - // elements (array of 1 elements) - 0x99, 0x00, 0x01, - - // inline collision group corresponding to hkey [0, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 1 + // hkey: 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // elements (array of 1 elements) - 0x99, 0x00, 0x01, - - // inline collision group corresponding to hkey [1, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - // elements (array of 2 elements) // each element is encoded as CBOR array of 2 elements (key, value) 0x99, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 1) + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - - // elements (array of 1 element) + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, - // inline collision group corresponding to hkey [2, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - - // elements 
(array of 2 element) - 0x99, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // extra data index 0 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 1) + // level 0 + 0x00, + // hkey bytes 0x59, 0x00, 0x08, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - - // elements (array of 1 element) + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element 0x99, 0x00, 0x01, - - // inline collision group corresponding to hkey [3, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - - // elements (array of 2 element) - 0x99, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, }, } + // Verify encoded data stored, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) require.Equal(t, expected[id1], stored[id1]) @@ -3299,60 +3237,128 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("external collision", func(t *testing.T) { - + t.Run("root data slab, multiple levels of inlined child map of same type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo := testTypeInfo{43} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 20 - keyValues := make(map[Value]Value) + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + var k, v Value - digests := []Digest{Digest(i % 2), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Create grand child map + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to grand child map + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = Uint64Value(i) + + // Insert grand child map to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = 
parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map data slab id1: { - // version - 0x10, - // flag: root + has pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x14, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + // element 3 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // the following encoded data is valid CBOR // elements (array of 3 elements) @@ -3363,161 +3369,118 @@ func TestMapEncodeDecode(t *testing.T) { // hkeys (byte string of length 8 * 2) 0x59, 0x00, 0x10, - // hkey: 0 + // hkey: 1 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) 0x99, 0x00, 0x02, - // external collision group corresponding to hkey 0 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - - // external collision group corresponding to hkey 1 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - }, - - // external collision group - id2: { - // version - 0x10, - // flag: any size + collision group - 0x2b, - - // the following encoded data is valid CBOR + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // elements (array of 3 elements) + // inlined map elements (array of 3 elements) 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 10) - 0x59, 0x00, 0x50, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x04, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - // hkey: 8 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 10 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - // hkey: 12 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, - // hkey: 14 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, - // hkey: 16 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 18 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, - - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x0a, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - // element: [uint64(8), uint64(16)] - 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, - // element: [uint64(10), uint64(20)] - 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, - // element: [uint64(12), uint64(24)] - 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, - // element: [uint64(14), uint64(28)] - 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, - // element: [uint64(16), uint64(32)] - 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, - // element: [uint64(18), uint64(36)] - 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, - }, - - // external collision group - id3: { - // version - 0x10, - // flag: any size + collision group - 0x2b, - - // the following encoded data is valid CBOR - - // elements (array of 3 elements) + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, + // extra data index 1 + 0x18, 0x1, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x0, + // value: 0 + 0xd8, 0xa4, 0x0, - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 10) - 0x59, 0x00, 0x50, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - // hkey: 9 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - // hkey: 11 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, - // hkey: 13 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, - // hkey: 15 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, - // hkey: 17 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, - // hkey: 19 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, - - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x0a, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - // element: [uint64(9), uint64(18)] - 0x82, 0xd8, 0xa4, 0x09, 
0xd8, 0xa4, 0x12, - // element: [uint64(11), uint64(22))] - 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, - // element: [uint64(13), uint64(26)] - 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, - // element: [uint64(15), uint64(30)] - 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, - // element: [uint64(17), uint64(34)] - 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, - // element: [uint64(19), uint64(38)] - 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x3, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + // value: 2 + 0xd8, 0xa4, 0x2, }, } + // Verify encoded data stored, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) require.Equal(t, expected[id1], stored[id1]) - require.Equal(t, expected[id2], stored[id2]) - require.Equal(t, expected[id3], stored[id3]) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, stored) @@ -3529,3345 +3492,8805 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("pointer", func(t *testing.T) { + t.Run("root data slab, multiple levels of inlined child map of different type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + gchildMapTypeInfo1 := testTypeInfo{45} + gchildMapTypeInfo2 := testTypeInfo{46} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - k := Uint64Value(0) - v := Uint64Value(0) + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + var k, v Value - digests := []Digest{Digest(0), Digest(1)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + var gti TypeInfo + if i%2 == 0 { + gti = gchildMapTypeInfo2 + } else { + gti = gchildMapTypeInfo1 + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + // Create grand child map + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gti) + require.NoError(t, err) - require.Equal(t, uint64(1), m.Count()) + k = Uint64Value(i) + v = Uint64Value(i * 2) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // Insert element to grand child map + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + 
require.NoError(t, err) + require.Nil(t, existingStorable) - expectedNoPointer := []byte{ + var cti TypeInfo + if i%2 == 0 { + cti = childMapTypeInfo2 + } else { + cti = childMapTypeInfo1 + } - // version - 0x10, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), cti) + require.NoError(t, err) - // the following encoded data is valid CBOR + k = Uint64Value(i) - // elements (array of 3 elements) - 0x83, + // Insert grand child map to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // level: 0 - 0x00, + k = NewStringValue(string(r)) + r++ - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - } + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, 1, len(stored)) - require.Equal(t, expectedNoPointer, stored[id1]) + keyValues[k] = childMap + } - // Overwrite existing value with long string - vs := NewStringValue(strings.Repeat("a", 512)) - existingStorable, err = m.Set(compare, hashInputProvider, k, vs) - require.NoError(t, err) + require.Equal(t, uint64(mapSize), parentMap.Count()) - existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - expectedHasPointer := []byte{ + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version 1, flag: has inlined slab + 0x11, + // flag: root + map data + 0x88, - // version - 0x10, - // flag: root + pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // the following encoded data is valid CBOR + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info: 44 + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, - // elements (array of 3 elements) - 0x83, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info: 46 + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, - // level: 0 - 0x00, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info: 43 + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // element 3 + // inlined map extra 
data + 0xd8, 0xf8, + 0x83, + // type info: 45 + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined child map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x1, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x3, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + // value: 2 + 0xd8, 0xa4, 0x2, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root metadata slab, inlined child map of same type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const 
mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9 + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map metadata + 0x89, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xda, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xda, + }, + id2: { + // version, flag: has inlined slab, has next slab ID + 0x13, + // flag: map data + 0x08, + + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // next slab ID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 
elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: "c" + 0x61, 0x63, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + + // element 4: + 0x82, + // key: "d" + 0x61, 0x64, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + + id3: { + // version, flag: has inlined slab + 0x11, + // flag: map data + 0x08, + + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "e" + 0x61, 0x65, + // value: inlined map (tag: CBORTagInlinedMap) + 
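+ // The same wire shape recurs for every inlined map value in these
+ // fixtures: tag CBORTagInlinedMap over a 3-element array, sketched here
+ // for reference (NN and the index bytes vary per element):
+ //   0xd8, 0xfb        tag: CBORTagInlinedMap
+ //   0x83              array of 3 elements
+ //   0x18, NN          index into the inlined slab extra data section
+ //   0x48, ...         8-byte slab index of the inlined map
+ //   [...]             inlined map elements
+ // Referencing the shared extra data section by index lets elements with
+ // identical extra data reuse one entry instead of re-encoding type info
+ // and seed per element.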
0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 4 + 0xd8, 0xa4, 0x04, + // value: 8 + 0xd8, 0xa4, 0x08, + + // element 1: + 0x82, + // key: "f" + 0x61, 0x66, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 5 + 0xd8, 0xa4, 0x05, + // value: 10 + 0xd8, 0xa4, 0x0a, + + // element 3: + 0x82, + // key: "g" + 0x61, 0x67, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 6 + 0xd8, 0xa4, 0x06, + // value: 12 + 0xd8, 0xa4, 0x0c, + + // element 4: + 0x82, + // key: "h" + 0x61, 0x68, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 7 + 0xd8, 0xa4, 0x07, + // value: 14 + 0xd8, 0xa4, 0x0e, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root metadata slab, inlined child map of different type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + childMapTypeInfo3 := testTypeInfo{45} + childMapTypeInfo4 := testTypeInfo{46} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + var ti TypeInfo + switch i % 4 { + case 0: + ti = childMapTypeInfo1 + case 1: + ti = childMapTypeInfo2 + case 2: + ti = childMapTypeInfo3 + case 3: + ti = childMapTypeInfo4 + } + + // Create child map + 
childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9 + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map metadata + 0x89, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xda, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xda, + }, + id2: { + // version, flag: has inlined slab, has next slab ID + 0x13, + // flag: map data + 0x08, + + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // next slab ID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements 
(array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: "c" + 0x61, 0x63, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + + // element 4: + 0x82, + // key: "d" + 0x61, 0x64, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + + id3: { + // version, flag: has inlined slab + 0x11, + // flag: map data + 0x08, + + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "e" + 0x61, 0x65, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + 
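+ // Only the child's 8-byte slab index is stored above; the address half of
+ // its SlabID is implied by the enclosing map, matching the "value ID
+ // index (8 bytes)" field of the inlined slab prefix described in array.go.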
// inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 4 + 0xd8, 0xa4, 0x04, + // value: 8 + 0xd8, 0xa4, 0x08, + + // element 1: + 0x82, + // key: "f" + 0x61, 0x66, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 5 + 0xd8, 0xa4, 0x05, + // value: 10 + 0xd8, 0xa4, 0x0a, + + // element 3: + 0x82, + // key: "g" + 0x61, 0x67, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 6 + 0xd8, 0xa4, 0x06, + // value: 12 + 0xd8, 0xa4, 0x0c, + + // element 4: + 0x82, + // key: "h" + 0x61, 0x68, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 7 + 0xd8, 0xa4, 0x07, + // value: 14 + 0xd8, 0xa4, 0x0e, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 1 level", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + map data 
+ 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 2 elements) + 0x99, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 
:= newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 2 levels", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i % 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + 0x99, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [0, 0] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 elements) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [1, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is 
encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 element) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [2, 0] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 element) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [3, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("external collision", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 20 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 2), Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + has pointer + map data
+ 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 20 + 0x14, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + + // external collision group corresponding to hkey 0 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // external collision group corresponding to hkey 1 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // external collision group + id2: { + // version + 0x10, + // flag: any size + collision group + 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x59, 0x00, 0x50, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 8 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + // hkey: 12 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, + // hkey: 14 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + // hkey: 16 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 18 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x0a, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + // element: [uint64(8), uint64(16)] + 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, + // element: [uint64(10), uint64(20)] + 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, + // element: [uint64(12), uint64(24)] + 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, + // element: [uint64(14), uint64(28)] + 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, + // element: [uint64(16), uint64(32)] + 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, + // element: [uint64(18), uint64(36)] + 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, + }, + + // external collision group + id3: { + // version + 0x10, + // flag: any size + collision group + 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x59, 0x00, 0x50, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // hkey: 9 + 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // hkey: 11 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + // hkey: 13 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + // hkey: 15 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, + // hkey: 17 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + // hkey: 19 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x0a, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + // element: [uint64(9), uint64(18)] + 0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12, + // element: [uint64(11), uint64(22)] + 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, + // element: [uint64(13), uint64(26)] + 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, + // element: [uint64(15), uint64(30)] + 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, + // element: [uint64(17), uint64(34)] + 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, + // element: [uint64(19), uint64(38)] + 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to child map", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child map + typeInfo2 := testTypeInfo{43} + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo2) + require.NoError(t, err) + + for i := 0; i < 2; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat("b", 22)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childMap + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t,
uint64(mapSize), m.Count()) + + // root slab (data slab) ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // child map slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version + 0x10, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,2)] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + // map data slab + id2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2b, + // count + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey + 0x4f, 0x6a, 0x3e, 0x93, 0xdd, 0xb1, 0xbe, 0x5, + // hkey + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [1:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0xd8, 0xa4, 0x1, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [0:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0xd8, 0xa4, 0x0, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child map", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := 
newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize-1; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Create child map + childTypeInfo := testTypeInfo{43} + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childTypeInfo) + require.NoError(t, err) + + // Create grand child map + gchildTypeInfo := testTypeInfo{44} + + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gchildTypeInfo) + require.NoError(t, err) + + r := 'a' + for i := 0; i < 2; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, Uint64Value(0), gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(mapSize - 1) + v := childMap + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child map + existingStorable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // root slab (data slab) ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child map slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // array of inlined slab extra data + 0x81, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [0:0] + 0x82, + 0xd8, 0xa4, 0x0, + 0xd8, 0xa4, 0x0, + // element: [1:inlined map] + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 
+ 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: SlabID{...3} + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // map data slab + id2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2c, + // count + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey + 0x30, 0x43, 0xc5, 0x14, 0x8f, 0x52, 0x18, 0x43, + // hkey + 0x98, 0x0f, 0x5c, 0xdb, 0x37, 0x71, 0x6c, 0x13, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create nested array + typeInfo2 := testTypeInfo{43} + + nestedArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = nestedArray.Append(v) + require.NoError(t, err) + } + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := nestedArray + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + 
digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert nested array + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // metadata slab + id1: { + // version + 0x10, + // flag: root + map meta + 0x89, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf6, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xf2, + }, + + // data slab + id2: { + // version + 0x12, + // flag: map data + 0x08, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + id3: { + // version + 0x10, + 
// flag: has pointer + map data + 0x48, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // array data slab + id4: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 element) + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 bytes) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + require.Equal(t, expected[id4], stored[id4]) + + // Verify slab size in header is correct.
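+ // Hedged note on the assertions below: the first child data slab (id2) contains no + // pointer elements, so its header size should equal its encoded length exactly, while + // the second child (id3) stores a SlabID pointing to the child array (id4), so its + // in-memory size accounting is expected to differ from its encoded length by slabIDSize.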
+ meta, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) + require.Equal(t, 2, len(meta.childrenHeaders)) + require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) + require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child array + childTypeInfo := testTypeInfo{43} + + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + // Create grand child array + gchildTypeInfo := testTypeInfo{44} + + gchildArray, err := NewArray(storage, address, gchildTypeInfo) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = gchildArray.Append(v) + require.NoError(t, err) + } + + // Insert grand child array to child array + err = childArray.Append(gchildArray) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childArray + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child array to parent map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // parent map root slab ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child array root slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // array of inlined slab extra data + 0x81, + // element 0 + // inlined array extra data + 0xd8, 0xf7, + 0x81, + // type info + 0x18, 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is 
encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:inlined array] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + + // value: inlined array (tag: CBORTagInlinedArray) + 0xd8, 0xfa, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined array slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined array elements (1 element) + 0x99, 0x00, 0x01, + // SlabID{...3} + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // grand child array data slab + id2: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 element) + 0x81, + // type info + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 bytes) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to storable slab", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := Uint64Value(0) + v := Uint64Value(0) + + digests := []Digest{Digest(0), Digest(1)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedNoPointer := []byte{ + + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, +
// type info: "map" + 0x18, 0x2A, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 element) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, 1, len(stored)) + require.Equal(t, expectedNoPointer, stored[id1]) + + // Overwrite existing value with long string + vs := NewStringValue(strings.Repeat("a", 128)) + existingStorable, err = m.Set(compare, hashInputProvider, k, vs) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, v, existingValue) + + expectedHasPointer := []byte{ + + // version + 0x10, + // flag: root + pointer + map data + 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, // hkeys (byte string of length 8 * 1) 0x59, 0x00, 0x08, // hkey: 0 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), slab id] - 0x82, 0xd8, 0xa4, 0x00, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // elements (array of 1 element) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0), slab id] + 0x82, 0xd8, 0xa4, 0x00, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + } + + expectedStorableSlab := []byte{ + // version + 0x10, + // flag: storable + no size limit + 0x3f, + // "aaaa..."
+ 0x78, 0x80, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + } + + stored, err = storage.Encode() + require.NoError(t, err) + require.Equal(t, 2, len(stored)) + require.Equal(t, expectedHasPointer, stored[id1]) + require.Equal(t, expectedStorableSlab, stored[id2]) + }) + + t.Run("same composite with one field", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with one field "uuid" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 1 inlined slab extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x48, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["uuid"] + 0x81, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key,
value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 1 element) + 0x81, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 1 element) + 0x81, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with two fields (same order)", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with two fields "uuid" and "amount" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue("amount") + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 1 inlined slab extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests +
0x50, 0x3b, 0xef, 0x5b, 0xe2, 0x9b, 0x8d, 0xf9, 0x65, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["amount", "uuid"] + 0x82, 0x66, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with two fields (different order)", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed.
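+ // (Note: composite field order in the inlined extra data follows each field's digest, + // which is derived from the child map's seed, so "a" and "uuid" may serialize in + // either order; the expected bytes below assume the order produced by this test's + // fixed seed.)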
+ for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with two fields "uuid" and "a" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue("a") + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 1 inlined slab extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify
encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with different number of fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed. + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with one or two fields + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + if i == 0 { + k = NewStringValue("a") + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + // element 1 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // composite digests + 0x48, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + // composite keys ["uuid"] + 0x81, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + //
hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 1 element) + 0x81, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("different composite", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testCompositeTypeInfo{43} + childMapTypeInfo2 := testCompositeTypeInfo{44} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 4 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed.
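+ // (Note: the child maps below alternate between composite type infos 43 and 44, so + // their inlined extra data cannot be shared across types; the expected encoding + // carries two inlined slab extra data entries, and each inlined value references + // its entry by extra data index.)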
+		for i := uint64(0); i < mapSize; i++ {
+			var ti TypeInfo
+			if i%2 == 0 {
+				ti = childMapTypeInfo1
+			} else {
+				ti = childMapTypeInfo2
+			}
+
+			var k, v Value
+
+			// Create child map, composite with two fields "uuid" and "a"
+			childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti)
+			require.NoError(t, err)
+
+			k = NewStringValue("uuid")
+			v = Uint64Value(i)
+
+			// Insert element to child map
+			existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			k = NewStringValue("a")
+			v = Uint64Value(i * 2)
+
+			// Insert element to child map
+			existingStorable, err = childMap.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			k = Uint64Value(i)
+
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+			// Insert child map to parent map
+			existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = childMap
+		}
+
+		require.Equal(t, uint64(mapSize), parentMap.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+			id1: {
+				// version, has inlined slab
+				0x11,
+				// flag: root + map data
+				0x88,
+
+				// slab extra data
+				// CBOR encoded array of 3 elements
+				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 4
+				0x04,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// 2 inlined slab extra data
+				0x82,
+				// element 0
+				// inlined composite extra data
+				0xd8, 0xf9,
+				0x83,
+				// map extra data
+				0x83,
+				// type info
+				0xd8, 0xf6, 0x18, 0x2b,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+				// composite digests
+				0x50,
+				0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1,
+				0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+				// composite keys ["a", "uuid"]
+				0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64,
+				// element 1
+				// inlined composite extra data
+				0xd8, 0xf9,
+				0x83,
+				// map extra data
+				0x83,
+				// type info
+				0xd8, 0xf6, 0x18, 0x2c,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+				// composite digests
+				0x50,
+				0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e,
+				0xea, 0x8e, 0x6f, 0x69, 0x81, 0x19, 0x68, 0x81,
+				// composite keys ["uuid", "a"]
+				0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x61,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+				// elements (array of 4 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x04,
+				// element 0:
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: inlined composite (tag: CBORTagInlinedComposite)
+				0xd8, 0xfc,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// inlined composite elements (array of 2 elements)
+				0x82,
+				// value: 0
+				0xd8, 0xa4, 0x00,
+				// value: 0
+				0xd8, 0xa4, 0x00,
+
+				// element 1:
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x01,
+				// value: inlined 
composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 4 + 0xd8, 0xa4, 0x04, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) +} + +func TestMapEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Create a new storage with encoded data from base storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Create new map from new storage + m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) +} + +func TestMapStoredValue(t *testing.T) { + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + keyValues := make(map[Value]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + keyValues[k] = Uint64Value(i) + i++ + } + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + rootID := m.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + require.NoError(t, err) + + m2, ok := value.(*OrderedMap) + require.True(t, ok) + + verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var 
fatalError *FatalError
+			var notValueError *NotValueError
+			require.ErrorAs(t, err, &fatalError)
+			require.ErrorAs(t, err, &notValueError)
+			require.ErrorAs(t, fatalError, &notValueError)
+			require.Nil(t, value)
+		}
+	}
+}
+
+func TestMapPopIterate(t *testing.T) {
+
+	t.Run("empty", func(t *testing.T) {
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+		digesterBuilder := newBasicDigesterBuilder()
+
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		err = storage.Commit()
+		require.NoError(t, err)
+
+		require.Equal(t, 1, storage.Count())
+
+		i := uint64(0)
+		err = m.PopIterate(func(k Storable, v Storable) {
+			i++
+		})
+		require.NoError(t, err)
+		require.Equal(t, uint64(0), i)
+
+		verifyEmptyMap(t, storage, typeInfo, address, m)
+	})
+
+	t.Run("root-dataslab", func(t *testing.T) {
+		const mapSize = 10
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+		digesterBuilder := newBasicDigesterBuilder()
+
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		keyValues := make(map[Value]Value, mapSize)
+		sortedKeys := make([]Value, mapSize)
+		for i := uint64(0); i < mapSize; i++ {
+			key, value := Uint64Value(i), Uint64Value(i*10)
+			sortedKeys[i] = key
+			keyValues[key] = value
+
+			existingStorable, err := m.Set(compare, hashInputProvider, key, value)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+		}
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		err = storage.Commit()
+		require.NoError(t, err)
+
+		require.Equal(t, 1, storage.Count())
+
+		sort.Stable(keysByDigest{sortedKeys, digesterBuilder})
+
+		i := mapSize
+		err = m.PopIterate(func(k, v Storable) {
+			i--
+
+			kv, err := k.StoredValue(storage)
+			require.NoError(t, err)
+			valueEqual(t, typeInfoComparator, sortedKeys[i], kv)
+
+			vv, err := v.StoredValue(storage)
+			require.NoError(t, err)
+			valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv)
+		})
+
+		require.NoError(t, err)
+		require.Equal(t, 0, i)
+
+		verifyEmptyMap(t, storage, typeInfo, address, m)
+	})
+
+	t.Run("root-metaslab", func(t *testing.T) {
+		const mapSize = 4096
+
+		r := newRand(t)
+
+		keyValues := make(map[Value]Value, mapSize)
+		sortedKeys := make([]Value, mapSize)
+		i := 0
+		for len(keyValues) < mapSize {
+			k := NewStringValue(randStr(r, 16))
+			if _, found := keyValues[k]; !found {
+				sortedKeys[i] = k
+				keyValues[k] = NewStringValue(randStr(r, 16))
+				i++
+			}
+		}
+
+		typeInfo := testTypeInfo{42}
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+		storage := newTestPersistentStorage(t)
+		digesterBuilder := newBasicDigesterBuilder()
+
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		for k, v := range keyValues {
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+		}
+
+		err = storage.Commit()
+		require.NoError(t, err)
+
+		sort.Stable(keysByDigest{sortedKeys, digesterBuilder})
+
+		// Iterate key value pairs
+		i = len(keyValues)
+		err = m.PopIterate(func(k Storable, v Storable) {
+			i--
+
+			kv, err := k.StoredValue(storage)
+			require.NoError(t, err)
+			valueEqual(t, typeInfoComparator, sortedKeys[i], kv)
+
+			vv, err := v.StoredValue(storage)
+			require.NoError(t, err)
+			valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv)
+		})
+
+		require.NoError(t, err)
+		require.Equal(t, 0, i)
+
+		verifyEmptyMap(t, storage, typeInfo, 
address, m) + }) + + t.Run("collision", func(t *testing.T) { + //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := &mockDigesterBuilder{} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + + if _, found := keyValues[k]; !found { + + sortedKeys[i] = k + keyValues[k] = NewStringValue(randStr(r, 16)) + + digests := []Digest{ + Digest(i % 100), + Digest(i % 5), + } + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) + require.NoError(t, err) + require.Nil(t, existingStorable) + + i++ + } + } + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + err = storage.Commit() + require.NoError(t, err) + + // Iterate key value pairs + i = mapSize + err = m.PopIterate(func(k Storable, v Storable) { + i-- + + kv, err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + verifyEmptyMap(t, storage, typeInfo, address, m) + }) +} + +func TestEmptyMap(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, s) + }) + + t.Run("remove", func(t *testing.T) { + existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, existingKey) + require.Nil(t, existingValue) + }) + + t.Run("iterate", func(t *testing.T) { + i := 0 + err := m.Iterate(func(k Value, v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + + t.Run("count", func(t *testing.T) { + count := m.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, m.Type())) + }) + + t.Run("address", func(t *testing.T) { + require.Equal(t, address, m.Address()) + }) + + // TestMapEncodeDecode/empty tests empty map encoding and decoding +} + +func TestMapFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + 
require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. + copied, err := NewMapFromBatchData( + storage, + address, + NewDefaultDigesterBuilder(), + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + verifyEmptyMap(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + + k, v, err := iter.Next() + + // Save key value pair + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + 
typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 8 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + storable, err := m.Set( + compare, + hashInputProvider, + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + ) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + require.Equal(t, typeInfo, m.Type()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for m.Count() < mapSize { + k := randomValue(r, int(maxInlineMapElementSize)) + v := randomValue(r, int(maxInlineMapElementSize)) + + _, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := 
Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + var sortedKeys []Value + keyValues := make(map[Value]Value, mapSize) + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("collision", func(t *testing.T) { + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + MaxCollisionLimitPerDigest = mapSize / 2 + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + + k, v := Uint64Value(i), Uint64Value(i*10) + + digests := make([]Digest, 2) + if i%2 == 0 { + digests[0] = 0 + } else { + digests[0] = Digest(i % (mapSize / 2)) + } + digests[1] = Digest(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + i := 0 + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + i++ + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. 
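+		// Sketch of the scenario (hedged summary; see Atree Issue #193 and the
+		// referenced PRs for the authoritative history): batch construction
+		// packs elements into data slabs in digest order, so a few near-max-size
+		// key/value pairs with adjacent digests could previously end up in a
+		// single data slab whose encoded size exceeded maxThreshold. The
+		// hardcoded digests below recreate that adjacency.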
+ + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + maxStringSize := int(maxInlineMapKeySize - 2) + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue(randStr(r, maxStringSize)) + v = NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") + v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) +} + +func TestMapNestedStorables(t *testing.T) { + + t.Run("SomeValue", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + keyValues[k] = v + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) + + t.Run("Array", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + // Create a nested array with one element + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + err = array.Append(v) + require.NoError(t, err) + + // Insert nested array into map + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + keyValues[k] = array + + existingStorable, err := m.Set(compare, 
hashInputProvider, k, array) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) +} + +func TestMapMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + maxStringSize := int(maxInlineMapKeySize - 2) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for len(keyValues) < 2 { + // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + keyValues[k] = v + + _, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.True(t, m.root.IsData()) + + // Size of root data slab with two elements (key+value pairs) of + // max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab) + require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) +} + +func TestMapString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2]` + require.Equal(t, want, m.String()) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` + require.Equal(t, want, m.String()) + }) +} + +func TestMapSlabDump(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + 
want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", + "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("inline collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", + "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("external collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := 
[]string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]", + "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]", + "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("key overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("value overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func TestMaxCollisionLimitPerDigest(t *testing.T) { + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + + t.Run("collision limit 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 0, + // meaning no collision is allowed at first level. 
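+		// With MaxCollisionLimitPerDigest = N, at most N+1 elements may share a
+		// first-level digest; with N = 0 every first-level digest must be
+		// unique. The second wave of inserts below reuses digests 0..mapSize-1,
+		// so each of those Set calls should fail with CollisionLimitError,
+		// while updates of already-present keys still succeed.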
+ MaxCollisionLimitPerDigest = uint32(0) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) } - stored, err = storage.Encode() + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("collision limit > 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 7, + // meaning at most 8 elements in collision group per digest at first level. 
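+		// Worked arithmetic for this subtest: mapSize = 1024 keys hashed to
+		// digests i % 128 put exactly 1024/128 = 8 elements in each collision
+		// group, which is the maximum allowed by limit 7 (N+1 elements per
+		// digest). The second wave below reuses the same digests, so every
+		// additional insert should fail with CollisionLimitError.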
+ MaxCollisionLimitPerDigest = uint32(7) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - require.Equal(t, 2, len(stored)) - require.Equal(t, expectedHasPointer, stored[id1]) + + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } -func TestMapEncodeDecodeRandomValues(t *testing.T) { +func TestMapLoadedValueIterator(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - r := newRand(t) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + digesterBuilder := &mockDigesterBuilder{} - // Create a new storage with encoded data from base storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // Create new map from new storage - m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder) - require.NoError(t, err) + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) -} + verifyMapLoadedElements(t, m, nil) + }) -func TestMapStoredValue(t *testing.T) { + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithSimpleValues( + t, + storage, + 
address,
+			typeInfo,
+			mapSize,
+			func(i int) []Digest { return []Digest{Digest(i)} },
+		)
+
+		// parent map: 1 root data slab
+		require.Equal(t, 1, len(storage.deltas))
+		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+
+		verifyMapLoadedElements(t, m, values)
+	})
+
+	t.Run("root data slab with composite values", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const mapSize = 3
+		m, values := createMapWithCompositeValues(
+			t,
+			storage,
+			address,
+			typeInfo,
+			mapSize,
+			func(i int) []Digest { return []Digest{Digest(i)} },
+		)
+
+		// parent map: 1 root data slab
+		// composite elements: 1 root data slab for each
+		require.Equal(t, 1+mapSize, len(storage.deltas))
+		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+
+		verifyMapLoadedElements(t, m, values)
+	})
+
+	t.Run("root data slab with composite values in collision group", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		// Create parent map with 3 collision groups, 2 elements in each group.
+		const mapSize = 6
+		m, values := createMapWithCompositeValues(
+			t,
+			storage,
+			address,
+			typeInfo,
+			mapSize,
+			func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} },
+		)
+
+		// parent map: 1 root data slab
+		// composite elements: 1 root data slab for each
+		require.Equal(t, 1+mapSize, len(storage.deltas))
+		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+
+		verifyMapLoadedElements(t, m, values)
+	})
+
+	t.Run("root data slab with composite values in external collision group", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		// Create parent map with 3 external collision groups, 4 elements in each group.
+		const mapSize = 12
+		m, values := createMapWithCompositeValues(
+			t,
+			storage,
+			address,
+			typeInfo,
+			mapSize,
+			func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} },
+		)
+
+		// parent map: 1 root data slab, 3 external collision groups
+		// composite elements: 1 root data slab for each
+		require.Equal(t, 1+3+mapSize, len(storage.deltas))
+		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+
+		verifyMapLoadedElements(t, m, values)
+	})
+
+	t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
+
+		const mapSize = 3
+		m, values := createMapWithCompositeValues(
+			t,
+			storage,
+			address,
+			typeInfo,
+			mapSize,
+			func(i int) []Digest { return []Digest{Digest(i)} },
+		)
+
+		// parent map: 1 root data slab
+		// composite elements: 1 root data slab for each
+		require.Equal(t, 1+mapSize, len(storage.deltas))
+		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+
+		verifyMapLoadedElements(t, m, values)
+
+		// Unload composite element from front to back.
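+		// Removing a child slab directly from storage simulates a composite
+		// value that is no longer loaded; the loaded-values iterator should
+		// skip it rather than fault it back in, so after each removal only
+		// the trailing elements are expected to be visible.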
+		for i := 0; i < len(values); i++ {
+			v := values[i][1]
 
-	const mapSize = 4096
+			nestedArray, ok := v.(*Array)
+			require.True(t, ok)
 
-	r := newRand(t)
+			err := storage.Remove(nestedArray.SlabID())
+			require.NoError(t, err)
 
-	typeInfo := testTypeInfo{42}
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-	storage := newTestPersistentStorage(t)
+			expectedValues := values[i+1:]
+			verifyMapLoadedElements(t, m, expectedValues)
+		}
+	})
 
-	keyValues := make(map[Value]Value, mapSize)
-	i := 0
-	for len(keyValues) < mapSize {
-		k := NewStringValue(randStr(r, 16))
-		keyValues[k] = Uint64Value(i)
-		i++
-	}
+	t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)
 
-	m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
-	require.NoError(t, err)
+		const mapSize = 3
+		m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize)
 
-	for k, v := range keyValues {
-		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
-		require.NoError(t, err)
-		require.Nil(t, existingStorable)
-	}
+		// parent map: 1 root data slab
+		// long string keys: 1 storable slab for each
+		require.Equal(t, 1+mapSize, len(storage.deltas))
+		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
 
-	rootID := m.SlabID()
+		verifyMapLoadedElements(t, m, values)
 
-	slabIterator, err := storage.SlabIterator()
-	require.NoError(t, err)
+		// Unload external key from front to back.
+		for i := 0; i < len(values); i++ {
+			k := values[i][0]
 
-	for {
-		id, slab := slabIterator()
+			s, ok := k.(StringValue)
+			require.True(t, ok)
 
-		if id == SlabIDUndefined {
-			break
-		}
+			// Find storage id for StringValue s.
+			var keyID SlabID
+			for id, slab := range storage.deltas {
+				if sslab, ok := slab.(*StorableSlab); ok {
+					if other, ok := sslab.storable.(StringValue); ok {
+						if s.str == other.str {
+							keyID = id
+							break
+						}
+					}
+				}
+			}
 
-		value, err := slab.StoredValue(storage)
+			require.NoError(t, keyID.Valid())
 
-		if id == rootID {
+			err := storage.Remove(keyID)
 			require.NoError(t, err)
 
-			m2, ok := value.(*OrderedMap)
-			require.True(t, ok)
-
-			verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false)
-		} else {
-			require.Equal(t, 1, errorCategorizationCount(err))
-			var fatalError *FatalError
-			var notValueError *NotValueError
-			require.ErrorAs(t, err, &fatalError)
-			require.ErrorAs(t, err, &notValueError)
-			require.ErrorAs(t, fatalError, &notValueError)
-			require.Nil(t, value)
+			expectedValues := values[i+1:]
+			verifyMapLoadedElements(t, m, expectedValues)
 		}
-	}
-}
-
-func TestMapPopIterate(t *testing.T) {
+	})
 
-	t.Run("empty", func(t *testing.T) {
-		typeInfo := testTypeInfo{42}
+	t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) {
 		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-		digesterBuilder := newBasicDigesterBuilder()
 
-		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
-		require.NoError(t, err)
+		// Create parent map with 3 collision groups, 2 elements in each group.
+ const mapSize = 6 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - err = storage.Commit() - require.NoError(t, err) + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - require.Equal(t, 1, storage.Count()) + verifyMapLoadedElements(t, m, values) - i := uint64(0) - err = m.PopIterate(func(k Storable, v Storable) { - i++ - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) + // Unload composite element from front to back. + for i := 0; i < len(values); i++ { + v := values[i][1] - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + nestedArray, ok := v.(*Array) + require.True(t, ok) - t.Run("root-dataslab", func(t *testing.T) { - const mapSize = 10 + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) - typeInfo := testTypeInfo{42} + expectedValues := values[i+1:] + verifyMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - key, value := Uint64Value(i), Uint64Value(i*10) - sortedKeys[i] = key - keyValues[key] = value + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - existingStorable, err := m.Set(compare, hashInputProvider, key, value) + verifyMapLoadedElements(t, m, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i][1] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, existingStorable) + + expectedValues := values[i+1:] + verifyMapLoadedElements(t, m, expectedValues) } + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - err = storage.Commit() - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. 
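+		// External collision groups live in separate MapDataSlabs referenced
+		// from the root data slab (the extra 3 slabs asserted below), so
+		// unloading one group slab is expected to hide all 4 of its elements
+		// at once.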
+ const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - require.Equal(t, 1, storage.Count()) + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + verifyMapLoadedElements(t, m, values) - i := mapSize - err = m.PopIterate(func(k, v Storable) { - i-- + // Unload external collision group slab from front to back - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() }) - require.NoError(t, err) - require.Equal(t, 0, i) + for i, id := range externalCollisionSlabIDs { + err := storage.Remove(id) + require.NoError(t, err) - verifyEmptyMap(t, storage, typeInfo, address, m) + expectedValues := values[i*4+4:] + verifyMapLoadedElements(t, m, expectedValues) + } }) - t.Run("root-metaslab", func(t *testing.T) { - const mapSize = 4096 - - r := newRand(t) + t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) - if _, found := keyValues[k]; !found { - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) - i++ - } - } + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) - digesterBuilder := newBasicDigesterBuilder() + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + verifyMapLoadedElements(t, m, values) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Unload composite element from back to front. 
+ for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, existingStorable) - } - err = storage.Commit() - require.NoError(t, err) + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Iterate key value pairs - i = len(keyValues) - err = m.PopIterate(func(k Storable, v Storable) { - i-- + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + // parent map: 1 root data slab + // long string keys: 1 storable slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + verifyMapLoadedElements(t, m, values) - require.NoError(t, err) - require.Equal(t, 0, i) + // Unload composite element from front to back. + for i := len(values) - 1; i >= 0; i-- { + k := values[i][0] - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + s, ok := k.(StringValue) + require.True(t, ok) - t.Run("collision", func(t *testing.T) { - //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } - const mapSize = 1024 + require.NoError(t, keyID.Valid()) - SetThreshold(512) - defer SetThreshold(1024) + err := storage.Remove(keyID) + require.NoError(t, err) - r := newRand(t) + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := &mockDigesterBuilder{} + t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { storage := newTestPersistentStorage(t) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) - - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) + // Create parent map with 3 collision groups, 2 elements in each group. 
+ const mapSize = 6 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - if _, found := keyValues[k]; !found { + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) + verifyMapLoadedElements(t, m, values) - digests := []Digest{ - Digest(i % 100), - Digest(i % 5), - } + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + nestedArray, ok := v.(*Array) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) - require.NoError(t, err) - require.Nil(t, existingStorable) + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) - i++ - } + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) } + }) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - err = storage.Commit() - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - // Iterate key value pairs - i = mapSize - err = m.PopIterate(func(k Storable, v Storable) { - i-- + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + verifyMapLoadedElements(t, m, values) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] - require.NoError(t, err) - require.Equal(t, 0, i) + nestedArray, ok := v.(*Array) + require.True(t, ok) - verifyEmptyMap(t, storage, typeInfo, address, m) - }) -} + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) -func TestEmptyMap(t *testing.T) { + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - t.Parallel() + t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) - require.NoError(t, err) + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - t.Run("get", func(t *testing.T) { - s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var keyNotFoundError *KeyNotFoundError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, s) - }) + verifyMapLoadedElements(t, m, values) - t.Run("remove", func(t *testing.T) { - existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var keyNotFoundError *KeyNotFoundError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, existingKey) - require.Nil(t, existingValue) - }) + // Unload external slabs from back to front + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - t.Run("iterate", func(t *testing.T) { - i := 0 - err := m.Iterate(func(k Value, v Value) (bool, error) { - i++ - return true, nil + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() }) - require.NoError(t, err) - require.Equal(t, 0, i) - }) - t.Run("count", func(t *testing.T) { - count := m.Count() - require.Equal(t, uint64(0), count) + for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { + err := storage.Remove(externalCollisionSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i*4] + verifyMapLoadedElements(t, m, expectedValues) + } }) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, m.Type())) - }) + t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - t.Run("address", func(t *testing.T) { - require.Equal(t, address, m.Address()) - }) + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // TestMapEncodeDecode/empty tests empty map encoding and decoding -} + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) -func TestMapFromBatchData(t *testing.T) { + verifyMapLoadedElements(t, m, values) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} + // Unload value in the middle + unloadValueIndex := 1 - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 
7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) - require.Equal(t, uint64(0), m.Count()) + v := values[unloadValueIndex][1] - iter, err := m.Iterator() + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - // Create a map with new storage, new address, and original map's elements. - copied, err := NewMapFromBatchData( - storage, - address, - NewDefaultDigesterBuilder(), - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - verifyEmptyMap(t, storage, typeInfo, address, copied) - }) + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - t.Run("root-dataslab", func(t *testing.T) { - SetThreshold(1024) + verifyMapLoadedElements(t, m, values) - const mapSize = 10 + // Unload key in the middle. + unloadValueIndex := 1 - typeInfo := testTypeInfo{42} + k := values[unloadValueIndex][0] - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + s, ok := k.(StringValue) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } } - require.Equal(t, uint64(mapSize), m.Count()) + require.NoError(t, keyID.Valid()) - iter, err := m.Iterator() + err := storage.Remove(keyID) require.NoError(t, err) - var sortedKeys []Value - keyValues := make(map[Value]Value) + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) + }) + t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - // Create a map with new storage, new address, and original map's elements. - copied, err := NewMapFromBatchData( + // Create parent map with 3 collision groups, 2 elements in each group. 
+ const mapSize = 6 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - - k, v, err := iter.Next() + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - // Save key value pair - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - return k, v, err - }) + verifyMapLoadedElements(t, m, values) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + // Unload composite element in the middle + for _, unloadValueIndex := range []int{1, 3, 5} { + v := values[unloadValueIndex][1] - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + nestedArray, ok := v.(*Array) + require.True(t, ok) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + } - const mapSize = 4096 + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + } + verifyMapLoadedElements(t, m, expectedValues) + }) - typeInfo := testTypeInfo{42} + t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, ) - require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - require.Equal(t, uint64(mapSize), m.Count()) + verifyMapLoadedElements(t, m, values) - iter, err := m.Iterator() - require.NoError(t, err) + // Unload composite value in the middle. + for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { + v := values[unloadValueIndex][1] - var sortedKeys []Value - keyValues := make(map[Value]Value) + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + } + + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + values[6], + values[8], + values[10], + } + verifyMapLoadedElements(t, m, expectedValues) + }) + t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewMapFromBatchData( + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + + // Unload external slabs in the middle. + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - return k, v, err - }) + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) + id := externalCollisionSlabIDs[1] + err := storage.Remove(id) require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) - - t.Run("rebalance two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + copy(values[4:], values[8:]) + values = values[:8] - const mapSize = 10 + verifyMapLoadedElements(t, m, values) + }) - typeInfo := testTypeInfo{42} + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, ) - require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) - storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + verifyMapLoadedElements(t, m, values) - require.Equal(t, uint64(mapSize+1), m.Count()) + i := 0 + err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { + // At this point, iterator returned first element (v). - iter, err := m.Iterator() - require.NoError(t, err) + // Remove all other nested composite elements (except first element) from storage. 
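The loop that follows unloads values while the iterator is mid-flight; sketched as a standalone helper under the same test-package assumptions (storage.Remove, require):

func unloadAllButFirst(t *testing.T, storage *PersistentSlabStorage, values [][2]Value) {
	// Remove the root slab of every composite value except the first;
	// the loaded-values iterator should then stop after one element.
	for _, element := range values[1:] {
		nestedArray, ok := element[1].(*Array)
		require.True(t, ok)
		require.NoError(t, storage.Remove(nestedArray.SlabID()))
	}
}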
+ for _, element := range values[1:] {
+ value := element[1]
+ nestedArray, ok := value.(*Array)
+ require.True(t, ok)
- var sortedKeys []Value
- keyValues := make(map[Value]Value)
+ err := storage.Remove(nestedArray.SlabID())
+ require.NoError(t, err)
+ }
- storage := newTestPersistentStorage(t)
- address := Address{2, 3, 4, 5, 6, 7, 8, 9}
- digesterBuilder := NewDefaultDigesterBuilder()
+ require.Equal(t, 0, i)
+ valueEqual(t, typeInfoComparator, values[0][0], k)
+ valueEqual(t, typeInfoComparator, values[0][1], v)
+ i++
+ return true, nil
+ })
- copied, err := NewMapFromBatchData(
- storage,
- address,
- digesterBuilder,
- m.Type(),
- compare,
- hashInputProvider,
- m.Seed(),
- func() (Value, Value, error) {
- k, v, err := iter.Next()
+ require.NoError(t, err)
+ require.Equal(t, 1, i) // Only the first element is iterated because the other elements are removed during iteration.
+ })
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) {
+ const mapSize = 3
- return k, v, err
- })
+ // Create a map with nested composite value at specified index
+ for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ {
+ storage := newTestPersistentStorage(t)
- require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ m, values := createMapWithSimpleAndCompositeValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ nestedCompositeIndex,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
- })
+ // parent map: 1 root data slab
+ // composite element: 1 root data slab
+ require.Equal(t, 2, len(storage.deltas))
+ require.Equal(t, 0, getMapMetaDataSlabCount(storage))
- t.Run("merge two data slabs", func(t *testing.T) {
- SetThreshold(256)
- defer SetThreshold(1024)
+ verifyMapLoadedElements(t, m, values)
- const mapSize = 8
+ // Unload composite value
+ v := values[nestedCompositeIndex][1].(*Array)
- typeInfo := testTypeInfo{42}
+ err := storage.Remove(v.SlabID())
+ require.NoError(t, err)
- m, err := NewMap(
- newTestPersistentStorage(t),
- Address{1, 2, 3, 4, 5, 6, 7, 8},
- NewDefaultDigesterBuilder(),
- typeInfo,
- )
- require.NoError(t, err)
+ copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
+ values = values[:len(values)-1]
- for i := uint64(0); i < mapSize; i++ {
- storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10))
- require.NoError(t, err)
- require.Nil(t, storable)
+ verifyMapLoadedElements(t, m, values)
 }
+ })
- storable, err := m.Set(
- compare,
- hashInputProvider,
- NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))),
- NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))),
- )
- require.NoError(t, err)
- require.Nil(t, storable)
+ t.Run("root metadata slab with simple values", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- require.Equal(t, uint64(mapSize+1), m.Count())
- require.Equal(t, typeInfo, m.Type())
+ const mapSize = 20
+ m, values := createMapWithSimpleValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- iter, err := m.Iterator()
- require.NoError(t, err)
+ // parent map (2 levels): 1 root metadata slab, 3 data slabs
+ require.Equal(t, 4, len(storage.deltas))
+ require.Equal(t, 1, getMapMetaDataSlabCount(storage))
- var sortedKeys []Value
- keyValues :=
make(map[Value]Value) + verifyMapLoadedElements(t, m, values) + }) + t.Run("root metadata slab with composite values", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() - copied, err := NewMapFromBatchData( + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() - - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } - - return k, v, err - }) + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + verifyMapLoadedElements(t, m, values) }) - t.Run("random", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - const mapSize = 4096 + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - r := newRand(t) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values : 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - typeInfo := testTypeInfo{42} + verifyMapLoadedElements(t, m, values) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i][1] - for m.Count() < mapSize { - k := randomValue(r, int(maxInlineMapElementSize)) - v := randomValue(r, int(maxInlineMapElementSize)) + nestedArray, ok := v.(*Array) + require.True(t, ok) - _, err = m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - } - require.Equal(t, uint64(mapSize), m.Count()) - - iter, err := m.Iterator() - require.NoError(t, err) + expectedValues := values[i+1:] + verifyMapLoadedElements(t, m, expectedValues) + } + }) + t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() - - var sortedKeys []Value - keyValues := make(map[Value]Value, mapSize) - copied, err := NewMapFromBatchData( + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() - - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } - - return k, v, err - }) - - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - verifyMap(t, storage, typeInfo, address, copied, keyValues, 
sortedKeys, false) - }) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - t.Run("collision", func(t *testing.T) { + verifyMapLoadedElements(t, m, values) - const mapSize = 1024 + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] - SetThreshold(512) - defer SetThreshold(1024) + nestedArray, ok := v.(*Array) + require.True(t, ok) - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() - MaxCollisionLimitPerDigest = mapSize / 2 + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) - typeInfo := testTypeInfo{42} + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - digesterBuilder := &mockDigesterBuilder{} + t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - digesterBuilder, + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, + storage, + address, typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, ) - require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - k, v := Uint64Value(i), Uint64Value(i*10) + verifyMapLoadedElements(t, m, values) - digests := make([]Digest, 2) - if i%2 == 0 { - digests[0] = 0 - } else { - digests[0] = Digest(i % (mapSize / 2)) - } - digests[1] = Digest(i) + // Unload composite element in the middle + for _, index := range []int{4, 14} { - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + v := values[index][1] - storable, err := m.Set(compare, hashInputProvider, k, v) + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, storable) + + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) } + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { + const mapSize = 20 - iter, err := m.Iterator() - require.NoError(t, err) + // Create a map with nested composite value at specified index + for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { + storage := newTestPersistentStorage(t) - var sortedKeys []Value - keyValues := make(map[Value]Value) + m, values := createMapWithSimpleAndCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + nestedCompositeIndex, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 5, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - i := 0 - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() 
(Value, Value, error) {
- k, v, err := iter.Next()
+ verifyMapLoadedElements(t, m, values)
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ v := values[nestedCompositeIndex][1].(*Array)
- i++
- return k, v, err
- })
+ err := storage.Remove(v.SlabID())
+ require.NoError(t, err)
- require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
+ values = values[:len(values)-1]
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
+ verifyMapLoadedElements(t, m, values)
+ }
 })
- t.Run("data slab too large", func(t *testing.T) {
- // Slab size must not exceed maxThreshold.
- // We cannot make this problem happen after Atree Issue #193
- // was fixed by PR #194 & PR #197. This test is to catch regressions.
+ t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- SetThreshold(256)
- defer SetThreshold(1024)
+ const mapSize = 20
- r := newRand(t)
+ m, values := createMapWithSimpleValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- maxStringSize := int(maxInlineMapKeySize - 2)
+ // parent map (2 levels): 1 root metadata slab, 3 data slabs
+ require.Equal(t, 4, len(storage.deltas))
+ require.Equal(t, 1, getMapMetaDataSlabCount(storage))
- typeInfo := testTypeInfo{42}
+ verifyMapLoadedElements(t, m, values)
- digesterBuilder := &mockDigesterBuilder{}
+ rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
+ require.True(t, ok)
- m, err := NewMap(
- newTestPersistentStorage(t),
- Address{1, 2, 3, 4, 5, 6, 7, 8},
- digesterBuilder,
- typeInfo,
- )
- require.NoError(t, err)
+ // Unload data slabs from front to back
+ for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
- k := NewStringValue(randStr(r, maxStringSize))
- v := NewStringValue(randStr(r, maxStringSize))
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}})
+ childHeader := rootMetaDataSlab.childrenHeaders[i]
- storable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, storable)
+ // Get data slab element count before unloading it from storage.
+ // Element count isn't in the header.
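A compact restatement of the lookup that follows, assuming elements.Count() returns uint32 as in this package (the helper name is illustrative):

func loadedElementCount(t *testing.T, storage *PersistentSlabStorage, id SlabID) uint32 {
	// A map slab header records size and firstKey but no element count,
	// so the count must be read from the cached data slab before the
	// slab is removed from storage.
	mapDataSlab, ok := storage.deltas[id].(*MapDataSlab)
	require.True(t, ok)
	return mapDataSlab.elements.Count()
}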
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab)
+ require.True(t, ok)
- k = NewStringValue(randStr(r, maxStringSize))
- v = NewStringValue(randStr(r, maxStringSize))
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}})
+ count := mapDataSlab.elements.Count()
- storable, err = m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, storable)
+ err := storage.Remove(childHeader.slabID)
+ require.NoError(t, err)
- k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj")
- v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ")
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}})
+ values = values[count:]
- storable, err = m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, storable)
+ verifyMapLoadedElements(t, m, values)
+ }
+ })
- iter, err := m.Iterator()
- require.NoError(t, err)
+ t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- var sortedKeys []Value
- keyValues := make(map[Value]Value)
+ const mapSize = 20
+
+ m, values := createMapWithSimpleValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
+
+ // parent map (2 levels): 1 root metadata slab, 3 data slabs
+ require.Equal(t, 4, len(storage.deltas))
+ require.Equal(t, 1, getMapMetaDataSlabCount(storage))
- storage := newTestPersistentStorage(t)
- address := Address{2, 3, 4, 5, 6, 7, 8, 9}
+ verifyMapLoadedElements(t, m, values)
- copied, err := NewMapFromBatchData(
- storage,
- address,
- digesterBuilder,
- m.Type(),
- compare,
- hashInputProvider,
- m.Seed(),
- func() (Value, Value, error) {
- k, v, err := iter.Next()
+ rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
+ require.True(t, ok)
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ // Unload data slabs from back to front
+ for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
- return k, v, err
- })
+ childHeader := rootMetaDataSlab.childrenHeaders[i]
- require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ // Get data slab element count before unloading it from storage
+ // Element count isn't in the header.
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + require.True(t, ok) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) -} + count := mapDataSlab.elements.Count() -func TestMapNestedStorables(t *testing.T) { + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - t.Run("SomeValue", func(t *testing.T) { + values = values[:len(values)-int(count)] - const mapSize = 4096 + verifyMapLoadedElements(t, m, values) + } + }) - typeInfo := testTypeInfo{42} + t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + const mapSize = 20 - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - ks := strings.Repeat("a", int(i)) - k := SomeValue{Value: NewStringValue(ks)} + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - vs := strings.Repeat("b", int(i)) - v := SomeValue{Value: NewStringValue(vs)} + verifyMapLoadedElements(t, m, values) - keyValues[k] = v + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) - }) + index := 1 + childHeader := rootMetaDataSlab.childrenHeaders[index] - t.Run("Array", func(t *testing.T) { + // Get element count from previous data slab + mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) + require.True(t, ok) - const mapSize = 4096 + countAtIndex0 := mapDataSlab.elements.Count() - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Get element count from slab to be unloaded + mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) + require.True(t, ok) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + countAtIndex1 := mapDataSlab.elements.Count() - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - // Create a nested array with one element - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) + values = values[:m.Count()-uint64(countAtIndex1)] - vs := strings.Repeat("b", int(i)) - v := SomeValue{Value: NewStringValue(vs)} + verifyMapLoadedElements(t, m, values) + }) - err = array.Append(v) - require.NoError(t, err) + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Insert nested array into map - ks := strings.Repeat("a", int(i)) - k := SomeValue{Value: NewStringValue(ks)} + const mapSize = 200 - keyValues[k] = array + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, 
+ ) - existingStorable, err := m.Set(compare, hashInputProvider, k, array) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) - }) -} + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) -func TestMapMaxInlineElement(t *testing.T) { - t.Parallel() + // Unload non-root metadata slabs from front to back. + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { - r := newRand(t) - maxStringSize := int(maxInlineMapKeySize - 2) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childHeader := rootMetaDataSlab.childrenHeaders[i] - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - keyValues := make(map[Value]Value) - for len(keyValues) < 2 { - // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. - k := NewStringValue(randStr(r, maxStringSize)) - v := NewStringValue(randStr(r, maxStringSize)) - keyValues[k] = v + // Use firstKey to deduce number of elements in slab. + var expectedValues [][2]Value + if i < len(rootMetaDataSlab.childrenHeaders)-1 { + nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] + expectedValues = values[int(nextChildHeader.firstKey):] + } - _, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - } + verifyMapLoadedElements(t, m, expectedValues) + } + }) - require.True(t, m.root.IsData()) + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Size of root data slab with two elements (key+value pairs) of - // max inlined size is target slab size minus - // slab id size (next slab id is omitted in root slab) - require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + const mapSize = 200 - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) -} + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) -func TestMapString(t *testing.T) { + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - SetThreshold(256) - defer SetThreshold(1024) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + // Unload non-root metadata slabs from back to front. + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childHeader := rootMetaDataSlab.childrenHeaders[i] - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + // Use firstKey to deduce number of elements in slabs. 
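Why firstKey can stand in for an element index here: these tests assign digest Digest(i) to key i, so elements are laid out in key order and each child header's firstKey equals the index of its first element. A sketch of the resulting range arithmetic (the helper name is illustrative):

func childElementRange(headers []MapSlabHeader, i, mapSize int) (start, end int) {
	// Child i holds elements in [firstKey(i), firstKey(i+1));
	// the last child runs to the end of the map.
	start = int(headers[i].firstKey)
	end = mapSize
	if i+1 < len(headers) {
		end = int(headers[i+1].firstKey)
	}
	return start, end
}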
+ values = values[:childHeader.firstKey] - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + verifyMapLoadedElements(t, m, values) } - - want := `[0:0 1:1 2:2]` - require.Equal(t, want, m.String()) }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) - - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + const mapSize = 500 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` - require.Equal(t, want, m.String()) - }) -} + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) -func TestMapSlabDump(t *testing.T) { + verifyMapLoadedElements(t, m, values) - SetThreshold(256) - defer SetThreshold(1024) + r := newRand(t) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + // Unload composite element in random position + for len(values) > 0 { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + i := r.Intn(len(values)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + v := values[i][1] - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + nestedArray, ok := v.(*Array) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, existingStorable) - } - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + const mapSize = 500 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - for i := uint64(0); i < mapSize; 
i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // composite values: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + verifyMapLoadedElements(t, m, values) - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", - "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - t.Run("inline collision", func(t *testing.T) { - const mapSize = 30 + type slabInfo struct { + id SlabID + startIndex int + count int + } - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ { + h := nonRootMetaDataSlab.childrenHeaders[i] - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + if len(dataSlabInfos) > 0 { + // Update previous slabInfo.count + dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex + } - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", - "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)}) + } } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("external collision", func(t *testing.T) { - const mapSize = 30 + r := newRand(t) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address 
:= Address{1, 2, 3, 4, 5, 6, 7, 8}
+ for len(dataSlabInfos) > 0 {
+ index := r.Intn(len(dataSlabInfos))
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ slabToBeRemoved := dataSlabInfos[index]
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}})
+ // Update startIndex for all subsequent data slabs
+ for i := index + 1; i < len(dataSlabInfos); i++ {
+ dataSlabInfos[i].startIndex -= slabToBeRemoved.count
+ }
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ err := storage.Remove(slabToBeRemoved.id)
 require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
- want := []string{
- "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]",
- "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]",
- "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]",
+ if index == len(dataSlabInfos)-1 {
+ values = values[:slabToBeRemoved.startIndex]
+ } else {
+ copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:])
+ values = values[:len(values)-slabToBeRemoved.count]
+ }
+
+ copy(dataSlabInfos[index:], dataSlabInfos[index+1:])
+ dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
+
+ verifyMapLoadedElements(t, m, values)
 }
- dumps, err := DumpMapSlabs(m)
- require.NoError(t, err)
- require.Equal(t, want, dumps)
+
+ require.Equal(t, 0, len(values))
 })
- t.Run("key overflow", func(t *testing.T) {
+ t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) {
- digesterBuilder := &mockDigesterBuilder{}
- typeInfo := testTypeInfo{42}
 storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ const mapSize = 500
+ m, values := createMapWithCompositeValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize)))
- v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize)))
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}})
+ // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
+ // composite values: 1 root data slab for each
+ require.True(t, len(storage.deltas) > 1+mapSize)
+ require.True(t, getMapMetaDataSlabCount(storage) > 1)
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
+ verifyMapLoadedElements(t, m, values)
- want := []string{
- "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]",
- "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ type slabInfo struct {
+ id SlabID
+ startIndex int
+ count int
+ children []*slabInfo
 }
- dumps, err := DumpMapSlabs(m)
- require.NoError(t, err)
- require.Equal(t, want, dumps)
- })
- t.Run("value overflow", func(t *testing.T) {
+
rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + if i > 0 { + prevMetaDataSlabInfo := metadataSlabInfos[i-1] + prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] - k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + // Update previous metadata slab count + prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + // Update previous data slab count + prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex + } - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} + metadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: int(mheader.firstKey), + } -func TestMaxCollisionLimitPerDigest(t *testing.T) { - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() + nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + require.True(t, ok) - t.Run("collision limit 0", func(t *testing.T) { - const mapSize = 1024 + children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) + for i, h := range nonRootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: h.slabID, + startIndex: int(h.firstKey), + } + if i > 0 { + children[i-1].count = int(h.firstKey) - children[i-1].startIndex + } + } - SetThreshold(256) - defer SetThreshold(1024) + metadataSlabInfo.children = children + metadataSlabInfos[i] = metadataSlabInfo + } - // Set noncryptographic hash collision limit as 0, - // meaning no collision is allowed at first level. 
- MaxCollisionLimitPerDigest = uint32(0)
+ const (
+ metadataSlabType int = iota
+ dataSlabType
+ maxSlabType
+ )
- digesterBuilder := &mockDigesterBuilder{}
- keyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- keyValues[k] = v
+ r := newRand(t)
- digests := []Digest{Digest(i)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ for len(metadataSlabInfos) > 0 {
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ var slabInfoToBeRemoved *slabInfo
+ var isLastSlab bool
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ switch r.Intn(maxSlabType) {
- // Insert elements within collision limits
- for k, v := range keyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
+ case metadataSlabType:
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ metadataSlabIndex := r.Intn(len(metadataSlabInfos))
- // Insert elements exceeding collision limits
- collisionKeyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(mapSize + i)
- v := Uint64Value(mapSize + i)
- collisionKeyValues[k] = v
+ isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1
- digests := []Digest{Digest(i)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex]
- for k, v := range collisionKeyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.Equal(t, 1, errorCategorizationCount(err))
- var fatalError *FatalError
- var collisionLimitError *CollisionLimitError
- require.ErrorAs(t, err, &fatalError)
- require.ErrorAs(t, err, &collisionLimitError)
- require.ErrorAs(t, fatalError, &collisionLimitError)
- require.Nil(t, existingStorable)
- }
+ count := slabInfoToBeRemoved.count
- // Verify that no new elements exceeding collision limit inserted
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ // Update startIndex for subsequent metadata slabs
+ for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ {
+ metadataSlabInfos[i].startIndex -= count
- // Update elements within collision limits
- for k := range keyValues {
- v := Uint64Value(0)
- keyValues[k] = v
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.NotNil(t, existingStorable)
- }
+ for j := 0; j < len(metadataSlabInfos[i].children); j++ {
+ metadataSlabInfos[i].children[j].startIndex -= count
+ }
+ }
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
- })
+ copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:])
+ metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1]
- t.Run("collision limit > 0", func(t *testing.T) {
- const mapSize = 1024
+ case dataSlabType:
- SetThreshold(256)
- defer SetThreshold(1024)
+ metadataSlabIndex := r.Intn(len(metadataSlabInfos))
- // Set noncryptographic hash collision limit as 7,
- // meaning at most 8 elements in collision group per digest at first level.
- MaxCollisionLimitPerDigest = uint32(7)
+ metadataSlabInfo := metadataSlabInfos[metadataSlabIndex]
- digesterBuilder := &mockDigesterBuilder{}
- keyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- keyValues[k] = v
+ dataSlabIndex := r.Intn(len(metadataSlabInfo.children))
- digests := []Digest{Digest(i % 128)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) &&
+ (dataSlabIndex == len(metadataSlabInfo.children)-1)
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex]
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ count := slabInfoToBeRemoved.count
- // Insert elements within collision limits
- for k, v := range keyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
+ // Update startIndex for all subsequent data slabs in this metadata slab info
+ for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ {
+ metadataSlabInfo.children[i].startIndex -= count
+ }
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:])
+ metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1]
- // Insert elements exceeding collision limits
- collisionKeyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(mapSize + i)
- v := Uint64Value(mapSize + i)
- collisionKeyValues[k] = v
+ metadataSlabInfo.count -= count
- digests := []Digest{Digest(i % 128)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ // Update startIndex for all subsequent metadata slabs.
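The startIndex bookkeeping above repeats for both slab kinds; one way to read it, sketched over the test's local slabInfo type (the helper name is illustrative):

func shiftStartIndexes(infos []*slabInfo, from, count int) {
	// Removing count elements shifts the start index of every later
	// slab, and of each of its children, left by the same amount.
	for i := from; i < len(infos); i++ {
		infos[i].startIndex -= count
		for _, child := range infos[i].children {
			child.startIndex -= count
		}
	}
}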
+ for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { + metadataSlabInfos[i].startIndex -= count - for k, v := range collisionKeyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var collisionLimitError *CollisionLimitError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, &collisionLimitError) - require.ErrorAs(t, fatalError, &collisionLimitError) - require.Nil(t, existingStorable) - } + for j := 0; j < len(metadataSlabInfos[i].children); j++ { + metadataSlabInfos[i].children[j].startIndex -= count + } + } - // Verify that no new elements exceeding collision limit inserted - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + if len(metadataSlabInfo.children) == 0 { + copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) + metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] + } + } - // Update elements within collision limits - for k := range keyValues { - v := Uint64Value(0) - keyValues[k] = v - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(slabInfoToBeRemoved.id) require.NoError(t, err) - require.NotNil(t, existingStorable) + + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } + + verifyMapLoadedElements(t, m, values) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + require.Equal(t, 0, len(values)) }) } -func TestMapLoadedValueIterator(t *testing.T) { +func createMapWithLongStringKey( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, +) (*OrderedMap, [][2]Value) { - SetThreshold(256) - defer SetThreshold(1024) + digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Create parent map. 
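The randomized removal loop above keeps the flat values slice and every remaining slab's startIndex in sync by hand. Below is a minimal, self-contained sketch of that bookkeeping; slabInfo here is reduced to the two fields the loop actually updates, and all names and values are illustrative, not atree's:

    package main

    import "fmt"

    // slabInfo mirrors just the bookkeeping fields used by the test above.
    type slabInfo struct {
        startIndex int
        count      int
    }

    // removeRange deletes values[start : start+count] in place,
    // using the same copy-and-truncate idiom as the test.
    func removeRange(values []int, start, count int) []int {
        copy(values[start:], values[start+count:])
        return values[:len(values)-count]
    }

    func main() {
        values := []int{0, 1, 2, 3, 4, 5, 6, 7}
        slabs := []*slabInfo{
            {startIndex: 0, count: 3},
            {startIndex: 3, count: 2},
            {startIndex: 5, count: 3},
        }

        removed := slabs[1] // remove the middle slab's element range
        values = removeRange(values, removed.startIndex, removed.count)

        // Every slab after the removed one shifts left by the removed count.
        for _, s := range slabs[2:] {
            s.startIndex -= removed.count
        }

        fmt.Println(values)              // [0 1 2 5 6 7]
        fmt.Println(slabs[2].startIndex) // 3
    }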
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - t.Run("empty", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedValues := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { + s := strings.Repeat(string(r), int(maxInlineMapElementSize)) - digesterBuilder := &mockDigesterBuilder{} + k := NewStringValue(s) + v := Uint64Value(i) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + expectedValues[i] = [2]Value{k, v} + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + r++ + } - verifyMapLoadedElements(t, m, nil) - }) + return m, expectedValues +} - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func createMapWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - const mapSize = 3 - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + digesterBuilder := &mockDigesterBuilder{} - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) - }) + expectedValues := make([][2]Value, size) + r := rune('a') + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 20)) + + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + expectedValues[i] = [2]Value{k, v} - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + return m, expectedValues +} - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) +func createMapWithCompositeValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - verifyMapLoadedElements(t, m, values) - }) + // Use mockDigesterBuilder to guarantee element order. + digesterBuilder := &mockDigesterBuilder{} - t.Run("root data slab with composite values in collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Create parent map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // Create parent map with 3 collision groups, 2 elements in each group. 
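These helpers pin every key's digests up front with mockDigesterBuilder, which makes element order deterministic and lets a test force first-level collisions at will (keys with equal first-level digests land in one collision group). A reduced illustration of the idea, using local stand-in types rather than atree's digester interfaces:

    package main

    import "fmt"

    type Digest uint64

    // fixedDigester returns a canned digest sequence for a key,
    // standing in for the test file's mockDigester.
    type fixedDigester struct{ digests []Digest }

    func (d fixedDigester) DigestAt(level int) Digest { return d.digests[level] }

    func main() {
        // Keys i and i+1 share first-level digest i/2, so each pair
        // lands in one collision group, mirroring digests like
        // []Digest{Digest(i / 2), Digest(i)} in the tests above.
        for i := 0; i < 4; i++ {
            d := fixedDigester{digests: []Digest{Digest(i / 2), Digest(i)}}
            fmt.Printf("key %d -> first-level %d, second-level %d\n",
                i, d.DigestAt(0), d.DigestAt(1))
        }
    }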
- const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + expectedValues := make([][2]Value, size) + for i := 0; i < size; i++ { + // Create nested array + nested, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for j := 0; j < 50; j++ { + err = nested.Append(Uint64Value(j)) + require.NoError(t, err) + } - verifyMapLoadedElements(t, m, values) - }) + k := Uint64Value(i) + v := nested - t.Run("root data slab with composite values in external collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedValues[i] = [2]Value{k, v} - // Create parent map with 3 external collision group, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Set nested array to parent + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) - }) + return m, expectedValues +} - t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func createMapWithSimpleAndCompositeValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + compositeValueIndex int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + digesterBuilder := &mockDigesterBuilder{} - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Create parent map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + values := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { - // Unload composite element from front to back. 
- for i := 0; i < len(values); i++ { - v := values[i][1] + k := Uint64Value(i) - nestedArray, ok := v.(*Array) - require.True(t, ok) + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - err := storage.Remove(nestedArray.SlabID()) + if compositeValueIndex == i { + // Create nested array with one element + a, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + for j := 0; j < 50; j++ { + err = a.Append(Uint64Value(j)) + require.NoError(t, err) + } + + values[i] = [2]Value{k, a} + } else { + values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))} } - }) - t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1]) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + return m, values +} - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) +func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { + i := 0 + err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, typeInfoComparator, expectedValues[i][0], k) + valueEqual(t, typeInfoComparator, expectedValues[i][1], v) + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} - verifyMapLoadedElements(t, m, values) +func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*MapMetaDataSlab); ok { + counter++ + } + } + return counter +} - // Unload external key from front to back. - for i := 0; i < len(values); i++ { - k := values[i][0] +func TestMaxInlineMapValueSize(t *testing.T) { - s, ok := k.(StringValue) - require.True(t, ok) + t.Run("small key", func(t *testing.T) { + // Value has larger max inline size when key is less than max map key size. - // Find storage id for StringValue s. - var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } - } - } + SetThreshold(256) + defer SetThreshold(1024) - require.NoError(t, keyID.Valid()) + mapSize := 2 + keyStringSize := 16 // Key size is less than max map key size. + valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. - err := storage.Remove(keyID) - require.NoError(t, err) + r := newRand(t) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v } - }) - t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - // Create parent map with 3 collision groups, 2 elements in each group. 
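getMapMetaDataSlabCount above distinguishes slab kinds by dynamic type over the cached storage deltas. A self-contained sketch of the same counting technique; the Slab interface, both slab types, and the int-keyed map below are local stand-ins for atree's, kept only to make the example runnable:

    package main

    import "fmt"

    // Slab stands in for atree's slab interface; only what the
    // counting helper needs.
    type Slab interface{ IsData() bool }

    type dataSlab struct{}

    func (dataSlab) IsData() bool { return true }

    type metaDataSlab struct{}

    func (metaDataSlab) IsData() bool { return false }

    // countMetaDataSlabs mirrors getMapMetaDataSlabCount above:
    // walk the cached deltas and count entries by dynamic type.
    func countMetaDataSlabs(deltas map[int]Slab) int {
        n := 0
        for _, s := range deltas {
            if _, ok := s.(metaDataSlab); ok {
                n++
            }
        }
        return n
    }

    func main() {
        deltas := map[int]Slab{1: dataSlab{}, 2: metaDataSlab{}, 3: dataSlab{}}
        fmt.Println(countMetaDataSlabs(deltas)) // 1
    }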
- const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) + // Both key and value are stored in map slab. + require.Equal(t, 1, len(storage.deltas)) - // Unload composite element from front to back. - for i := 0; i < len(values); i++ { - v := values[i][1] + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("max size key", func(t *testing.T) { + // Value max size is about half of max map element size when key is exactly max map key size. + + SetThreshold(256) + defer SetThreshold(1024) - nestedArray, ok := v.(*Array) - require.True(t, ok) + mapSize := 1 + keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). + valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + r := newRand(t) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v } - }) - t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) + // Key is stored in map slab, while value is stored separately in storable slab. 
+ require.Equal(t, 2, len(storage.deltas)) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) - nestedArray, ok := v.(*Array) - require.True(t, ok) + t.Run("large key", func(t *testing.T) { + // Value has larger max inline size when key is more than max map key size because + // when key size exceeds max map key size, it is stored in a separate storable slab, + // and SlabIDStorable is stored as key in the map, which is 19 bytes. - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + SetThreshold(256) + defer SetThreshold(1024) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + mapSize := 1 + keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size + valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + + r := newRand(t) + + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v } - }) - t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) + // Key is stored in separate storable slabs, while value is stored in map slab. 
+ require.Equal(t, 2, len(storage.deltas)) - // Unload external collision group slab from front to back + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } - } - require.Equal(t, 3, len(externalCollisionSlabIDs)) +func TestMapID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() - } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - for i, id := range externalCollisionSlabIDs { - err := storage.Remove(id) - require.NoError(t, err) + sid := m.SlabID() + id := m.ValueID() - expectedValues := values[i*4+4:] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} - t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { + const ( + mapSize = 3 + keyStringSize = 16 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + keyValues := make(map[Value]*testMutableValue, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := newTestMutableValue(initialStorableSize) + keyValues[k] = v + } - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - verifyMapLoadedElements(t, m, values) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Unload composite element from back to front. 
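Taken together, the three subtests of TestMaxInlineMapValueSize pin down a budget: a small key leaves the value most of the element budget, a max-size key leaves it roughly half, and an oversized key or value is moved to its own storable slab and replaced by a slab ID reference (19 bytes, per the comment above). A hedged sketch of that decision; the constants and the simple k+v threshold below are placeholders for illustration, not atree's actual limits or logic:

    package main

    import "fmt"

    // Placeholder values for illustration only; atree derives the real
    // limits from the slab threshold.
    const (
        maxInlineMapElementSize = 100
        maxInlineMapKeySize     = 50
        slabIDStorableSize      = 19 // size of a SlabID reference
    )

    // inlineSizes reports how many bytes the key and value occupy
    // inside the map slab after applying the inline limits.
    func inlineSizes(keySize, valueSize int) (k, v int) {
        k = keySize
        if k > maxInlineMapKeySize {
            k = slabIDStorableSize // key moved out, reference stored instead
        }
        v = valueSize
        if k+v > maxInlineMapElementSize {
            v = slabIDStorableSize // value moved out, reference stored instead
        }
        return k, v
    }

    func main() {
        fmt.Println(inlineSizes(16, 60)) // small key: both inline
        fmt.Println(inlineSizes(50, 52)) // max-size key: value moved out
        fmt.Println(inlineSizes(60, 60)) // large key: key moved out, value inline
    }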
- for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - nestedArray, ok := v.(*Array) - require.True(t, ok) + require.True(t, m.root.IsData()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize + expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize + require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) - t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Reset mutable values after changing its storable size + for k, v := range keyValues { + v.updateStorableSize(mutatedStorableSize) - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.True(t, m.root.IsData()) - verifyMapLoadedElements(t, m, values) + expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize + expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize + require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - // Unload composite element from front to back. - for i := len(values) - 1; i >= 0; i-- { - k := values[i][0] + err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) +} - s, ok := k.(StringValue) - require.True(t, ok) +func TestChildMapInlinabilityInParentMap(t *testing.T) { - // Find storage id for StringValue s. - var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } - } - } + SetThreshold(256) + defer SetThreshold(1024) - require.NoError(t, keyID.Valid()) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - err := storage.Remove(keyID) - require.NoError(t, err) + t.Run("parent is root data slab, with one child map", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 + ) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + r := newRand(t) - // Create parent map with 3 collision groups, 2 elements in each group. 
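TestMapID above asserts the byte layout shared by both IDs: the 8-byte address followed by the 8-byte index, with a standalone slab's slab ID carrying the same 16 bytes as the value ID. A minimal sketch of that relationship with types local to the example:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    type ValueID [16]byte

    type SlabID struct {
        address [8]byte
        index   [8]byte
    }

    // valueIDFromSlabID mirrors the layout asserted in TestMapID:
    // id[:8] is the address, id[8:] is the index.
    func valueIDFromSlabID(sid SlabID) ValueID {
        var id ValueID
        copy(id[:8], sid.address[:])
        copy(id[8:], sid.index[:])
        return id
    }

    func main() {
        var sid SlabID
        binary.BigEndian.PutUint64(sid.address[:], 0x0102030405060708)
        binary.BigEndian.PutUint64(sid.index[:], 42)

        id := valueIDFromSlabID(sid)
        fmt.Printf("%x\n", id) // 0102030405060708000000000000002a
    }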
- const mapSize = 6
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} },
- )
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- // parent map: 1 root data slab
- // composite elements: 1 root data slab for each
- require.Equal(t, 1+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value {
+ return NewStringValue(randStr(r, keyStringSize))
+ })
- verifyMapLoadedElements(t, m, values)
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined.
- // Unload composite element from back to front
- for i := len(values) - 1; i >= 0; i-- {
- v := values[i][1]
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ children := getInlinedChildMapsFromParentMap(t, address, parentMap)
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
+ // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element.
+ for i := 0; i < 3; i++ {
+ for _, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- expectedValues := values[:i]
- verifyMapLoadedElements(t, m, expectedValues)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
+
+ child.keys = append(child.keys, k)
+
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ require.Equal(t, uint64(i+1), childMap.Count())
+
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
+ require.Equal(t, 1, getStoredDeltas(storage))
+
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
+
+ // Test parent slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize
+ expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedParentElementSize*mapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
+
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
 }
- t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Add one more element to child map which triggers inlined child map slab to become a standalone slab
+ for i, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- // Create parent map with 3 external collision groups, 4 elements in the group.
- const mapSize = 12
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} },
- )
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- // parent map: 1 root data slab, 3 external collision group
- // composite elements: 1 root data slab for each
- require.Equal(t, 1+3+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ child.keys = append(child.keys, k)
- verifyMapLoadedElements(t, m, values)
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- // Unload composite element from back to front
- for i := len(values) - 1; i >= 0; i-- {
- v := values[i][1]
+ require.False(t, childMap.Inlined())
+ require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There is more than 1 stored slab because child map is no longer inlined.
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID.
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize())
- expectedValues := values[:i]
- verifyMapLoadedElements(t, m, expectedValues)
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(expectedSlabID).ByteSize()
+ expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedParentElementSize*mapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
+
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 }
- t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Remove elements from child map which triggers standalone map slab to become an inlined slab again.
+ for _, child := range children {
+ childMap := child.m
+ valueID := child.valueID
+ keys := child.keys
- // Create parent map with 3 external collision groups, 4 elements in the group.
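This grow-then-shrink cycle exercises the rule from this commit: a child map stays inlined only while its root is a single data slab and its encoded size fits the inline limit enforced by the parent. A sketch of that predicate, written as a plain illustration of the rule rather than atree's actual implementation:

    package main

    import "fmt"

    // childSlab is a stand-in with just enough shape to express the rule.
    type childSlab struct {
        isData   bool   // root slab is a data slab (single-slab map)
        byteSize uint64 // encoded size of the root slab
    }

    // inlinable mirrors the rule exercised above: a single data slab
    // that is small enough for the parent's inline budget.
    func inlinable(s childSlab, maxInlineSize uint64) bool {
        return s.isData && s.byteSize <= maxInlineSize
    }

    func main() {
        const maxInlineSize = 64 // illustrative limit
        small := childSlab{isData: true, byteSize: 40}
        grown := childSlab{isData: true, byteSize: 90}

        fmt.Println(inlinable(small, maxInlineSize)) // true: stays inlined
        fmt.Println(inlinable(grown, maxInlineSize)) // false: becomes standalone
    }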
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + for _, k := range keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - verifyMapLoadedElements(t, m, values) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - // Unload external slabs from back to front - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } - } - require.Equal(t, 3, len(externalCollisionSlabIDs)) + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - return a.AddressAsUint64() < b.AddressAsUint64() - }) - - for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { - err := storage.Remove(externalCollisionSlabIDs[i]) - require.NoError(t, err) - - expectedValues := values[:i*4] - verifyMapLoadedElements(t, m, expectedValues) } - }) - t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + }) - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, + t.Run("parent is root data slab, with two child maps", func(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 9 + valueStringSize = 4 ) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - verifyMapLoadedElements(t, m, values) + r := newRand(t) - // Unload value in the middle - unloadValueIndex := 1 + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - v := values[unloadValueIndex][1] + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - nestedArray, ok := v.(*Array) - require.True(t, ok) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + children := getInlinedChildMapsFromParentMap(t, address, parentMap) - verifyMapLoadedElements(t, m, values) - }) + expectedParentSize := parentMap.root.ByteSize() - t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for _, child := range children { + childMap := child.m + valueID := child.valueID - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + child.keys = append(child.keys, k) - verifyMapLoadedElements(t, m, values) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(i+1), childMap.Count()) - // Unload key in the middle. - unloadValueIndex := 1 + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - k := values[unloadValueIndex][0] + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - s, ok := k.(StringValue) - require.True(t, ok) + // Test parent slab size + expectedParentSize += expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Find storage id for StringValue s. 
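Every size assertion in these tests instantiates one linear formula: slab prefix, plus hashed-elements prefix, plus, per element, a fixed prefix, the digest, and the encoded key and value sizes. A compact version of that accounting; the prefix constants below are assumed values for illustration, not atree's actual sizes:

    package main

    import "fmt"

    // Assumed prefix sizes for illustration; atree defines the real ones.
    const (
        mapRootDataSlabPrefixSize = 10
        hkeyElementsPrefixSize    = 8
        singleElementPrefixSize   = 2
        digestSize                = 8
    )

    // rootDataSlabSize mirrors the formula used in the assertions above:
    // prefix + elements prefix + n * (element prefix + digest + key + value).
    func rootDataSlabSize(n, keySize, valueSize uint32) uint32 {
        perElement := uint32(singleElementPrefixSize+digestSize) + keySize + valueSize
        return mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + n*perElement
    }

    func main() {
        // 3 elements whose keys encode to 9 bytes and values to 1 byte.
        fmt.Println(rootDataSlabSize(3, 9, 1)) // 78 with the assumed prefixes
    }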
- var keyID SlabID
- for id, slab := range storage.deltas {
- if sslab, ok := slab.(*StorableSlab); ok {
- if other, ok := sslab.storable.(StringValue); ok {
- if s.str == other.str {
- keyID = id
- break
- }
- }
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 }
 }
- require.NoError(t, keyID.Valid())
-
- err := storage.Remove(keyID)
- require.NoError(t, err)
-
- copy(values[unloadValueIndex:], values[unloadValueIndex+1:])
- values = values[:len(values)-1]
+ // Add one more element to child map which triggers inlined child map slab to become a standalone slab
+ for i, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- verifyMapLoadedElements(t, m, values)
- })
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ child.keys = append(child.keys, k)
- // Create parent map with 3 collision groups, 2 elements in each group.
- const mapSize = 6
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} },
- )
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- // parent map: 1 root data slab
- // nested composite elements: 1 root data slab for each
- require.Equal(t, 1+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ require.False(t, childMap.Inlined())
+ require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There is more than 1 stored slab because child map is no longer inlined.
- verifyMapLoadedElements(t, m, values)
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID.
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- // Unload composite element in the middle
- for _, unloadValueIndex := range []int{1, 3, 5} {
- v := values[unloadValueIndex][1]
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize())
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ // Subtract inlined child map size from expected parent size
+ expectedParentSize -= uint32(inlinedMapDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedChildElementSize*uint32(childMap.Count()-1)
+ // Add slab id storable size to expected parent size
+ expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize()
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 }
- expectedValues := [][2]Value{
- values[0],
- values[2],
- values[4],
- }
- verifyMapLoadedElements(t, m, expectedValues)
- })
+ require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There is more than 1 stored slab because child maps are no longer inlined.
- t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Remove one element from each child map which triggers standalone map slab to become an inlined slab again.
+ for i, child := range children {
+ childMap := child.m
+ valueID := child.valueID
+ keys := child.keys
- // Create parent map with 3 external collision groups, 4 elements in the group.
- const mapSize = 12
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} },
- )
+ lastKey := keys[len(keys)-1]
+ child.keys = child.keys[:len(keys)-1]
- // parent map: 1 root data slab, 3 external collision group
- // nested composite elements: 1 root data slab for each
- require.Equal(t, 1+3+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, lastKey)
+ require.NoError(t, err)
+ require.Equal(t, lastKey, existingKey)
+ require.NotNil(t, existingValue)
- verifyMapLoadedElements(t, m, values)
+ require.Equal(t, 1+mapSize-1-i, getStoredDeltas(storage))
- // Unload composite value in the middle.
- for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} {
- v := values[unloadValueIndex][1]
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
- }
+ // Subtract slab id storable size from expected parent size
+ expectedParentSize -= SlabIDStorable(SlabID{}).ByteSize()
+ // Add expected inlined child map to expected parent size
+ expectedParentSize += expectedInlinedMapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- expectedValues := [][2]Value{
- values[0],
- values[2],
- values[4],
- values[6],
- values[8],
- values[10],
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 }
- verifyMapLoadedElements(t, m, expectedValues)
- })
- t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Remove remaining elements from each inlined child map.
+ for _, child := range children {
+ childMap := child.m
+ valueID := child.valueID
+ keys := child.keys
- // Create parent map with 3 external collision groups, 4 elements in the group.
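Uninlining and re-inlining change the parent by a fixed swap, which is exactly what the subtract/add pairs above compute: the inlined child map bytes are exchanged for a slab ID storable on the way out, and back again on the way in. A worked version of that delta; the 19-byte reference size is taken from the comment earlier in this file, and the other numbers are illustrative:

    package main

    import "fmt"

    const slabIDStorableSize = 19 // encoded SlabID reference size

    // parentSizeAfterUninline applies the same two-step adjustment as the
    // test above: drop the inlined child bytes, add a SlabID storable.
    func parentSizeAfterUninline(parentSize, inlinedChildSize uint32) uint32 {
        return parentSize - inlinedChildSize + slabIDStorableSize
    }

    func main() {
        // A 60-byte inlined child collapses to a 19-byte reference,
        // shrinking the parent slab by 41 bytes.
        fmt.Println(parentSizeAfterUninline(200, 60)) // 159
    }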
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + for _, k := range keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - // parent map: 1 root data slab, 3 external collision group - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + expectedParentSize -= expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Unload external slabs in the middle. - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } - require.Equal(t, 3, len(externalCollisionSlabIDs)) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() - } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + }) - id := externalCollisionSlabIDs[1] - err := storage.Remove(id) - require.NoError(t, err) + t.Run("parent is root metadata slab, with four child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 9 + valueStringSize = 4 + ) - copy(values[4:], values[8:]) - values = values[:8] + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - verifyMapLoadedElements(t, m, values) - }) + r := newRand(t) - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMapLoadedElements(t, m, values) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - // At this point, iterator returned first element (v). + children := getInlinedChildMapsFromParentMap(t, address, parentMap) - // Remove all other nested composite elements (except first element) from storage. - for _, element := range values[1:] { - value := element[1] - nestedArray, ok := value.(*Array) - require.True(t, ok) + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for _, child := range children { + childMap := child.m + valueID := child.valueID - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0][0], k) - valueEqual(t, typeInfoComparator, values[0][1], v) - i++ - return true, nil - }) + child.keys = append(child.keys, k) - require.NoError(t, err) - require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. 
- })
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ require.Equal(t, uint64(i+1), childMap.Count())
- t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) {
- const mapSize = 3
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- // Create a map with nested composite value at specified index
- for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ {
- storage := newTestPersistentStorage(t)
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
- m, values := createMapWithSimpleAndCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- nestedCompositeIndex,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+ }
- // parent map: 1 root data slab
- // composite element: 1 root data slab
- require.Equal(t, 2, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ // Parent map has 1 metadata slab and 2 data slabs.
+ // All child maps are inlined.
+ require.Equal(t, 3, getStoredDeltas(storage))
+ require.False(t, parentMap.root.IsData())
- verifyMapLoadedElements(t, m, values)
+ // Add one more element to child map which triggers inlined child map slab to become a standalone slab
+ for _, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- // Unload composite value
- v := values[nestedCompositeIndex][1].(*Array)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- err := storage.Remove(v.SlabID())
+ child.keys = append(child.keys, k)
+
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
 require.NoError(t, err)
+ require.Nil(t, existingStorable)
- copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
- values = values[:len(values)-1]
+ require.False(t, childMap.Inlined())
- verifyMapLoadedElements(t, m, values)
- }
- })
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID.
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- t.Run("root metadata slab with simple values", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize())
- const mapSize = 20
- m, values := createMapWithSimpleValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ // Parent map has one root data slab.
+ // Each child map has one root data slab.
+ require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There is more than 1 stored slab because child maps are no longer inlined.
+ require.True(t, parentMap.root.IsData())
- verifyMapLoadedElements(t, m, values)
- })
+ // Remove one element from each child map which triggers standalone map slab to become an inlined slab again.
+ for _, child := range children {
+ childMap := child.m
+ valueID := child.valueID
+ keys := child.keys
- t.Run("root metadata slab with composite values", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ lastKey := keys[len(keys)-1]
+ child.keys = child.keys[:len(keys)-1]
- const mapSize = 20
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, lastKey)
+ require.NoError(t, err)
+ require.Equal(t, lastKey, existingKey)
+ require.NotNil(t, existingValue)
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- // composite values: 1 root data slab for each
- require.Equal(t, 4+mapSize, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged
- verifyMapLoadedElements(t, m, values)
- })
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
- t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- const mapSize = 20
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ // Parent map has one metadata slab + 2 data slabs.
+ require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because child maps are inlined again.
+ require.False(t, parentMap.root.IsData()) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values : 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Remove remaining elements from each inlined child map. + for _, child := range children { + childMap := child.m + valueID := child.valueID + keys := child.keys - verifyMapLoadedElements(t, m, values) + for _, k := range keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + require.Equal(t, uint64(mapSize), parentMap.Count()) + for _, child := range children { + require.Equal(t, uint64(0), child.m.Count()) } + + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(mapSize) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) }) +} - t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 ) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. 
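The closing assertions of the previous test rebuild the parent size from first principles: per element, a fixed prefix, the digest, the encoded key, and the 22-byte empty inlined child map computed at the top of the test. A compact check of that arithmetic; the prefix constants and the 11-byte encoded key below are assumptions for illustration only:

    package main

    import "fmt"

    const (
        mapRootDataSlabPrefixSize = 10 // assumed, for illustration
        hkeyElementsPrefixSize    = 8  // assumed, for illustration
        singleElementPrefixSize   = 2  // assumed, for illustration
        digestSize                = 8

        // The test above computes this as inlinedMapDataSlabPrefixSize +
        // hkeyElementsPrefixSize and notes that it equals 22.
        expectedEmptyInlinedMapSize = 22
    )

    func main() {
        const (
            mapSize        = 4
            encodedKeySize = 11 // 9-byte string key + assumed 2-byte string header
        )
        perElement := uint32(singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize)
        parentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + perElement*mapSize
        fmt.Println(parentSize) // 190 with these assumptions
    }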
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - verifyMapLoadedElements(t, m, values) + r := newRand(t) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - nestedArray, ok := v.(*Array) - require.True(t, ok) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Create a parent map, with an inlined child map, with an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + expectedParentSize := parentMap.root.ByteSize() - verifyMapLoadedElements(t, m, values) + // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. 
+ for _, child := range children { + require.Equal(t, 1, len(child.children)) - // Unload composite element in the middle - for _, index := range []int{4, 14} { + childMap := child.m + cValueID := child.valueID - v := values[index][1] + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - nestedArray, ok := v.(*Array) - require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - err := storage.Remove(nestedArray.SlabID()) + gchild.keys = append(gchild.keys, k) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - } - }) + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { - const mapSize = 20 + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, values := createMapWithSimpleAndCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - nestedCompositeIndex, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 5, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
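+ // Although the next insertion below goes into the grand child map, it is
+ // the child map that becomes standalone: the grand child alone still fits
+ // within the max inline size, while the child map's encoding (which embeds
+ // the inlined grand child) no longer does.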
+
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+
+ // Add one more element to grand child map, which triggers the inlined child map slab (NOT the grand child map slab) to become a standalone slab
+ for _, child := range children {
+ require.Equal(t, 1, len(child.children))
-
- verifyMapLoadedElements(t, m, values)
+ childMap := child.m
+ cValueID := child.valueID
- v := values[nestedCompositeIndex][1].(*Array)
+ gchild := child.children[0]
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
- err := storage.Remove(v.SlabID())
- require.NoError(t, err)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
- values = values[:len(values)-1]
+ gchild.keys = append(gchild.keys, k)
- verifyMapLoadedElements(t, m, values)
- }
- })
+ existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
- const mapSize = 20
+ // Child map is NOT inlined
+ require.False(t, childMap.Inlined())
+ require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is derived from value ID for standalone slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
- m, values := createMapWithSimpleValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ // Parent map is standalone
+ require.False(t, parentMap.Inlined())
+ require.Equal(t, 2, getStoredDeltas(storage))
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
- verifyMapLoadedElements(t, m, values)
+ // Test standalone child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
- rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
- require.True(t, ok)
+ // Test parent slab size
+ expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(SlabID{}).ByteSize()
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- // Unload data slabs from front to back
- for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- childHeader := rootMetaDataSlab.childrenHeaders[i]
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child map is not inlined.
- // Get data slab element count before unload it from storage.
- // Element count isn't in the header.
- mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab)
- require.True(t, ok)
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- count := mapDataSlab.elements.Count()
+ // Remove elements from grand child map, which triggers the standalone child map slab to become inlined again.
+ for _, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ gchild := child.children[0]
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
- values = values[count:]
+ for _, k := range gchild.keys {
+ existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k)
+ require.NoError(t, err)
+ require.Equal(t, k, existingKey)
+ require.NotNil(t, existingValue)
+
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID())
+ require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ // Test parent map slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize
+ expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedParentElementSize*uint32(parentMap.Count())
+ require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize())
+
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- verifyMapLoadedElements(t, m, values)
+ require.Equal(t, uint64(0), gchildMap.Count())
+ require.Equal(t, uint64(1), childMap.Count())
}
+
+ require.Equal(t, uint64(1), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined.
+
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
})
- t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) {
+ t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers grand child map slab to become standalone slab", func(t *testing.T) {
+ const (
+ mapSize = 1
+ keyStringSize = 9
+ valueStringSize = 4
+ largeValueStringSize = 40
+ )
+
+ // encoded key size is the same for all string keys of the same length.
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + encodedLargeValueSize := NewStringValue(strings.Repeat("a", largeValueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() + + r := newRand(t) + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const mapSize = 20 + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Create a parent map, with an inlined child map, with an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMapLoadedElements(t, m, values) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - // Unload data slabs from back to front - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + expectedParentSize := parentMap.root.ByteSize() - childHeader := rootMetaDataSlab.childrenHeaders[i] + // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. + for _, child := range children { + require.Equal(t, 1, len(child.children)) - // Get data slab element count before unload it from storage - // Element count isn't in the header. 
- mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) - require.True(t, ok) + childMap := child.m + cValueID := child.valueID - count := mapDataSlab.elements.Count() + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - err := storage.Remove(childHeader.slabID) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + gchild.keys = append(gchild.keys, k) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - values = values[:len(values)-int(count)] + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - } - }) + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - const mapSize = 20 + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
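+ // In contrast to the previous subtest, the large value added next makes the
+ // grand child map itself exceed the max inline size, so the grand child
+ // becomes standalone while the child map stays inlined: the child then
+ // encodes only a SlabID reference (slabIDStorableSize bytes) in place of
+ // the grand child's full encoding.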
- require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2)
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- index := 1
- childHeader := rootMetaDataSlab.childrenHeaders[index]
+ // Add one large element to grand child map, which triggers the inlined grand child map slab (NOT the child map slab) to become a standalone slab
+ for _, child := range children {
+ require.Equal(t, 1, len(child.children))
- // Get element count from previous data slab
- mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab)
- require.True(t, ok)
+ childMap := child.m
+ cValueID := child.valueID
- countAtIndex0 := mapDataSlab.elements.Count()
+ gchild := child.children[0]
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
- // Get element count from slab to be unloaded
- mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab)
- require.True(t, ok)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, largeValueStringSize))
- countAtIndex1 := mapDataSlab.elements.Count()
+ gchild.keys = append(gchild.keys, k)
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:])
- values = values[:m.Count()-uint64(countAtIndex1)]
+ // Grand child map is NOT inlined
+ require.False(t, gchildMap.Inlined())
+ require.Equal(t, valueIDToSlabID(gValueID), gchildMap.SlabID()) // Slab ID is valid for standalone slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
- verifyMapLoadedElements(t, m, values)
- })
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
- t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Parent map is standalone
+ require.False(t, parentMap.Inlined())
+ require.Equal(t, 2, getStoredDeltas(storage))
- const mapSize = 200
+ // Test standalone grand child slab size
+ expectedGrandChildElement1Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildElement2Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedLargeValueSize
+ expectedGrandChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElement1Size + expectedGrandChildElement2Size
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
- m, values := createMapWithSimpleValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + expectedChildElementSize
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
- // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs
- require.Equal(t, 4, getMapMetaDataSlabCount(storage))
+ // Test parent slab size
+ expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
- require.True(t, ok)
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- // Unload non-root metadata slabs from front to back.
- for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because grand child map is not inlined.
- childHeader := rootMetaDataSlab.childrenHeaders[i]
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ // Remove elements from grand child map, which triggers the standalone grand child map slab to become inlined again.
+ for _, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
- // Use firstKey to deduce number of elements in slab.
- var expectedValues [][2]Value
- if i < len(rootMetaDataSlab.childrenHeaders)-1 {
- nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1]
- expectedValues = values[int(nextChildHeader.firstKey):]
+ gchild := child.children[0]
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
+
+ // Remove the last element (large element) first to trigger grand child map being inlined again.
+ for i := len(gchild.keys) - 1; i >= 0; i-- {
+ k := gchild.keys[i]
+
+ existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k)
+ require.NoError(t, err)
+ require.Equal(t, k, existingKey)
+ require.NotNil(t, existingValue)
+
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID())
+ require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ // Test parent map slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize
+ expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedParentElementSize*uint32(parentMap.Count())
+ require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize())
+
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
}
- verifyMapLoadedElements(t, m, values)
+ require.Equal(t, uint64(0), gchildMap.Count())
+ require.Equal(t, uint64(1), childMap.Count())
}
- t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ require.Equal(t, uint64(1), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined.
- const mapSize = 200
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ })
- m, values := createMapWithSimpleValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
+ t.Run("parent is root data slab, two child maps, one grand child map each, changes to child map triggers child map slab to become standalone slab", func(t *testing.T) {
+ const (
+ mapSize = 2
+ keyStringSize = 4
+ valueStringSize = 4
)
- // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs
- require.Equal(t, 4, getMapMetaDataSlabCount(storage))
+ // encoded key size is the same for all string keys of the same length.
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize()
+ encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize()
+ slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize()
- rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
- require.True(t, ok)
+ r := newRand(t)
- // Unload non-root metadata slabs from back to front.
- for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- childHeader := rootMetaDataSlab.childrenHeaders[i]
+ getKeyFunc := func() Value {
+ return NewStringValue(randStr(r, keyStringSize))
+ }
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ // Create a parent map, with inlined child map, containing inlined grand child map
+ parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc)
- // Use firstKey to deduce number of elements in slabs.
- values = values[:childHeader.firstKey]
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined.
- verifyMapLoadedElements(t, m, values)
- }
- })
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) {
+ children := getInlinedChildMapsFromParentMap(t, address, parentMap)
+ require.Equal(t, mapSize, len(children))
- storage := newTestPersistentStorage(t)
+ expectedParentSize := parentMap.root.ByteSize()
- const mapSize = 500
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ // Insert 1 element into each grand child map (both child map and grand child map are still inlined).
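+ // While every level is inlined, each element added to a grand child map
+ // grows the encodings of the grand child, the child, and the parent root
+ // slab by the same amount, which is why expectedParentSize below is
+ // incremented by exactly expectedGrandChildElementSize.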
+ for _, child := range children { + childMap := child.m + cValueID := child.valueID - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - verifyMapLoadedElements(t, m, values) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - r := newRand(t) + gchild.keys = append(gchild.keys, k) - // Unload composite element in random position - for len(values) > 0 { + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - i := r.Intn(len(values)) + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - v := values[i][1] + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - copy(values[i:], values[i+1:]) - values = values[:len(values)-1] + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
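+ // Identity note: a standalone map's slab ID is derived from its value ID,
+ // which is what the valueIDToSlabID assertions below check. Inlining and
+ // un-inlining toggle the slab ID between SlabIDUndefined and that derived
+ // ID, but never change the value ID.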
- storage := newTestPersistentStorage(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + expectedParentSize = parentMap.root.ByteSize() - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // composite values: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + // Add 1 element to each child map so child map reaches its max size + for _, child := range children { - verifyMapLoadedElements(t, m, values) + childMap := child.m + cValueID := child.valueID - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - type slabInfo struct { - id SlabID - startIndex int - count int - } + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - var dataSlabInfos []*slabInfo - for _, mheader := range rootMetaDataSlab.childrenHeaders { + child.keys = append(child.keys, k) - nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) - require.True(t, ok) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ { - h := nonRootMetaDataSlab.childrenHeaders[i] + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - if len(dataSlabInfos) > 0 { - // Update previous slabInfo.count - dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex - } + // Test inlined child slab size + expectedChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)}) - } - } + // Test parent slab size + expectedParentSize += digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - r := newRand(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - for 
len(dataSlabInfos) > 0 {
- index := r.Intn(len(dataSlabInfos))
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is 1 stored slab because child map is inlined.
- slabToBeRemoved := dataSlabInfos[index]
+ verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- // Update startIndex for all subsequence data slabs
- for i := index + 1; i < len(dataSlabInfos); i++ {
- dataSlabInfos[i].startIndex -= slabToBeRemoved.count
- }
+ // Add 1 more element to each child map so child map exceeds its max inlined size and becomes standalone
+ for i, child := range children {
- err := storage.Remove(slabToBeRemoved.id)
- require.NoError(t, err)
+ childMap := child.m
+ cValueID := child.valueID
- if index == len(dataSlabInfos)-1 {
- values = values[:slabToBeRemoved.startIndex]
- } else {
- copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:])
- values = values[:len(values)-slabToBeRemoved.count]
- }
+ gchild := child.children[0]
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
- copy(dataSlabInfos[index:], dataSlabInfos[index+1:])
- dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- verifyMapLoadedElements(t, m, values)
- }
+ child.keys = append(child.keys, k)
- require.Equal(t, 0, len(values))
- })
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) {
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
- storage := newTestPersistentStorage(t)
+ // Child map is NOT inlined
+ require.False(t, childMap.Inlined())
+ require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is derived from value ID for standalone slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
- const mapSize = 500
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ // Parent map is standalone
+ require.False(t, parentMap.Inlined())
+ require.Equal(t, (1 + i + 1), getStoredDeltas(storage))
- // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
- // composite values: 1 root data slab for each
- require.True(t, len(storage.deltas) > 1+mapSize)
- require.True(t, getMapMetaDataSlabCount(storage) > 1)
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
- verifyMapLoadedElements(t, m, values)
+ // Test standalone child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*2 + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize)
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
- type slabInfo struct 
{ - id SlabID - startIndex int - count int - children []*slabInfo + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There is 1+mapSize stored slab because all child maps are standalone. - metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + // Test parent slab size + expectedParentSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (singleElementPrefixSize+digestSize+encodedKeySize+slabIDStorableSize)*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - if i > 0 { - prevMetaDataSlabInfo := metadataSlabInfos[i-1] - prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // Update previous metadata slab count - prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex + expectedParentMapSize := parentMap.root.ByteSize() - // Update previous data slab count - prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex - } + // Remove one element from child map which triggers standalone child map slab becomes inlined slab again. + for _, child := range children { + childMap := child.m + cValueID := child.valueID - metadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: int(mheader.firstKey), - } + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) - require.True(t, ok) + // Remove one element + k := child.keys[len(child.keys)-1] + child.keys = child.keys[:len(child.keys)-1] - children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) - for i, h := range nonRootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: int(h.firstKey), - } - if i > 0 { - children[i-1].count = int(h.firstKey) - children[i-1].startIndex - } - } + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - metadataSlabInfo.children = children - metadataSlabInfos[i] = metadataSlabInfo - } + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - const ( - metadataSlabType int = iota - dataSlabType - maxSlabType - ) + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged - r := newRand(t) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - for len(metadataSlabInfos) > 0 { + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := 
singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - var slabInfoToBeRemoved *slabInfo - var isLastSlab bool + // Test parent child slab size + expectedParentMapSize = expectedParentMapSize - slabIDStorableSize + expectedChildMapSize + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - switch r.Intn(maxSlabType) { + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - case metadataSlabType: + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - metadataSlabIndex := r.Intn(len(metadataSlabInfos)) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1 + // remove remaining elements from child map, except for grand child map + for _, child := range children { + childMap := child.m + cValueID := child.valueID - slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex] + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - count := slabInfoToBeRemoved.count + // Remove all elements, except grand child map (first element in child.keys) + for _, k := range child.keys[1:] { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentMapSize -= digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // Update startIndex for subsequence metadata slabs - for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { - metadataSlabInfos[i].startIndex -= count + require.Equal(t, uint64(1), gchildMap.Count()) + require.Equal(t, uint64(1), 
childMap.Count()) + } - for j := 0; j < len(metadataSlabInfos[i].children); j++ { - metadataSlabInfos[i].children[j].startIndex -= count - } - } + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) - metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) - case dataSlabType: + t.Run("parent is root metadata slab, with four child maps, each child map has grand child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 4 + valueStringSize = 8 + ) - metadataSlabIndex := r.Intn(len(metadataSlabInfos)) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() - metadataSlabInfo := metadataSlabInfos[metadataSlabIndex] + r := newRand(t) - dataSlabIndex := r.Intn(len(metadataSlabInfo.children)) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) && - (dataSlabIndex == len(metadataSlabInfo.children)-1) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex] + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - count := slabInfoToBeRemoved.count + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Update startIndex for all subsequence data slabs in this metadata slab info - for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ { - metadataSlabInfo.children[i].startIndex -= count - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:]) - metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1] + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - metadataSlabInfo.count -= count + // Insert 1 element to grand child map + // Both child map and grand child map are still inlined, but parent map's root slab is metadata slab. + for _, child := range children { + childMap := child.m + cValueID := child.valueID - // Update startIndex for all subsequence metadata slabs. 
- for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { - metadataSlabInfos[i].startIndex -= count + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - for j := 0; j < len(metadataSlabInfos[i].children); j++ { - metadataSlabInfos[i].children[j].startIndex -= count - } - } + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - if len(metadataSlabInfo.children) == 0 { - copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) - metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] - } - } + gchild.keys = append(gchild.keys, k) - err := storage.Remove(slabInfoToBeRemoved.id) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - } + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - require.Equal(t, 0, len(values)) - }) -} + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) -func createMapWithLongStringKey( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, -) (*OrderedMap, [][2]Value) { + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - digesterBuilder := &mockDigesterBuilder{} + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // Create parent map. - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + require.False(t, parentMap.Inlined()) + require.False(t, parentMap.root.IsData()) + // There is 3 stored slab: parent metadata slab with 2 data slabs (all child and grand child maps are inlined) + require.Equal(t, 3, getStoredDeltas(storage)) - expectedValues := make([][2]Value, size) - r := 'a' - for i := 0; i < size; i++ { - s := strings.Repeat(string(r), int(maxInlineMapElementSize)) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - k := NewStringValue(s) - v := Uint64Value(i) + // Insert 1 element to grand child map + // - grand child maps are inlined + // - child maps are standalone + // - parent map's root slab is data slab. 
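+ // Each child map that becomes standalone below adds one stored slab, so
+ // getStoredDeltas grows to 1+mapSize, and the parent stores a SlabIDStorable
+ // (slabIDStorableSize bytes) in place of each child's inlined encoding.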
+ for _, child := range children { + childMap := child.m + cValueID := child.valueID - expectedValues[i] = [2]Value{k, v} + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + gchild.keys = append(gchild.keys, k) - r++ - } + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - return m, expectedValues -} + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged -func createMapWithSimpleValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is same as value ID + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - digesterBuilder := &mockDigesterBuilder{} + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - expectedValues := make([][2]Value, size) - r := rune('a') - for i := 0; i < size; i++ { - k := Uint64Value(i) - v := NewStringValue(strings.Repeat(string(r), 20)) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + require.False(t, parentMap.Inlined()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) - expectedValues[i] = [2]Value{k, v} + // Test parent slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - return m, expectedValues -} + // Remove one element from grand child map to trigger child map inlined again. 
+ // - grand child maps are inlined + // - child maps are inlined + // - parent map root slab is metadata slab + for _, child := range children { + childMap := child.m + cValueID := child.valueID -func createMapWithCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - // Use mockDigesterBuilder to guarantee element order. - digesterBuilder := &mockDigesterBuilder{} + // Remove one element from grand child map + k := gchild.keys[len(gchild.keys)-1] + gchild.keys = gchild.keys[:len(gchild.keys)-1] - // Create parent map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.False(t, parentMap.root.IsData()) + require.Equal(t, 3, getStoredDeltas(storage)) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove all grand child element to trigger + // - child maps are inlined + // - parent map root slab is data slab + for _, child := range children { + childMap := child.m + cValueID := child.valueID + + // Remove grand children + for _, k := range child.keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - expectedValues := make([][2]Value, size) - for i := 0; i < size; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - err = nested.Append(Uint64Value(i)) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - k := 
Uint64Value(i) - v := nested + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - expectedValues[i] = [2]Value{k, v} + require.Equal(t, uint64(0), childMap.Count()) + } - //digests := []Digest{Digest(i)} - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Set nested array to parent - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - return m, expectedValues + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + expectedParentMapSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (digestSize+singleElementPrefixSize+encodedKeySize+expectedChildMapSize)*uint32(mapSize) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + }) } -func createMapWithSimpleAndCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - compositeValueIndex int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { +func TestChildMapWhenParentMapIsModified(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 4 + valueStringSize = 4 + expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 + ) + + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() - digesterBuilder := &mockDigesterBuilder{} + r := newRand(t) - // Create parent map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([][2]Value, size) - r := 'a' - for i := 0; i < size; i++ { + parentMapDigesterBuilder := &mockDigesterBuilder{} + parentDigest := 1 - k := Uint64Value(i) + // Create parent map with mock digests + parentMap, err := NewMap(storage, address, parentMapDigesterBuilder, typeInfo) + require.NoError(t, err) - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + expectedKeyValues := make(map[Value]Value) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Insert 2 child map with digest values of 1 and 3. 
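+ // Pinning the child maps at digests 1 and 3 leaves gaps at digests 0, 2,
+ // and 4, so later subtests can insert elements before, between, and after
+ // the children to shift their physical positions inside the parent's root
+ // data slab.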
+ for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + k := NewStringValue(randStr(r, keyStringSize)) - values[i] = [2]Value{k, a} - } else { - values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))} + digests := []Digest{ + Digest(parentDigest), } + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) + parentDigest += 2 - existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1]) + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) require.Nil(t, existingStorable) - } - return m, values -} + expectedKeyValues[k] = childMap -func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i][0], k) - valueEqual(t, typeInfoComparator, expectedValues[i][1], v) - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) -} + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) -func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int - for _, slab := range storage.deltas { - if _, ok := slab.(*MapMetaDataSlab); ok { - counter++ - } + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) } - return counter -} -func TestMaxInlineMapValueSize(t *testing.T) { + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - t.Run("small key", func(t *testing.T) { - // Value has larger max inline size when key is less than max map key size. + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - SetThreshold(256) - defer SetThreshold(1024) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - mapSize := 2 - keyStringSize := 16 // Key size is less than max map key size. - valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. 
+ var keysForNonChildMaps []Value - r := newRand(t) + t.Run("insert elements in parent map", func(t *testing.T) { - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, keyStringSize)) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v + newDigests := []Digest{ + 0, // insert value at digest 0, so all child map physical positions are moved by +1 + 2, // insert value at digest 2, so second child map physical positions are moved by +1 + 4, // insert value at digest 4, so no child map physical positions are moved } - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + for _, digest := range newDigests { - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + digests := []Digest{digest} + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - } - // Both key and value are stored in map slab. - require.Equal(t, 1, len(storage.deltas)) + expectedKeyValues[k] = v + keysForNonChildMaps = append(keysForNonChildMaps, k) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) + for i, child := range children { + childMap := child.m + childValueID := child.valueID - t.Run("max size key", func(t *testing.T) { - // Value max size is about half of max map element size when key is exactly max map key size. + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) - SetThreshold(256) - defer SetThreshold(1024) + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - mapSize := 1 - keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). - valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged - r := newRand(t) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, int(keyStringSize))) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + t.Run("remove elements from parent map", func(t *testing.T) { + // Remove element at digest 0, so all child map physical position are moved by -1. 
+ // Remove element at digest 2, so only second child map physical position is moved by -1 + // Remove element at digest 4, so no child map physical position is moved by -1 - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + for _, k := range keysForNonChildMaps { - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + existingKey, existingValue, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.NotNil(t, existingKey) + require.NotNil(t, existingValue) - // Key is stored in map slab, while value is stored separately in storable slab. - require.Equal(t, 2, len(storage.deltas)) + delete(expectedKeyValues, k) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + for i, child := range children { + childMap := child.m + childValueID := child.valueID + + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } + }) }) +} - t.Run("large key", func(t *testing.T) { - // Value has larger max inline size when key is more than max map key size because - // when key size exceeds max map key size, it is stored in a separate storable slab, - // and SlabIDStorable is stored as key in the map, which is 19 bytes. 
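+// createMapWithEmptyChildMap creates a parent map containing mapSize inlined
+// empty child maps, verifying inlined IDs and slab sizes along the way.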
+func createMapWithEmptyChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - SetThreshold(256) - defer SetThreshold(1024) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - mapSize := 1 - keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size - valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - r := newRand(t) + expectedKeyValues := make(map[Value]Value) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, int(keyStringSize))) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v - } + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + k := getKey() - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + ks, err := k.Storable(storage, address, maxInlineMapElementSize) require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Key is stored in separate storable slabs, while value is stored in map slab. 
- require.Equal(t, 2, len(storage.deltas)) + expectedKeyValues[k] = childMap - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) + + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } + + return parentMap, expectedKeyValues } -func TestMapID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} +func createMapWithEmpty2LevelChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - sid := m.SlabID() - id := m.ValueID() + expectedKeyValues := make(map[Value]Value) - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) -} + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) -func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { - const ( - mapSize = 3 - keyStringSize = 16 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) + // Create grand child map + gchildMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - keyValues := make(map[Value]*mutableValue, mapSize) - for i := 0; i < mapSize; i++ { - k := Uint64Value(i) - v := newMutableValue(initialStorableSize) - keyValues[k] = v - } + k := getKey() - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + ks, err := k.Storable(storage, address, maxInlineMapElementSize) + require.NoError(t, err) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.True(t, gchildMap.Inlined()) + testInlinedMapIDs(t, address, gchildMap) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) require.Nil(t, existingStorable) + + expectedKeyValues[k] = childMap + + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) + + // Test grand child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, gchildMap.root.ByteSize()) + + // Test child map slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + 
+ expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedChildMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) } - require.True(t, m.root.IsData()) + testNotInlinedMapIDs(t, address, parentMap) - expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize - expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize - require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) + return parentMap, expectedKeyValues +} - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) +type mapInfo struct { + m *OrderedMap + valueID ValueID + keys []Value + children []*mapInfo +} - // Reset mutable values after changing its storable size - for k, v := range keyValues { - v.updateStorableSize(mutatedStorableSize) +func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap *OrderedMap) []*mapInfo { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + children := make([]*mapInfo, 0, parentMap.Count()) + + err := parentMap.IterateKeys(func(k Value) (bool, error) { + if k == nil { + return false, nil + } + + e, err := parentMap.Get(compare, hashInputProvider, k) require.NoError(t, err) - require.NotNil(t, existingStorable) - } - require.True(t, m.root.IsData()) + childMap, ok := e.(*OrderedMap) + if !ok { + return true, nil + } - expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize - expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize - require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) + if childMap.Inlined() { + testInlinedMapIDs(t, address, childMap) + } else { + testNotInlinedMapIDs(t, address, childMap) + } - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + var childKeys []Value + err = childMap.IterateKeys(func(key Value) (bool, error) { + if key == nil { + return false, nil + } + childKeys = append(childKeys, key) + return true, nil + }) + require.NoError(t, err) + + children = append(children, &mapInfo{ + m: childMap, + valueID: childMap.ValueID(), + keys: childKeys, + children: getInlinedChildMapsFromParentMap(t, address, childMap), + }) + + return true, nil + }) require.NoError(t, err) + + return children } diff --git a/storable.go b/storable.go index 2d19fefd..c0102a9c 100644 --- a/storable.go +++ b/storable.go @@ -37,6 +37,14 @@ type Storable interface { ChildStorables() []Storable } +// EquatableStorable is an interface that supports comparison of Storable. +// This is only used for composite keys. +type EquatableStorable interface { + Storable + // Equal returns true if the given storable is equal to this storable. 
+ Equal(Storable) bool +} + type containerStorable interface { Storable hasPointer() bool @@ -50,6 +58,14 @@ func hasPointer(storable Storable) bool { } const ( + CBORTagInlinedArrayExtraData = 247 + CBORTagInlinedMapExtraData = 248 + CBORTagInlinedCompositeExtraData = 249 + + CBORTagInlinedArray = 250 + CBORTagInlinedMap = 251 + CBORTagInlinedComposite = 252 + CBORTagInlineCollisionGroup = 253 CBORTagExternalCollisionGroup = 254 @@ -59,6 +75,7 @@ const ( type SlabIDStorable SlabID var _ Storable = SlabIDStorable{} +var _ containerStorable = SlabIDStorable{} func (v SlabIDStorable) hasPointer() bool { return true diff --git a/storable_test.go b/storable_test.go index 9f4d6ece..77e2b4b4 100644 --- a/storable_test.go +++ b/storable_test.go @@ -345,6 +345,13 @@ func (v StringValue) StoredValue(_ SlabStorage) (Value, error) { return v, nil } +func (v StringValue) Equal(other Storable) bool { + if _, ok := other.(StringValue); !ok { + return false + } + return v.str == other.(StringValue).str +} + func (v StringValue) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { if uint64(v.ByteSize()) > maxInlineSize { @@ -430,7 +437,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, id SlabID, inlinedExtraData []ExtraData) (Storable, error) { t, err := dec.NextType() if err != nil { return nil, err @@ -451,6 +458,15 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { } switch tagNumber { + case CBORTagInlinedArray: + return DecodeInlinedArrayStorable(dec, decodeStorable, id, inlinedExtraData) + + case CBORTagInlinedMap: + return DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData) + + case CBORTagInlinedComposite: + return DecodeInlinedCompositeStorable(dec, decodeStorable, id, inlinedExtraData) + case CBORTagSlabID: return DecodeSlabIDStorable(dec) @@ -492,7 +508,7 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { return Uint64Value(n), nil case cborTagSomeValue: - storable, err := decodeStorable(dec, id) + storable, err := decodeStorable(dec, id, inlinedExtraData) if err != nil { return nil, err } @@ -507,12 +523,43 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { } func decodeTypeInfo(dec *cbor.StreamDecoder) (TypeInfo, error) { - value, err := dec.DecodeUint64() + t, err := dec.NextType() if err != nil { return nil, err } - return testTypeInfo{value: value}, nil + switch t { + case cbor.UintType: + value, err := dec.DecodeUint64() + if err != nil { + return nil, err + } + + return testTypeInfo{value: value}, nil + + case cbor.TagType: + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return nil, err + } + + switch tagNum { + case testCompositeTypeInfoTagNum: + value, err := dec.DecodeUint64() + if err != nil { + return nil, err + } + + return testCompositeTypeInfo{value: value}, nil + + default: + return nil, fmt.Errorf("failed to decode type info") + } + + default: + return nil, fmt.Errorf("failed to decode type info") + } + } func compare(storage SlabStorage, value Value, storable Storable) (bool, error) { @@ -677,25 +724,25 @@ func (v SomeStorable) String() string { return fmt.Sprintf("%s", v.Storable) } -type mutableValue struct { +type testMutableValue struct { storable *mutableStorable } -var _ Value = &mutableValue{} +var _ Value = &testMutableValue{} -func newMutableValue(storableSize uint32) *mutableValue { - return 
&mutableValue{ +func newTestMutableValue(storableSize uint32) *testMutableValue { + return &testMutableValue{ storable: &mutableStorable{ size: storableSize, }, } } -func (v *mutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) { +func (v *testMutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) { return v.storable, nil } -func (v *mutableValue) updateStorableSize(n uint32) { +func (v *testMutableValue) updateStorableSize(n uint32) { v.storable.size = n } @@ -710,7 +757,7 @@ func (s *mutableStorable) ByteSize() uint32 { } func (s *mutableStorable) StoredValue(SlabStorage) (Value, error) { - return &mutableValue{s}, nil + return &testMutableValue{s}, nil } func (*mutableStorable) ChildStorables() []Storable { diff --git a/storage.go b/storage.go index 005e69fd..7deb5b60 100644 --- a/storage.go +++ b/storage.go @@ -34,6 +34,13 @@ const LedgerBaseStorageSlabPrefix = "$" // ValueID identifies Array and OrderedMap. type ValueID [16]byte +func slabIDToValueID(sid SlabID) ValueID { + var id ValueID + copy(id[:], sid.address[:]) + copy(id[8:], sid.index[:]) + return id +} + type ( Address [8]byte SlabIndex [8]byte @@ -448,6 +455,8 @@ func CheckStorageHealth(storage SlabStorage, expectedNumberOfRootSlabs int) (map atLeastOneExternalSlab = true } + // This handles inlined slab because inlined slab is a child storable (s) and + // we traverse s.ChildStorables() for its inlined elements. next = append(next, s.ChildStorables()...) } @@ -574,6 +583,11 @@ func (s *PersistentSlabStorage) SlabIterator() (SlabIterator, error) { slabIDStorable, ok := childStorable.(SlabIDStorable) if !ok { + // Append child storables of this childStorable to handle inlined slab containing SlabIDStorable. + nextChildStorables = append( + nextChildStorables, + childStorable.ChildStorables()..., + ) continue } @@ -989,12 +1003,18 @@ func (s *PersistentSlabStorage) Retrieve(id SlabID) (Slab, bool, error) { } func (s *PersistentSlabStorage) Store(id SlabID, slab Slab) error { + if id == SlabIDUndefined { + return NewSlabIDError("failed to store slab with undefined slab ID") + } // add to deltas s.deltas[id] = slab return nil } func (s *PersistentSlabStorage) Remove(id SlabID) error { + if id == SlabIDUndefined { + return NewSlabIDError("failed to remove slab with undefined slab ID") + } // add to nil to deltas under that id s.deltas[id] = nil return nil diff --git a/storage_test.go b/storage_test.go index 40a4e6c8..2cd2a929 100644 --- a/storage_test.go +++ b/storage_test.go @@ -900,7 +900,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) { data := map[SlabID][]byte{ // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] id1: { - // extra data // version 0x10, // extra data flag @@ -970,7 +969,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) { // (data slab) next: 0, data: [0] id4: { - // extra data // version 0x10, // extra data flag diff --git a/typeinfo.go b/typeinfo.go index 35eb718d..7241fa15 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -19,11 +19,17 @@ package atree import ( + "encoding/binary" + "fmt" + "github.com/fxamacker/cbor/v2" ) type TypeInfo interface { Encode(*cbor.StreamEncoder) error + IsComposite() bool + ID() string + // TODO: maybe add a copy function because decoded TypeInfo can be shared by multiple slabs if not copied. 
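+	// Note: ID is used by inlined extra data encoding to deduplicate array and
+	// composite type info, so it should be unique per logical type.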
}
 
 type TypeInfoDecoder func(
@@ -32,3 +38,340 @@
 	TypeInfo,
 	error,
 )
+
+type ExtraData interface {
+	isExtraData() bool
+	Encode(enc *Encoder) error
+}
+
+// compositeExtraData is used for inlining composite values.
+// compositeExtraData includes hkeys and keys with map extra data
+// because hkeys and keys are the same in order and content for
+// all values with the same composite type and map seed.
+type compositeExtraData struct {
+	mapExtraData *MapExtraData
+	hkeys []Digest // hkeys is ordered by mapExtraData.Seed
+	keys []MapKey // keys is ordered by mapExtraData.Seed
+}
+
+var _ ExtraData = &compositeExtraData{}
+
+const compositeExtraDataLength = 3
+
+func (c *compositeExtraData) isExtraData() bool {
+	return true
+}
+
+func (c *compositeExtraData) Encode(enc *Encoder) error {
+	err := enc.CBOR.EncodeArrayHead(compositeExtraDataLength)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: map extra data
+	err = c.mapExtraData.Encode(enc)
+	if err != nil {
+		return err
+	}
+
+	// element 1: digests
+	totalDigestSize := len(c.hkeys) * digestSize
+
+	var digests []byte
+	if totalDigestSize <= len(enc.Scratch) {
+		digests = enc.Scratch[:totalDigestSize]
+	} else {
+		digests = make([]byte, totalDigestSize)
+	}
+
+	for i := 0; i < len(c.hkeys); i++ {
+		binary.BigEndian.PutUint64(digests[i*digestSize:], uint64(c.hkeys[i]))
+	}
+
+	err = enc.CBOR.EncodeBytes(digests)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: field names
+	err = enc.CBOR.EncodeArrayHead(uint64(len(c.keys)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	for _, key := range c.keys {
+		err = key.Encode(enc)
+		if err != nil {
+			return NewEncodingError(err)
+		}
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func newCompositeExtraData(
+	dec *cbor.StreamDecoder,
+	decodeTypeInfo TypeInfoDecoder,
+	decodeStorable StorableDecoder,
+) (*compositeExtraData, error) {
+
+	length, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if length != compositeExtraDataLength {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"composite extra data has invalid length %d, want %d",
+				length,
+				compositeExtraDataLength,
+			))
+	}
+
+	// element 0: map extra data
+	mapExtraData, err := newMapExtraData(dec, decodeTypeInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	// element 1: digests
+	digestBytes, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if len(digestBytes)%digestSize != 0 {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"decoding digests failed: number of bytes %d is not multiple of %d",
+				len(digestBytes),
+				digestSize))
+	}
+
+	digestCount := len(digestBytes) / digestSize
+
+	// element 2: keys
+	keyCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if keyCount != uint64(digestCount) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"decoding composite key failed: number of keys %d is different from number of digests %d",
+				keyCount,
+				digestCount))
+	}
+
+	hkeys := make([]Digest, digestCount)
+	for i := 0; i < digestCount; i++ {
+		hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:]))
+	}
+
+	keys := make([]MapKey, keyCount)
+	for i := uint64(0); i < keyCount; i++ {
+		// Decode composite key
+		key, err := decodeStorable(dec, SlabIDUndefined, nil)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") + } + keys[i] = key + } + + return &compositeExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil +} + +type compositeTypeID struct { + id string + fieldCount int +} + +type compositeTypeInfo struct { + index int + keys []MapKey +} + +type inlinedExtraData struct { + extraData []ExtraData + compositeTypes map[compositeTypeID]compositeTypeInfo + arrayTypes map[string]int +} + +func newInlinedExtraData() *inlinedExtraData { + return &inlinedExtraData{ + compositeTypes: make(map[compositeTypeID]compositeTypeInfo), + arrayTypes: make(map[string]int), + } +} + +// Encode encodes inlined extra data as CBOR array. +func (ied *inlinedExtraData) Encode(enc *Encoder) error { + err := enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData))) + if err != nil { + return NewEncodingError(err) + } + + var tagNum uint64 + + for _, extraData := range ied.extraData { + switch extraData.(type) { + case *ArrayExtraData: + tagNum = CBORTagInlinedArrayExtraData + + case *MapExtraData: + tagNum = CBORTagInlinedMapExtraData + + case *compositeExtraData: + tagNum = CBORTagInlinedCompositeExtraData + + default: + return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraData)) + } + + err = enc.CBOR.EncodeTagHead(tagNum) + if err != nil { + return NewEncodingError(err) + } + + err = extraData.Encode(enc) + if err != nil { + return err + } + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func newInlinedExtraDataFromData( + data []byte, + decMode cbor.DecMode, + decodeStorable StorableDecoder, + decodeTypeInfo TypeInfoDecoder, +) ([]ExtraData, []byte, error) { + + dec := decMode.NewByteStreamDecoder(data) + + count, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + if count == 0 { + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined extra data")) + } + + inlinedExtraData := make([]ExtraData, count) + for i := uint64(0); i < count; i++ { + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + switch tagNum { + case CBORTagInlinedArrayExtraData: + inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo) + if err != nil { + return nil, nil, err + } + + case CBORTagInlinedMapExtraData: + inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo) + if err != nil { + return nil, nil, err + } + + case CBORTagInlinedCompositeExtraData: + inlinedExtraData[i], err = newCompositeExtraData(dec, decodeTypeInfo, decodeStorable) + if err != nil { + return nil, nil, err + } + + default: + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: unsupported tag number %d", tagNum)) + } + } + + return inlinedExtraData, data[dec.NumBytesDecoded():], nil +} + +// addArrayExtraData returns index of deduplicated array extra data. +// Array extra data is deduplicated by array type info ID because array +// extra data only contains type info. +func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { + id := data.TypeInfo.ID() + index, exist := ied.arrayTypes[id] + if exist { + return index + } + + index = len(ied.extraData) + ied.extraData = append(ied.extraData, data) + ied.arrayTypes[id] = index + return index +} + +// addMapExtraData returns index of map extra data. 
+// Map extra data is not deduplicated because it also contains count and seed. +func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int { + index := len(ied.extraData) + ied.extraData = append(ied.extraData, data) + return index +} + +// addCompositeExtraData returns index of deduplicated composite extra data. +// Composite extra data is deduplicated by TypeInfo.ID() and number of fields, +// Composite fields can be removed but new fields can't be added, and existing field types can't be modified. +// Given this, composites with same type ID and same number of fields have the same fields. +// See https://developers.flow.com/cadence/language/contract-updatability#fields +func (ied *inlinedExtraData) addCompositeExtraData(data *MapExtraData, digests []Digest, keys []MapKey) int { + id := compositeTypeID{data.TypeInfo.ID(), int(data.Count)} + info, exist := ied.compositeTypes[id] + if exist { + return info.index + } + + compositeData := &compositeExtraData{ + mapExtraData: data, + hkeys: digests, + keys: keys, + } + + index := len(ied.extraData) + ied.extraData = append(ied.extraData, compositeData) + + ied.compositeTypes[id] = compositeTypeInfo{ + keys: keys, + index: index, + } + + return index +} + +// getCompositeTypeInfo returns index of composite type and cached keys. +// NOTE: use this function instead of addCompositeExtraData to check if +// composite type is already added to save some allocation. +func (ied *inlinedExtraData) getCompositeTypeInfo(t TypeInfo, fieldCount int) (int, []MapKey, bool) { + id := compositeTypeID{t.ID(), fieldCount} + info, exist := ied.compositeTypes[id] + if !exist { + return 0, nil, false + } + return info.index, info.keys, true +} + +func (ied *inlinedExtraData) empty() bool { + return len(ied.extraData) == 0 +} diff --git a/utils_test.go b/utils_test.go index a40a3599..90b7bda2 100644 --- a/utils_test.go +++ b/utils_test.go @@ -20,7 +20,9 @@ package atree import ( "flag" + "fmt" "math/rand" + "reflect" "testing" "time" @@ -91,6 +93,14 @@ type testTypeInfo struct { var _ TypeInfo = testTypeInfo{} +func (i testTypeInfo) IsComposite() bool { + return false +} + +func (i testTypeInfo) ID() string { + return fmt.Sprintf("uint64(%d)", i) +} + func (i testTypeInfo) Encode(enc *cbor.StreamEncoder) error { return enc.EncodeUint64(i.value) } @@ -100,13 +110,46 @@ func (i testTypeInfo) Equal(other TypeInfo) bool { return ok && i.value == otherTestTypeInfo.value } +const testCompositeTypeInfoTagNum = 246 + +type testCompositeTypeInfo struct { + value uint64 +} + +var _ TypeInfo = testCompositeTypeInfo{} + +func (i testCompositeTypeInfo) IsComposite() bool { + return true +} + +func (i testCompositeTypeInfo) ID() string { + return fmt.Sprintf("composite(%d)", i) +} + +func (i testCompositeTypeInfo) Encode(enc *cbor.StreamEncoder) error { + err := enc.EncodeTagHead(testCompositeTypeInfoTagNum) + if err != nil { + return err + } + return enc.EncodeUint64(i.value) +} + +func (i testCompositeTypeInfo) Equal(other TypeInfo) bool { + otherTestTypeInfo, ok := other.(testCompositeTypeInfo) + return ok && i.value == otherTestTypeInfo.value +} + func typeInfoComparator(a, b TypeInfo) bool { - x, ok := a.(testTypeInfo) - if !ok { + switch x := a.(type) { + case testTypeInfo: + return x.Equal(b) + + case testCompositeTypeInfo: + return x.Equal(b) + + default: return false } - y, ok := b.(testTypeInfo) - return ok && x.value == y.value } func newTestPersistentStorage(t testing.TB) *PersistentSlabStorage { @@ -323,21 +366,83 @@ func mapEqual(t *testing.T, tic 
TypeInfoComparator, a Value, b Value) { iterator1, err := m1.Iterator() require.NoError(t, err) - iterator2, err := m2.Iterator() - require.NoError(t, err) + if m1.Type().IsComposite() { + // Check element by key for composite type because + // composite fields can be rearranged to reuse seed and digests. - for { - key1, value1, err := iterator1.Next() - require.NoError(t, err) + for { + key1, value1, err := iterator1.Next() + require.NoError(t, err) + + if key1 == nil { + break + } + + iterator2, err := m2.Iterator() + require.NoError(t, err) + + var value2 Value + for { + key, value, err := iterator2.Next() + require.NoError(t, err) + require.NotNil(t, key) + + if reflect.DeepEqual(key, key1) { + value2 = value + break + } + } - key2, value2, err := iterator2.Next() + valueEqual(t, tic, value1, value2) + } + } else { + + iterator2, err := m2.Iterator() require.NoError(t, err) - valueEqual(t, tic, key1, key2) - valueEqual(t, tic, value1, value2) + for { + key1, value1, err := iterator1.Next() + require.NoError(t, err) - if key1 == nil || key2 == nil { - break + key2, value2, err := iterator2.Next() + require.NoError(t, err) + + valueEqual(t, tic, key1, key2) + valueEqual(t, tic, value1, value2) + + if key1 == nil || key2 == nil { + break + } } } } + +func valueIDToSlabID(vid ValueID) SlabID { + var id SlabID + copy(id.address[:], vid[:slabAddressSize]) + copy(id.index[:], vid[slabAddressSize:]) + return id +} + +func testInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) { + testInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID()) +} + +func testNotInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) { + testNotInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID()) +} + +func testInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) { + require.Equal(t, SlabIDUndefined, slabID) + + require.Equal(t, expectedAddress[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) +} + +func testNotInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) { + require.Equal(t, expectedAddress, slabID.address) + require.NotEqual(t, SlabIndexUndefined, slabID.index) + + require.Equal(t, slabID.address[:], valueID[:slabAddressSize]) + require.Equal(t, slabID.index[:], valueID[slabAddressSize:]) +} diff --git a/value.go b/value.go index 06ce3a5c..3c6327fc 100644 --- a/value.go +++ b/value.go @@ -25,3 +25,12 @@ type Value interface { type ValueComparator func(SlabStorage, Value, Storable) (bool, error) type StorableComparator func(Storable, Storable) bool + +type parentUpdater func() error + +// valueNotifier is an interface that allows child value to notify and update parent. +type valueNotifier interface { + Value + ValueID() ValueID + setParentUpdater(parentUpdater) +} From 6f25137a0ca8ff116cf37ddf64d995cb512a3bfd Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 17 Sep 2023 15:07:24 -0500 Subject: [PATCH 002/126] Check overwritten value in parentUpdater callback Currently, in parentUpdater callback, parent array/map resets same child value. Child value ID should match overwritten SlabIDStorable or Slab.SlabID(). This commit adds check to make sure same child value is being reset. 
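
In sketch form, the added check compares the child's value ID against the
identity of whatever storable the Set call overwrote (all names here come
from this commit; vid.equal is the helper added to storage.go below):

    switch x := existingValueStorable.(type) {
    case SlabIDStorable:
        if !vid.equal(SlabID(x)) {
            return NewFatalError(fmt.Errorf("overwritten storable %s does not match child value ID %s", SlabID(x), vid))
        }
    case Slab:
        if !vid.equal(x.SlabID()) {
            return NewFatalError(fmt.Errorf("overwritten slab %s does not match child value ID %s", x.SlabID(), vid))
        }
    case nil:
        return NewFatalError(fmt.Errorf("overwritten value is nil"))
    default:
        return NewFatalError(fmt.Errorf("overwritten value has unexpected type %T", existingValueStorable))
    }
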
--- array.go | 35 +++++++++++++++++++++++++++++++++-- map.go | 38 +++++++++++++++++++++++++++++++++++--- storage.go | 13 +++++++++++++ 3 files changed, 81 insertions(+), 5 deletions(-) diff --git a/array.go b/array.go index 03d35617..66f672f9 100644 --- a/array.go +++ b/array.go @@ -2725,8 +2725,39 @@ func (a *Array) setCallbackWithChild(i uint64, child Value) { return err } - if existingValueStorable == nil { - return NewFatalError(fmt.Errorf("failed to reset child value in parent updater callback because previous value is nil")) + // Verify overwritten storable has identical value ID. + + switch x := existingValueStorable.(type) { + case SlabIDStorable: + sid := SlabID(x) + if !vid.equal(sid) { + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", + sid, + vid)) + } + + case Slab: + sid := x.SlabID() + if !vid.equal(sid) { + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", + sid, + vid)) + } + + case nil: + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is nil")) + + default: + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is wrong type %T", + existingValueStorable)) } return nil diff --git a/map.go b/map.go index f9f6b596..4aa900e0 100644 --- a/map.go +++ b/map.go @@ -4523,6 +4523,8 @@ func (m *OrderedMap) setCallbackWithChild( return } + vid := c.ValueID() + c.setParentUpdater(func() error { // Set child value with parent map using same key. // Set() calls c.Storable() which returns inlined or not-inlined child storable. @@ -4531,10 +4533,40 @@ func (m *OrderedMap) setCallbackWithChild( return err } - if existingValueStorable == nil { - return NewFatalError(fmt.Errorf("failed to reset child value in parent updater callback because previous value is nil")) - } + // Verify overwritten storable has identical value ID. 
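+		// (A mismatch means the parent entry no longer refers to this child, so
+		// overwriting it here would corrupt the map.)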
+ + switch x := existingValueStorable.(type) { + case SlabIDStorable: + sid := SlabID(x) + if !vid.equal(sid) { + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", + sid, + vid)) + } + + case Slab: + sid := x.SlabID() + if !vid.equal(sid) { + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", + sid, + vid)) + } + + case nil: + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is nil")) + default: + return NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is wrong type %T", + existingValueStorable)) + } return nil }) } diff --git a/storage.go b/storage.go index 7deb5b60..71d1804b 100644 --- a/storage.go +++ b/storage.go @@ -41,6 +41,19 @@ func slabIDToValueID(sid SlabID) ValueID { return id } +func (vid ValueID) equal(sid SlabID) bool { + return bytes.Equal(vid[:8], sid.address[:]) && + bytes.Equal(vid[8:], sid.index[:]) +} + +func (vid ValueID) String() string { + return fmt.Sprintf( + "0x%x.%d", + binary.BigEndian.Uint64(vid[:8]), + binary.BigEndian.Uint64(vid[8:]), + ) +} + type ( Address [8]byte SlabIndex [8]byte From 399684a3e05fe9e7cacfe8f66d0c9477cd7b8bba Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 17 Sep 2023 21:52:51 -0500 Subject: [PATCH 003/126] Avoid writing parent on update to uninlinable child --- array.go | 20 +++++++++++--- map.go | 82 +++++++++++++++++++++++++++++++++----------------------- value.go | 6 +++-- 3 files changed, 69 insertions(+), 39 deletions(-) diff --git a/array.go b/array.go index 66f672f9..4ec8bdcf 100644 --- a/array.go +++ b/array.go @@ -183,7 +183,7 @@ type Array struct { } var _ Value = &Array{} -var _ valueNotifier = &Array{} +var _ mutableValueNotifier = &Array{} func (a *Array) Address() Address { return a.root.SlabID().address @@ -2699,8 +2699,8 @@ func (a *Array) setParentUpdater(f parentUpdater) { // setCallbackWithChild sets up callback function with child value so // parent array a can be notified when child value is modified. -func (a *Array) setCallbackWithChild(i uint64, child Value) { - c, ok := child.(valueNotifier) +func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64) { + c, ok := child.(mutableValueNotifier) if !ok { return } @@ -2712,6 +2712,14 @@ func (a *Array) setCallbackWithChild(i uint64, child Value) { c.setParentUpdater(func() error { + // Avoid unnecessary write operation on parent container. + // Child value was stored as SlabIDStorable (not inlined) in parent container, + // and continues to be stored as SlabIDStorable (still not inlinable), + // so no update to parent container is needed. + if !c.Inlined() && !c.Inlinable(maxInlineSize) { + return nil + } + // Get latest index by child value ID. 
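+		// (The child's position can shift as other elements are inserted into or
+		// removed from the array, so the index is looked up again on each callback.)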
index, exist := a.getIndexByValueID(vid) if !exist { @@ -2785,7 +2793,7 @@ func (a *Array) Get(i uint64) (Value, error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } - a.setCallbackWithChild(i, v) + a.setCallbackWithChild(i, v, maxInlineArrayElementSize) return v, nil } @@ -2987,6 +2995,10 @@ func (a *Array) Inlined() bool { return a.root.Inlined() } +func (a *Array) Inlinable(maxInlineSize uint64) bool { + return a.root.Inlinable(maxInlineSize) +} + // Storable returns array a as either: // - SlabIDStorable, or // - inlined data slab storable diff --git a/map.go b/map.go index 4aa900e0..42dbc4da 100644 --- a/map.go +++ b/map.go @@ -116,7 +116,7 @@ type element interface { hkey Digest, comparator ValueComparator, key Value, - ) (MapValue, error) + ) (MapKey, MapValue, error) // Set returns updated element, which may be a different type of element because of hash collision. Set( @@ -168,7 +168,7 @@ type elementGroup interface { type elements interface { fmt.Stringer - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) + Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) @@ -286,7 +286,7 @@ var _ MapSlab = &MapMetaDataSlab{} type MapSlab interface { Slab - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) + Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) @@ -319,7 +319,7 @@ type OrderedMap struct { } var _ Value = &OrderedMap{} -var _ valueNotifier = &OrderedMap{} +var _ mutableValueNotifier = &OrderedMap{} const mapExtraDataLength = 3 @@ -537,16 +537,16 @@ func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) return nil } -func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { equal, err := comparator(storage, key, e.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return e.value, nil + return e.key, e.value, nil } - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } // Set updates value if key matches, otherwise returns inlineCollisionGroup with existing and new elements. 
@@ -709,12 +709,12 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtr return nil } -func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) @@ -898,17 +898,17 @@ func (e *externalCollisionGroup) Encode(enc *Encoder, _ *inlinedExtraData) error return nil } -func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err + return nil, nil, err } // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) @@ -1303,10 +1303,10 @@ func (e *hkeyElements) EncodeCompositeValues(enc *Encoder, orderedKeys []MapKey, return nil } -func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { if level >= digester.Levels() { - return nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } // binary search by hkey @@ -1328,7 +1328,7 @@ func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, h // No matching hkey if equalIndex == -1 { - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } elem := e.elems[equalIndex] @@ -1449,7 +1449,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild // Check if existing collision count reached MaxCollisionLimitPerDigest if collisionCount >= MaxCollisionLimitPerDigest { // Enforce collision limit on inserts and ignore updates. 
- _, err = elem.Get(storage, digester, level, hkey, comparator, key) + _, _, err = elem.Get(storage, digester, level, hkey, comparator, key) if err != nil { var knfe *KeyNotFoundError if errors.As(err, &knfe) { @@ -1935,10 +1935,10 @@ func (e *singleElements) EncodeCompositeValues(_ *Encoder, _ []MapKey, _ *inline return NewEncodingError(fmt.Errorf("singleElements can't encoded as composite value")) } -func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { if level != digester.Levels() { - return nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search by key @@ -1946,14 +1946,14 @@ func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return elem.value, nil + return elem.key, elem.value, nil } } - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, _ HashInputProvider, key Value, value Value) (MapValue, error) { @@ -3667,7 +3667,7 @@ func (m *MapMetaDataSlab) ChildStorables() []Storable { return childIDs } -func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { ans := -1 i, j := 0, len(m.childrenHeaders) @@ -3682,7 +3682,7 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint } if ans == -1 { - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } childHeaderIndex := ans @@ -3692,7 +3692,7 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint child, err := getMapSlab(storage, childID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err + return nil, nil, err } // Don't need to wrap error as external error because err is already categorized by MapSlab.Get(). 
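
The hunks below apply the same idea to OrderedMap: setCallbackWithChild gains
the early-return guard for children that are not inlined and cannot be
inlined, and Get now surfaces the key's storable so the child's inline budget
can be derived from the encoded key it is stored under. In sketch form (all
names here come from this patch):

    keyStorable, valueStorable, err := m.get(comparator, hip, key)
    if err != nil {
        return nil, err
    }

    v, err := valueStorable.StoredValue(m.Storage)
    if err != nil {
        return nil, err
    }

    // A larger stored key leaves less room for an inlined value in the same slab.
    maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize()))
    m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize)
    return v, nil
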
@@ -4506,6 +4506,10 @@ func (m *OrderedMap) Inlined() bool { return m.root.Inlined() } +func (m *OrderedMap) Inlinable(maxInlineSize uint64) bool { + return m.root.Inlinable(maxInlineSize) +} + func (m *OrderedMap) setParentUpdater(f parentUpdater) { m.parentUpdater = f } @@ -4517,8 +4521,9 @@ func (m *OrderedMap) setCallbackWithChild( hip HashInputProvider, key Value, child Value, + maxInlineSize uint64, ) { - c, ok := child.(valueNotifier) + c, ok := child.(mutableValueNotifier) if !ok { return } @@ -4526,6 +4531,15 @@ func (m *OrderedMap) setCallbackWithChild( vid := c.ValueID() c.setParentUpdater(func() error { + + // Avoid unnecessary write operation on parent container. + // Child value was stored as SlabIDStorable (not inlined) in parent container, + // and continues to be stored as SlabIDStorable (still not inlinable), + // so no update to parent container is needed. + if !c.Inlined() && !c.Inlinable(maxInlineSize) { + return nil + } + // Set child value with parent map using same key. // Set() calls c.Storable() which returns inlined or not-inlined child storable. existingValueStorable, err := m.Set(comparator, hip, key, c) @@ -4580,7 +4594,7 @@ func (m *OrderedMap) notifyParentIfNeeded() error { } func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key Value) (bool, error) { - _, err := m.get(comparator, hip, key) + _, _, err := m.get(comparator, hip, key) if err != nil { var knf *KeyNotFoundError if errors.As(err, &knf) { @@ -4594,29 +4608,31 @@ func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key func (m *OrderedMap) Get(comparator ValueComparator, hip HashInputProvider, key Value) (Value, error) { - storable, err := m.get(comparator, hip, key) + keyStorable, valueStorable, err := m.get(comparator, hip, key) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Get(). return nil, err } - v, err := storable.StoredValue(m.Storage) + v, err := valueStorable.StoredValue(m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } - m.setCallbackWithChild(comparator, hip, key, v) + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + + m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize) return v, nil } -func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, error) { +func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { keyDigest, err := m.digesterBuilder.Digest(hip, key) if err != nil { // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester") } defer putDigester(keyDigest) @@ -4625,7 +4641,7 @@ func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key hkey, err := keyDigest.Digest(level) if err != nil { // Wrap err as external error (if needed) because err is returned by Digesert interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) } // Don't need to wrap error as external error because err is already categorized by MapSlab.Get(). diff --git a/value.go b/value.go index 3c6327fc..ec590c0c 100644 --- a/value.go +++ b/value.go @@ -28,9 +28,11 @@ type StorableComparator func(Storable, Storable) bool type parentUpdater func() error -// valueNotifier is an interface that allows child value to notify and update parent. -type valueNotifier interface { +// mutableValueNotifier is an interface that allows mutable child value to notify and update parent. +type mutableValueNotifier interface { Value ValueID() ValueID setParentUpdater(parentUpdater) + Inlined() bool + Inlinable(uint64) bool } From c178a72a6d9c28ba3846f2df06b27ac88ec1dc3b Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:17:55 -0500 Subject: [PATCH 004/126] Update comments about encoding format --- array.go | 28 ++++++++-------------------- map.go | 26 +++++++------------------- 2 files changed, 15 insertions(+), 39 deletions(-) diff --git a/array.go b/array.go index 4ec8bdcf..7d625a6d 100644 --- a/array.go +++ b/array.go @@ -419,17 +419,11 @@ func newArrayDataSlabFromDataV0( // newArrayDataSlabFromDataV1 decodes data in version 1: // -// Root DataSlab Header: -// -// +-------------------------------+------------+---------------------------------+ -// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | -// +-------------------------------+------------+---------------------------------+ -// -// Non-root DataSlab Header: +// DataSlab Header: // -// +-------------------------------+---------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | -// +-------------------------------+---------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // @@ -713,17 +707,11 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedEx // Encode encodes this array data slab to the given encoder. 
// -// Root DataSlab Header: -// -// +-------------------------------+------------+---------------------------------+ -// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | -// +-------------------------------+------------+---------------------------------+ -// -// Non-root DataSlab Header: +// DataSlab Header: // -// +-------------------------------+---------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | -// +-------------------------------+---------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // diff --git a/map.go b/map.go index 42dbc4da..243c51d5 100644 --- a/map.go +++ b/map.go @@ -2268,17 +2268,11 @@ func newMapDataSlabFromDataV0( // newMapDataSlabFromDataV1 decodes data in version 1: // -// Root DataSlab Header: -// -// +-------------------------------+------------+---------------------------------+ -// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | -// +-------------------------------+------------+---------------------------------+ -// -// Non-root DataSlab Header: +// DataSlab Header: // -// +-------------------------------+---------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | -// +-------------------------------+---------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // @@ -2610,15 +2604,9 @@ func DecodeInlinedMapStorable( // // Root DataSlab Header: // -// +-------------------------------+------------+---------------------------------+ -// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | -// +-------------------------------+------------+---------------------------------+ -// -// Non-root DataSlab Header: -// -// +-------------------------------+---------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | -// +-------------------------------+---------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // From efc46e676d0874cad1c171e37f95f185077802aa Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:28:36 -0500 Subject: [PATCH 
005/126] Update comments about encoding format

---
 array.go | 12 ++++++------
 map.go   | 12 ++++++------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/array.go b/array.go
index 7d625a6d..417b4241 100644
--- a/array.go
+++ b/array.go
@@ -534,9 +534,9 @@ func newArrayDataSlabFromDataV1(
 // version 1 with CBOR tag having tag number CBORTagInlinedArray, and tag content
 // as 3-element array:
 //
-// - index of inlined extra data
-// - value ID index
-// - CBOR array of elements
+// +------------------+----------------+----------+
+// | extra data index | value ID index | elements |
+// +------------------+----------------+----------+
 //
 // NOTE: This function doesn't decode tag number because tag number is decoded
 // in the caller and decoder only contains tag content.
@@ -641,9 +641,9 @@ func DecodeInlinedArrayStorable(
 // version 1 with CBOR tag having tag number CBORTagInlinedArray,
 // and tag content as 3-element array:
 //
-// - index of inlined extra data
-// - value ID index
-// - CBOR array of elements
+// +------------------+----------------+----------+
+// | extra data index | value ID index | elements |
+// +------------------+----------------+----------+
 func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
 	if a.extraData == nil {
 		return NewEncodingError(
diff --git a/map.go b/map.go
index 243c51d5..a9145dbf 100644
--- a/map.go
+++ b/map.go
@@ -2507,9 +2507,9 @@ func DecodeInlinedCompositeStorable(
 // version 1 with CBOR tag having tag number CBORTagInlinedMap, and tag content
 // as 3-element array:
 //
-// - index of inlined extra data
-// - value ID index
-// - CBOR array of elements
+// +------------------+----------------+----------+
+// | extra data index | value ID index | elements |
+// +------------------+----------------+----------+
 //
 // NOTE: This function doesn't decode tag number because tag number is decoded
 // in the caller and decoder only contains tag content.
@@ -2741,9 +2741,9 @@ func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *inlinedExtraDat
 // version 1 with CBOR tag having tag number CBORTagInlinedMap,
 // and tag content as 3-element array:
 //
-// - index of inlined extra data
-// - value ID index
-// - CBOR array of elements
+// +------------------+----------------+----------+
+// | extra data index | value ID index | elements |
+// +------------------+----------------+----------+
 func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
 	if m.extraData == nil {
 		return NewEncodingError(

From 1664e364e19af0c84a77f54acb7c8ee9a11f25ab Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 20 Sep 2023 13:41:28 -0500
Subject: [PATCH 006/126] Add notification in Set and Insert in Array

This commit adds a callback notification to child elements that are set
or inserted into an array, so if a child element is modified after
Array.Set() or Array.Insert(), the change is properly reflected in the
parent array.

This commit doesn't appear to be needed by the current version of
Cadence, but it helps reduce Atree's dependence on an implementation
detail of Cadence.
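To illustrate the behavior this enables, here is a minimal sketch in the
style of this repo's tests. It is not part of the patch itself, and it
assumes the test helpers newTestPersistentStorage, testTypeInfo, and
Uint64Value from the *_test.go files:

	// Mutations made to a child after it is inserted into a parent
	// are now reflected in the parent automatically.
	storage := newTestPersistentStorage(t)
	address := Address{1, 2, 3, 4, 5, 6, 7, 8}

	parentArray, err := NewArray(storage, address, testTypeInfo{42})
	require.NoError(t, err)

	childArray, err := NewArray(storage, address, testTypeInfo{43})
	require.NoError(t, err)

	// Insert registers the parentUpdater callback in childArray.
	err = parentArray.Insert(0, childArray)
	require.NoError(t, err)

	// Appending to childArray triggers the callback, so parentArray
	// re-stores the child storable (inlined or as SlabIDStorable,
	// depending on the child's current encoded size).
	err = childArray.Append(Uint64Value(1))
	require.NoError(t, err)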
---
 array.go | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/array.go b/array.go
index 417b4241..21042883 100644
--- a/array.go
+++ b/array.go
@@ -2781,6 +2781,8 @@ func (a *Array) Get(i uint64) (Value, error) {
 		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
 	}
 
+	// Set up notification callback in child value so
+	// when child value is modified, parent a is notified.
 	a.setCallbackWithChild(i, v, maxInlineArrayElementSize)
 
 	return v, nil
@@ -2818,6 +2820,10 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) {
 		return nil, err
 	}
 
+	// Set up notification callback in child value so
+	// when child value is modified, parent a is notified.
+	a.setCallbackWithChild(index, value, maxInlineArrayElementSize)
+
 	return existingStorable, nil
 }
 
@@ -2840,7 +2846,16 @@ func (a *Array) Insert(index uint64, value Value) error {
 
 	a.incrementIndexFrom(index)
 
-	return a.notifyParentIfNeeded()
+	err = a.notifyParentIfNeeded()
+	if err != nil {
+		return err
+	}
+
+	// Set up notification callback in child value so
+	// when child value is modified, parent a is notified.
+	a.setCallbackWithChild(index, value, maxInlineArrayElementSize)
+
+	return nil
 }
 
 func (a *Array) Remove(index uint64) (Storable, error) {

From feb6adf88affa869ebdd5641dedd97ae89f700fc Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 20 Sep 2023 15:10:20 -0500
Subject: [PATCH 007/126] Add notification in Set in OrderedMap

This commit adds a callback notification to the child element set in a
map, so if the child element is modified after OrderedMap.Set(), the
change is properly reflected in the parent map.

This commit doesn't appear to be needed by the current version of
Cadence, but it helps reduce Atree's dependence on an implementation
detail of Cadence.

---
 map.go | 247 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 179 insertions(+), 68 deletions(-)

diff --git a/map.go b/map.go
index a9145dbf..5239be75 100644
--- a/map.go
+++ b/map.go
@@ -130,7 +130,7 @@ type element interface {
 		hip HashInputProvider,
 		key Value,
 		value Value,
-	) (newElem element, existingValue MapValue, err error)
+	) (newElem element, keyStorable MapKey, existingValue MapValue, err error)
 
 	// Remove returns matched key, value, and updated element.
 	// Updated element may be nil, modified, or a different type of element.
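Note on the signature change above: the extra MapKey return value is
threaded from element through elements and MapSlab so that OrderedMap can
size the child-notification callback from the key's encoding. A condensed
sketch of the eventual call site in OrderedMap.Set, using only names that
appear in this series (the surrounding split and inline logic is elided):

	keyStorable, existingValue, err := m.root.Set(
		m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value)
	if err != nil {
		return nil, err
	}

	// A map value's inline-size budget shrinks as its key's encoding
	// grows, so the callback needs the key storable's byte size.
	maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize()))
	m.setCallbackWithChild(comparator, hip, key, value, maxInlineSize)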
@@ -168,9 +168,36 @@ type elementGroup interface { type elements interface { fmt.Stringer - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) - Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) - Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) + Get( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) + + Set( + storage SlabStorage, + address Address, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, + ) (MapKey, MapValue, error) + + Remove( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) Merge(elements) error Split() (elements, elements, error) @@ -286,9 +313,35 @@ var _ MapSlab = &MapMetaDataSlab{} type MapSlab interface { Slab - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) - Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) - Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) + Get( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) + + Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, + ) (MapKey, MapValue, error) + + Remove( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) IsData() bool @@ -565,12 +618,12 @@ func (e *singleElement) Set( hip HashInputProvider, key Value, value Value, -) (element, MapValue, error) { +) (element, MapKey, MapValue, error) { equal, err := comparator(storage, key, e.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } // Key matches, overwrite existing value @@ -580,12 +633,12 @@ func (e *singleElement) Set( valueStorable, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(e.key.ByteSize()))) if err != nil { // Wrap err as external error (if needed) because err is returned by Value interface. 
- return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") } e.value = valueStorable e.size = singleElementPrefixSize + e.key.ByteSize() + e.value.ByteSize() - return e, existingValue, nil + return e, e.key, existingValue, nil } // Hash collision detected @@ -609,20 +662,20 @@ func (e *singleElement) Set( kv, err := e.key.StoredValue(storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's stored value") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's stored value") } existingKeyDigest, err := b.Digest(hip, kv) if err != nil { // Wrap err as external error (if needed) because err is returned by DigestBuilder interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's digester") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's digester") } defer putDigester(existingKeyDigest) d, err := existingKeyDigest.Digest(level + 1) if err != nil { // Wrap err as external error (if needed) because err is returned by Digester interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get key's digest at level %d", level+1)) + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get key's digest at level %d", level+1)) } group := &inlineCollisionGroup{ @@ -734,19 +787,19 @@ func (e *inlineCollisionGroup) Set( hip HashInputProvider, key Value, value Value, -) (element, MapValue, error) { +) (element, MapKey, MapValue, error) { // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - existingValue, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingValue, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). - return nil, nil, err + return nil, nil, nil, err } if level == 1 { @@ -757,7 +810,7 @@ func (e *inlineCollisionGroup) Set( id, err := storage.GenerateSlabID(address) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded( + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded( err, fmt.Sprintf("failed to generate slab ID for address 0x%x", address)) } @@ -777,18 +830,18 @@ func (e *inlineCollisionGroup) Set( err = storage.Store(id, slab) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) } // Create and return externalCollisionGroup (wrapper of newly created MapDataSlab) return &externalCollisionGroup{ slabID: id, size: externalCollisionGroupPrefixSize + SlabIDStorable(id).ByteSize(), - }, existingValue, nil + }, keyStorable, existingValue, nil } } - return e, existingValue, nil + return e, keyStorable, existingValue, nil } // Remove returns key, value, and updated element if key is found. @@ -917,26 +970,37 @@ func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, lev return slab.Get(storage, digester, level, hkey, comparator, key) } -func (e *externalCollisionGroup) Set(storage SlabStorage, _ Address, b DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (element, MapValue, error) { +func (e *externalCollisionGroup) Set( + storage SlabStorage, + _ Address, + b DigesterBuilder, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (element, MapKey, MapValue, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, nil, err + return nil, nil, nil, err } // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - existingValue, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingValue, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). - return nil, nil, err + return nil, nil, nil, err } - return e, existingValue, nil + return e, keyStorable, existingValue, nil } // Remove returns key, value, and updated element if key is found. 
@@ -1337,11 +1401,22 @@ func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, h return elem.Get(storage, digester, level, hkey, comparator, key) } -func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (e *hkeyElements) Set( + storage SlabStorage, + address Address, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { // Check hkeys are not empty if level >= digester.Levels() { - return nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } if len(e.hkeys) == 0 { @@ -1350,7 +1425,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = []Digest{hkey} @@ -1359,7 +1434,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } if hkey < e.hkeys[0] { @@ -1368,7 +1443,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = append(e.hkeys, Digest(0)) @@ -1381,7 +1456,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } if hkey > e.hkeys[len(e.hkeys)-1] { @@ -1390,7 +1465,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = append(e.hkeys, hkey) @@ -1399,7 +1474,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } equalIndex := -1 // first index that m.hkeys[h] == hkey @@ -1434,10 +1509,10 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild elementCount, err := elem.Count(storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Count(). - return nil, err + return nil, nil, err } if elementCount == 0 { - return nil, NewMapElementCountError("expect element count > 0, got element count == 0") + return nil, nil, NewMapElementCountError("expect element count > 0, got element count == 0") } // collisionCount is elementCount-1 because: @@ -1455,16 +1530,16 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild if errors.As(err, &knfe) { // Don't allow any more collisions for a digest that // already reached MaxCollisionLimitPerDigest. 
- return nil, NewCollisionLimitError(MaxCollisionLimitPerDigest) + return nil, nil, NewCollisionLimitError(MaxCollisionLimitPerDigest) } } } } - elem, existingValue, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + elem, keyStorable, existingValue, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). - return nil, err + return nil, nil, err } e.elems[equalIndex] = elem @@ -1478,7 +1553,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild } e.size = size - return existingValue, nil + return keyStorable, existingValue, nil } // No matching hkey @@ -1486,7 +1561,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } // insert into sorted hkeys @@ -1501,7 +1576,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } func (e *hkeyElements) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -1956,10 +2031,21 @@ func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, return nil, nil, NewKeyNotFoundError(key) } -func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, _ HashInputProvider, key Value, value Value) (MapValue, error) { +func (e *singleElements) Set( + storage SlabStorage, + address Address, + _ DigesterBuilder, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + _ HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { if level != digester.Levels() { - return nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search key and update value @@ -1969,16 +2055,17 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { + existingKey := elem.key existingValue := elem.value vs, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(elem.key.ByteSize()))) if err != nil { // Wrap err as external error (if needed) because err is returned by Value interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") } elem.value = vs @@ -1993,7 +2080,7 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui } e.size = size - return existingValue, nil + return existingKey, existingValue, nil } } @@ -2001,12 +2088,12 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.elems = append(e.elems, newElem) e.size += newElem.size - return nil, nil + return newElem.key, nil, nil } func (e *singleElements) Remove(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -3016,12 +3103,22 @@ func (m *MapDataSlab) StoredValue(storage SlabStorage) (Value, error) { }, nil } -func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (m *MapDataSlab) Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { - existingValue, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingValue, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). - return nil, err + return nil, nil, err } // Adjust header's first key @@ -3035,11 +3132,11 @@ func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Diges err := storage.Store(m.header.slabID, m) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) } } - return existingValue, nil + return keyStorable, existingValue, nil } func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -3687,7 +3784,17 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint return child.Get(storage, digester, level, hkey, comparator, key) } -func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (m *MapMetaDataSlab) Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { ans := 0 i, j := 0, len(m.childrenHeaders) @@ -3708,13 +3815,13 @@ func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester D child, err := getMapSlab(storage, childID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err + return nil, nil, err } - existingValue, err := child.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingValue, err := child.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). - return nil, err + return nil, nil, err } m.childrenHeaders[childHeaderIndex] = child.Header() @@ -3728,26 +3835,26 @@ func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester D err := m.SplitChildSlab(storage, child, childHeaderIndex) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.SplitChildSlab(). - return nil, err + return nil, nil, err } - return existingValue, nil + return keyStorable, existingValue, nil } if underflowSize, underflow := child.IsUnderflow(); underflow { err := m.MergeOrRebalanceChildSlab(storage, child, childHeaderIndex, underflowSize) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.MergeOrRebalanceChildSlab(). - return nil, err + return nil, nil, err } - return existingValue, nil + return keyStorable, existingValue, nil } err = storage.Store(m.header.slabID, m) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) } - return existingValue, nil + return keyStorable, existingValue, nil } func (m *MapMetaDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -4653,7 +4760,7 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) } - existingValue, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value) + keyStorable, existingValue, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). return nil, err @@ -4689,6 +4796,10 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key return nil, err } + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + + m.setCallbackWithChild(comparator, hip, key, value, maxInlineSize) + return existingValue, nil } @@ -5455,7 +5566,7 @@ func NewMapFromBatchData( prevElem := elements.elems[lastElementIndex] prevElemSize := prevElem.Size() - elem, existingValue, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value) + elem, _, existingValue, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). return nil, err From 0d57f80771b790d87cad6ae125682dc028da4662 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 12:23:21 -0500 Subject: [PATCH 008/126] Deduplicate composite by type ID and field names Currently, deduplication feature in PR 342 (not merged yet) does not support Cadence attachments. Add support for Cadence attachments and other future use cases by using type ID and sorted field names instead of field count. While at it, also refactor encoding composite type to reduce traversal and type assertion. --- map.go | 197 ++++++++++++++++++---------------------- map_test.go | 230 +++++++++++++++++++++++++++++++++++++++++++++++ storable.go | 11 ++- storable_test.go | 12 +++ typeinfo.go | 107 +++++++++++++++------- 5 files changed, 415 insertions(+), 142 deletions(-) diff --git a/map.go b/map.go index 5239be75..898db44d 100644 --- a/map.go +++ b/map.go @@ -211,7 +211,6 @@ type elements interface { Element(int) (element, error) Encode(*Encoder, *inlinedExtraData) error - EncodeCompositeValues(*Encoder, []MapKey, *inlinedExtraData) error hasPointer() bool @@ -1303,70 +1302,6 @@ func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) e return nil } -// EncodeCompositeValues encodes hkeyElements as an array of values ordered by orderedKeys. -// Level is not encoded because it is always 0. Digests are not encoded because -// they are encoded with composite keys in the composite extra data section. 
-func (e *hkeyElements) EncodeCompositeValues(enc *Encoder, orderedKeys []MapKey, inlinedTypeInfo *inlinedExtraData) error { - if e.level != 0 { - return NewEncodingError(fmt.Errorf("hash level must be 0 to be encoded as composite, got %d", e.level)) - } - - if len(e.elems) != len(orderedKeys) { - return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in composite extra data %d", len(e.elems), len(orderedKeys))) - } - - var err error - - err = enc.CBOR.EncodeArrayHead(uint64(len(orderedKeys))) - if err != nil { - return NewEncodingError(err) - } - - keyIndexes := make([]int, len(e.elems)) - for i := 0; i < len(e.elems); i++ { - keyIndexes[i] = i - } - - // Encode values in the same order as orderedKeys. - for i, k := range orderedKeys { - key, ok := k.(EquatableStorable) - if !ok { - return NewEncodingError(fmt.Errorf("composite keys must be implement EquableStorable")) - } - - found := false - for j := i; j < len(keyIndexes); j++ { - index := keyIndexes[j] - se, ok := e.elems[index].(*singleElement) - if !ok { - return NewEncodingError(fmt.Errorf("composite element must not have collision")) - } - if key.Equal(se.key) { - found = true - keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i] - - err = encodeStorableAsElement(enc, se.value, inlinedTypeInfo) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by encodeStorable(). - return err - } - - break - } - } - if !found { - return NewEncodingError(fmt.Errorf("failed to find key %v", k)) - } - } - - err = enc.CBOR.Flush() - if err != nil { - return NewEncodingError(err) - } - - return nil -} - func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { if level >= digester.Levels() { @@ -2006,10 +1941,6 @@ func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) return nil } -func (e *singleElements) EncodeCompositeValues(_ *Encoder, _ []MapKey, _ *inlinedExtraData) error { - return NewEncodingError(fmt.Errorf("singleElements can't encoded as composite value")) -} - func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { if level != digester.Levels() { @@ -2842,8 +2773,8 @@ func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtr fmt.Errorf("failed to encode standalone map data slab as inlined")) } - if m.canBeEncodedAsComposite() { - return m.encodeAsInlinedComposite(enc, inlinedTypeInfo) + if hkeys, keys, values, ok := m.canBeEncodedAsComposite(); ok { + return encodeAsInlinedComposite(enc, m.header.slabID, m.extraData, hkeys, keys, values, inlinedTypeInfo) } return m.encodeAsInlinedMap(enc, inlinedTypeInfo) @@ -2901,35 +2832,21 @@ func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedE return nil } -func (m *MapDataSlab) encodeAsInlinedComposite(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { - - // Composite extra data is deduplicated by TypeInfo.ID() and number of fields, - // Composite fields can be removed but new fields can't be added, and existing field types can't be modified. - // Given this, composites with same type ID and same number of fields have the same fields. 
- // See https://developers.flow.com/cadence/language/contract-updatability#fields - - extraDataIndex, orderedKeys, exist := inlinedTypeInfo.getCompositeTypeInfo(m.extraData.TypeInfo, int(m.extraData.Count)) - - if !exist { - elements, ok := m.elements.(*hkeyElements) - if !ok { - // This should never happen because canBeEncodedAsComposite() - // returns false for map containing any collision elements. - return NewEncodingError(fmt.Errorf("singleElements can't be encoded as composite elements")) - } +// encodeAsInlinedComposite encodes hkeys, keys, and values as inlined composite value. +func encodeAsInlinedComposite( + enc *Encoder, + slabID SlabID, + extraData *MapExtraData, + hkeys []Digest, + keys []ComparableStorable, + values []Storable, + inlinedTypeInfo *inlinedExtraData, +) error { - orderedKeys = make([]MapKey, len(elements.elems)) - for i, e := range elements.elems { - e, ok := e.(*singleElement) - if !ok { - // This should never happen because canBeEncodedAsComposite() - // returns false for map containing any collision elements. - return NewEncodingError(fmt.Errorf("non-singleElement can't be encoded as composite elements")) - } - orderedKeys[i] = e.key - } + extraDataIndex, cachedKeys := inlinedTypeInfo.addCompositeExtraData(extraData, hkeys, keys) - extraDataIndex = inlinedTypeInfo.addCompositeExtraData(m.extraData, elements.hkeys, orderedKeys) + if len(keys) != len(cachedKeys) { + return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached composite type %d", len(keys), len(cachedKeys))) } if extraDataIndex > 255 { @@ -2961,13 +2878,13 @@ func (m *MapDataSlab) encodeAsInlinedComposite(enc *Encoder, inlinedTypeInfo *in } // element 1: slab id - err = enc.CBOR.EncodeBytes(m.header.slabID.index[:]) + err = enc.CBOR.EncodeBytes(slabID.index[:]) if err != nil { return NewEncodingError(err) } - // element 2: map elements - err = m.elements.EncodeCompositeValues(enc, orderedKeys, inlinedTypeInfo) + // element 2: composite values in the order of cachedKeys + err = encodeCompositeValues(enc, cachedKeys, keys, values, inlinedTypeInfo) if err != nil { return NewEncodingError(err) } @@ -2980,38 +2897,100 @@ func (m *MapDataSlab) encodeAsInlinedComposite(enc *Encoder, inlinedTypeInfo *in return nil } +// encodeCompositeValues encodes composite values as an array of values ordered by cachedKeys. +func encodeCompositeValues( + enc *Encoder, + cachedKeys []ComparableStorable, + keys []ComparableStorable, + values []Storable, + inlinedTypeInfo *inlinedExtraData, +) error { + + var err error + + err = enc.CBOR.EncodeArrayHead(uint64(len(cachedKeys))) + if err != nil { + return NewEncodingError(err) + } + + keyIndexes := make([]int, len(keys)) + for i := 0; i < len(keys); i++ { + keyIndexes[i] = i + } + + // Encode values in the same order as cachedKeys. + for i, cachedKey := range cachedKeys { + found := false + for j := i; j < len(keyIndexes); j++ { + index := keyIndexes[j] + key := keys[index] + + if cachedKey.Equal(key) { + found = true + keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i] + + err = encodeStorableAsElement(enc, values[index], inlinedTypeInfo) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by encodeStorable(). 
+ return err + } + + break + } + } + if !found { + return NewEncodingError(fmt.Errorf("failed to find key %v", cachedKey)) + } + } + + return nil +} + // canBeEncodedAsComposite returns true if: // - map data slab is inlined // - map is composite type // - no collision elements // - keys are stored inline (not in a separate slab) -func (m *MapDataSlab) canBeEncodedAsComposite() bool { +func (m *MapDataSlab) canBeEncodedAsComposite() ([]Digest, []ComparableStorable, []Storable, bool) { if !m.inlined { - return false + return nil, nil, nil, false } if !m.extraData.TypeInfo.IsComposite() { - return false + return nil, nil, nil, false } elements, ok := m.elements.(*hkeyElements) if !ok { - return false + return nil, nil, nil, false } - for _, e := range elements.elems { + keys := make([]ComparableStorable, m.extraData.Count) + values := make([]Storable, m.extraData.Count) + + for i, e := range elements.elems { se, ok := e.(*singleElement) if !ok { // Has collision element - return false + return nil, nil, nil, false } + if _, ok = se.key.(SlabIDStorable); ok { // Key is stored in a separate slab - return false + return nil, nil, nil, false + } + + key, ok := se.key.(ComparableStorable) + if !ok { + // Key can't be compared (sorted) + return nil, nil, nil, false } + + keys[i] = key + values[i] = se.value } - return true + return elements.hkeys, keys, values, true } func (m *MapDataSlab) hasPointer() bool { diff --git a/map_test.go b/map_test.go index 6d4cc71f..c4bcfc7b 100644 --- a/map_test.go +++ b/map_test.go @@ -6833,6 +6833,236 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) + t.Run("same composite with different fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 3 + keyValues := make(map[Value]Value, mapSize) + + for i := uint64(0); i < mapSize; i++ { + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + // Insert first element "uuid" to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, NewStringValue("uuid"), Uint64Value(i)) + require.NoError(t, err) + require.Nil(t, existingStorable) + + // Insert second element to child map (second element is different) + switch i % 3 { + case 0: + existingStorable, err = childMap.Set(compare, hashInputProvider, NewStringValue("a"), Uint64Value(i*2)) + case 1: + existingStorable, err = childMap.Set(compare, hashInputProvider, NewStringValue("b"), Uint64Value(i*2)) + case 2: + existingStorable, err = childMap.Set(compare, hashInputProvider, NewStringValue("c"), Uint64Value(i*2)) + } + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id 
+ expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 3 + 0x03, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 3 inlined slab extra data + 0x83, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + + // element 1 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa, + // composite digests + 0x50, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + 0x82, 0x41, 0xee, 0xef, 0xc7, 0xb3, 0x2f, 0x28, + // composite keys ["uuid", "b"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x62, + + // element 2 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // composite digests + 0x50, + 0x5a, 0x98, 0x80, 0xf4, 0xa6, 0x52, 0x9e, 0x2d, + 0x6d, 0x8a, 0x0a, 0xe7, 0x19, 0xf1, 0xbb, 0x8b, + // composite keys ["uuid", "c"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x63, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 3) + 0x59, 0x00, 0x18, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // elements (array of 3 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x03, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, 
len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + t.Run("same composite with different number of fields", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) diff --git a/storable.go b/storable.go index c0102a9c..ca199390 100644 --- a/storable.go +++ b/storable.go @@ -37,12 +37,19 @@ type Storable interface { ChildStorables() []Storable } -// EquatableStorable is an interface that supports comparison of Storable. +// ComparableStorable is an interface that supports comparison of Storable. // This is only used for composite keys. -type EquatableStorable interface { +type ComparableStorable interface { Storable + // Equal returns true if the given storable is equal to this storable. Equal(Storable) bool + + // Less returns true if the given storable is less than this storable. + Less(Storable) bool + + // ID returns a unique identifier. + ID() string } type containerStorable interface { diff --git a/storable_test.go b/storable_test.go index 77e2b4b4..0e94944e 100644 --- a/storable_test.go +++ b/storable_test.go @@ -333,6 +333,7 @@ type StringValue struct { var _ Value = StringValue{} var _ Storable = StringValue{} var _ HashableValue = StringValue{} +var _ ComparableStorable = StringValue{} func NewStringValue(s string) StringValue { size := GetUintCBORSize(uint64(len(s))) + uint32(len(s)) @@ -352,6 +353,17 @@ func (v StringValue) Equal(other Storable) bool { return v.str == other.(StringValue).str } +func (v StringValue) Less(other Storable) bool { + if _, ok := other.(StringValue); !ok { + return false + } + return v.str < other.(StringValue).str +} + +func (v StringValue) ID() string { + return v.str +} + func (v StringValue) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { if uint64(v.ByteSize()) > maxInlineSize { diff --git a/typeinfo.go b/typeinfo.go index 7241fa15..484ea8f5 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -21,6 +21,7 @@ package atree import ( "encoding/binary" "fmt" + "sort" "github.com/fxamacker/cbor/v2" ) @@ -50,8 +51,8 @@ type ExtraData interface { // all values with the same composite type and map seed. 
type compositeExtraData struct { mapExtraData *MapExtraData - hkeys []Digest // hkeys is ordered by mapExtraData.Seed - keys []MapKey // keys is ordered by mapExtraData.Seed + hkeys []Digest // hkeys is ordered by mapExtraData.Seed + keys []Storable // keys is ordered by mapExtraData.Seed } var _ ExtraData = &compositeExtraData{} @@ -175,7 +176,7 @@ func newCompositeExtraData( hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:])) } - keys := make([]MapKey, keyCount) + keys := make([]Storable, keyCount) for i := uint64(0); i < keyCount; i++ { // Decode composite key key, err := decodeStorable(dec, SlabIDUndefined, nil) @@ -189,25 +190,20 @@ func newCompositeExtraData( return &compositeExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil } -type compositeTypeID struct { - id string - fieldCount int -} - type compositeTypeInfo struct { index int - keys []MapKey + keys []ComparableStorable } type inlinedExtraData struct { extraData []ExtraData - compositeTypes map[compositeTypeID]compositeTypeInfo + compositeTypes map[string]compositeTypeInfo arrayTypes map[string]int } func newInlinedExtraData() *inlinedExtraData { return &inlinedExtraData{ - compositeTypes: make(map[compositeTypeID]compositeTypeInfo), + compositeTypes: make(map[string]compositeTypeInfo), arrayTypes: make(map[string]int), } } @@ -332,21 +328,28 @@ func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int { } // addCompositeExtraData returns index of deduplicated composite extra data. -// Composite extra data is deduplicated by TypeInfo.ID() and number of fields, -// Composite fields can be removed but new fields can't be added, and existing field types can't be modified. -// Given this, composites with same type ID and same number of fields have the same fields. -// See https://developers.flow.com/cadence/language/contract-updatability#fields -func (ied *inlinedExtraData) addCompositeExtraData(data *MapExtraData, digests []Digest, keys []MapKey) int { - id := compositeTypeID{data.TypeInfo.ID(), int(data.Count)} +// Composite extra data is deduplicated by TypeInfo.ID() with sorted field names. +func (ied *inlinedExtraData) addCompositeExtraData( + data *MapExtraData, + digests []Digest, + keys []ComparableStorable, +) (int, []ComparableStorable) { + + id := makeCompositeTypeID(data.TypeInfo, keys) info, exist := ied.compositeTypes[id] if exist { - return info.index + return info.index, info.keys + } + + storableKeys := make([]Storable, len(keys)) + for i, k := range keys { + storableKeys[i] = k } compositeData := &compositeExtraData{ mapExtraData: data, hkeys: digests, - keys: keys, + keys: storableKeys, } index := len(ied.extraData) @@ -357,21 +360,63 @@ func (ied *inlinedExtraData) addCompositeExtraData(data *MapExtraData, digests [ index: index, } - return index + return index, keys } -// getCompositeTypeInfo returns index of composite type and cached keys. -// NOTE: use this function instead of addCompositeExtraData to check if -// composite type is already added to save some allocation. -func (ied *inlinedExtraData) getCompositeTypeInfo(t TypeInfo, fieldCount int) (int, []MapKey, bool) { - id := compositeTypeID{t.ID(), fieldCount} - info, exist := ied.compositeTypes[id] - if !exist { - return 0, nil, false +func (ied *inlinedExtraData) empty() bool { + return len(ied.extraData) == 0 +} + +// makeCompositeTypeID returns id of concatenated t.ID() with sorted names with "," as separator. 
+func makeCompositeTypeID(t TypeInfo, names []ComparableStorable) string { + const separator = "," + + if len(names) == 1 { + return t.ID() + separator + names[0].ID() } - return info.index, info.keys, true + + sorter := newFieldNameSorter(names) + + sort.Sort(sorter) + + return t.ID() + separator + sorter.join(separator) } -func (ied *inlinedExtraData) empty() bool { - return len(ied.extraData) == 0 +// fieldNameSorter sorts names by index (not in place sort). +type fieldNameSorter struct { + names []ComparableStorable + index []int +} + +func newFieldNameSorter(names []ComparableStorable) *fieldNameSorter { + index := make([]int, len(names)) + for i := 0; i < len(names); i++ { + index[i] = i + } + return &fieldNameSorter{ + names: names, + index: index, + } +} + +func (fn *fieldNameSorter) Len() int { + return len(fn.names) +} + +func (fn *fieldNameSorter) Less(i, j int) bool { + i = fn.index[i] + j = fn.index[j] + return fn.names[i].Less(fn.names[j]) +} + +func (fn *fieldNameSorter) Swap(i, j int) { + fn.index[i], fn.index[j] = fn.index[j], fn.index[i] +} + +func (fn *fieldNameSorter) join(sep string) string { + var s string + for _, i := range fn.index { + s += sep + fn.names[i].ID() + } + return s } From 16435897c01efda71bd3f62a9a62f80b8a0530f1 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 13:01:41 -0500 Subject: [PATCH 009/126] Add comment to update ValueID when SlabID is changed --- storage.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage.go b/storage.go index 71d1804b..be7003cf 100644 --- a/storage.go +++ b/storage.go @@ -54,6 +54,8 @@ func (vid ValueID) String() string { ) } +// WARNING: Any changes to SlabID or its components (Address and SlabIndex) +// require updates to ValueID definition and functions. type ( Address [8]byte SlabIndex [8]byte From 72d3614209b6c03dd766a4b29ea1b7b98376bb69 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:06:24 -0500 Subject: [PATCH 010/126] Use new ExtraData for inlined slab during decoding Decoded ExtraData (including TypeInfo) can be shared by all inlined slabs. This commit creates a new ExtraData with copied TypeInfo for inlined slabs to prevent accidental mutation. --- array.go | 12 ++++++++---- cmd/main/main.go | 18 ++++++++++++------ cmd/stress/typeinfo.go | 4 ++++ map.go | 31 ++++++++++++++++++------------- typeinfo.go | 2 +- utils_test.go | 8 ++++++++ 6 files changed, 51 insertions(+), 24 deletions(-) diff --git a/array.go b/array.go index 21042883..fe6c77d7 100644 --- a/array.go +++ b/array.go @@ -630,10 +630,14 @@ func DecodeInlinedArrayStorable( } return &ArrayDataSlab{ - header: header, - elements: elements, - extraData: extraData, - inlined: true, + header: header, + elements: elements, + extraData: &ArrayExtraData{ + // Make a copy of extraData.TypeInfo because + // inlined extra data are shared by all inlined slabs. 
+ TypeInfo: extraData.TypeInfo.Copy(), + }, + inlined: true, }, nil } diff --git a/cmd/main/main.go b/cmd/main/main.go index 3b2eaebd..f94db511 100644 --- a/cmd/main/main.go +++ b/cmd/main/main.go @@ -73,25 +73,31 @@ func (v Uint64Value) String() string { return fmt.Sprintf("%d", uint64(v)) } -type testTypeInfo struct{} +type testTypeInfo struct { + value uint64 +} var _ atree.TypeInfo = testTypeInfo{} +func (i testTypeInfo) Copy() atree.TypeInfo { + return i +} + func (testTypeInfo) IsComposite() bool { return false } func (i testTypeInfo) ID() string { - return fmt.Sprintf("uint64(%d)", i) + return fmt.Sprintf("uint64(%d)", i.value) } -func (testTypeInfo) Encode(e *cbor.StreamEncoder) error { - return e.EncodeUint8(42) +func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error { + return e.EncodeUint64(i.value) } func (i testTypeInfo) Equal(other atree.TypeInfo) bool { - _, ok := other.(testTypeInfo) - return ok + otherTestTypeInfo, ok := other.(testTypeInfo) + return ok && i.value == otherTestTypeInfo.value } func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go index b14c212b..ec78239f 100644 --- a/cmd/stress/typeinfo.go +++ b/cmd/stress/typeinfo.go @@ -32,6 +32,10 @@ type testTypeInfo struct { var _ atree.TypeInfo = testTypeInfo{} +func (i testTypeInfo) Copy() atree.TypeInfo { + return i +} + func (i testTypeInfo) IsComposite() bool { return false } diff --git a/map.go b/map.go index 898db44d..1b128920 100644 --- a/map.go +++ b/map.go @@ -2504,17 +2504,16 @@ func DecodeInlinedCompositeStorable( firstKey: elements.firstKey(), } - // TODO: does extra data needs to be copied? - copiedExtraData := &MapExtraData{ - TypeInfo: extraData.mapExtraData.TypeInfo, - Count: extraData.mapExtraData.Count, - Seed: extraData.mapExtraData.Seed, - } - return &MapDataSlab{ - header: header, - elements: elements, - extraData: copiedExtraData, + header: header, + elements: elements, + extraData: &MapExtraData{ + // Make a copy of extraData.TypeInfo because + // inlined extra data are shared by all inlined slabs. + TypeInfo: extraData.mapExtraData.TypeInfo.Copy(), + Count: extraData.mapExtraData.Count, + Seed: extraData.mapExtraData.Seed, + }, anySize: false, collisionGroup: false, inlined: true, @@ -2609,9 +2608,15 @@ func DecodeInlinedMapStorable( // NOTE: extra data doesn't need to be copied because every inlined map has its own inlined extra data. return &MapDataSlab{ - header: header, - elements: elements, - extraData: extraData, + header: header, + elements: elements, + extraData: &MapExtraData{ + // Make a copy of extraData.TypeInfo because + // inlined extra data are shared by all inlined slabs. + TypeInfo: extraData.TypeInfo.Copy(), + Count: extraData.Count, + Seed: extraData.Seed, + }, anySize: false, collisionGroup: false, inlined: true, diff --git a/typeinfo.go b/typeinfo.go index 484ea8f5..43268c1a 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -30,7 +30,7 @@ type TypeInfo interface { Encode(*cbor.StreamEncoder) error IsComposite() bool ID() string - // TODO: maybe add a copy function because decoded TypeInfo can be shared by multiple slabs if not copied. 
+ Copy() TypeInfo } type TypeInfoDecoder func( diff --git a/utils_test.go b/utils_test.go index 90b7bda2..56eb2274 100644 --- a/utils_test.go +++ b/utils_test.go @@ -93,6 +93,10 @@ type testTypeInfo struct { var _ TypeInfo = testTypeInfo{} +func (i testTypeInfo) Copy() TypeInfo { + return i +} + func (i testTypeInfo) IsComposite() bool { return false } @@ -118,6 +122,10 @@ type testCompositeTypeInfo struct { var _ TypeInfo = testCompositeTypeInfo{} +func (i testCompositeTypeInfo) Copy() TypeInfo { + return i +} + func (i testCompositeTypeInfo) IsComposite() bool { return true } From f66a85b5469fd885e03c4d3a36fd7ee15ad03e47 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:46:51 -0500 Subject: [PATCH 011/126] Copy key for inlined composite during decoding Decoded ExtraData (including keys) can be shared by all inlined composite referring to the same type. This commit copies key for inlined composite to prevent accidental mutation. --- map.go | 9 ++++++--- storable.go | 4 +++- storable_test.go | 4 ++++ typeinfo.go | 19 +++++++++---------- 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/map.go b/map.go index 1b128920..b6933065 100644 --- a/map.go +++ b/map.go @@ -2470,6 +2470,7 @@ func DecodeInlinedCompositeStorable( extraData.mapExtraData.Count)) } + // Make a copy of digests because extraData is shared by all inlined composite referring to the same type. hkeys := make([]Digest, len(extraData.hkeys)) copy(hkeys, extraData.hkeys) @@ -2482,9 +2483,11 @@ func DecodeInlinedCompositeStorable( return nil, err } - elemSize := singleElementPrefixSize + extraData.keys[i].ByteSize() + value.ByteSize() - // TODO: does key need to be copied? - elem := &singleElement{extraData.keys[i], value, elemSize} + // Make a copy of key in case it is shared. + key := extraData.keys[i].Copy() + + elemSize := singleElementPrefixSize + key.ByteSize() + value.ByteSize() + elem := &singleElement{key, value, elemSize} elems[i] = elem size += digestSize + elem.Size() diff --git a/storable.go b/storable.go index ca199390..52892575 100644 --- a/storable.go +++ b/storable.go @@ -37,7 +37,7 @@ type Storable interface { ChildStorables() []Storable } -// ComparableStorable is an interface that supports comparison of Storable. +// ComparableStorable is an interface that supports comparison and cloning of Storable. // This is only used for composite keys. type ComparableStorable interface { Storable @@ -50,6 +50,8 @@ type ComparableStorable interface { // ID returns a unique identifier. ID() string + + Copy() Storable } type containerStorable interface { diff --git a/storable_test.go b/storable_test.go index 0e94944e..4cd52b6f 100644 --- a/storable_test.go +++ b/storable_test.go @@ -364,6 +364,10 @@ func (v StringValue) ID() string { return v.str } +func (v StringValue) Copy() Storable { + return v +} + func (v StringValue) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { if uint64(v.ByteSize()) > maxInlineSize { diff --git a/typeinfo.go b/typeinfo.go index 43268c1a..7f0a34bc 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -51,8 +51,8 @@ type ExtraData interface { // all values with the same composite type and map seed. 
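// For example (an editor's sketch of the idea, not the exact encoding),
// two inlined composites of the same type can reference one shared entry:
//
//	inlined extra data: [compositeExtraData{map extra data, hkeys, field names}]
//	inlined composite A: [extra data index 0, value ID index, [fieldValue0, ...]]
//	inlined composite B: [extra data index 0, value ID index, [fieldValue0, ...]]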
type compositeExtraData struct { mapExtraData *MapExtraData - hkeys []Digest // hkeys is ordered by mapExtraData.Seed - keys []Storable // keys is ordered by mapExtraData.Seed + hkeys []Digest // hkeys is ordered by mapExtraData.Seed + keys []ComparableStorable // keys is ordered by mapExtraData.Seed } var _ ExtraData = &compositeExtraData{} @@ -176,7 +176,7 @@ func newCompositeExtraData( hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:])) } - keys := make([]Storable, keyCount) + keys := make([]ComparableStorable, keyCount) for i := uint64(0); i < keyCount; i++ { // Decode composite key key, err := decodeStorable(dec, SlabIDUndefined, nil) @@ -184,7 +184,11 @@ func newCompositeExtraData( // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") } - keys[i] = key + compositeKey, ok := key.(ComparableStorable) + if !ok { + return nil, NewDecodingError(fmt.Errorf("failed to decode key's storable: got %T, expect ComparableStorable", key)) + } + keys[i] = compositeKey } return &compositeExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil @@ -341,15 +345,10 @@ func (ied *inlinedExtraData) addCompositeExtraData( return info.index, info.keys } - storableKeys := make([]Storable, len(keys)) - for i, k := range keys { - storableKeys[i] = k - } - compositeData := &compositeExtraData{ mapExtraData: data, hkeys: digests, - keys: storableKeys, + keys: keys, } index := len(ied.extraData) From f8e0992b1296afda3fd2414e82cdd24e3b6b48be Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:09:44 -0500 Subject: [PATCH 012/126] Add comments for safe use of range loop over Go map --- array.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/array.go b/array.go index fe6c77d7..01dcdc62 100644 --- a/array.go +++ b/array.go @@ -2664,6 +2664,9 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) { // TODO: maybe optimize this func (a *Array) incrementIndexFrom(index uint64) { + // Although range loop over Go map is not deterministic, it is OK + // to use here because this operation is free of side-effect and + // leads to the same results independent of map order. for id, i := range a.mutableElementIndex { if i >= index { a.mutableElementIndex[id]++ @@ -2673,6 +2676,9 @@ func (a *Array) incrementIndexFrom(index uint64) { // TODO: maybe optimize this func (a *Array) decrementIndexFrom(index uint64) { + // Although range loop over Go map is not deterministic, it is OK + // to use here because this operation is free of side-effect and + // leads to the same results independent of map order. 
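+ // For example, after removing the element at index 2 (hypothetical IDs):
+ //
+ //	before: {vidA: 1, vidB: 4, vidC: 7}
+ //	after decrementIndexFrom(2): {vidA: 1, vidB: 3, vidC: 6}
+ //
+ // Each entry is adjusted exactly once, whatever order range visits them.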
for id, i := range a.mutableElementIndex { if i > index { a.mutableElementIndex[id]-- From 35849521656f4449ee79d162d41ba1543119aaa0 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:22:21 -0500 Subject: [PATCH 013/126] Replace if with switch in Array.Storable() --- array.go | 56 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/array.go b/array.go index 01dcdc62..d46af79e 100644 --- a/array.go +++ b/array.go @@ -3020,19 +3020,18 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab inlined := a.root.Inlined() inlinable := a.root.Inlinable(maxInlineSize) - if inlinable && inlined { + switch { + case inlinable && inlined: // Root slab is inlinable and was inlined. // Return root slab as storable, no size adjustment and change to storage. return a.root, nil - } - if !inlinable && !inlined { + case !inlinable && !inlined: // Root slab is not inlinable and was not inlined. // Return root slab ID as storable, no size adjustment and change to storage. return SlabIDStorable(a.SlabID()), nil - } - if inlinable && !inlined { + case inlinable && !inlined: // Root slab is inlinable and was NOT inlined. // Inline root data slab. @@ -3061,34 +3060,39 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab rootDataSlab.inlined = true return rootDataSlab, nil - } - // here, root slab is NOT inlinable and was previously inlined. + case !inlinable && inlined: - // Un-inline root slab. + // Root slab is NOT inlinable and was previously inlined. - // Inlined root slab must be data slab. - rootDataSlab, ok := a.root.(*ArrayDataSlab) - if !ok { - return nil, NewFatalError(fmt.Errorf("unexpected inlined array slab type %T", a.root)) - } + // Un-inline root slab. - // Update root data slab size - rootDataSlab.header.size = rootDataSlab.header.size - - inlinedArrayDataSlabPrefixSize + - arrayRootDataSlabPrefixSize + // Inlined root slab must be data slab. + rootDataSlab, ok := a.root.(*ArrayDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlined array slab type %T", a.root)) + } - // Update root data slab inlined status. - rootDataSlab.inlined = false + // Update root data slab size + rootDataSlab.header.size = rootDataSlab.header.size - + inlinedArrayDataSlabPrefixSize + + arrayRootDataSlabPrefixSize - // Store root slab in storage - err := a.Storage.Store(rootDataSlab.header.slabID, a.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.SlabID())) - } + // Update root data slab inlined status. + rootDataSlab.inlined = false + + // Store root slab in storage + err := a.Storage.Store(rootDataSlab.header.slabID, a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.SlabID())) + } + + return SlabIDStorable(a.SlabID()), nil - return SlabIDStorable(a.SlabID()), nil + default: + panic("not reachable") + } } var emptyArrayIterator = &ArrayIterator{} From dc8a567a5e743336e2f9b5fd4ecd3e61226ed639 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:29:05 -0500 Subject: [PATCH 014/126] Replace if with switch in OrderedMap.Storable() --- map.go | 53 +++++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/map.go b/map.go index b6933065..aaf9b7a6 100644 --- a/map.go +++ b/map.go @@ -4966,19 +4966,19 @@ func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (S inlined := m.root.Inlined() inlinable := m.root.Inlinable(maxInlineSize) - if inlinable && inlined { + switch { + + case inlinable && inlined: // Root slab is inlinable and was inlined. // Return root slab as storable, no size adjustment and change to storage. return m.root, nil - } - if !inlinable && !inlined { + case !inlinable && !inlined: // Root slab is not inlinable and was not inlined. // Return root slab as storable, no size adjustment and change to storage. return SlabIDStorable(m.SlabID()), nil - } - if inlinable && !inlined { + case inlinable && !inlined: // Root slab is inlinable and was NOT inlined. // Inline root data slab. @@ -5005,32 +5005,37 @@ func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (S rootDataSlab.inlined = true return rootDataSlab, nil - } - // here, root slab is NOT inlinable and was inlined. + case !inlinable && inlined: - // Un-inline root slab. + // Root slab is NOT inlinable and was inlined. - // Inlined root slab must be data slab. - rootDataSlab, ok := m.root.(*MapDataSlab) - if !ok { - return nil, NewFatalError(fmt.Errorf("unexpected inlined map slab type %T", m.root)) - } + // Un-inline root slab. - // Update root data slab size from inlined to not inlined. - rootDataSlab.header.size = mapRootDataSlabPrefixSize + rootDataSlab.elements.Size() + // Inlined root slab must be data slab. + rootDataSlab, ok := m.root.(*MapDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlined map slab type %T", m.root)) + } - // Update root data slab inlined status. - rootDataSlab.inlined = false + // Update root data slab size from inlined to not inlined. + rootDataSlab.header.size = mapRootDataSlabPrefixSize + rootDataSlab.elements.Size() - // Store root slab in storage - err := m.Storage.Store(m.SlabID(), m.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.SlabID())) - } + // Update root data slab inlined status. + rootDataSlab.inlined = false + + // Store root slab in storage + err := m.Storage.Store(m.SlabID(), m.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.SlabID())) + } + + return SlabIDStorable(m.SlabID()), nil - return SlabIDStorable(m.SlabID()), nil + default: + panic("not reachable") + } } func (m *OrderedMap) Count() uint64 { From 5194eee696bed5a80b9cdd42d79be430ab7a36b4 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:55:50 -0500 Subject: [PATCH 015/126] Add comment about encoding size --- array.go | 2 ++ map.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/array.go b/array.go index d46af79e..24868c3d 100644 --- a/array.go +++ b/array.go @@ -28,6 +28,8 @@ import ( "github.com/fxamacker/cbor/v2" ) +// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, +// such as merge and split, so size constants here are related to encoding size. const ( slabAddressSize = 8 slabIndexSize = 8 diff --git a/map.go b/map.go index aaf9b7a6..29d72c16 100644 --- a/map.go +++ b/map.go @@ -30,6 +30,8 @@ import ( "github.com/fxamacker/circlehash" ) +// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, +// such as merge and split, so size constants here are related to encoding size. const ( digestSize = 8 From 7375b82116b2971d2dd216b0e62512232310ac98 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 17:33:37 -0500 Subject: [PATCH 016/126] Add comments for parentUpdater --- array.go | 9 ++++++--- map.go | 10 +++++++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/array.go b/array.go index 24868c3d..6aa7d970 100644 --- a/array.go +++ b/array.go @@ -173,9 +173,12 @@ type Array struct { root ArraySlab // parentUpdater is a callback that notifies parent container when this array is modified. - // If this callback is null, this array has no parent. Otherwise, this array has parent - // and this callback must be used when this array is changed by Append, Insert, Set, - // Remove, etc. + // If this callback is nil, this array has no parent. Otherwise, this array has parent + // and this callback must be used when this array is changed by Append, Insert, Set, Remove, etc. + // + // parentUpdater acts like "parent pointer". It is not stored physically and is only in memory. + // It is setup when child array is returned from parent's Get. It is also setup when + // new child is added to parent through Set or Insert. parentUpdater parentUpdater // mutableElementIndex tracks index of mutable element, such as Array and OrderedMap. diff --git a/map.go b/map.go index 29d72c16..01ff922d 100644 --- a/map.go +++ b/map.go @@ -369,7 +369,15 @@ type OrderedMap struct { Storage SlabStorage root MapSlab digesterBuilder DigesterBuilder - parentUpdater parentUpdater + + // parentUpdater is a callback that notifies parent container when this map is modified. + // If this callback is nil, this map has no parent. Otherwise, this map has parent + // and this callback must be used when this map is changed by Set and Remove. + // + // parentUpdater acts like "parent pointer". It is not stored physically and is only in memory. + // It is setup when child map is returned from parent's Get. It is also setup when + // new child is added to parent through Set or Insert. 
+ parentUpdater parentUpdater } var _ Value = &OrderedMap{} From d49d6b4b49950f6410b526492da7fb183738f003 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 17:58:19 -0500 Subject: [PATCH 017/126] Add more comments for Array and OrderedMap Some of the comments were taken from Atree's README which was originally authored by Ramtin. Co-authored-by: Ramtin M. Seraj --- array.go | 11 ++++++++++- map.go | 12 ++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/array.go b/array.go index 6aa7d970..40bb8686 100644 --- a/array.go +++ b/array.go @@ -167,7 +167,16 @@ type ArraySlab interface { Inlinable(maxInlineSize uint64) bool } -// Array is tree +// Array is a heterogeneous variable-size array, storing any type of values +// into a smaller ordered list of values and provides efficient functionality +// to lookup, insert and remove elements anywhere in the array. +// +// Array elements can be stored in one or more relatively fixed-sized segments. +// +// Array can be inlined into its parent container when the entire content fits in +// parent container's element size limit. Specifically, array with one segment +// which fits in size limit can be inlined, while arrays with multiple segments +// can't be inlined. type Array struct { Storage SlabStorage root ArraySlab diff --git a/map.go b/map.go index 01ff922d..9daa8e93 100644 --- a/map.go +++ b/map.go @@ -365,6 +365,18 @@ type MapSlab interface { Inlinable(maxInlineSize uint64) bool } +// OrderedMap is an ordered map of key-value pairs; keys can be any hashable type +// and values can be any serializable value type. It supports heterogeneous key +// or value types (e.g. first key storing a boolean and second key storing a string). +// OrderedMap keeps values in specific sorted order and operations are deterministic +// so the state of the segments after a sequence of operations are always unique. +// +// OrderedMap key-value pairs can be stored in one or more relatively fixed-sized segments. +// +// OrderedMap can be inlined into its parent container when the entire content fits in +// parent container's element size limit. Specifically, OrderedMap with one segment +// which fits in size limit can be inlined, while OrderedMap with multiple segments +// can't be inlined. type OrderedMap struct { Storage SlabStorage root MapSlab From f56232f8066f043f9e4b4aae198a8c212ed35c9b Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 21 Sep 2023 18:47:07 -0500 Subject: [PATCH 018/126] Add comment about ValueID and SlabID --- storage.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/storage.go b/storage.go index be7003cf..4c5a450a 100644 --- a/storage.go +++ b/storage.go @@ -31,7 +31,12 @@ import ( const LedgerBaseStorageSlabPrefix = "$" -// ValueID identifies Array and OrderedMap. +// ValueID identifies an Array or OrderedMap. ValueID is consistent +// independent of inlining status, while ValueID and SlabID are used +// differently despite having the same size and content under the hood. +// By contrast, SlabID is affected by inlining because it identifies +// a slab in storage. Given this, ValueID should be used for +// resource tracking, etc. 
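+// For example, a value whose root slab ID is {address, index} keeps that
+// same 16-byte ValueID after being inlined, even though its SlabID becomes
+// SlabIDUndefined:
+//
+//	vid := slabIDToValueID(sid) // 8 bytes of address followed by 8 bytes of index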
type ValueID [16]byte func slabIDToValueID(sid SlabID) ValueID { From 24aa117f1488a20a67680c46179d2bf973ae2131 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 22 Sep 2023 16:03:54 -0500 Subject: [PATCH 019/126] Validate map key/value size <= max limit in tests While at it, also refactored validMapSlab(). --- map_debug.go | 210 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 139 insertions(+), 71 deletions(-) diff --git a/map_debug.go b/map_debug.go index 9e752325..b5d5cb2b 100644 --- a/map_debug.go +++ b/map_debug.go @@ -361,90 +361,140 @@ func validMapSlab( } } - if slab.IsData() { + switch slab := slab.(type) { + case *MapDataSlab: + return validMapDataSlab(storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) - dataSlab, ok := slab.(*MapDataSlab) - if !ok { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("slab %d is not MapDataSlab", id)) - } + case *MapMetaDataSlab: + return validMapMetaDataSlab(storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) - // Verify data slab's elements - elementCount, elementSize, err := validMapElements(storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElements(). - return 0, nil, nil, nil, err - } + default: + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) + } +} - // Verify slab's first key - if dataSlab.elements.firstKey() != dataSlab.header.firstKey { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d header first key %d is wrong, want %d", - id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) - } +func validMapDataSlab( + storage SlabStorage, + digesterBuilder DigesterBuilder, + tic TypeInfoComparator, + hip HashInputProvider, + dataSlab *MapDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + firstKeys []Digest, +) ( + elementCount uint64, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + _firstKeys []Digest, + err error, +) { + id := dataSlab.header.slabID - // Verify that only root slab can be inlined - if level > 0 && slab.Inlined() { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) - } + if !dataSlab.IsData() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapDataSlab %s is not data", id)) + } - // Verify that aggregated element size + slab prefix is the same as header.size - computedSize := uint32(mapDataSlabPrefixSize) - if level == 0 { - computedSize = uint32(mapRootDataSlabPrefixSize) - if dataSlab.Inlined() { - computedSize = uint32(inlinedMapDataSlabPrefixSize) - } - } - computedSize += elementSize + // Verify data slab's elements + elementCount, elementSize, err := validMapElements(storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by validMapElements(). 
+ return 0, nil, nil, nil, err + } - if computedSize != dataSlab.header.size { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d header size %d is wrong, want %d", - id, dataSlab.header.size, computedSize)) - } + // Verify slab's first key + if dataSlab.elements.firstKey() != dataSlab.header.firstKey { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d header first key %d is wrong, want %d", + id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) + } - // Verify any size flag - if dataSlab.anySize { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d anySize %t is wrong, want false", - id, dataSlab.anySize)) - } + // Verify that only root slab can be inlined + if level > 0 && dataSlab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } - // Verify collision group flag - if dataSlab.collisionGroup { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d collisionGroup %t is wrong, want false", - id, dataSlab.collisionGroup)) + // Verify that aggregated element size + slab prefix is the same as header.size + computedSize := uint32(mapDataSlabPrefixSize) + if level == 0 { + computedSize = uint32(mapRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedMapDataSlabPrefixSize) } + } + computedSize += elementSize - dataSlabIDs = append(dataSlabIDs, id) + if computedSize != dataSlab.header.size { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d header size %d is wrong, want %d", + id, dataSlab.header.size, computedSize)) + } - if dataSlab.next != SlabIDUndefined { - nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) - } + // Verify any size flag + if dataSlab.anySize { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d anySize %t is wrong, want false", + id, dataSlab.anySize)) + } - firstKeys = append(firstKeys, dataSlab.header.firstKey) + // Verify collision group flag + if dataSlab.collisionGroup { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d collisionGroup %t is wrong, want false", + id, dataSlab.collisionGroup)) + } - return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil + dataSlabIDs = append(dataSlabIDs, id) + + if dataSlab.next != SlabIDUndefined { + nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) } - meta, ok := slab.(*MapMetaDataSlab) - if !ok { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("slab %d is not MapMetaDataSlab", id)) + firstKeys = append(firstKeys, dataSlab.header.firstKey) + + return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil +} + +func validMapMetaDataSlab( + storage SlabStorage, + digesterBuilder DigesterBuilder, + tic TypeInfoComparator, + hip HashInputProvider, + metaSlab *MapMetaDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + firstKeys []Digest, +) ( + elementCount uint64, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + _firstKeys []Digest, + err error, +) { + id := metaSlab.header.slabID + + if metaSlab.IsData() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapMetaDataSlab %s is data", id)) + } + + if metaSlab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapMetaDataSlab %s can't be inlined", id)) } if level == 0 { // Verify that root slab has more than one child slabs - if len(meta.childrenHeaders) < 2 { + if len(metaSlab.childrenHeaders) < 2 { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("root metadata slab %d has %d children, want at 
least 2 children ", - id, len(meta.childrenHeaders))) + id, len(metaSlab.childrenHeaders))) } } elementCount = 0 - for i := 0; i < len(meta.childrenHeaders); i++ { - h := meta.childrenHeaders[i] + for i := 0; i < len(metaSlab.childrenHeaders); i++ { + h := metaSlab.childrenHeaders[i] childSlab, err := getMapSlab(storage, h.slabID) if err != nil { @@ -465,39 +515,39 @@ func validMapSlab( } // Verify slab header first key - if meta.childrenHeaders[0].firstKey != meta.header.firstKey { + if metaSlab.childrenHeaders[0].firstKey != metaSlab.header.firstKey { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d header first key %d is wrong, want %d", - id, meta.header.firstKey, meta.childrenHeaders[0].firstKey)) + id, metaSlab.header.firstKey, metaSlab.childrenHeaders[0].firstKey)) } // Verify that child slab's first keys are sorted. - sortedHKey := sort.SliceIsSorted(meta.childrenHeaders, func(i, j int) bool { - return meta.childrenHeaders[i].firstKey < meta.childrenHeaders[j].firstKey + sortedHKey := sort.SliceIsSorted(metaSlab.childrenHeaders, func(i, j int) bool { + return metaSlab.childrenHeaders[i].firstKey < metaSlab.childrenHeaders[j].firstKey }) if !sortedHKey { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d child slab's first key isn't sorted %+v", id, meta.childrenHeaders)) + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d child slab's first key isn't sorted %+v", id, metaSlab.childrenHeaders)) } // Verify that child slab's first keys are unique. - if len(meta.childrenHeaders) > 1 { - prev := meta.childrenHeaders[0].firstKey - for _, h := range meta.childrenHeaders[1:] { + if len(metaSlab.childrenHeaders) > 1 { + prev := metaSlab.childrenHeaders[0].firstKey + for _, h := range metaSlab.childrenHeaders[1:] { if prev == h.firstKey { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d child header first key isn't unique %v", - id, meta.childrenHeaders)) + id, metaSlab.childrenHeaders)) } prev = h.firstKey } } // Verify slab header's size - computedSize := uint32(len(meta.childrenHeaders)*mapSlabHeaderSize) + mapMetaDataSlabPrefixSize - if computedSize != meta.header.size { + computedSize := uint32(len(metaSlab.childrenHeaders)*mapSlabHeaderSize) + mapMetaDataSlabPrefixSize + if computedSize != metaSlab.header.size { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d header size %d is wrong, want %d", - id, meta.header.size, computedSize)) + id, metaSlab.header.size, computedSize)) } return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil @@ -733,6 +783,24 @@ func validSingleElement( digestMaxLevel uint, err error, ) { + // Verify key storable's size is less than size limit + if e.key.ByteSize() > uint32(maxInlineMapKeySize) { + return 0, 0, NewFatalError( + fmt.Errorf( + "map element key %s size %d exceeds size limit %d", + e.key, e.key.ByteSize(), maxInlineMapKeySize, + )) + } + + // Verify value storable's size is less than size limit + valueSizeLimit := maxInlineMapValueSize(uint64(e.key.ByteSize())) + if e.value.ByteSize() > uint32(valueSizeLimit) { + return 0, 0, NewFatalError( + fmt.Errorf( + "map element value %s size %d exceeds size limit %d", + e.value, e.value.ByteSize(), valueSizeLimit, + )) + } // Verify key kv, err := e.key.StoredValue(storage) From 15bde126d92c1c6109544dec81c48bc97c44cd6e Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 22 Sep 2023 16:10:04 -0500 Subject: [PATCH 020/126] Validate ValueID and SlabID for 
arrays in tests While at it, also refactored validArraySlab(). --- array_debug.go | 268 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 195 insertions(+), 73 deletions(-) diff --git a/array_debug.go b/array_debug.go index 9c18cbb1..ce1c8f1b 100644 --- a/array_debug.go +++ b/array_debug.go @@ -169,6 +169,19 @@ type TypeInfoComparator func(TypeInfo, TypeInfo) bool func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { + // Verify array value ID + err := validArrayValueID(a) + if err != nil { + return err + } + + // Verify array slab ID + err = validArraySlabID(a) + if err != nil { + return err + } + + // Verify array extra data extraData := a.root.ExtraData() if extraData == nil { return NewFatalError(fmt.Errorf("root slab %d doesn't have extra data", a.root.SlabID())) @@ -184,6 +197,7 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp )) } + // Verify array root slab computedCount, dataSlabIDs, nextDataSlabIDs, err := validArraySlab(tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{}) if err != nil { @@ -249,95 +263,140 @@ func validArraySlab( } } - if slab.IsData() { - dataSlab, ok := slab.(*ArrayDataSlab) - if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s is not ArrayDataSlab", id)) - } + switch slab := slab.(type) { + case *ArrayDataSlab: + return validArrayDataSlab(tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) - // Verify that element count is the same as header.count - if uint32(len(dataSlab.elements)) != dataSlab.header.count { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header count %d is wrong, want %d", - id, dataSlab.header.count, len(dataSlab.elements))) - } + case *ArrayMetaDataSlab: + return validArrayMetaDataSlab(tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) - // Verify that only root slab can be inlined - if level > 0 && slab.Inlined() { - return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) - } + default: + return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) + } +} - // Verify that aggregated element size + slab prefix is the same as header.size - computedSize := uint32(arrayDataSlabPrefixSize) - if level == 0 { - computedSize = uint32(arrayRootDataSlabPrefixSize) - if slab.Inlined() { - computedSize = uint32(inlinedArrayDataSlabPrefixSize) - } - } +func validArrayDataSlab( + tic TypeInfoComparator, + hip HashInputProvider, + storage SlabStorage, + dataSlab *ArrayDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, +) ( + elementCount uint32, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + err error, +) { + id := dataSlab.header.slabID - for _, e := range dataSlab.elements { + if !dataSlab.IsData() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayDataSlab %s is not data", id)) + } - // Verify element size is <= inline size - if e.ByteSize() > uint32(maxInlineArrayElementSize) { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", - id, e, e.ByteSize(), maxInlineArrayElementSize)) - } + // Verify that element count is the same as header.count + if uint32(len(dataSlab.elements)) != dataSlab.header.count { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header count %d is wrong, want %d", + id, dataSlab.header.count, len(dataSlab.elements))) + } - computedSize += e.ByteSize() - } + // Verify that only root data slab 
can be inlined + if level > 0 && dataSlab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } - if computedSize != dataSlab.header.size { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header size %d is wrong, want %d", - id, dataSlab.header.size, computedSize)) + // Verify that aggregated element size + slab prefix is the same as header.size + computedSize := uint32(arrayDataSlabPrefixSize) + if level == 0 { + computedSize = uint32(arrayRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedArrayDataSlabPrefixSize) } + } - dataSlabIDs = append(dataSlabIDs, id) + for _, e := range dataSlab.elements { - if dataSlab.next != SlabIDUndefined { - nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) + // Verify element size is <= inline size + if e.ByteSize() > uint32(maxInlineArrayElementSize) { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", + id, e, e.ByteSize(), maxInlineArrayElementSize)) } - // Verify element - for _, e := range dataSlab.elements { - v, err := e.StoredValue(storage) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, - fmt.Sprintf( - "data slab %s element %s can't be converted to value", - id, e, - )) - } - err = ValidValue(v, nil, tic, hip) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). - return 0, nil, nil, fmt.Errorf( - "data slab %s element %q isn't valid: %w", - id, e, err, - ) - } + computedSize += e.ByteSize() + } + + if computedSize != dataSlab.header.size { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header size %d is wrong, want %d", + id, dataSlab.header.size, computedSize)) + } + + dataSlabIDs = append(dataSlabIDs, id) + + if dataSlab.next != SlabIDUndefined { + nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) + } + + // Verify element + for _, e := range dataSlab.elements { + v, err := e.StoredValue(storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, + fmt.Sprintf( + "data slab %s element %s can't be converted to value", + id, e, + )) + } + err = ValidValue(v, nil, tic, hip) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by ValidValue(). 
+ return 0, nil, nil, fmt.Errorf( + "data slab %s element %q isn't valid: %w", + id, e, err, + ) } + } + + return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil +} - return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil +func validArrayMetaDataSlab( + tic TypeInfoComparator, + hip HashInputProvider, + storage SlabStorage, + metaSlab *ArrayMetaDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, +) ( + elementCount uint32, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + err error, +) { + id := metaSlab.header.slabID + + if metaSlab.IsData() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayMetaDataSlab %s is data", id)) } - meta, ok := slab.(*ArrayMetaDataSlab) - if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayMetaDataSlab", id)) + if metaSlab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayMetaDataSlab %s shouldn't be inlined", id)) } if level == 0 { // Verify that root slab has more than one child slabs - if len(meta.childrenHeaders) < 2 { + if len(metaSlab.childrenHeaders) < 2 { return 0, nil, nil, NewFatalError(fmt.Errorf("root metadata slab %d has %d children, want at least 2 children ", - id, len(meta.childrenHeaders))) + id, len(metaSlab.childrenHeaders))) } } // Verify childrenCountSum - if len(meta.childrenCountSum) != len(meta.childrenHeaders) { + if len(metaSlab.childrenCountSum) != len(metaSlab.childrenHeaders) { return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d has %d childrenCountSum, want %d", - id, len(meta.childrenCountSum), len(meta.childrenHeaders))) + id, len(metaSlab.childrenCountSum), len(metaSlab.childrenHeaders))) } computedCount := uint32(0) @@ -345,8 +404,8 @@ func validArraySlab( // If we use range, then h would be a temporary object and we'd be passing address of // temporary object to function, which can lead to bugs depending on usage. It's not a bug // with the current usage but it's less fragile to future changes by not using range here. 
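// For instance, `for _, h := range metaSlab.childrenHeaders { f(&h) }`
// would pass the address of the loop variable h, not the address of the
// slice element, so f could observe h being overwritten on later iterations.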
- for i := 0; i < len(meta.childrenHeaders); i++ {
- h := meta.childrenHeaders[i]
+ for i := 0; i < len(metaSlab.childrenHeaders); i++ {
+ h := metaSlab.childrenHeaders[i]
 
 childSlab, err := getArraySlab(storage, h.slabID)
 if err != nil {
@@ -366,26 +425,26 @@ func validArraySlab(
 computedCount += count
 
 // Verify childrenCountSum
- if meta.childrenCountSum[i] != computedCount {
+ if metaSlab.childrenCountSum[i] != computedCount {
 return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d childrenCountSum[%d] is %d, want %d",
- id, i, meta.childrenCountSum[i], computedCount))
+ id, i, metaSlab.childrenCountSum[i], computedCount))
 }
 }
 
 // Verify that aggregated element count is the same as header.count
- if computedCount != meta.header.count {
+ if computedCount != metaSlab.header.count {
 return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d header count %d is wrong, want %d",
- id, meta.header.count, computedCount))
+ id, metaSlab.header.count, computedCount))
 }
 
 // Verify that aggregated header size + slab prefix is the same as header.size
- computedSize := uint32(len(meta.childrenHeaders)*arraySlabHeaderSize) + arrayMetaDataSlabPrefixSize
- if computedSize != meta.header.size {
+ computedSize := uint32(len(metaSlab.childrenHeaders)*arraySlabHeaderSize) + arrayMetaDataSlabPrefixSize
+ if computedSize != metaSlab.header.size {
 return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d header size %d is wrong, want %d",
- id, meta.header.size, computedSize))
+ id, metaSlab.header.size, computedSize))
 }
 
- return meta.header.count, dataSlabIDs, nextDataSlabIDs, nil
+ return metaSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil
 }
 
 // ValidArraySerialization traverses array tree and verifies serialization
@@ -847,3 +906,66 @@ func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID {
 
 	return ids
 }
+
+// validArrayValueID verifies array ValueID is always the same as
+// root slab's SlabID independent of array's inlined status.
+func validArrayValueID(a *Array) error {
+ rootSlabID := a.root.Header().slabID
+
+ vid := a.ValueID()
+
+ if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect first %d bytes of array value ID as %v, got %v",
+ slabAddressSize,
+ rootSlabID.address[:],
+ vid[:slabAddressSize]))
+ }
+
+ if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect second %d bytes of array value ID as %v, got %v",
+ slabIndexSize,
+ rootSlabID.index[:],
+ vid[slabAddressSize:]))
+ }
+
+ return nil
+}
+
+// validArraySlabID verifies array SlabID is either empty for inlined array, or
+// same as root slab's SlabID for not-inlined array.
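+// For example (editor's sketch): once a small child array is inlined into
+// its parent, the child reports
+//
+//	childArray.SlabID()  // SlabIDUndefined
+//	childArray.ValueID() // unchanged, still derived from the original slab ID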
+func validArraySlabID(a *Array) error {
+ sid := a.SlabID()
+
+ if a.Inlined() {
+ if sid != SlabIDUndefined {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect empty slab ID for inlined array, got %v",
+ sid))
+ }
+ return nil
+ }
+
+ rootSlabID := a.root.Header().slabID
+
+ if sid == SlabIDUndefined {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect non-empty slab ID for not-inlined array, got %v",
+ sid))
+ }
+
+ if sid != rootSlabID {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect array slab ID same as root slab's slab ID %s, got %s",
+ rootSlabID,
+ sid))
+ }
+
+ return nil
+}

From 27aa4cb9d2a267457de7d7dee033312891b733b4 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 22 Sep 2023 16:15:57 -0500
Subject: [PATCH 021/126] Validate ValueID and SlabID for maps in tests

---
 map_debug.go | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/map_debug.go b/map_debug.go
index b5d5cb2b..5c8e4a65 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -247,6 +247,19 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) {
 
 func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error {
 
+ // Verify map value ID
+ err := validMapValueID(m)
+ if err != nil {
+ return err
+ }
+
+ // Verify map slab ID
+ err = validMapSlabID(m)
+ if err != nil {
+ return err
+ }
+
+ // Verify map extra data
 extraData := m.root.ExtraData()
 if extraData == nil {
 return NewFatalError(fmt.Errorf("root slab %d doesn't have extra data", m.root.SlabID()))
@@ -1472,3 +1485,66 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error {
 
 	return nil
 }
+
+// validMapValueID verifies map ValueID is always the same as
+// root slab's SlabID independent of map's inlined status.
+func validMapValueID(m *OrderedMap) error {
+ rootSlabID := m.root.Header().slabID
+
+ vid := m.ValueID()
+
+ if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect first %d bytes of map value ID as %v, got %v",
+ slabAddressSize,
+ rootSlabID.address[:],
+ vid[:slabAddressSize]))
+ }
+
+ if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect second %d bytes of map value ID as %v, got %v",
+ slabIndexSize,
+ rootSlabID.index[:],
+ vid[slabAddressSize:]))
+ }
+
+ return nil
+}
+
+// validMapSlabID verifies map SlabID is either empty for inlined map, or
+// same as root slab's SlabID for not-inlined map.
+func validMapSlabID(m *OrderedMap) error {
+ sid := m.SlabID()
+
+ if m.Inlined() {
+ if sid != SlabIDUndefined {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect empty slab ID for inlined map, got %v",
+ sid))
+ }
+ return nil
+ }
+
+ rootSlabID := m.root.Header().slabID
+
+ if sid == SlabIDUndefined {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect non-empty slab ID for not-inlined map, got %v",
+ sid))
+ }
+
+ if sid != rootSlabID {
+ return NewFatalError(
+ fmt.Errorf(
+ "expect map slab ID same as root slab's slab ID %s, got %s",
+ rootSlabID,
+ sid))
+ }
+
+ return nil
+}

From 5d8ff9817edca01085e2f8b77b0dcdad54fa06d2 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 22 Sep 2023 16:33:44 -0500
Subject: [PATCH 022/126] Validate address of inlined array/map in tests

---
 array_debug.go | 24 ++++++++++++++++++------
 array_test.go | 6 +++---
 map_debug.go | 48 ++++++++++++++++++++++++++++++------------------
 map_test.go | 6 +++---
 4 files changed, 56 insertions(+), 28 deletions(-)

diff --git a/array_debug.go b/array_debug.go
index ce1c8f1b..cca75d26 100644
--- a/array_debug.go
+++ b/array_debug.go
@@ -167,7 +167,16 @@ func DumpArraySlabs(a *Array) ([]string, error) {
 
 type TypeInfoComparator func(TypeInfo, TypeInfo) bool
 
-func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error {
+func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error {
+
+ // Verify array address
+ if address != a.Address() {
+ return NewFatalError(fmt.Errorf("array address %v, got %v", address, a.Address()))
+ }
+
+ if address != a.root.Header().slabID.address {
+ return NewFatalError(fmt.Errorf("array root slab address %v, got %v", address, a.root.Header().slabID.address))
+ }
 
 // Verify array value ID
 err := validArrayValueID(a)
@@ -199,7 +208,7 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp
 
 // Verify array root slab
 computedCount, dataSlabIDs, nextDataSlabIDs, err :=
- validArraySlab(tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{})
+ validArraySlab(address, tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{})
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by validArraySlab().
return err @@ -220,6 +229,7 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp } func validArraySlab( + address Address, tic TypeInfoComparator, hip HashInputProvider, storage SlabStorage, @@ -265,10 +275,10 @@ func validArraySlab( switch slab := slab.(type) { case *ArrayDataSlab: - return validArrayDataSlab(tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) + return validArrayDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) case *ArrayMetaDataSlab: - return validArrayMetaDataSlab(tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) + return validArrayMetaDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) default: return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) @@ -276,6 +286,7 @@ func validArraySlab( } func validArrayDataSlab( + address Address, tic TypeInfoComparator, hip HashInputProvider, storage SlabStorage, @@ -348,7 +359,7 @@ func validArrayDataSlab( id, e, )) } - err = ValidValue(v, nil, tic, hip) + err = ValidValue(v, address, nil, tic, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, nil, nil, fmt.Errorf( @@ -362,6 +373,7 @@ func validArrayDataSlab( } func validArrayMetaDataSlab( + address Address, tic TypeInfoComparator, hip HashInputProvider, storage SlabStorage, @@ -416,7 +428,7 @@ func validArrayMetaDataSlab( // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - validArraySlab(tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs) + validArraySlab(address, tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). 
return 0, nil, nil, err diff --git a/array_test.go b/array_test.go index ed1f57d4..91494b2b 100644 --- a/array_test.go +++ b/array_test.go @@ -74,7 +74,7 @@ func verifyArray( require.Equal(t, len(values), i) // Verify in-memory slabs - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider) if err != nil { PrintArray(array) } @@ -4628,7 +4628,7 @@ func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider) require.NoError(t, err) for i := uint64(0); i < arraySize; i++ { @@ -4645,7 +4645,7 @@ func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider) require.NoError(t, err) } diff --git a/map_debug.go b/map_debug.go index 5c8e4a65..7f02f17b 100644 --- a/map_debug.go +++ b/map_debug.go @@ -245,7 +245,16 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { return dumps, nil } -func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { + + // Verify map address + if address != m.Address() { + return NewFatalError(fmt.Errorf("map address %v, got %v", address, m.Address())) + } + + if address != m.root.Header().slabID.address { + return NewFatalError(fmt.Errorf("map root slab address %v, got %v", address, m.root.Header().slabID.address)) + } // Verify map value ID err := validMapValueID(m) @@ -282,7 +291,7 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash } computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab( - m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) + address, m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). 
return err @@ -327,6 +336,7 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash } func validMapSlab( + address Address, storage SlabStorage, digesterBuilder DigesterBuilder, tic TypeInfoComparator, @@ -376,10 +386,10 @@ func validMapSlab( switch slab := slab.(type) { case *MapDataSlab: - return validMapDataSlab(storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return validMapDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) case *MapMetaDataSlab: - return validMapMetaDataSlab(storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return validMapMetaDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) default: return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) @@ -387,6 +397,7 @@ func validMapSlab( } func validMapDataSlab( + address Address, storage SlabStorage, digesterBuilder DigesterBuilder, tic TypeInfoComparator, @@ -410,7 +421,7 @@ func validMapDataSlab( } // Verify data slab's elements - elementCount, elementSize, err := validMapElements(storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) + elementCount, elementSize, err := validMapElements(address, storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapElements(). return 0, nil, nil, nil, err @@ -470,6 +481,7 @@ func validMapDataSlab( } func validMapMetaDataSlab( + address Address, storage SlabStorage, digesterBuilder DigesterBuilder, tic TypeInfoComparator, @@ -518,7 +530,7 @@ func validMapMetaDataSlab( // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + validMapSlab(address, storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). 
return 0, nil, nil, nil, err @@ -567,6 +579,7 @@ func validMapMetaDataSlab( } func validMapElements( + address Address, storage SlabStorage, db DigesterBuilder, tic TypeInfoComparator, @@ -583,15 +596,16 @@ func validMapElements( switch elems := elements.(type) { case *hkeyElements: - return validMapHkeyElements(storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return validMapHkeyElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) case *singleElements: - return validMapSingleElements(storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return validMapSingleElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } } func validMapHkeyElements( + address Address, storage SlabStorage, db DigesterBuilder, tic TypeInfoComparator, @@ -666,7 +680,7 @@ func validMapHkeyElements( copy(hkeys, hkeyPrefixes) hkeys[len(hkeys)-1] = elements.hkeys[i] - count, size, err := validMapElements(storage, db, tic, hip, id, ge, digestLevel+1, hkeys) + count, size, err := validMapElements(address, storage, db, tic, hip, id, ge, digestLevel+1, hkeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapElement(). return 0, 0, err @@ -699,7 +713,7 @@ func validMapHkeyElements( hkeys[len(hkeys)-1] = elements.hkeys[i] // Verify element - computedSize, maxDigestLevel, err := validSingleElement(storage, db, tic, hip, se, hkeys) + computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, se, hkeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validSingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -727,6 +741,7 @@ func validMapHkeyElements( } func validMapSingleElements( + address Address, storage SlabStorage, db DigesterBuilder, tic TypeInfoComparator, @@ -753,7 +768,7 @@ func validMapSingleElements( for _, e := range elements.elems { // Verify element - computedSize, maxDigestLevel, err := validSingleElement(storage, db, tic, hip, e, hkeyPrefixes) + computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, e, hkeyPrefixes) if err != nil { // Don't need to wrap error as external error because err is already categorized by validSingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -785,6 +800,7 @@ func validMapSingleElements( } func validSingleElement( + address Address, storage SlabStorage, db DigesterBuilder, tic TypeInfoComparator, @@ -822,7 +838,7 @@ func validSingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } - err = ValidValue(kv, nil, tic, hip) + err = ValidValue(kv, address, nil, tic, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err) @@ -835,7 +851,7 @@ func validSingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e)) } - err = ValidValue(vv, nil, tic, hip) + err = ValidValue(vv, address, nil, tic, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). 
return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) @@ -867,12 +883,12 @@ func validSingleElement( return computedSize, digest.Levels(), nil } -func ValidValue(value Value, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func ValidValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { switch v := value.(type) { case *Array: - return ValidArray(v, typeInfo, tic, hip) + return ValidArray(v, address, typeInfo, tic, hip) case *OrderedMap: - return ValidMap(v, typeInfo, tic, hip) + return ValidMap(v, address, typeInfo, tic, hip) } return nil } diff --git a/map_test.go b/map_test.go index c4bcfc7b..fbba9516 100644 --- a/map_test.go +++ b/map_test.go @@ -145,7 +145,7 @@ func verifyMap( } // Verify in-memory slabs - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider) if err != nil { PrintMap(m) } @@ -10696,7 +10696,7 @@ func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider) require.NoError(t, err) // Reset mutable values after changing its storable size @@ -10714,7 +10714,7 @@ func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider) require.NoError(t, err) } From 6cd8c0c4595933c57fd791b0793570f111d7468e Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 22 Sep 2023 18:41:26 -0500 Subject: [PATCH 023/126] Verify inlinability of not inlined values in tests --- array_debug.go | 91 ++++++++++++++++++++++++++++++++++++++++++-------- array_test.go | 49 +++++++++++++++++++++++---- map_debug.go | 49 ++++++++++++++++++--------- map_test.go | 57 +++++++++++++++++++++++++------ 4 files changed, 199 insertions(+), 47 deletions(-) diff --git a/array_debug.go b/array_debug.go index cca75d26..ffc7f848 100644 --- a/array_debug.go +++ b/array_debug.go @@ -167,7 +167,7 @@ func DumpArraySlabs(a *Array) ([]string, error) { type TypeInfoComparator func(TypeInfo, TypeInfo) bool -func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { // Verify array address if address != a.Address() { @@ -208,7 +208,7 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar // Verify array root slab computedCount, dataSlabIDs, nextDataSlabIDs, err := - validArraySlab(address, tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{}) + validArraySlab(address, tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{}, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). 
return err @@ -238,6 +238,7 @@ func validArraySlab( headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + inlineEnabled bool, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -275,10 +276,10 @@ func validArraySlab( switch slab := slab.(type) { case *ArrayDataSlab: - return validArrayDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) + return validArrayDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs, inlineEnabled) case *ArrayMetaDataSlab: - return validArrayMetaDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs) + return validArrayMetaDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs, inlineEnabled) default: return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) @@ -294,6 +295,7 @@ func validArrayDataSlab( level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + inlineEnabled bool, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -327,13 +329,6 @@ func validArrayDataSlab( } for _, e := range dataSlab.elements { - - // Verify element size is <= inline size - if e.ByteSize() > uint32(maxInlineArrayElementSize) { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", - id, e, e.ByteSize(), maxInlineArrayElementSize)) - } - computedSize += e.ByteSize() } @@ -348,8 +343,8 @@ func validArrayDataSlab( nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) } - // Verify element for _, e := range dataSlab.elements { + v, err := e.StoredValue(storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. @@ -359,7 +354,25 @@ func validArrayDataSlab( id, e, )) } - err = ValidValue(v, address, nil, tic, hip) + + // Verify element size <= inline size + if e.ByteSize() > uint32(maxInlineArrayElementSize) { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", + id, e, e.ByteSize(), maxInlineArrayElementSize)) + } + + // Verify not-inlined array/map > inline size, or can't be inlined + if inlineEnabled { + if _, ok := e.(SlabIDStorable); ok { + err = validNotInlinedValueStatusAndSize(v, uint32(maxInlineArrayElementSize)) + if err != nil { + return 0, nil, nil, err + } + } + } + + // Verify element + err = ValidValue(v, address, nil, tic, hip, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, nil, nil, fmt.Errorf( @@ -381,6 +394,7 @@ func validArrayMetaDataSlab( level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + inlineEnabled bool, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -428,7 +442,7 @@ func validArrayMetaDataSlab( // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - validArraySlab(address, tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs) + validArraySlab(address, tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). 
return 0, nil, nil, err @@ -981,3 +995,52 @@ func validArraySlabID(a *Array) error { return nil } + +func validNotInlinedValueStatusAndSize(v Value, maxInlineSize uint32) error { + + switch v := v.(type) { + case *Array: + // Verify not-inlined array's inlined status + if v.root.Inlined() { + return NewFatalError( + fmt.Errorf( + "not-inlined array %s has inlined status", + v.root.Header().slabID)) + } + + // Verify not-inlined array size. + if v.root.IsData() { + inlinableSize := v.root.ByteSize() - arrayRootDataSlabPrefixSize + inlinedArrayDataSlabPrefixSize + if inlinableSize <= maxInlineSize { + return NewFatalError( + fmt.Errorf("not-inlined array root slab %s can be inlined, inlinable size %d <= max inline size %d", + v.root.Header().slabID, + inlinableSize, + maxInlineSize)) + } + } + + case *OrderedMap: + // Verify not-inlined map's inlined status + if v.Inlined() { + return NewFatalError( + fmt.Errorf( + "not-inlined map %s has inlined status", + v.root.Header().slabID)) + } + + // Verify not-inlined map size. + if v.root.IsData() { + inlinableSize := v.root.ByteSize() - mapRootDataSlabPrefixSize + inlinedMapDataSlabPrefixSize + if inlinableSize <= maxInlineSize { + return NewFatalError( + fmt.Errorf("not-inlined map root slab %s can be inlined, inlinable size %d <= max inline size %d", + v.root.Header().slabID, + inlinableSize, + maxInlineSize)) + } + } + } + + return nil +} diff --git a/array_test.go b/array_test.go index 91494b2b..0345f521 100644 --- a/array_test.go +++ b/array_test.go @@ -29,6 +29,16 @@ import ( "github.com/stretchr/testify/require" ) +func verifyEmptyArrayV0( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, +) { + verifyArrayV0(t, storage, typeInfo, address, array, nil, false) +} + func verifyEmptyArray( t *testing.T, storage *PersistentSlabStorage, @@ -39,7 +49,18 @@ func verifyEmptyArray( verifyArray(t, storage, typeInfo, address, array, nil, false) } -// verifyArray verifies array elements and validates serialization and in-memory slab tree. +func verifyArrayV0( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, + values []Value, + hasNestedArrayMapElement bool, +) { + _verifyArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, false) +} + func verifyArray( t *testing.T, storage *PersistentSlabStorage, @@ -48,6 +69,20 @@ func verifyArray( array *Array, values []Value, hasNestedArrayMapElement bool, +) { + _verifyArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, true) +} + +// verifyArray verifies array elements and validates serialization and in-memory slab tree. 
+func _verifyArray( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, + values []Value, + hasNestedArrayMapElement bool, + inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, array.Type())) require.Equal(t, address, array.Address()) @@ -74,7 +109,7 @@ func verifyArray( require.Equal(t, len(values), i) // Verify in-memory slabs - err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintArray(array) } @@ -1552,7 +1587,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage, arraySlabID) require.NoError(t, err) - verifyEmptyArray(t, storage, typeInfo, address, array) + verifyEmptyArrayV0(t, storage, typeInfo, address, array) }) t.Run("dataslab as root", func(t *testing.T) { @@ -1598,7 +1633,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage, arraySlabID) require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, array, values, false) + verifyArrayV0(t, storage, typeInfo, address, array, values, false) }) t.Run("metadataslab as root", func(t *testing.T) { @@ -1734,7 +1769,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage2, arraySlabID) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array, values, false) + verifyArrayV0(t, storage2, typeInfo, address, array, values, false) }) } @@ -4628,7 +4663,7 @@ func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) for i := uint64(0); i < arraySize; i++ { @@ -4645,7 +4680,7 @@ func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) } diff --git a/map_debug.go b/map_debug.go index 7f02f17b..02a8b666 100644 --- a/map_debug.go +++ b/map_debug.go @@ -245,7 +245,7 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { return dumps, nil } -func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { // Verify map address if address != m.Address() { @@ -291,7 +291,7 @@ func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCom } computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab( - address, m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) + address, m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). 
return err @@ -347,6 +347,7 @@ func validMapSlab( dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + inlineEnabled bool, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -386,10 +387,10 @@ func validMapSlab( switch slab := slab.(type) { case *MapDataSlab: - return validMapDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return validMapDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, inlineEnabled) case *MapMetaDataSlab: - return validMapMetaDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return validMapMetaDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, inlineEnabled) default: return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) @@ -407,6 +408,7 @@ func validMapDataSlab( dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + inlineEnabled bool, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -421,7 +423,7 @@ func validMapDataSlab( } // Verify data slab's elements - elementCount, elementSize, err := validMapElements(address, storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) + elementCount, elementSize, err := validMapElements(address, storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapElements(). return 0, nil, nil, nil, err @@ -491,6 +493,7 @@ func validMapMetaDataSlab( dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + inlineEnabled bool, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -530,7 +533,7 @@ func validMapMetaDataSlab( // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(address, storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + validMapSlab(address, storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). 
return 0, nil, nil, nil, err @@ -588,6 +591,7 @@ func validMapElements( elements elements, digestLevel uint, hkeyPrefixes []Digest, + inlineEnabled bool, ) ( elementCount uint64, elementSize uint32, @@ -596,9 +600,9 @@ func validMapElements( switch elems := elements.(type) { case *hkeyElements: - return validMapHkeyElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return validMapHkeyElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes, inlineEnabled) case *singleElements: - return validMapSingleElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return validMapSingleElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes, inlineEnabled) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } @@ -614,6 +618,7 @@ func validMapHkeyElements( elements *hkeyElements, digestLevel uint, hkeyPrefixes []Digest, + inlineEnabled bool, ) ( elementCount uint64, elementSize uint32, @@ -680,7 +685,7 @@ func validMapHkeyElements( copy(hkeys, hkeyPrefixes) hkeys[len(hkeys)-1] = elements.hkeys[i] - count, size, err := validMapElements(address, storage, db, tic, hip, id, ge, digestLevel+1, hkeys) + count, size, err := validMapElements(address, storage, db, tic, hip, id, ge, digestLevel+1, hkeys, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapElement(). return 0, 0, err @@ -713,7 +718,7 @@ func validMapHkeyElements( hkeys[len(hkeys)-1] = elements.hkeys[i] // Verify element - computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, se, hkeys) + computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, se, hkeys, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validSingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -750,6 +755,7 @@ func validMapSingleElements( elements *singleElements, digestLevel uint, hkeyPrefixes []Digest, + inlineEnabled bool, ) ( elementCount uint64, elementSize uint32, @@ -768,7 +774,7 @@ func validMapSingleElements( for _, e := range elements.elems { // Verify element - computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, e, hkeyPrefixes) + computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, e, hkeyPrefixes, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by validSingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -807,6 +813,7 @@ func validSingleElement( hip HashInputProvider, e *singleElement, digests []Digest, + inlineEnabled bool, ) ( size uint32, digestMaxLevel uint, @@ -838,7 +845,7 @@ func validSingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } - err = ValidValue(kv, address, nil, tic, hip) + err = ValidValue(kv, address, nil, tic, hip, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). 
return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err) @@ -851,12 +858,22 @@ func validSingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e)) } - err = ValidValue(vv, address, nil, tic, hip) + err = ValidValue(vv, address, nil, tic, hip, inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) } + // Verify not-inlined array/map > inline size, or can't be inlined + if inlineEnabled { + if _, ok := e.value.(SlabIDStorable); ok { + err = validNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit)) + if err != nil { + return 0, 0, err + } + } + } + // Verify size computedSize := singleElementPrefixSize + e.key.ByteSize() + e.value.ByteSize() if computedSize != e.Size() { @@ -883,12 +900,12 @@ func validSingleElement( return computedSize, digest.Levels(), nil } -func ValidValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func ValidValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { switch v := value.(type) { case *Array: - return ValidArray(v, address, typeInfo, tic, hip) + return ValidArray(v, address, typeInfo, tic, hip, inlineEnabled) case *OrderedMap: - return ValidMap(v, address, typeInfo, tic, hip) + return ValidMap(v, address, typeInfo, tic, hip, inlineEnabled) } return nil } diff --git a/map_test.go b/map_test.go index fbba9516..ed102c61 100644 --- a/map_test.go +++ b/map_test.go @@ -89,6 +89,16 @@ func (h *errorDigesterBuilder) Digest(_ HashInputProvider, _ Value) (Digester, e return nil, h.err } +func verifyEmptyMapV0( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, +) { + verifyMapV0(t, storage, typeInfo, address, m, nil, nil, false) +} + func verifyEmptyMap( t *testing.T, storage *PersistentSlabStorage, @@ -99,9 +109,35 @@ func verifyEmptyMap( verifyMap(t, storage, typeInfo, address, m, nil, nil, false) } +func verifyMapV0( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, + keyValues map[Value]Value, + sortedKeys []Value, + hasNestedArrayMapElement bool, +) { + _verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, false) +} + +func verifyMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, + keyValues map[Value]Value, + sortedKeys []Value, + hasNestedArrayMapElement bool, +) { + _verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, true) +} + // verifyMap verifies map elements and validates serialization and in-memory slab tree. // It also verifies elements ordering if sortedKeys is not nil. 
-func verifyMap( +func _verifyMap( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -110,6 +146,7 @@ func verifyMap( keyValues map[Value]Value, sortedKeys []Value, hasNestedArrayMapElement bool, + inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, m.Type())) require.Equal(t, address, m.Address()) @@ -145,7 +182,7 @@ func verifyMap( } // Verify in-memory slabs - err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintMap(m) } @@ -1577,7 +1614,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, NewDefaultDigesterBuilder()) require.NoError(t, err) - verifyEmptyMap(t, storage, typeInfo, address, decodedMap) + verifyEmptyMapV0(t, storage, typeInfo, address, decodedMap) }) t.Run("dataslab as root", func(t *testing.T) { @@ -1650,7 +1687,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("has pointer no collision", func(t *testing.T) { @@ -1863,7 +1900,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + verifyMapV0(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 1 level", func(t *testing.T) { @@ -2037,7 +2074,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 2 levels", func(t *testing.T) { @@ -2261,7 +2298,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("external collision", func(t *testing.T) { @@ -2480,7 +2517,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) } @@ -10696,7 +10733,7 @@ func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider) + err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) // Reset mutable values after changing its storable size @@ -10714,7 +10751,7 @@ func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - err = ValidMap(m, address, 
typeInfo, typeInfoComparator, hashInputProvider)
+	err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true)
 	require.NoError(t, err)
 }

From 43edecaaab4cc5d86860a17dc4ea11b884ae934e Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Sun, 24 Sep 2023 14:55:45 -0500
Subject: [PATCH 024/126] Refactor array validation

---
 array_debug.go | 76 +++++++++++++++++++++++++-------------------------
 map_debug.go   |  2 +-
 2 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/array_debug.go b/array_debug.go
index ffc7f848..c26679b1 100644
--- a/array_debug.go
+++ b/array_debug.go
@@ -169,22 +169,18 @@ type TypeInfoComparator func(TypeInfo, TypeInfo) bool

 func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error {

-	// Verify array address
+	// Verify array address (independent of array inlined status)
 	if address != a.Address() {
 		return NewFatalError(fmt.Errorf("array address %v, got %v", address, a.Address()))
 	}

-	if address != a.root.Header().slabID.address {
-		return NewFatalError(fmt.Errorf("array root slab address %v, got %v", address, a.root.Header().slabID.address))
-	}
-
-	// Verify array value ID
+	// Verify array value ID (independent of array inlined status)
 	err := validArrayValueID(a)
 	if err != nil {
 		return err
 	}

-	// Verify array slab ID
+	// Verify array slab ID (dependent on array inlined status)
 	err = validArraySlabID(a)
 	if err != nil {
 		return err
@@ -206,9 +202,16 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar
 		))
 	}

-	// Verify array root slab
-	computedCount, dataSlabIDs, nextDataSlabIDs, err :=
-		validArraySlab(address, tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{}, inlineEnabled)
+	v := &verification{
+		storage:       a.Storage,
+		address:       address,
+		tic:           tic,
+		hip:           hip,
+		inlineEnabled: inlineEnabled,
+	}
+
+	// Verify array slabs
+	computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifyArraySlab(a.root, 0, nil, []SlabID{}, []SlabID{})
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by validArraySlab().
return err @@ -228,26 +231,33 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar return nil } -func validArraySlab( - address Address, - tic TypeInfoComparator, - hip HashInputProvider, - storage SlabStorage, +type verification struct { + storage SlabStorage + address Address + tic TypeInfoComparator + hip HashInputProvider + inlineEnabled bool +} + +func (v *verification) verifyArraySlab( slab ArraySlab, level int, headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, - inlineEnabled bool, ) ( elementCount uint32, _dataSlabIDs []SlabID, _nextDataSlabIDs []SlabID, err error, ) { - id := slab.Header().slabID + // Verify slab address (independent of array inlined status) + if v.address != id.address { + return 0, nil, nil, NewFatalError(fmt.Errorf("array slab address %v, got %v", v.address, id.address)) + } + if level > 0 { // Verify that non-root slab doesn't have extra data if slab.ExtraData() != nil { @@ -276,26 +286,21 @@ func validArraySlab( switch slab := slab.(type) { case *ArrayDataSlab: - return validArrayDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs, inlineEnabled) + return v.verifyArrayDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs) case *ArrayMetaDataSlab: - return validArrayMetaDataSlab(address, tic, hip, storage, slab, level, dataSlabIDs, nextDataSlabIDs, inlineEnabled) + return v.verifyArrayMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs) default: return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) } } -func validArrayDataSlab( - address Address, - tic TypeInfoComparator, - hip HashInputProvider, - storage SlabStorage, +func (v *verification) verifyArrayDataSlab( dataSlab *ArrayDataSlab, level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, - inlineEnabled bool, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -345,7 +350,7 @@ func validArrayDataSlab( for _, e := range dataSlab.elements { - v, err := e.StoredValue(storage) + value, err := e.StoredValue(v.storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, @@ -362,9 +367,9 @@ func validArrayDataSlab( } // Verify not-inlined array/map > inline size, or can't be inlined - if inlineEnabled { + if v.inlineEnabled { if _, ok := e.(SlabIDStorable); ok { - err = validNotInlinedValueStatusAndSize(v, uint32(maxInlineArrayElementSize)) + err = verifyNotInlinedValueStatusAndSize(value, uint32(maxInlineArrayElementSize)) if err != nil { return 0, nil, nil, err } @@ -372,7 +377,7 @@ func validArrayDataSlab( } // Verify element - err = ValidValue(v, address, nil, tic, hip, inlineEnabled) + err = ValidValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). 
return 0, nil, nil, fmt.Errorf(
@@ -385,16 +390,11 @@ func validArrayDataSlab(
 	return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil
 }

-func validArrayMetaDataSlab(
-	address Address,
-	tic TypeInfoComparator,
-	hip HashInputProvider,
-	storage SlabStorage,
+func (v *verification) verifyArrayMetaDataSlab(
 	metaSlab *ArrayMetaDataSlab,
 	level int,
 	dataSlabIDs []SlabID,
 	nextDataSlabIDs []SlabID,
-	inlineEnabled bool,
 ) (
 	elementCount uint32,
 	_dataSlabIDs []SlabID,
@@ -433,7 +433,7 @@ func validArrayMetaDataSlab(
 	for i := 0; i < len(metaSlab.childrenHeaders); i++ {
 		h := metaSlab.childrenHeaders[i]

-		childSlab, err := getArraySlab(storage, h.slabID)
+		childSlab, err := getArraySlab(v.storage, h.slabID)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by getArraySlab().
 			return 0, nil, nil, err
@@ -442,7 +442,7 @@ func validArrayMetaDataSlab(
 		// Verify child slabs
 		var count uint32
 		count, dataSlabIDs, nextDataSlabIDs, err =
-			validArraySlab(address, tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, inlineEnabled)
+			v.verifyArraySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by validArraySlab().
 			return 0, nil, nil, err
@@ -996,7 +996,7 @@ func validArraySlabID(a *Array) error {
 	return nil
 }

-func validNotInlinedValueStatusAndSize(v Value, maxInlineSize uint32) error {
+func verifyNotInlinedValueStatusAndSize(v Value, maxInlineSize uint32) error {

 	switch v := v.(type) {
 	case *Array:
diff --git a/map_debug.go b/map_debug.go
index 02a8b666..2841b4c8 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -867,7 +867,7 @@ func validSingleElement(
 	// Verify not-inlined array/map > inline size, or can't be inlined
 	if inlineEnabled {
 		if _, ok := e.value.(SlabIDStorable); ok {
-			err = validNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit))
+			err = verifyNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit))
 			if err != nil {
 				return 0, 0, err
 			}

From 119f6c51c0b496d46d93d4f4dc33dc83de95d5b8 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Sun, 24 Sep 2023 15:06:50 -0500
Subject: [PATCH 025/126] Test inlined array slabs are not in storage

Currently, ValidArray() doesn't verify that inlined array slabs are
not in storage. It is called by tests in Atree and Cadence, so update
it to check that inlined array slabs are not in storage.
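For illustration, a minimal sketch of the intended test-time usage (the
array, address, and typeInfo names below are placeholders for the usual
test fixtures, not part of this change):

	// With inlineEnabled=true, ValidArray is expected to fail if any slab
	// marked as inlined can still be retrieved from storage.
	err := ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true)
	require.NoError(t, err)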
--- array_debug.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/array_debug.go b/array_debug.go index c26679b1..81d6fa67 100644 --- a/array_debug.go +++ b/array_debug.go @@ -202,7 +202,7 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar )) } - v := &verification{ + v := &arrayVerifier{ storage: a.Storage, address: address, tic: tic, @@ -231,7 +231,7 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar return nil } -type verification struct { +type arrayVerifier struct { storage SlabStorage address Address tic TypeInfoComparator @@ -239,7 +239,7 @@ type verification struct { inlineEnabled bool } -func (v *verification) verifyArraySlab( +func (v *arrayVerifier) verifyArraySlab( slab ArraySlab, level int, headerFromParentSlab *ArraySlabHeader, @@ -258,6 +258,18 @@ func (v *verification) verifyArraySlab( return 0, nil, nil, NewFatalError(fmt.Errorf("array slab address %v, got %v", v.address, id.address)) } + // Verify that inlined slab is not in storage + if slab.Inlined() { + _, exist, err := v.storage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storage interface. + return 0, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) + } + if exist { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) + } + } + if level > 0 { // Verify that non-root slab doesn't have extra data if slab.ExtraData() != nil { @@ -296,7 +308,7 @@ func (v *verification) verifyArraySlab( } } -func (v *verification) verifyArrayDataSlab( +func (v *arrayVerifier) verifyArrayDataSlab( dataSlab *ArrayDataSlab, level int, dataSlabIDs []SlabID, @@ -390,7 +402,7 @@ func (v *verification) verifyArrayDataSlab( return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil } -func (v *verification) verifyArrayMetaDataSlab( +func (v *arrayVerifier) verifyArrayMetaDataSlab( metaSlab *ArrayMetaDataSlab, level int, dataSlabIDs []SlabID, From efcb2a2b54da33ffbd83726dd6510cb2ac77c7a5 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 24 Sep 2023 15:31:43 -0500 Subject: [PATCH 026/126] Refactor map validation --- map_debug.go | 114 ++++++++++++++++++++------------------------------- 1 file changed, 45 insertions(+), 69 deletions(-) diff --git a/map_debug.go b/map_debug.go index 2841b4c8..b7dac378 100644 --- a/map_debug.go +++ b/map_debug.go @@ -290,8 +290,17 @@ func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCom return NewFatalError(fmt.Errorf("root slab %d seed is uninitialized", m.root.SlabID())) } - computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab( - address, m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}, inlineEnabled) + v := &mapVerifier{ + storage: m.Storage, + address: address, + digesterBuilder: m.digesterBuilder, + tic: tic, + hip: hip, + inlineEnabled: inlineEnabled, + } + + computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := v.verifyMapSlab( + m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). 
return err @@ -335,19 +344,22 @@ func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCom return nil } -func validMapSlab( - address Address, - storage SlabStorage, - digesterBuilder DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +type mapVerifier struct { + storage SlabStorage + address Address + digesterBuilder DigesterBuilder + tic TypeInfoComparator + hip HashInputProvider + inlineEnabled bool +} + +func (v *mapVerifier) verifyMapSlab( slab MapSlab, level int, headerFromParentSlab *MapSlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, - inlineEnabled bool, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -387,28 +399,22 @@ func validMapSlab( switch slab := slab.(type) { case *MapDataSlab: - return validMapDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, inlineEnabled) + return v.verifyMapDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) case *MapMetaDataSlab: - return validMapMetaDataSlab(address, storage, digesterBuilder, tic, hip, slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, inlineEnabled) + return v.verifyMapMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) default: return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) } } -func validMapDataSlab( - address Address, - storage SlabStorage, - digesterBuilder DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyMapDataSlab( dataSlab *MapDataSlab, level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, - inlineEnabled bool, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -423,7 +429,7 @@ func validMapDataSlab( } // Verify data slab's elements - elementCount, elementSize, err := validMapElements(address, storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil, inlineEnabled) + elementCount, elementSize, err := v.verifyMapElements(id, dataSlab.elements, 0, nil) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapElements(). return 0, nil, nil, nil, err @@ -482,18 +488,12 @@ func validMapDataSlab( return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil } -func validMapMetaDataSlab( - address Address, - storage SlabStorage, - digesterBuilder DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyMapMetaDataSlab( metaSlab *MapMetaDataSlab, level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, - inlineEnabled bool, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -524,7 +524,7 @@ func validMapMetaDataSlab( for i := 0; i < len(metaSlab.childrenHeaders); i++ { h := metaSlab.childrenHeaders[i] - childSlab, err := getMapSlab(storage, h.slabID) + childSlab, err := getMapSlab(v.storage, h.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 
return 0, nil, nil, nil, err @@ -533,7 +533,7 @@ func validMapMetaDataSlab( // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(address, storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys, inlineEnabled) + v.verifyMapSlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). return 0, nil, nil, nil, err @@ -581,17 +581,11 @@ func validMapMetaDataSlab( return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil } -func validMapElements( - address Address, - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyMapElements( id SlabID, elements elements, digestLevel uint, hkeyPrefixes []Digest, - inlineEnabled bool, ) ( elementCount uint64, elementSize uint32, @@ -600,25 +594,19 @@ func validMapElements( switch elems := elements.(type) { case *hkeyElements: - return validMapHkeyElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes, inlineEnabled) + return v.verifyMapHkeyElements(id, elems, digestLevel, hkeyPrefixes) case *singleElements: - return validMapSingleElements(address, storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes, inlineEnabled) + return v.verifyMapSingleElements(id, elems, digestLevel, hkeyPrefixes) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } } -func validMapHkeyElements( - address Address, - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyMapHkeyElements( id SlabID, elements *hkeyElements, digestLevel uint, hkeyPrefixes []Digest, - inlineEnabled bool, ) ( elementCount uint64, elementSize uint32, @@ -675,7 +663,7 @@ func validMapHkeyElements( if group, ok := e.(elementGroup); ok { - ge, err := group.Elements(storage) + ge, err := group.Elements(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by elementGroup.Elements(). return 0, 0, err @@ -685,7 +673,7 @@ func validMapHkeyElements( copy(hkeys, hkeyPrefixes) hkeys[len(hkeys)-1] = elements.hkeys[i] - count, size, err := validMapElements(address, storage, db, tic, hip, id, ge, digestLevel+1, hkeys, inlineEnabled) + count, size, err := v.verifyMapElements(id, ge, digestLevel+1, hkeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapElement(). return 0, 0, err @@ -718,9 +706,9 @@ func validMapHkeyElements( hkeys[len(hkeys)-1] = elements.hkeys[i] // Verify element - computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, se, hkeys, inlineEnabled) + computedSize, maxDigestLevel, err := v.verifySingleElement(se, hkeys) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validSingleElement(). + // Don't need to wrap error as external error because err is already categorized by verifySingleElement(). 
return 0, 0, fmt.Errorf("data slab %d: %w", id, err) } @@ -745,17 +733,11 @@ func validMapHkeyElements( return elementCount, elementSize, nil } -func validMapSingleElements( - address Address, - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyMapSingleElements( id SlabID, elements *singleElements, digestLevel uint, hkeyPrefixes []Digest, - inlineEnabled bool, ) ( elementCount uint64, elementSize uint32, @@ -774,7 +756,7 @@ func validMapSingleElements( for _, e := range elements.elems { // Verify element - computedSize, maxDigestLevel, err := validSingleElement(address, storage, db, tic, hip, e, hkeyPrefixes, inlineEnabled) + computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeyPrefixes) if err != nil { // Don't need to wrap error as external error because err is already categorized by validSingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -805,15 +787,9 @@ func validMapSingleElements( return uint64(len(elements.elems)), elementSize, nil } -func validSingleElement( - address Address, - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifySingleElement( e *singleElement, digests []Digest, - inlineEnabled bool, ) ( size uint32, digestMaxLevel uint, @@ -839,33 +815,33 @@ func validSingleElement( } // Verify key - kv, err := e.key.StoredValue(storage) + kv, err := e.key.StoredValue(v.storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Stroable interface. return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } - err = ValidValue(kv, address, nil, tic, hip, inlineEnabled) + err = ValidValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err) } // Verify value - vv, err := e.value.StoredValue(storage) + vv, err := e.value.StoredValue(v.storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Stroable interface. return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e)) } - err = ValidValue(vv, address, nil, tic, hip, inlineEnabled) + err = ValidValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) } // Verify not-inlined array/map > inline size, or can't be inlined - if inlineEnabled { + if v.inlineEnabled { if _, ok := e.value.(SlabIDStorable); ok { err = verifyNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit)) if err != nil { @@ -881,7 +857,7 @@ func validSingleElement( } // Verify digest - digest, err := db.Digest(hip, kv) + digest, err := v.digesterBuilder.Digest(v.hip, kv) if err != nil { // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. 
		return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create digester")

From 5af0bc4eed3cb29fbdef1e9358c2d3fe44bb85eb Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Sun, 24 Sep 2023 15:35:46 -0500
Subject: [PATCH 027/126] Test inlined map slabs are not in storage

---
 map_debug.go | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/map_debug.go b/map_debug.go
index b7dac378..8f94f0d3 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -252,10 +252,6 @@ func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCom
 		return NewFatalError(fmt.Errorf("map address %v, got %v", address, m.Address()))
 	}

-	if address != m.root.Header().slabID.address {
-		return NewFatalError(fmt.Errorf("map root slab address %v, got %v", address, m.root.Header().slabID.address))
-	}
-
 	// Verify map value ID
 	err := validMapValueID(m)
 	if err != nil {
@@ -370,6 +366,23 @@ func (v *mapVerifier) verifyMapSlab(

 	id := slab.Header().slabID

+	// Verify slab address (independent of map inlined status)
+	if v.address != id.address {
+		return 0, nil, nil, nil, NewFatalError(fmt.Errorf("map slab address %v, got %v", v.address, id.address))
+	}
+
+	// Verify that inlined slab is not in storage
+	if slab.Inlined() {
+		_, exist, err := v.storage.Retrieve(id)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by Storage interface.
+			return 0, nil, nil, nil, wrapErrorAsExternalErrorIfNeeded(err)
+		}
+		if exist {
+			return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id))
+		}
+	}
+
 	if level > 0 {
 		// Verify that non-root slab doesn't have extra data.
 		if slab.ExtraData() != nil {

From a71e7f842afd53c7ec6cf4739db8b3c6d69a8d15 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Sun, 24 Sep 2023 15:46:38 -0500
Subject: [PATCH 028/126] Refactor array and map validation

---
 array_debug.go | 36 +++++++++++++--------------
 array_test.go  |  6 ++---
 map_debug.go   | 66 +++++++++++++++++++++++++-------------------------
 map_test.go    |  6 ++---
 4 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/array_debug.go b/array_debug.go
index 81d6fa67..090dc226 100644
--- a/array_debug.go
+++ b/array_debug.go
@@ -167,7 +167,7 @@ func DumpArraySlabs(a *Array) ([]string, error) {

 type TypeInfoComparator func(TypeInfo, TypeInfo) bool

-func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error {
+func VerifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error {

 	// Verify array address (independent of array inlined status)
 	if address != a.Address() {
@@ -175,13 +175,13 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar
 	}

 	// Verify array value ID (independent of array inlined status)
-	err := validArrayValueID(a)
+	err := verifyArrayValueID(a)
 	if err != nil {
 		return err
 	}

 	// Verify array slab ID (dependent on array inlined status)
-	err = validArraySlabID(a)
+	err = verifyArraySlabID(a)
 	if err != nil {
 		return err
 	}
@@ -211,9 +211,9 @@ func ValidArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompar
 	}

 	// Verify array slabs
-	computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifyArraySlab(a.root, 0, nil, []SlabID{}, []SlabID{})
+	computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifySlab(a.root, 0, nil, []SlabID{}, []SlabID{})
 	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by validArraySlab().
+		// Don't need to wrap error as external error because err is already categorized by verifySlab().
 		return err
 	}

@@ -239,7 +239,7 @@ type arrayVerifier struct {
 	inlineEnabled bool
 }

-func (v *arrayVerifier) verifyArraySlab(
+func (v *arrayVerifier) verifySlab(
 	slab ArraySlab,
 	level int,
 	headerFromParentSlab *ArraySlabHeader,
 	dataSlabIDs []SlabID,
 	nextDataSlabIDs []SlabID,
@@ -298,17 +298,17 @@ func (v *arrayVerifier) verifyArraySlab(

 	switch slab := slab.(type) {
 	case *ArrayDataSlab:
-		return v.verifyArrayDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs)
+		return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs)

 	case *ArrayMetaDataSlab:
-		return v.verifyArrayMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs)
+		return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs)

 	default:
 		return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab))
 	}
 }

-func (v *arrayVerifier) verifyArrayDataSlab(
+func (v *arrayVerifier) verifyDataSlab(
 	dataSlab *ArrayDataSlab,
 	level int,
 	dataSlabIDs []SlabID,
@@ -389,9 +389,9 @@ func (v *arrayVerifier) verifyArrayDataSlab(
 		}

 		// Verify element
-		err = ValidValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled)
+		err = verifyValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled)
 		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by ValidValue().
+			// Don't need to wrap error as external error because err is already categorized by verifyValue().
 			return 0, nil, nil, fmt.Errorf(
 				"data slab %s element %q isn't valid: %w",
 				id, e, err,
@@ -402,7 +402,7 @@ func (v *arrayVerifier) verifyArrayDataSlab(
 	return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil
 }

-func (v *arrayVerifier) verifyArrayMetaDataSlab(
+func (v *arrayVerifier) verifyMetaDataSlab(
 	metaSlab *ArrayMetaDataSlab,
 	level int,
 	dataSlabIDs []SlabID,
@@ -454,9 +454,9 @@ func (v *arrayVerifier) verifyArrayMetaDataSlab(
 		// Verify child slabs
 		var count uint32
 		count, dataSlabIDs, nextDataSlabIDs, err =
-			v.verifyArraySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs)
+			v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs)
 		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by validArraySlab().
+			// Don't need to wrap error as external error because err is already categorized by verifySlab().
 			return 0, nil, nil, err
 		}
@@ -945,9 +945,9 @@ func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID {
 	return ids
 }

-// validArrayValueID verifies array ValueID is always the same as
+// verifyArrayValueID verifies array ValueID is always the same as
 // root slab's SlabID independent of array's inlined status.
-func validArrayValueID(a *Array) error {
+func verifyArrayValueID(a *Array) error {
 	rootSlabID := a.root.Header().slabID

 	vid := a.ValueID()
@@ -973,9 +973,9 @@ func validArrayValueID(a *Array) error {
 	return nil
 }

-// validArraySlabID verifies array SlabID is either empty for inlined array, or
+// verifyArraySlabID verifies array SlabID is either empty for inlined array, or
 // same as root slab's SlabID for not-inlined array.
-func validArraySlabID(a *Array) error { +func verifyArraySlabID(a *Array) error { sid := a.SlabID() if a.Inlined() { diff --git a/array_test.go b/array_test.go index 0345f521..f14b79cc 100644 --- a/array_test.go +++ b/array_test.go @@ -109,7 +109,7 @@ func _verifyArray( require.Equal(t, len(values), i) // Verify in-memory slabs - err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintArray(array) } @@ -4663,7 +4663,7 @@ func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) for i := uint64(0); i < arraySize; i++ { @@ -4680,7 +4680,7 @@ func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - err = ValidArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) } diff --git a/map_debug.go b/map_debug.go index 8f94f0d3..cec40668 100644 --- a/map_debug.go +++ b/map_debug.go @@ -245,7 +245,7 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { return dumps, nil } -func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { +func VerifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { // Verify map address if address != m.Address() { @@ -253,13 +253,13 @@ func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCom } // Verify map value ID - err := validMapValueID(m) + err := verifyMapValueID(m) if err != nil { return err } // Verify map slab ID - err = validMapSlabID(m) + err = verifyMapSlabID(m) if err != nil { return err } @@ -295,10 +295,10 @@ func ValidMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCom inlineEnabled: inlineEnabled, } - computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := v.verifyMapSlab( + computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := v.verifySlab( m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return err } @@ -349,7 +349,7 @@ type mapVerifier struct { inlineEnabled bool } -func (v *mapVerifier) verifyMapSlab( +func (v *mapVerifier) verifySlab( slab MapSlab, level int, headerFromParentSlab *MapSlabHeader, @@ -412,17 +412,17 @@ func (v *mapVerifier) verifyMapSlab( switch slab := slab.(type) { case *MapDataSlab: - return v.verifyMapDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) case *MapMetaDataSlab: - return v.verifyMapMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) default: return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) } } -func (v *mapVerifier) verifyMapDataSlab( +func (v *mapVerifier) verifyDataSlab( dataSlab *MapDataSlab, level int, dataSlabIDs []SlabID, @@ -442,9 +442,9 @@ func (v *mapVerifier) verifyMapDataSlab( } // Verify data slab's elements - elementCount, elementSize, err := v.verifyMapElements(id, dataSlab.elements, 0, nil) + elementCount, elementSize, err := v.verifyElements(id, dataSlab.elements, 0, nil) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElements(). + // Don't need to wrap error as external error because err is already categorized by verifyElements(). return 0, nil, nil, nil, err } @@ -501,7 +501,7 @@ func (v *mapVerifier) verifyMapDataSlab( return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil } -func (v *mapVerifier) verifyMapMetaDataSlab( +func (v *mapVerifier) verifyMetaDataSlab( metaSlab *MapMetaDataSlab, level int, dataSlabIDs []SlabID, @@ -546,9 +546,9 @@ func (v *mapVerifier) verifyMapMetaDataSlab( // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - v.verifyMapSlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return 0, nil, nil, nil, err } @@ -594,7 +594,7 @@ func (v *mapVerifier) verifyMapMetaDataSlab( return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil } -func (v *mapVerifier) verifyMapElements( +func (v *mapVerifier) verifyElements( id SlabID, elements elements, digestLevel uint, @@ -607,15 +607,15 @@ func (v *mapVerifier) verifyMapElements( switch elems := elements.(type) { case *hkeyElements: - return v.verifyMapHkeyElements(id, elems, digestLevel, hkeyPrefixes) + return v.verifyHkeyElements(id, elems, digestLevel, hkeyPrefixes) case *singleElements: - return v.verifyMapSingleElements(id, elems, digestLevel, hkeyPrefixes) + return v.verifySingleElements(id, elems, digestLevel, hkeyPrefixes) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } } -func (v *mapVerifier) verifyMapHkeyElements( +func (v *mapVerifier) verifyHkeyElements( id SlabID, elements *hkeyElements, digestLevel uint, @@ -686,9 +686,9 @@ func (v *mapVerifier) verifyMapHkeyElements( copy(hkeys, hkeyPrefixes) hkeys[len(hkeys)-1] = elements.hkeys[i] - count, size, err := v.verifyMapElements(id, ge, digestLevel+1, hkeys) + count, size, err := v.verifyElements(id, ge, digestLevel+1, hkeys) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElement(). + // Don't need to wrap error as external error because err is already categorized by verifyElements(). return 0, 0, err } @@ -746,7 +746,7 @@ func (v *mapVerifier) verifyMapHkeyElements( return elementCount, elementSize, nil } -func (v *mapVerifier) verifyMapSingleElements( +func (v *mapVerifier) verifySingleElements( id SlabID, elements *singleElements, digestLevel uint, @@ -771,7 +771,7 @@ func (v *mapVerifier) verifyMapSingleElements( // Verify element computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeyPrefixes) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validSingleElement(). + // Don't need to wrap error as external error because err is already categorized by verifySingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) } @@ -834,9 +834,9 @@ func (v *mapVerifier) verifySingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } - err = ValidValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled) + err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled) if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). + // Don't need to wrap error as external error because err is already categorized by verifyValue(). return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err) } @@ -847,9 +847,9 @@ func (v *mapVerifier) verifySingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e)) } - err = ValidValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled) + err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled) if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). + // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) } @@ -889,12 +889,12 @@ func (v *mapVerifier) verifySingleElement( return computedSize, digest.Levels(), nil } -func ValidValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { +func verifyValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { switch v := value.(type) { case *Array: - return ValidArray(v, address, typeInfo, tic, hip, inlineEnabled) + return VerifyArray(v, address, typeInfo, tic, hip, inlineEnabled) case *OrderedMap: - return ValidMap(v, address, typeInfo, tic, hip, inlineEnabled) + return VerifyMap(v, address, typeInfo, tic, hip, inlineEnabled) } return nil } @@ -1508,9 +1508,9 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error { return nil } -// validMapValueID verifies map ValueID is always the same as +// verifyMapValueID verifies map ValueID is always the same as // root slab's SlabID indepedent of map's inlined status. -func validMapValueID(m *OrderedMap) error { +func verifyMapValueID(m *OrderedMap) error { rootSlabID := m.root.Header().slabID vid := m.ValueID() @@ -1536,9 +1536,9 @@ func validMapValueID(m *OrderedMap) error { return nil } -// validMapSlabID verifies map SlabID is either empty for inlined map, or +// verifyMapSlabID verifies map SlabID is either empty for inlined map, or // same as root slab's SlabID for not-inlined map. -func validMapSlabID(m *OrderedMap) error { +func verifyMapSlabID(m *OrderedMap) error { sid := m.SlabID() if m.Inlined() { diff --git a/map_test.go b/map_test.go index ed102c61..05ca3b03 100644 --- a/map_test.go +++ b/map_test.go @@ -182,7 +182,7 @@ func _verifyMap( } // Verify in-memory slabs - err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintMap(m) } @@ -10733,7 +10733,7 @@ func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) // Reset mutable values after changing its storable size @@ -10751,7 +10751,7 @@ func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - err = ValidMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) require.NoError(t, err) } From 2d8a4e8be37e2fb9450ddac79f936f2886586792 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 24 Sep 2023 18:22:32 -0500 Subject: [PATCH 029/126] Refactor array and map serialization validation --- array_debug.go | 220 ++++++++++++------------------ array_test.go | 2 +- map_debug.go | 357 ++++++++++++------------------------------------- map_test.go | 2 +- 4 files changed, 174 insertions(+), 407 deletions(-) diff --git a/array_debug.go b/array_debug.go index 090dc226..e152ae62 100644 --- 
a/array_debug.go
+++ b/array_debug.go
@@ -239,6 +239,7 @@ type arrayVerifier struct {
 	inlineEnabled bool
 }
 
+// verifySlab verifies an in-memory ArraySlab, which can be inlined or not inlined.
 func (v *arrayVerifier) verifySlab(
 	slab ArraySlab,
 	level int,
@@ -485,11 +486,11 @@ func (v *arrayVerifier) verifyMetaDataSlab(
 	return metaSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil
 }
 
-// ValidArraySerialization traverses array tree and verifies serialization
+// VerifyArraySerialization traverses array tree and verifies serialization
 // by encoding, decoding, and re-encoding slabs.
 // It compares in-memory objects of original slab with decoded slab.
 // It also compares encoded data of original slab with encoded data of decoded slab.
-func ValidArraySerialization(
+func VerifyArraySerialization(
 	a *Array,
 	cborDecMode cbor.DecMode,
 	cborEncMode cbor.EncMode,
@@ -497,54 +498,58 @@ func ValidArraySerialization(
 	decodeTypeInfo TypeInfoDecoder,
 	compare StorableComparator,
 ) error {
-	return validArraySlabSerialization(
-		a.Storage,
-		a.root.SlabID(),
-		cborDecMode,
-		cborEncMode,
-		decodeStorable,
-		decodeTypeInfo,
-		compare,
-	)
+	v := &serializationVerifier{
+		storage:        a.Storage,
+		cborDecMode:    cborDecMode,
+		cborEncMode:    cborEncMode,
+		decodeStorable: decodeStorable,
+		decodeTypeInfo: decodeTypeInfo,
+		compare:        compare,
+	}
+	return v.verifyArraySlab(a.root)
 }
 
-func validArraySlabSerialization(
-	storage SlabStorage,
-	id SlabID,
-	cborDecMode cbor.DecMode,
-	cborEncMode cbor.EncMode,
-	decodeStorable StorableDecoder,
-	decodeTypeInfo TypeInfoDecoder,
-	compare StorableComparator,
-) error {
+type serializationVerifier struct {
+	storage        SlabStorage
+	cborDecMode    cbor.DecMode
+	cborEncMode    cbor.EncMode
+	decodeStorable StorableDecoder
+	decodeTypeInfo TypeInfoDecoder
+	compare        StorableComparator
+}
 
-	slab, err := getArraySlab(storage, id)
-	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by getArraySlab().
-		return err
-	}
+// verifyArraySlab verifies serialization of a not-inlined ArraySlab.
+func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error {
+
+	id := slab.SlabID()
 
 	// Encode slab
-	data, err := Encode(slab, cborEncMode)
+	data, err := Encode(slab, v.cborEncMode)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Encode().
 		return err
 	}
 
 	// Decode encoded slab
-	decodedSlab, err := DecodeSlab(id, data, cborDecMode, decodeStorable, decodeTypeInfo)
+	decodedSlab, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by DecodeSlab().
 		return err
 	}
 
 	// Re-encode decoded slab
-	dataFromDecodedSlab, err := Encode(decodedSlab, cborEncMode)
+	dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Encode().
 		return err
 	}
 
+	// Verify encoding is deterministic (encoded data of original slab is same as encoded data of decoded slab)
+	if !bytes.Equal(data, dataFromDecodedSlab) {
+		return NewFatalError(fmt.Errorf("encoded data of original slab %s is different from encoded data of decoded slab, got %v, want %v",
+			id, dataFromDecodedSlab, data))
+	}
+
 	// Extra check: encoded data size == header.size
 	// This check is skipped for slabs with inlined composite because
 	// encoded size and slab size differ for inlined composites.
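
The hunk above is the core of the new serializationVerifier: encode the in-memory slab, decode the encoded bytes, re-encode the decoded slab, and require both encodings to match byte for byte. The following is an illustrative sketch of that pattern, not a hunk from this patch: verifyRoundTrip is a hypothetical name, while Encode, DecodeSlab, and the verifier fields are the ones introduced above.

	// verifyRoundTrip sketches the encode/decode/re-encode determinism check
	// performed by verifyArraySlab (and by verifyMapSlab later in this patch).
	func (v *serializationVerifier) verifyRoundTrip(slab ArraySlab) error {
		id := slab.SlabID()

		// Encode the in-memory slab.
		data, err := Encode(slab, v.cborEncMode)
		if err != nil {
			return err
		}

		// Decode the encoded bytes back into a slab.
		decoded, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo)
		if err != nil {
			return err
		}

		// Re-encode the decoded slab; a deterministic encoder must
		// reproduce the original bytes exactly.
		reencoded, err := Encode(decoded, v.cborEncMode)
		if err != nil {
			return err
		}

		if !bytes.Equal(data, reencoded) {
			return NewFatalError(fmt.Errorf("slab %s: re-encoded data differs from original encoding", id))
		}
		return nil
	}
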
@@ -572,89 +577,58 @@ func validArraySlabSerialization( } } - // Compare encoded data of original slab with encoded data of decoded slab - if !bytes.Equal(data, dataFromDecodedSlab) { - return NewFatalError(fmt.Errorf("slab %d encoded data is different from decoded slab's encoded data, got %v, want %v", - id, dataFromDecodedSlab, data)) - } - - if slab.IsData() { - dataSlab, ok := slab.(*ArrayDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) - } - + switch slab := slab.(type) { + case *ArrayDataSlab: decodedDataSlab, ok := decodedSlab.(*ArrayDataSlab) if !ok { return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayDataSlab", id)) } // Compare slabs - err = arrayDataSlabEqual( - dataSlab, - decodedDataSlab, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.arrayDataSlabEqual(slab, decodedDataSlab) if err != nil { // Don't need to wrap error as external error because err is already categorized by arrayDataSlabEqual(). return fmt.Errorf("data slab %d round-trip serialization failed: %w", id, err) } return nil - } - metaSlab, ok := slab.(*ArrayMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not ArrayMetaDataSlab", id)) - } + case *ArrayMetaDataSlab: + decodedMetaSlab, ok := decodedSlab.(*ArrayMetaDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayMetaDataSlab", id)) + } - decodedMetaSlab, ok := decodedSlab.(*ArrayMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayMetaDataSlab", id)) - } + // Compare slabs + err = v.arrayMetaDataSlabEqual(slab, decodedMetaSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by arrayMetaDataSlabEqual(). + return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) + } - // Compare slabs - err = arrayMetaDataSlabEqual(metaSlab, decodedMetaSlab) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by arrayMetaDataSlabEqual(). - return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) - } + for _, h := range slab.childrenHeaders { + childSlab, err := getArraySlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return err + } - for _, h := range metaSlab.childrenHeaders { - // Verify child slabs - err = validArraySlabSerialization( - storage, - h.slabID, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validArraySlabSerialization(). - return err + // Verify child slabs + err = v.verifyArraySlab(childSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyArraySlab(). 
+ return err + } } - } - return nil + return nil + + default: + return NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) + } } -func arrayDataSlabEqual( - expected *ArrayDataSlab, - actual *ArrayDataSlab, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) arrayDataSlabEqual(expected, actual *ArrayDataSlab) error { // Compare extra data err := arrayExtraDataEqual(expected.extraData, actual.extraData) @@ -663,7 +637,7 @@ func arrayDataSlabEqual( return err } - // Compare inlined + // Compare inlined status if expected.inlined != actual.inlined { return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) } @@ -689,44 +663,38 @@ func arrayDataSlabEqual( ae := actual.elements[i] switch ee := ee.(type) { - case SlabIDStorable: - if !compare(ee, ae) { + + case SlabIDStorable: // Compare not-inlined element + if !v.compare(ee, ae) { return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) } - ev, err := ee.StoredValue(storage) + ev, err := ee.StoredValue(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err } - return ValidValueSerialization( - ev, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyValue(ev) - case *ArrayDataSlab: + case *ArrayDataSlab: // Compare inlined array ae, ok := ae.(*ArrayDataSlab) if !ok { - return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) + return NewFatalError(fmt.Errorf("expect element as inlined *ArrayDataSlab, actual %T", ae)) } - return arrayDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + return v.arrayDataSlabEqual(ee, ae) - case *MapDataSlab: + case *MapDataSlab: // Compare inlined map ae, ok := ae.(*MapDataSlab) if !ok { - return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) + return NewFatalError(fmt.Errorf("expect element as inlined *MapDataSlab, actual %T", ae)) } - return mapDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + return v.mapDataSlabEqual(ee, ae) default: - if !compare(ee, ae) { + if !v.compare(ee, ae) { return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) } } @@ -735,7 +703,7 @@ func arrayDataSlabEqual( return nil } -func arrayMetaDataSlabEqual(expected, actual *ArrayMetaDataSlab) error { +func (v *serializationVerifier) arrayMetaDataSlabEqual(expected, actual *ArrayMetaDataSlab) error { // Compare extra data err := arrayExtraDataEqual(expected.extraData, actual.extraData) @@ -779,34 +747,14 @@ func arrayExtraDataEqual(expected, actual *ArrayExtraData) error { return nil } -func ValidValueSerialization( - value Value, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) verifyValue(value Value) error { - switch v := value.(type) { + switch value := value.(type) { case *Array: - return ValidArraySerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyArraySlab(value.root) + case *OrderedMap: - return ValidMapSerialization( - v, - 
cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyMapSlab(value.root) } return nil } diff --git a/array_test.go b/array_test.go index f14b79cc..4c668771 100644 --- a/array_test.go +++ b/array_test.go @@ -116,7 +116,7 @@ func _verifyArray( require.NoError(t, err) // Verify slab serializations - err = ValidArraySerialization( + err = VerifyArraySerialization( array, storage.cborDecMode, storage.cborEncMode, diff --git a/map_debug.go b/map_debug.go index cec40668..82850e26 100644 --- a/map_debug.go +++ b/map_debug.go @@ -899,11 +899,11 @@ func verifyValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoCo return nil } -// ValidMapSerialization traverses ordered map tree and verifies serialization +// VerifyMapSerialization traverses ordered map tree and verifies serialization // by encoding, decoding, and re-encoding slabs. // It compares in-memory objects of original slab with decoded slab. // It also compares encoded data of original slab with encoded data of decoded slab. -func ValidMapSerialization( +func VerifyMapSerialization( m *OrderedMap, cborDecMode cbor.DecMode, cborEncMode cbor.EncMode, @@ -911,54 +911,48 @@ func ValidMapSerialization( decodeTypeInfo TypeInfoDecoder, compare StorableComparator, ) error { - return validMapSlabSerialization( - m.Storage, - m.root.SlabID(), - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + v := &serializationVerifier{ + storage: m.Storage, + cborDecMode: cborDecMode, + cborEncMode: cborEncMode, + decodeStorable: decodeStorable, + decodeTypeInfo: decodeTypeInfo, + compare: compare, + } + return v.verifyMapSlab(m.root) } -func validMapSlabSerialization( - storage SlabStorage, - id SlabID, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return err - } + id := slab.SlabID() // Encode slab - data, err := Encode(slab, cborEncMode) + data, err := Encode(slab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } // Decode encoded slab - decodedSlab, err := DecodeSlab(id, data, cborDecMode, decodeStorable, decodeTypeInfo) + decodedSlab, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by DecodeSlab(). return err } // Re-encode decoded slab - dataFromDecodedSlab, err := Encode(decodedSlab, cborEncMode) + dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } + // Verify encoding is deterministic (encoded data of original slab is same as encoded data of decoded slab) + if !bytes.Equal(data, dataFromDecodedSlab) { + return NewFatalError(fmt.Errorf("encoded data of original slab %s is different from encoded data of decoded slab, got %v, want %v", + id, dataFromDecodedSlab, data)) + } + // Extra check: encoded data size == header.size // This check is skipped for slabs with inlined composite because // encoded size and slab size differ for inlined composites. 
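
VerifyArraySerialization and VerifyMapSerialization now differ only at the entry point: each builds the same serializationVerifier and recurses from the root slab, instead of threading six parameters through every helper. A usage sketch follows, assuming a map m and caller-supplied CBOR modes, decoders, and comparator as in the signatures above; it mirrors the constructor in the hunk rather than adding new API.

	// Build the shared verifier once; its fields replace the long
	// parameter lists of the old valid*SlabSerialization helpers.
	v := &serializationVerifier{
		storage:        m.Storage,
		cborDecMode:    cborDecMode,
		cborEncMode:    cborEncMode,
		decodeStorable: decodeStorable,
		decodeTypeInfo: decodeTypeInfo,
		compare:        compare,
	}

	// Recurse from the root: metadata slabs verify their children,
	// data slabs compare decoded elements (including inlined slabs).
	if err := v.verifyMapSlab(m.root); err != nil {
		return err
	}
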
@@ -987,90 +981,58 @@ func validMapSlabSerialization( } } - // Compare encoded data of original slab with encoded data of decoded slab - if !bytes.Equal(data, dataFromDecodedSlab) { - return NewFatalError( - fmt.Errorf("slab %d encoded data is different from decoded slab's encoded data, got %v, want %v", - id, dataFromDecodedSlab, data)) - } - - if slab.IsData() { - dataSlab, ok := slab.(*MapDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not MapDataSlab", id)) - } - + switch slab := slab.(type) { + case *MapDataSlab: decodedDataSlab, ok := decodedSlab.(*MapDataSlab) if !ok { return NewFatalError(fmt.Errorf("decoded slab %d is not MapDataSlab", id)) } // Compare slabs - err = mapDataSlabEqual( - dataSlab, - decodedDataSlab, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.mapDataSlabEqual(slab, decodedDataSlab) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapDataSlabEqual(). return fmt.Errorf("data slab %d round-trip serialization failed: %w", id, err) } return nil - } - metaSlab, ok := slab.(*MapMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not MapMetaDataSlab", id)) - } + case *MapMetaDataSlab: + decodedMetaSlab, ok := decodedSlab.(*MapMetaDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("decoded slab %d is not MapMetaDataSlab", id)) + } - decodedMetaSlab, ok := decodedSlab.(*MapMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("decoded slab %d is not MapMetaDataSlab", id)) - } + // Compare slabs + err = v.mapMetaDataSlabEqual(slab, decodedMetaSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by mapMetaDataSlabEqual(). + return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) + } - // Compare slabs - err = mapMetaDataSlabEqual(metaSlab, decodedMetaSlab) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by mapMetaDataSlabEqual(). - return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) - } + for _, h := range slab.childrenHeaders { + slab, err := getMapSlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return err + } - for _, h := range metaSlab.childrenHeaders { - // Verify child slabs - err = validMapSlabSerialization( - storage, - h.slabID, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlabSerialization(). - return err + // Verify child slabs + err = v.verifyMapSlab(slab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyMapSlab(). 
+ return err + } } - } - return nil + return nil + + default: + return NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) + } } -func mapDataSlabEqual( - expected *MapDataSlab, - actual *MapDataSlab, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) error { // Compare extra data err := mapExtraDataEqual(expected.extraData, actual.extraData) @@ -1105,16 +1067,7 @@ func mapDataSlabEqual( } // Compare elements - err = mapElementsEqual( - expected.elements, - actual.elements, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.mapElementsEqual(expected.elements, actual.elements) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapElementsEqual(). return err @@ -1123,16 +1076,7 @@ func mapDataSlabEqual( return nil } -func mapElementsEqual( - expected elements, - actual elements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapElementsEqual(expected, actual elements) error { switch expectedElems := expected.(type) { case *hkeyElements: @@ -1140,48 +1084,21 @@ func mapElementsEqual( if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapHkeyElementsEqual( - expectedElems, - actualElems, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapHkeyElementsEqual(expectedElems, actualElems) case *singleElements: actualElems, ok := actual.(*singleElements) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapSingleElementsEqual( - expectedElems, - actualElems, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapSingleElementsEqual(expectedElems, actualElems) } return nil } -func mapHkeyElementsEqual( - expected *hkeyElements, - actual *hkeyElements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyElements) error { if expected.level != actual.level { return NewFatalError(fmt.Errorf("hkeyElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1209,16 +1126,7 @@ func mapHkeyElementsEqual( expectedEle := expected.elems[i] actualEle := actual.elems[i] - err := mapElementEqual( - expectedEle, - actualEle, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err := v.mapElementEqual(expectedEle, actualEle) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapElementEqual(). 
return err @@ -1228,16 +1136,7 @@ func mapHkeyElementsEqual( return nil } -func mapSingleElementsEqual( - expected *singleElements, - actual *singleElements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapSingleElementsEqual(expected, actual *singleElements) error { if expected.level != actual.level { return NewFatalError(fmt.Errorf("singleElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1255,16 +1154,7 @@ func mapSingleElementsEqual( expectedElem := expected.elems[i] actualElem := actual.elems[i] - err := mapSingleElementEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err := v.mapSingleElementEqual(expectedElem, actualElem) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapSingleElementEqual(). return err @@ -1274,16 +1164,7 @@ func mapSingleElementsEqual( return nil } -func mapElementEqual( - expected element, - actual element, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapElementEqual(expected, actual element) error { switch expectedElem := expected.(type) { case *singleElement: @@ -1291,64 +1172,27 @@ func mapElementEqual( if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapSingleElementEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapSingleElementEqual(expectedElem, actualElem) case *inlineCollisionGroup: actualElem, ok := actual.(*inlineCollisionGroup) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapElementsEqual( - expectedElem.elements, - actualElem.elements, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapElementsEqual(expectedElem.elements, actualElem.elements) case *externalCollisionGroup: actualElem, ok := actual.(*externalCollisionGroup) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapExternalCollisionElementsEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - + return v.mapExternalCollisionElementsEqual(expectedElem, actualElem) } return nil } -func mapExternalCollisionElementsEqual( - expected *externalCollisionGroup, - actual *externalCollisionGroup, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapExternalCollisionElementsEqual(expected, actual *externalCollisionGroup) error { if expected.size != actual.size { return NewFatalError(fmt.Errorf("externalCollisionGroup size %d is wrong, want %d", actual.size, expected.size)) @@ -1358,62 +1202,44 @@ func mapExternalCollisionElementsEqual( return NewFatalError(fmt.Errorf("externalCollisionGroup id %d is wrong, want %d", actual.slabID, expected.slabID)) } + slab, err := getMapSlab(v.storage, expected.slabID) + if err != nil { + // Don't need 
to wrap error as external error because err is already categorized by getMapSlab().
+		return err
+	}
+
 	// Compare external collision slab
-	err := validMapSlabSerialization(
-		storage,
-		expected.slabID,
-		cborDecMode,
-		cborEncMode,
-		decodeStorable,
-		decodeTypeInfo,
-		compare,
-	)
+	err = v.verifyMapSlab(slab)
 	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by validMapSlabSerialization().
+		// Don't need to wrap error as external error because err is already categorized by verifyMapSlab().
 		return err
 	}
 
 	return nil
 }
 
-func mapSingleElementEqual(
-	expected *singleElement,
-	actual *singleElement,
-	storage SlabStorage,
-	cborDecMode cbor.DecMode,
-	cborEncMode cbor.EncMode,
-	decodeStorable StorableDecoder,
-	decodeTypeInfo TypeInfoDecoder,
-	compare StorableComparator,
-) error {
+func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleElement) error {
 
 	if expected.size != actual.size {
 		return NewFatalError(fmt.Errorf("singleElement size %d is wrong, want %d", actual.size, expected.size))
 	}
 
-	if !compare(expected.key, actual.key) {
+	if !v.compare(expected.key, actual.key) {
 		return NewFatalError(fmt.Errorf("singleElement key %v is wrong, want %v", actual.key, expected.key))
 	}
 
 	// Compare key stored in a separate slab
 	if idStorable, ok := expected.key.(SlabIDStorable); ok {
 
-		v, err := idStorable.StoredValue(storage)
+		value, err := idStorable.StoredValue(v.storage)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue().
 			return err
 		}
 
-		err = ValidValueSerialization(
-			v,
-			cborDecMode,
-			cborEncMode,
-			decodeStorable,
-			decodeTypeInfo,
-			compare,
-		)
+		err = v.verifyValue(value)
 		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by ValidValueSerialization().
+			// Don't need to wrap error as external error because err is already categorized by verifyValue().
 			return err
 		}
 	}
@@ -1421,26 +1247,19 @@ func mapSingleElementEqual(
 	// Compare nested element
 	switch ee := expected.value.(type) {
 	case SlabIDStorable:
-		if !compare(expected.value, actual.value) {
+		if !v.compare(expected.value, actual.value) {
 			return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
 		}
 
-		v, err := ee.StoredValue(storage)
+		value, err := ee.StoredValue(v.storage)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue().
 			return err
 		}
 
-		err = ValidValueSerialization(
-			v,
-			cborDecMode,
-			cborEncMode,
-			decodeStorable,
-			decodeTypeInfo,
-			compare,
-		)
+		err = v.verifyValue(value)
 		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by ValidValueSerialization().
+			// Don't need to wrap error as external error because err is already categorized by verifyValue().
return err } @@ -1450,7 +1269,7 @@ func mapSingleElementEqual( return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) } - return arrayDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + return v.arrayDataSlabEqual(ee, ae) case *MapDataSlab: ae, ok := actual.value.(*MapDataSlab) @@ -1458,10 +1277,10 @@ func mapSingleElementEqual( return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) } - return mapDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + return v.mapDataSlabEqual(ee, ae) default: - if !compare(expected.value, actual.value) { + if !v.compare(expected.value, actual.value) { return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) } } @@ -1469,7 +1288,7 @@ func mapSingleElementEqual( return nil } -func mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error { +func (v *serializationVerifier) mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error { // Compare extra data err := mapExtraDataEqual(expected.extraData, actual.extraData) diff --git a/map_test.go b/map_test.go index 05ca3b03..66073869 100644 --- a/map_test.go +++ b/map_test.go @@ -189,7 +189,7 @@ func _verifyMap( require.NoError(t, err) // Verify slab serializations - err = ValidMapSerialization( + err = VerifyMapSerialization( m, storage.cborDecMode, storage.cborEncMode, From ed36d6571e863ff0427d92af95a0a1a78912a9f6 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 24 Sep 2023 20:56:51 -0500 Subject: [PATCH 030/126] Refactor array and map validation --- array_debug.go | 38 +++++++++----- map_debug.go | 131 +++++++++++++++++++++++++++---------------------- 2 files changed, 97 insertions(+), 72 deletions(-) diff --git a/array_debug.go b/array_debug.go index e152ae62..91392c70 100644 --- a/array_debug.go +++ b/array_debug.go @@ -63,13 +63,14 @@ func GetArrayStats(a *Array) (ArrayStats, error) { return ArrayStats{}, err } - if slab.IsData() { + switch slab.(type) { + case *ArrayDataSlab: dataSlabCount++ ids := getSlabIDFromStorable(slab, nil) storableSlabCount += uint64(len(ids)) - } else { + case *ArrayMetaDataSlab: metaDataSlabCount++ for _, storable := range slab.ChildStorables() { @@ -127,15 +128,14 @@ func DumpArraySlabs(a *Array) ([]string, error) { return nil, err } - if slab.IsData() { - dataSlab := slab.(*ArrayDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) + switch slab := slab.(type) { + case *ArrayDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) - overflowIDs = getSlabIDFromStorable(dataSlab, overflowIDs) + overflowIDs = getSlabIDFromStorable(slab, overflowIDs) - } else { - meta := slab.(*ArrayMetaDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, meta)) + case *ArrayMetaDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) for _, storable := range slab.ChildStorables() { id, ok := storable.(SlabIDStorable) @@ -168,7 +168,6 @@ func DumpArraySlabs(a *Array) ([]string, error) { type TypeInfoComparator func(TypeInfo, TypeInfo) bool func VerifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { - // Verify array address (independent of array inlined status) if address != a.Address() { return NewFatalError(fmt.Errorf("array address %v, got %v", address, a.Address())) @@ -379,14 
+378,27 @@ func (v *arrayVerifier) verifyDataSlab( id, e, e.ByteSize(), maxInlineArrayElementSize)) } - // Verify not-inlined array/map > inline size, or can't be inlined - if v.inlineEnabled { - if _, ok := e.(SlabIDStorable); ok { + switch e := e.(type) { + case SlabIDStorable: + // Verify not-inlined element > inline size, or can't be inlined + if v.inlineEnabled { err = verifyNotInlinedValueStatusAndSize(value, uint32(maxInlineArrayElementSize)) if err != nil { return 0, nil, nil, err } } + + case *ArrayDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined array inlined status is false")) + } + + case *MapDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined map inlined status is false")) + } } // Verify element diff --git a/map_debug.go b/map_debug.go index 82850e26..59094005 100644 --- a/map_debug.go +++ b/map_debug.go @@ -66,41 +66,41 @@ func GetMapStats(m *OrderedMap) (MapStats, error) { return MapStats{}, err } - if slab.IsData() { + switch slab := slab.(type) { + case *MapDataSlab: dataSlabCount++ - leaf := slab.(*MapDataSlab) - elementGroups := []elements{leaf.elements} + elementGroups := []elements{slab.elements} for len(elementGroups) > 0 { var nestedElementGroups []elements - for i := 0; i < len(elementGroups); i++ { - - elems := elementGroups[i] - - for j := 0; j < int(elems.Count()); j++ { - elem, err := elems.Element(j) + for _, group := range elementGroups { + for i := 0; i < int(group.Count()); i++ { + elem, err := group.Element(i) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Element(). return MapStats{}, err } - if group, ok := elem.(elementGroup); ok { - if !group.Inline() { + switch e := elem.(type) { + case elementGroup: + nestedGroup := e + + if !nestedGroup.Inline() { collisionDataSlabCount++ } - nested, err := group.Elements(m.Storage) + nested, err := nestedGroup.Elements(m.Storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by elementGroup.Elements(). return MapStats{}, err } + nestedElementGroups = append(nestedElementGroups, nested) - } else { - e := elem.(*singleElement) + case *singleElement: if _, ok := e.key.(SlabIDStorable); ok { storableDataSlabCount++ } @@ -113,9 +113,11 @@ func GetMapStats(m *OrderedMap) (MapStats, error) { } } } + elementGroups = nestedElementGroups } - } else { + + case *MapMetaDataSlab: metaDataSlabCount++ for _, storable := range slab.ChildStorables() { @@ -173,12 +175,12 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { return nil, err } - if slab.IsData() { - dataSlab := slab.(*MapDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) + switch slab := slab.(type) { + case *MapDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) - for i := 0; i < int(dataSlab.elements.Count()); i++ { - elem, err := dataSlab.elements.Element(i) + for i := 0; i < int(slab.elements.Count()); i++ { + elem, err := slab.elements.Element(i) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Element(). 
return nil, err
 			}
 		}
 
-		overflowIDs = getSlabIDFromStorable(dataSlab, overflowIDs)
+		overflowIDs = getSlabIDFromStorable(slab, overflowIDs)
 
-	} else {
-		meta := slab.(*MapMetaDataSlab)
-		dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, meta))
+	case *MapMetaDataSlab:
+		dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab))
 
 		for _, storable := range slab.ChildStorables() {
 			id, ok := storable.(SlabIDStorable)
@@ -247,18 +248,18 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) {
 
 func VerifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error {
 
-	// Verify map address
+	// Verify map address (independent of map inlined status)
 	if address != m.Address() {
 		return NewFatalError(fmt.Errorf("map address %v, got %v", address, m.Address()))
 	}
 
-	// Verify map value ID
+	// Verify map value ID (independent of map inlined status)
 	err := verifyMapValueID(m)
 	if err != nil {
 		return err
 	}
 
-	// Verify map slab ID
+	// Verify map slab ID (dependent on map inlined status)
 	err = verifyMapSlabID(m)
 	if err != nil {
 		return err
@@ -663,6 +664,10 @@ func (v *mapVerifier) verifyHkeyElements(
 	for i := 0; i < len(elements.elems); i++ {
 		e := elements.elems[i]
 
+		hkeys := make([]Digest, len(hkeyPrefixes)+1)
+		copy(hkeys, hkeyPrefixes)
+		hkeys[len(hkeys)-1] = elements.hkeys[i]
+
 		elementSize += digestSize
 
 		// Verify element size is <= inline size
@@ -674,19 +679,15 @@ func (v *mapVerifier) verifyHkeyElements(
 			}
 		}
 
-		if group, ok := e.(elementGroup); ok {
-
-			ge, err := group.Elements(v.storage)
+		switch e := e.(type) {
+		case elementGroup:
+			group, err := e.Elements(v.storage)
 			if err != nil {
 				// Don't need to wrap error as external error because err is already categorized by elementGroup.Elements().
 				return 0, 0, err
 			}
 
-			hkeys := make([]Digest, len(hkeyPrefixes)+1)
-			copy(hkeys, hkeyPrefixes)
-			hkeys[len(hkeys)-1] = elements.hkeys[i]
-
-			count, size, err := v.verifyElements(id, ge, digestLevel+1, hkeys)
+			count, size, err := v.verifyElements(id, group, digestLevel+1, hkeys)
 			if err != nil {
 				// Don't need to wrap error as external error because err is already categorized by verifyElements().
 				return 0, 0, err
@@ -707,19 +708,9 @@ func (v *mapVerifier) verifyHkeyElements(
 
 			elementCount += count
 
-		} else {
-
-			se, ok := e.(*singleElement)
-			if !ok {
-				return 0, 0, NewFatalError(fmt.Errorf("data slab %d element type %T is wrong, want *singleElement", id, e))
-			}
-
-			hkeys := make([]Digest, len(hkeyPrefixes)+1)
-			copy(hkeys, hkeyPrefixes)
-			hkeys[len(hkeys)-1] = elements.hkeys[i]
-
+		case *singleElement:
 			// Verify element
-			computedSize, maxDigestLevel, err := v.verifySingleElement(se, hkeys)
+			computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeys)
 			if err != nil {
 				// Don't need to wrap error as external error because err is already categorized by verifySingleElement().
return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -735,6 +726,9 @@ func (v *mapVerifier) verifyHkeyElements( elementSize += computedSize elementCount++ + + default: + return 0, 0, NewFatalError(fmt.Errorf("data slab %d element type %T is wrong, want either elementGroup or *singleElement", id, e)) } } @@ -834,6 +828,12 @@ func (v *mapVerifier) verifySingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } + switch e.key.(type) { + case *ArrayDataSlab, *MapDataSlab: + // Verify key can't be inlined array or map + return 0, 0, NewFatalError(fmt.Errorf("element %s key shouldn't be inlined array or map", e)) + } + err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyValue(). @@ -847,20 +847,33 @@ func (v *mapVerifier) verifySingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e)) } - err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by verifyValue(). - return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) - } - - // Verify not-inlined array/map > inline size, or can't be inlined - if v.inlineEnabled { - if _, ok := e.value.(SlabIDStorable); ok { + switch e := e.value.(type) { + case SlabIDStorable: + // Verify not-inlined value > inline size, or can't be inlined + if v.inlineEnabled { err = verifyNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit)) if err != nil { return 0, 0, err } } + + case *ArrayDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, 0, NewFatalError(fmt.Errorf("inlined array inlined status is false")) + } + + case *MapDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, 0, NewFatalError(fmt.Errorf("inlined map inlined status is false")) + } + } + + err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
+ return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) } // Verify size @@ -1246,7 +1259,7 @@ func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleEl // Compare nested element switch ee := expected.value.(type) { - case SlabIDStorable: + case SlabIDStorable: // Compare not-inlined element if !v.compare(expected.value, actual.value) { return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) } @@ -1263,7 +1276,7 @@ func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleEl return err } - case *ArrayDataSlab: + case *ArrayDataSlab: // Compare inlined array element ae, ok := actual.value.(*ArrayDataSlab) if !ok { return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) @@ -1271,7 +1284,7 @@ func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleEl return v.arrayDataSlabEqual(ee, ae) - case *MapDataSlab: + case *MapDataSlab: // Compare inlined map element ae, ok := actual.value.(*MapDataSlab) if !ok { return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) From 04f93a24d671dcb7017564d7e859259a9258e686 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Sep 2023 19:38:06 -0500 Subject: [PATCH 031/126] Improve tests to compare child array elements --- array_test.go | 709 +++++++++++++++++++++++++++++++++++--------------- map_test.go | 385 +++++++++++++-------------- utils_test.go | 100 +++---- 3 files changed, 726 insertions(+), 468 deletions(-) diff --git a/array_test.go b/array_test.go index 4c668771..52e005db 100644 --- a/array_test.go +++ b/array_test.go @@ -80,33 +80,33 @@ func _verifyArray( typeInfo TypeInfo, address Address, array *Array, - values []Value, + expectedValues arrayValue, hasNestedArrayMapElement bool, inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, array.Type())) require.Equal(t, address, array.Address()) - require.Equal(t, uint64(len(values)), array.Count()) + require.Equal(t, uint64(len(expectedValues)), array.Count()) var err error // Verify array elements - for i, v := range values { - e, err := array.Get(uint64(i)) + for i, expected := range expectedValues { + actual, err := array.Get(uint64(i)) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, e) + valueEqual(t, expected, actual) } // Verify array elements by iterator i := 0 err = array.Iterate(func(v Value) (bool, error) { - valueEqual(t, typeInfoComparator, values[i], v) + valueEqual(t, expectedValues[i], v) i++ return true, nil }) require.NoError(t, err) - require.Equal(t, len(values), i) + require.Equal(t, len(expectedValues), i) // Verify in-memory slabs err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) @@ -151,7 +151,7 @@ func _verifyArray( require.NoError(t, err) require.Equal(t, stats.SlabCount(), uint64(storage.Count())) - if len(values) == 0 { + if len(expectedValues) == 0 { // Verify slab count for empty array require.Equal(t, uint64(1), stats.DataSlabCount) require.Equal(t, uint64(0), stats.MetaDataSlabCount) @@ -230,7 +230,7 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } verifyArray(t, storage, typeInfo, address, array, values, false) @@ -276,7 +276,7 @@ func TestArraySetAndGet(t *testing.T) { existingValue, 
err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } verifyArray(t, storage, typeInfo, address, array, values, false) @@ -323,7 +323,7 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } verifyArray(t, storage, typeInfo, address, array, values, false) @@ -516,7 +516,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[i], existingValue) + valueEqual(t, values[i], existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -563,7 +563,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[i], existingValue) + valueEqual(t, values[i], existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -613,7 +613,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + valueEqual(t, v, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -934,7 +934,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { for endIndex := startIndex; endIndex <= count; endIndex++ { i = uint64(0) err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { - valueEqual(t, typeInfoComparator, v, values[int(startIndex+i)]) + valueEqual(t, v, values[int(startIndex+i)]) i++ return true, nil }) @@ -1135,7 +1135,7 @@ func TestArraySetRandomValues(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } verifyArray(t, storage, typeInfo, address, array, values, false) @@ -1262,7 +1262,7 @@ func TestArrayRemoveRandomValues(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[k], existingValue) + valueEqual(t, values[k], existingValue) copy(values[k:], values[k+1:]) values = values[:len(values)-1] @@ -1330,7 +1330,7 @@ func testArrayAppendSetInsertRemoveRandomValues( existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldV, existingValue) + valueEqual(t, oldV, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1360,7 +1360,7 @@ func testArrayAppendSetInsertRemoveRandomValues( existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[k], existingValue) + valueEqual(t, values[k], existingValue) copy(values[k:], values[k+1:]) values = values[:len(values)-1] @@ -1396,7 +1396,7 @@ func TestArrayAppendSetInsertRemoveRandomValues(t *testing.T) { verifyArray(t, storage, typeInfo, address, array, values, false) } -func TestArrayNestedArrayMap(t *testing.T) { +func TestArrayWithChildArrayMap(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) @@ -1405,70 
+1405,77 @@ func TestArrayNestedArrayMap(t *testing.T) {
 
 		const arraySize = 4096
 
-		nestedTypeInfo := testTypeInfo{43}
+		typeInfo := testTypeInfo{42}
+		childTypeInfo := testTypeInfo{43}
 
 		storage := newTestPersistentStorage(t)
 
 		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
 
-		// Create a list of arrays with 1 element.
-		nestedArrays := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			nested, err := NewArray(storage, address, nestedTypeInfo)
-			require.NoError(t, err)
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
 
-			err = nested.Append(Uint64Value(i))
+		// Create child arrays with 1 element.
+		expectedValues := make([]Value, arraySize)
+		for i := uint64(0); i < arraySize; i++ {
+			childArray, err := NewArray(storage, address, childTypeInfo)
 			require.NoError(t, err)
 
-			require.True(t, nested.root.IsData())
-
-			nestedArrays[i] = nested
-		}
+			v := Uint64Value(i)
 
-		typeInfo := testTypeInfo{42}
+			err = childArray.Append(v)
+			require.NoError(t, err)
 
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+			require.True(t, childArray.root.IsData())
+			require.False(t, childArray.Inlined())
 
-		for _, a := range nestedArrays {
-			err := array.Append(a)
+			err = array.Append(childArray)
 			require.NoError(t, err)
+			require.True(t, childArray.Inlined())
+
+			expectedValues[i] = arrayValue{v}
 		}
 
-		verifyArray(t, storage, typeInfo, address, array, nestedArrays, false)
+		verifyArray(t, storage, typeInfo, address, array, expectedValues, false)
 	})
 
 	t.Run("big array", func(t *testing.T) {
 
 		const arraySize = 4096
+		const childArraySize = 40
 
-		nestedTypeInfo := testTypeInfo{43}
+		typeInfo := testTypeInfo{42}
+		childTypeInfo := testTypeInfo{43}
 
 		storage := newTestPersistentStorage(t)
 
 		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
 
-		values := make([]Value, arraySize)
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		// Create child arrays with 40 elements.
+ expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - nested, err := NewArray(storage, address, nestedTypeInfo) + childArray, err := NewArray(storage, address, childTypeInfo) require.NoError(t, err) - for i := uint64(0); i < 40; i++ { - err := nested.Append(Uint64Value(math.MaxUint64)) - require.NoError(t, err) - } + expectedChildArrayValues := make([]Value, childArraySize) + for i := uint64(0); i < childArraySize; i++ { + v := Uint64Value(math.MaxUint64) - require.False(t, nested.root.IsData()) + err := childArray.Append(v) + require.NoError(t, err) - values[i] = nested - } + expectedChildArrayValues[i] = v + } - typeInfo := testTypeInfo{42} + require.False(t, childArray.root.IsData()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - for _, a := range values { - err := array.Append(a) + err = array.Append(childArray) require.NoError(t, err) + require.False(t, childArray.Inlined()) + + expectedValues[i] = arrayValue(expectedChildArrayValues) } - verifyArray(t, storage, typeInfo, address, array, values, true) + verifyArray(t, storage, typeInfo, address, array, expectedValues, true) }) t.Run("small map", func(t *testing.T) { @@ -1639,13 +1646,14 @@ func TestArrayDecodeV0(t *testing.T) { t.Run("metadataslab as root", func(t *testing.T) { storage := newTestBasicStorage(t) typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} address := Address{1, 2, 3, 4, 5, 6, 7, 8} arraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} arrayDataSlabID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} arrayDataSlabID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - nestedArraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + childArraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} const arraySize = 20 values := make([]Value, arraySize) @@ -1653,16 +1661,15 @@ func TestArrayDecodeV0(t *testing.T) { values[i] = NewStringValue(strings.Repeat("a", 22)) } - typeInfo2 := testTypeInfo{43} - - nestedArray, err := NewArray(storage, address, typeInfo2) - nestedArray.root.SetSlabID(nestedArraySlabID) + childArray, err := NewArray(storage, address, childTypeInfo) + childArray.root.SetSlabID(childArraySlabID) require.NoError(t, err) - err = nestedArray.Append(Uint64Value(0)) + v := Uint64Value(0) + err = childArray.Append(v) require.NoError(t, err) - values[arraySize-1] = nestedArray + values[arraySize-1] = arrayValue{v} slabData := map[SlabID][]byte{ // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] @@ -1740,7 +1747,7 @@ func TestArrayDecodeV0(t *testing.T) { }, // (data slab) next: 0, data: [0] - nestedArraySlabID: { + childArraySlabID: { // extra data // version 0x00, @@ -1978,7 +1985,7 @@ func TestArrayEncodeDecode(t *testing.T) { // Same type info is reused. 
t.Run("root data slab, inlined child array of same type", func(t *testing.T) { typeInfo := testTypeInfo{42} - typeInfo2 := testTypeInfo{43} + childTypeInfo := testTypeInfo{43} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1986,11 +1993,11 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 2 - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { v := Uint64Value(i) - childArray, err := NewArray(storage, address, typeInfo2) + childArray, err := NewArray(storage, address, childTypeInfo) require.NoError(t, err) err = childArray.Append(v) @@ -1999,7 +2006,7 @@ func TestArrayEncodeDecode(t *testing.T) { err = parentArray.Append(childArray) require.NoError(t, err) - values[i] = childArray + expectedValues[i] = arrayValue{v} } id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} @@ -2046,7 +2053,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) // Different type info are encoded. @@ -2061,7 +2068,7 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 2 - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { v := Uint64Value(i) @@ -2080,7 +2087,7 @@ func TestArrayEncodeDecode(t *testing.T) { err = parentArray.Append(childArray) require.NoError(t, err) - values[i] = childArray + expectedValues[i] = arrayValue{v} } id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} @@ -2131,7 +2138,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) // Same type info is reused. 
@@ -2146,7 +2153,7 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 2 - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { v := Uint64Value(i) @@ -2165,7 +2172,11 @@ func TestArrayEncodeDecode(t *testing.T) { err = parentArray.Append(childArray) require.NoError(t, err) - values[i] = childArray + expectedValues[i] = arrayValue{ + arrayValue{ + v, + }, + } } id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} @@ -2215,7 +2226,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("root data slab, multiple levels of inlined array of different type", func(t *testing.T) { @@ -2231,7 +2242,7 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 2 - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { v := Uint64Value(i) @@ -2261,7 +2272,11 @@ func TestArrayEncodeDecode(t *testing.T) { err = parentArray.Append(childArray) require.NoError(t, err) - values[i] = childArray + expectedValues[i] = arrayValue{ + arrayValue{ + v, + }, + } } id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} @@ -2320,7 +2335,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("root metadata slab, inlined array of same type", func(t *testing.T) { @@ -2334,27 +2349,28 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 20 - values := make([]Value, 0, arraySize) + expectedValues := make([]Value, 0, arraySize) for i := uint64(0); i < arraySize-2; i++ { v := NewStringValue(strings.Repeat("a", 22)) err := array.Append(v) require.NoError(t, err) - values = append(values, v) + expectedValues = append(expectedValues, v) } for i := 0; i < 2; i++ { childArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - err = childArray.Append(Uint64Value(i)) + v := Uint64Value(i) + err = childArray.Append(v) require.NoError(t, err) err = array.Append(childArray) require.NoError(t, err) - values = append(values, childArray) + expectedValues = append(expectedValues, arrayValue{v}) } require.Equal(t, uint64(arraySize), array.Count()) @@ -2458,7 +2474,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("root metadata slab, inlined array of different type", func(t *testing.T) { @@ -2473,14 +2489,14 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 20 - values := make([]Value, 0, arraySize) + expectedValues := make([]Value, 0, arraySize) for i := uint64(0); i < arraySize-2; i++ { v := NewStringValue(strings.Repeat("a", 22)) err := array.Append(v) require.NoError(t, err) - values = append(values, v) + expectedValues = append(expectedValues, v) } for i := 0; i < 2; i++ { @@ -2494,13 +2510,15 @@ func 
TestArrayEncodeDecode(t *testing.T) { childArray, err := NewArray(storage, address, ti) require.NoError(t, err) - err = childArray.Append(Uint64Value(i)) + v := Uint64Value(i) + + err = childArray.Append(v) require.NoError(t, err) err = array.Append(childArray) require.NoError(t, err) - values = append(values, childArray) + expectedValues = append(expectedValues, arrayValue{v}) } require.Equal(t, uint64(arraySize), array.Count()) @@ -2607,7 +2625,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("has pointers", func(t *testing.T) { @@ -2620,29 +2638,33 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 20 - values := make([]Value, 0, arraySize) + expectedValues := make([]Value, 0, arraySize) for i := uint64(0); i < arraySize-1; i++ { v := NewStringValue(strings.Repeat("a", 22)) err := array.Append(v) require.NoError(t, err) - values = append(values, v) + expectedValues = append(expectedValues, v) } + const childArraySize = 5 + childArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - for i := 0; i < 5; i++ { + expectedChildArrayValues := make([]Value, childArraySize) + for i := 0; i < childArraySize; i++ { v := NewStringValue(strings.Repeat("b", 22)) err = childArray.Append(v) require.NoError(t, err) + expectedChildArrayValues[i] = v } err = array.Append(childArray) require.NoError(t, err) - values = append(values, childArray) + expectedValues = append(expectedValues, arrayValue(expectedChildArrayValues)) require.Equal(t, uint64(arraySize), array.Count()) require.Equal(t, uint64(5), childArray.Count()) @@ -2765,7 +2787,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("has pointers in inlined slab", func(t *testing.T) { @@ -2779,14 +2801,14 @@ func TestArrayEncodeDecode(t *testing.T) { require.NoError(t, err) const arraySize = 20 - values := make([]Value, 0, arraySize) + expectedValues := make([]Value, 0, arraySize) for i := uint64(0); i < arraySize-1; i++ { v := NewStringValue(strings.Repeat("a", 22)) err := array.Append(v) require.NoError(t, err) - values = append(values, v) + expectedValues = append(expectedValues, v) } childArray, err := NewArray(storage, address, typeInfo3) @@ -2795,11 +2817,16 @@ func TestArrayEncodeDecode(t *testing.T) { gchildArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - for i := 0; i < 5; i++ { + const gchildArraySize = 5 + + expectedGChildArrayValues := make([]Value, gchildArraySize) + for i := 0; i < gchildArraySize; i++ { v := NewStringValue(strings.Repeat("b", 22)) err = gchildArray.Append(v) require.NoError(t, err) + + expectedGChildArrayValues[i] = v } err = childArray.Append(gchildArray) @@ -2808,7 +2835,9 @@ func TestArrayEncodeDecode(t *testing.T) { err = array.Append(childArray) require.NoError(t, err) - values = append(values, childArray) + expectedValues = append(expectedValues, arrayValue{ + arrayValue(expectedGChildArrayValues), + }) require.Equal(t, uint64(arraySize), array.Count()) require.Equal(t, uint64(1), childArray.Count()) @@ -2941,7 +2970,7 @@ func TestArrayEncodeDecode(t *testing.T) 
{ array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) } @@ -3214,7 +3243,7 @@ func TestArrayPopIterate(t *testing.T) { err = array.PopIterate(func(v Storable) { vv, err := v.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) + valueEqual(t, values[arraySize-i-1], vv) i++ }) require.NoError(t, err) @@ -3248,7 +3277,7 @@ func TestArrayPopIterate(t *testing.T) { err = array.PopIterate(func(v Storable) { vv, err := v.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) + valueEqual(t, values[arraySize-i-1], vv) i++ }) require.NoError(t, err) @@ -3810,7 +3839,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root data slab // nested composite elements: 1 root data slab for each @@ -3824,7 +3853,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root data slab // nested composite elements: 1 root data slab for each @@ -3835,12 +3864,9 @@ func TestArrayLoadedValueIterator(t *testing.T) { // Unload composite element from front to back for i := 0; i < len(values); i++ { - v := values[i] - - nestedArray, ok := v.(*Array) - require.True(t, ok) + slabID := childSlabIDs[i] - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) expectedValues := values[i+1:] @@ -3852,7 +3878,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root data slab // nested composite elements: 1 root data slab for each @@ -3863,12 +3889,9 @@ func TestArrayLoadedValueIterator(t *testing.T) { // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { - v := values[i] + slabID := childSlabIDs[i] - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) expectedValues := values[:i] @@ -3880,7 +3903,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root data slab // nested composite elements: 1 root data slab for each @@ -3892,12 +3915,9 @@ func TestArrayLoadedValueIterator(t *testing.T) { // Unload composite element in the middle unloadValueIndex := 1 - v := values[unloadValueIndex] - - nestedArray, ok := v.(*Array) - require.True(t, ok) + slabID := childSlabIDs[unloadValueIndex] - err := 
storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) @@ -3910,7 +3930,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root data slab // nested composite elements: 1 root data slab for each @@ -3924,16 +3944,13 @@ func TestArrayLoadedValueIterator(t *testing.T) { // At this point, iterator returned first element (v). // Remove all other nested composite elements (except first element) from storage. - for _, value := range values[1:] { - nestedArray, ok := value.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + for _, slabID := range childSlabIDs[1:] { + err := storage.Remove(slabID) require.NoError(t, err) } require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0], v) + valueEqual(t, values[0], v) i++ return true, nil }) @@ -3946,10 +3963,10 @@ func TestArrayLoadedValueIterator(t *testing.T) { const arraySize = 3 // Create an array with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { + for childArrayIndex := 0; childArrayIndex < arraySize; childArrayIndex++ { storage := newTestPersistentStorage(t) - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + array, values, childSlabID := createArrayWithSimpleAndChildArrayValues(t, storage, address, typeInfo, arraySize, childArrayIndex) // parent array: 1 root data slab // nested composite element: 1 root data slab @@ -3959,12 +3976,10 @@ func TestArrayLoadedValueIterator(t *testing.T) { verifyArrayLoadedElements(t, array, values) // Unload composite element - v := values[nestedCompositeIndex].(*Array) - - err := storage.Remove(v.SlabID()) + err := storage.Remove(childSlabID) require.NoError(t, err) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] verifyArrayLoadedElements(t, array, values) @@ -3988,7 +4003,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root metadata slab, 2 data slabs // nested composite value element: 1 root data slab for each @@ -4002,7 +4017,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root metadata slab, 2 data slabs // nested composite value element: 1 root data slab for each @@ -4012,13 +4027,10 @@ func TestArrayLoadedValueIterator(t *testing.T) { verifyArrayLoadedElements(t, array, values) // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + for i := 0; i < len(childSlabIDs); i++ { + slabID := childSlabIDs[i] - nestedArray, ok := v.(*Array) - 
require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) expectedValues := values[i+1:] @@ -4030,7 +4042,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root metadata slab, 2 data slabs // nested composite value element: 1 root data slab for each @@ -4040,13 +4052,10 @@ func TestArrayLoadedValueIterator(t *testing.T) { verifyArrayLoadedElements(t, array, values) // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + for i := len(childSlabIDs) - 1; i >= 0; i-- { + slabID := childSlabIDs[i] - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) expectedValues := values[:i] @@ -4058,7 +4067,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array: 1 root metadata slab, 2 data slabs // nested composite value element: 1 root data slab for each @@ -4070,12 +4079,9 @@ func TestArrayLoadedValueIterator(t *testing.T) { // Unload composite element in the middle for _, index := range []int{4, 14} { - v := values[index] + slabID := childSlabIDs[index] - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) copy(values[index:], values[index+1:]) @@ -4089,10 +4095,10 @@ func TestArrayLoadedValueIterator(t *testing.T) { const arraySize = 20 // Create an array with composite value at specified index. 
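Because expected values are now snapshots rather than *Array values, a test can no longer recover a child's SlabID from the value itself; the creation helpers therefore return the child slab IDs alongside the expected values. The unload loops in the surrounding hunks then reduce to this pattern (a sketch assembled from those hunks):

for i := 0; i < len(childSlabIDs); i++ {
	// Unload one child slab; iteration over loaded values must skip it.
	err := storage.Remove(childSlabIDs[i])
	require.NoError(t, err)

	verifyArrayLoadedElements(t, array, values[i+1:])
}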
- for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { + for childArrayIndex := 0; childArrayIndex < arraySize; childArrayIndex++ { storage := newTestPersistentStorage(t) - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + array, values, childSlabID := createArrayWithSimpleAndChildArrayValues(t, storage, address, typeInfo, arraySize, childArrayIndex) // parent array: 1 root metadata slab, 2 data slabs // nested composite value element: 1 root data slab for each @@ -4102,12 +4108,10 @@ func TestArrayLoadedValueIterator(t *testing.T) { verifyArrayLoadedElements(t, array, values) // Unload composite value - v := values[nestedCompositeIndex].(*Array) - - err := storage.Remove(v.SlabID()) + err := storage.Remove(childSlabID) require.NoError(t, err) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] verifyArrayLoadedElements(t, array, values) @@ -4258,7 +4262,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs // nested composite elements: 1 root data slab for each @@ -4274,17 +4278,17 @@ func TestArrayLoadedValueIterator(t *testing.T) { i := r.Intn(len(values)) - v := values[i] - - nestedArray, ok := v.(*Array) - require.True(t, ok) + slabID := childSlabIDs[i] - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(slabID) require.NoError(t, err) copy(values[i:], values[i+1:]) values = values[:len(values)-1] + copy(childSlabIDs[i:], childSlabIDs[i+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] + verifyArrayLoadedElements(t, array, values) } }) @@ -4294,7 +4298,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs // nested composite elements: 1 root data slab for each @@ -4359,7 +4363,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs // nested composite elements: 1 root data slab for each @@ -4525,84 +4529,105 @@ func createArrayWithSimpleValues( return array, values } -func createArrayWithCompositeValues( +func createArrayWithChildArrays( t *testing.T, storage SlabStorage, address Address, typeInfo TypeInfo, arraySize int, -) (*Array, []Value) { +) (*Array, []Value, []SlabID) { + const childArraySize = 50 // Create parent array array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) expectedValues := make([]Value, arraySize) + childSlabIDs := make([]SlabID, arraySize) + for i := 0; i < arraySize; i++ { - // Create nested array - 
nested, err := NewArray(storage, address, typeInfo) + // Create child array + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - for j := 0; j < 50; j++ { - err = nested.Append(Uint64Value(j)) + expectedChildArrayValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) require.NoError(t, err) + + expectedChildArrayValues[j] = v } - expectedValues[i] = nested + expectedValues[i] = arrayValue(expectedChildArrayValues) + childSlabIDs[i] = childArray.SlabID() // Append nested array to parent - err = array.Append(nested) + err = array.Append(childArray) require.NoError(t, err) } - return array, expectedValues + return array, expectedValues, childSlabIDs } -func createArrayWithSimpleAndCompositeValues( +func createArrayWithSimpleAndChildArrayValues( t *testing.T, storage SlabStorage, address Address, typeInfo TypeInfo, arraySize int, compositeValueIndex int, -) (*Array, []Value) { +) (*Array, []Value, SlabID) { + const childArraySize = 50 + require.True(t, compositeValueIndex < arraySize) array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) + var childSlabID SlabID r := 'a' for i := 0; i < arraySize; i++ { if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) + // Create child array with one element + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - for j := 0; j < 50; j++ { - err = a.Append(Uint64Value(j)) + expectedChildArrayValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) require.NoError(t, err) + + expectedChildArrayValues[j] = v } - values[i] = a + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue(expectedChildArrayValues) + childSlabID = childArray.SlabID() } else { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) + v := NewStringValue(strings.Repeat(string(r), 20)) r++ - } - err = array.Append(values[i]) - require.NoError(t, err) + err = array.Append(v) + require.NoError(t, err) + + expectedValues[i] = v + } } - return array, values + return array, expectedValues, childSlabID } func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { i := 0 err := array.IterateLoadedValues(func(v Value) (bool, error) { require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i], v) + valueEqual(t, expectedValues[i], v) i++ return true, nil }) @@ -4714,6 +4739,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, getStoredDeltas(storage)) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + childArray, ok := e.(*Array) require.True(t, ok) require.True(t, childArray.Inlined()) @@ -4732,6 +4760,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(i+1), childArray.Count()) + expectedChildValues = append(expectedChildValues, v) + expectedValues[0] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab @@ -4752,6 +4783,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { err = childArray.Append(v) 
require.NoError(t, err) + expectedChildValues = append(expectedChildValues, v) + expectedValues[0] = expectedChildValues + require.False(t, childArray.Inlined()) require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slab because child array is no longer inlined. @@ -4773,6 +4807,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues = expectedChildValues[1:] + expectedValues[0] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) require.Equal(t, SlabIDUndefined, childArray.SlabID()) @@ -4838,7 +4875,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. for i := 0; i < 10; i++ { - for _, child := range children { + for j, child := range children { childArray := child.array childValueID := child.valueID @@ -4846,6 +4883,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(i+1), childArray.Count()) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[j] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab @@ -4866,7 +4909,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedStoredDeltas := 1 // Add one more element to child array which triggers inlined child array slab becomes standalone slab - for _, child := range children { + for i, child := range children { childArray := child.array childValueID := child.valueID @@ -4874,6 +4917,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.False(t, childArray.Inlined()) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[i] = expectedChildValues + expectedStoredDeltas++ require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) // There are more stored slab because child array is no longer inlined. @@ -4893,7 +4942,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { } // Remove one element from child array which triggers standalone array slab becomes inlined slab again. 
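The inline/standalone assertions repeated in these hunks could be expressed as one hypothetical helper (a sketch; valueIDToSlabID is the conversion the tests themselves call, while the helper name and shape are illustrative):

func requireInlinedState(t *testing.T, child *Array, valueID ValueID, wantInlined bool) {
	require.Equal(t, wantInlined, child.Inlined())

	if wantInlined {
		// An inlined slab has no storage identity of its own.
		require.Equal(t, SlabIDUndefined, child.SlabID())
	} else {
		// A standalone slab's ID is bytewise identical to the value ID.
		require.Equal(t, valueIDToSlabID(valueID), child.SlabID())
	}

	// The value ID is stable across inline/standalone transitions.
	require.Equal(t, valueID, child.ValueID())
}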
- for _, child := range children { + for i, child := range children { childArray := child.array childValueID := child.valueID @@ -4901,6 +4950,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[i] = expectedChildValues + require.True(t, childArray.Inlined()) expectedStoredDeltas-- @@ -4922,7 +4977,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Remove remaining elements from inlined child array childArrayCount := children[0].array.Count() for i := 0; i < int(childArrayCount); i++ { - for _, child := range children { + for j, child := range children { childArray := child.array childValueID := child.valueID @@ -4930,6 +4985,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[j] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) require.Equal(t, SlabIDUndefined, childArray.SlabID()) @@ -4999,7 +5060,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. for i := 0; i < 10; i++ { - for _, child := range children { + for j, child := range children { childArray := child.array childValueID := child.valueID @@ -5007,6 +5068,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(i+1), childArray.Count()) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[j] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -5025,7 +5092,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.False(t, parentArray.root.IsData()) // Add one more element to child array which triggers inlined child array slab becomes standalone slab - for _, child := range children { + for i, child := range children { childArray := child.array childValueID := child.valueID @@ -5033,6 +5100,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.False(t, childArray.Inlined()) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[i] = expectedChildValues + expectedSlabID := valueIDToSlabID(childValueID) require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -5048,7 +5121,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, parentArray.root.IsData()) // Remove one element from child array which triggers standalone array slab becomes inlined slab again. 
- for _, child := range children { + for i, child := range children { childArray := child.array childValueID := child.valueID @@ -5056,6 +5129,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[i] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged @@ -5074,7 +5153,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Remove remaining elements from inlined child array childArrayCount := children[0].array.Count() for i := 0; i < int(childArrayCount); i++ { - for _, child := range children { + for j, child := range children { childArray := child.array childValueID := child.valueID @@ -5082,6 +5161,12 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[j] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged @@ -5169,6 +5254,16 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.Equal(t, uint64(i+1), gchildArray.Count()) require.Equal(t, uint64(1), childArray.Count()) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + require.True(t, childArray.Inlined()) require.True(t, gchildArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) @@ -5198,6 +5293,16 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { err = gchildArray.Append(v) require.NoError(t, err) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.False(t, childArray.Inlined()) require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slab because child array is no longer inlined. 
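From here the tests track three levels of nesting, so each mutation unwraps the child snapshot from expectedValues, mutates the grandchild snapshot, and wraps both back up. A hypothetical helper showing the shape of that pattern (names are illustrative; the hunks below inline it at each call site):

func appendToGrandchildSnapshot(t *testing.T, expected []Value, i int, v Value) {
	expectedChildValues, ok := expected[i].(arrayValue)
	require.True(t, ok)

	expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
	require.True(t, ok)

	expectedChildValues[0] = append(expectedGChildValues, v)
	expected[i] = expectedChildValues
}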
@@ -5227,6 +5332,16 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = expectedGChildValues[1:] + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.True(t, gchildArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) @@ -5316,6 +5431,16 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.Equal(t, uint64(i+1), gchildArray.Count()) require.Equal(t, uint64(1), childArray.Count()) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + require.True(t, childArray.Inlined()) require.True(t, gchildArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) @@ -5347,6 +5472,16 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { err = gchildArray.Append(largeValue) require.NoError(t, err) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, largeValue) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + require.False(t, gchildArray.Inlined()) require.True(t, childArray.Inlined()) require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slab because child array is no longer inlined. @@ -5374,7 +5509,16 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { for gchildArray.Count() > 0 { _, err := gchildArray.Remove(gchildArray.Count() - 1) require.NoError(t, err) - // require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = expectedGChildValues[:len(expectedGChildValues)-1] + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues require.True(t, gchildArray.Inlined()) require.True(t, gchildArray.Inlined()) @@ -5442,7 +5586,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { err = parentArray.Append(child) require.NoError(t, err) - expectedValues[i] = child + expectedValues[i] = arrayValue{arrayValue{v}} } require.Equal(t, uint64(arraySize), parentArray.Count()) @@ -5500,7 +5644,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Appending 7 elements to child array so that inlined child array reaches max inlined size as array element. 
for i := 0; i < 7; i++ { - for _, child := range children { + for j, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array @@ -5510,6 +5654,13 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(i+2), childArray.Count()) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + + expectedValues[j] = expectedChildValues + require.True(t, childArray.Inlined()) require.True(t, gchildArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) @@ -5546,6 +5697,13 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { err = childArray.Append(v) require.NoError(t, err) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + + expectedValues[i] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.False(t, childArray.Inlined()) require.Equal(t, 2+i, getStoredDeltas(storage)) // There are >1 stored slab because child array is no longer inlined. @@ -5584,6 +5742,13 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { _, err = childArray.Remove(childArray.Count() - 1) require.NoError(t, err) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[:len(expectedChildValues)-1] + + expectedValues[i] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.True(t, childArray.Inlined()) require.Equal(t, 2-i, getStoredDeltas(storage)) @@ -5613,7 +5778,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { elementCount := children[0].array.Count() for i := uint64(0); i < elementCount-1; i++ { - for _, child := range children { + for j, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array @@ -5623,6 +5788,13 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[:len(expectedChildValues)-1] + + expectedValues[j] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.True(t, gchildArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) @@ -5688,7 +5860,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { err = parentArray.Append(child) require.NoError(t, err) - expectedValues[i] = child + expectedValues[i] = arrayValue{arrayValue{}} } require.Equal(t, uint64(arraySize), parentArray.Count()) @@ -5745,7 +5917,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Appending 6 elements to grand child array so that parent array root slab is metadata slab. 
for i := uint32(0); i < 6; i++ { - for _, child := range children { + for j, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array @@ -5755,6 +5927,17 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(i+1), gchildArray.Count()) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + + expectedChildValues[0] = expectedGChildValues + expectedValues[j] = expectedChildValues + require.True(t, childArray.Inlined()) require.True(t, gchildArray.Inlined()) require.Equal(t, 1, getStoredDeltas(storage)) @@ -5778,7 +5961,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { } // Add one more element to grand child array which triggers parent array slab becomes metadata slab (all elements are still inlined). - for _, child := range children { + for i, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array @@ -5787,6 +5970,17 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { err = gchildArray.Append(v) require.NoError(t, err) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + + expectedChildValues[0] = expectedGChildValues + expectedValues[i] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.True(t, childArray.Inlined()) require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because parent root slab is metadata. @@ -5813,17 +6007,28 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Add one more element to grand child array which triggers // - child arrays become standalone slab (grand child arrays are still inlined) // - parent array slab becomes data slab - for _, child := range children { + for i, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array gValueID := child.child.valueID - for i := 0; i < 2; i++ { + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + for j := 0; j < 2; j++ { err = gchildArray.Append(v) require.NoError(t, err) + + expectedGChildValues = append(expectedGChildValues, v) } + expectedChildValues[0] = expectedGChildValues + expectedValues[i] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.False(t, childArray.Inlined()) @@ -5849,17 +6054,28 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, parentArray.root.IsData()) // Remove elements from grand child array to trigger child array inlined again. 
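The getStoredDeltas expectations in the hunks above follow a single assumed rule: every slab pending write counts once, and an inlined child counts zero because it is serialized inside its parent. A sketch of that accounting (not a function from the codebase):

func expectedStoredDeltasSketch(parentSlabs, standaloneChildren int) int {
	// parentSlabs is 1 for a root data slab, or 3 for a metadata root with
	// two data slabs; each un-inlined child adds exactly one slab.
	return parentSlabs + standaloneChildren
}

Under this rule a fully inlined tree beneath a data-slab root yields 1, the metadata-root case yields 3, and un-inlining the i-th child mid-loop yields 2+i, matching the constants asserted above.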
- for _, child := range children { + for i, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array gValueID := child.child.valueID - for i := 0; i < 2; i++ { + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + for j := 0; j < 2; j++ { _, err = gchildArray.Remove(0) require.NoError(t, err) + + expectedGChildValues = expectedGChildValues[:len(expectedGChildValues)-1] } + expectedChildValues[0] = expectedGChildValues + expectedValues[i] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.True(t, childArray.Inlined()) @@ -5888,7 +6104,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { elementCount := children[0].child.array.Count() for i := uint64(0); i < elementCount; i++ { - for _, child := range children { + for j, child := range children { childArray := child.array valueID := child.valueID gchildArray := child.child.array @@ -5898,6 +6114,17 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.NoError(t, err) require.Equal(t, v, existingStorable) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = expectedGChildValues[1:] + + expectedChildValues[0] = expectedGChildValues + expectedValues[j] = expectedChildValues + require.True(t, gchildArray.Inlined()) require.True(t, gchildArray.Inlined()) @@ -5952,9 +6179,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - children := make([]struct { - array *Array - valueID ValueID + children := make([]*struct { + array *Array + valueID ValueID + parentIndex int }, arraySize) for i := 0; i < arraySize; i++ { @@ -5971,8 +6199,13 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.Equal(t, address[:], valueID[:slabAddressSize]) require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - children[i].array = childArray - children[i].valueID = valueID + children[i] = &struct { + array *Array + valueID ValueID + parentIndex int + }{ + childArray, valueID, i, + } } t.Run("insert elements in parent array", func(t *testing.T) { @@ -5996,6 +6229,14 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(1), childArray.Count()) + child.parentIndex = i + 1 + + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -6027,6 +6268,16 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(2), childArray.Count()) + if i > 0 { + child.parentIndex++ + } + + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID 
is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -6057,6 +6308,12 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(3), childArray.Count()) + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -6090,6 +6347,14 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(4), childArray.Count()) + child.parentIndex-- + + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -6121,6 +6386,16 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(5), childArray.Count()) + if i > 0 { + child.parentIndex-- + } + + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -6151,6 +6426,12 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(6), childArray.Count()) + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + require.True(t, childArray.Inlined()) require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged @@ -6186,7 +6467,7 @@ func createArrayWithEmptyChildArray( err = array.Append(child) require.NoError(t, err) - expectedValues[i] = child + expectedValues[i] = arrayValue{} } return array, expectedValues @@ -6222,7 +6503,7 @@ func createArrayWithEmpty2LevelChildArray( err = array.Append(child) require.NoError(t, err) - expectedValues[i] = child + expectedValues[i] = arrayValue{arrayValue{}} } return array, expectedValues diff --git a/map_test.go b/map_test.go index 66073869..1788b391 100644 --- a/map_test.go +++ b/map_test.go @@ -143,42 +143,42 @@ func _verifyMap( typeInfo TypeInfo, address Address, m *OrderedMap, - keyValues map[Value]Value, + expectedKeyValues map[Value]Value, sortedKeys []Value, hasNestedArrayMapElement bool, inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, m.Type())) require.Equal(t, address, m.Address()) - require.Equal(t, uint64(len(keyValues)), m.Count()) + require.Equal(t, uint64(len(expectedKeyValues)), m.Count()) var err error // Verify map elements - for k, v := range keyValues { - e, 
err := m.Get(compare, hashInputProvider, k) + for k, expected := range expectedKeyValues { + actual, err := m.Get(compare, hashInputProvider, k) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, e) + valueEqual(t, expected, actual) } // Verify map elements ordering if len(sortedKeys) > 0 { - require.Equal(t, len(keyValues), len(sortedKeys)) + require.Equal(t, len(expectedKeyValues), len(sortedKeys)) i := 0 err = m.Iterate(func(k, v Value) (bool, error) { expectedKey := sortedKeys[i] - expectedValue := keyValues[expectedKey] + expectedValue := expectedKeyValues[expectedKey] - valueEqual(t, typeInfoComparator, expectedKey, k) - valueEqual(t, typeInfoComparator, expectedValue, v) + valueEqual(t, expectedKey, k) + valueEqual(t, expectedValue, v) i++ return true, nil }) require.NoError(t, err) - require.Equal(t, len(keyValues), i) + require.Equal(t, len(expectedKeyValues), i) } // Verify in-memory slabs @@ -224,7 +224,7 @@ func _verifyMap( require.NoError(t, err) require.Equal(t, stats.SlabCount(), uint64(storage.Count())) - if len(keyValues) == 0 { + if len(expectedKeyValues) == 0 { // Verify slab count for empty map require.Equal(t, uint64(1), stats.DataSlabCount) require.Equal(t, uint64(0), stats.MetaDataSlabCount) @@ -358,7 +358,7 @@ func TestMapSetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) keyValues[k] = newValue } @@ -506,7 +506,7 @@ func TestMapSetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) keyValues[k] = newValue } @@ -719,11 +719,11 @@ func testMapRemoveElement(t *testing.T, m *OrderedMap, k Value, expectedV Value) removedKey, err := removedKeyStorable.StoredValue(m.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(m.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, expectedV, removedValue) + valueEqual(t, expectedV, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = m.Storage.Remove(SlabID(id)) @@ -1124,8 +1124,8 @@ func TestMapIterate(t *testing.T) { // Iterate key value pairs i = uint64(0) err = m.Iterate(func(k Value, v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) - valueEqual(t, typeInfoComparator, keyValues[k], v) + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) i++ return true, nil }) @@ -1136,7 +1136,7 @@ func TestMapIterate(t *testing.T) { // Iterate keys i = uint64(0) err = m.IterateKeys(func(k Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) + valueEqual(t, sortedKeys[i], k) i++ return true, nil }) @@ -1148,7 +1148,7 @@ func TestMapIterate(t *testing.T) { i = uint64(0) err = m.IterateValues(func(v Value) (resume bool, err error) { k := sortedKeys[i] - valueEqual(t, typeInfoComparator, keyValues[k], v) + valueEqual(t, keyValues[k], v) i++ return true, nil }) @@ -1210,8 +1210,8 @@ func TestMapIterate(t *testing.T) { // Iterate key value pairs i := uint64(0) err = m.Iterate(func(k Value, v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) - valueEqual(t, typeInfoComparator, keyValues[k], v) + valueEqual(t, sortedKeys[i], k) + valueEqual(t, 
keyValues[k], v) i++ return true, nil }) @@ -1223,7 +1223,7 @@ func TestMapIterate(t *testing.T) { // Iterate keys i = uint64(0) err = m.IterateKeys(func(k Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) + valueEqual(t, sortedKeys[i], k) i++ return true, nil }) @@ -1235,7 +1235,7 @@ func TestMapIterate(t *testing.T) { // Iterate values i = uint64(0) err = m.IterateValues(func(v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], v) + valueEqual(t, keyValues[sortedKeys[i]], v) i++ return true, nil }) @@ -1323,11 +1323,11 @@ func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLeve removedKey, err := removedKeyStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, removedValue) + valueEqual(t, v, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1391,11 +1391,11 @@ func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) removedKey, err := removedKeyStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, removedValue) + valueEqual(t, v, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1492,7 +1492,7 @@ func testMapSetRemoveRandomValues( existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldv, existingValue) + valueEqual(t, oldv, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1515,11 +1515,11 @@ func testMapSetRemoveRandomValues( removedKey, err := removedKeyStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[k], removedValue) + valueEqual(t, keyValues[k], removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err := storage.Remove(SlabID(id)) @@ -1721,16 +1721,16 @@ func TestMapDecodeV0(t *testing.T) { id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} nestedSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} - nested, err := NewArray(storage, address, typeInfo2) - nested.root.SetSlabID(nestedSlabID) + childArray, err := NewArray(storage, address, typeInfo2) + childArray.root.SetSlabID(nestedSlabID) require.NoError(t, err) - err = nested.Append(Uint64Value(0)) + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) k := NewStringValue(strings.Repeat(string(r), 22)) - v := nested - keyValues[k] = v + + keyValues[k] = arrayValue{Uint64Value(0)} digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -2708,18 +2708,19 @@ func TestMapEncodeDecode(t *testing.T) { r++ } - // Create nested array + // Create child array typeInfo2 := testTypeInfo{43} - nested, err := NewArray(storage, address, typeInfo2) + childArray, err := NewArray(storage, address, typeInfo2) 
require.NoError(t, err) - err = nested.Append(Uint64Value(0)) + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) k := NewStringValue(strings.Repeat(string(r), 22)) - v := nested - keyValues[k] = v + v := childArray + + keyValues[k] = arrayValue{Uint64Value(0)} digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -5837,21 +5838,27 @@ func TestMapEncodeDecode(t *testing.T) { r++ } - // Create nested array + // Create child array + const childArraySize = 5 + typeInfo2 := testTypeInfo{43} - nestedArray, err := NewArray(storage, address, typeInfo2) + childArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - for i := 0; i < 5; i++ { + expectedChildValues := make([]Value, childArraySize) + for i := 0; i < childArraySize; i++ { v := NewStringValue(strings.Repeat("b", 22)) - err = nestedArray.Append(v) + err = childArray.Append(v) require.NoError(t, err) + + expectedChildValues[i] = v } k := NewStringValue(strings.Repeat(string(r), 22)) - v := nestedArray - keyValues[k] = v + v := childArray + + keyValues[k] = arrayValue(expectedChildValues) digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -6087,15 +6094,20 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) // Create grand child array + const gchildArraySize = 5 + gchildTypeInfo := testTypeInfo{44} gchildArray, err := NewArray(storage, address, gchildTypeInfo) require.NoError(t, err) - for i := 0; i < 5; i++ { + expectedGChildValues := make([]Value, gchildArraySize) + for i := 0; i < gchildArraySize; i++ { v := NewStringValue(strings.Repeat("b", 22)) err = gchildArray.Append(v) require.NoError(t, err) + + expectedGChildValues[i] = v } // Insert grand child array to child array @@ -6104,7 +6116,8 @@ func TestMapEncodeDecode(t *testing.T) { k := NewStringValue(strings.Repeat(string(r), 22)) v := childArray - keyValues[k] = v + + keyValues[k] = arrayValue{arrayValue(expectedGChildValues)} digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -6307,7 +6320,7 @@ func TestMapEncodeDecode(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + valueEqual(t, v, existingValue) expectedHasPointer := []byte{ @@ -7677,11 +7690,11 @@ func TestMapPopIterate(t *testing.T) { kv, err := k.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + valueEqual(t, sortedKeys[i], kv) vv, err := v.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + valueEqual(t, keyValues[sortedKeys[i]], vv) }) require.NoError(t, err) @@ -7733,11 +7746,11 @@ func TestMapPopIterate(t *testing.T) { kv, err := k.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + valueEqual(t, sortedKeys[i], kv) vv, err := v.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + valueEqual(t, keyValues[sortedKeys[i]], vv) }) require.NoError(t, err) @@ -7802,11 +7815,11 @@ func TestMapPopIterate(t *testing.T) { kv, err := k.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + valueEqual(t, sortedKeys[i], kv) vv, err := v.StoredValue(storage) 
require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + valueEqual(t, keyValues[sortedKeys[i]], vv) }) require.NoError(t, err) @@ -8442,23 +8455,23 @@ func TestMapNestedStorables(t *testing.T) { keyValues := make(map[Value]Value) for i := uint64(0); i < mapSize; i++ { - // Create a nested array with one element - array, err := NewArray(storage, address, typeInfo) + // Create a child array with one element + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) vs := strings.Repeat("b", int(i)) v := SomeValue{Value: NewStringValue(vs)} - err = array.Append(v) + err = childArray.Append(v) require.NoError(t, err) // Insert nested array into map ks := strings.Repeat("a", int(i)) k := SomeValue{Value: NewStringValue(ks)} - keyValues[k] = array + keyValues[k] = arrayValue{v} - existingStorable, err := m.Set(compare, hashInputProvider, k, array) + existingStorable, err := m.Set(compare, hashInputProvider, k, childArray) require.NoError(t, err) require.Nil(t, existingStorable) } @@ -8940,7 +8953,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 3 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -8962,7 +8975,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 collision groups, 2 elements in each group. const mapSize = 6 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -8984,7 +8997,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision group, 4 elements in the group. const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -9005,7 +9018,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 3 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9023,12 +9036,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from front to back. for i := 0; i < len(values); i++ { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[i+1:] @@ -9084,7 +9092,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 collision groups, 2 elements in each group. const mapSize = 6 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9102,12 +9110,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from front to back. for i := 0; i < len(values); i++ { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[i+1:] @@ -9120,7 +9123,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision groups, 4 elements in the group. 
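The map-side tests pin keys into collision groups through the file's mock digester. The recurring setup, lifted from the hunks above and wrapped in an illustrative function (assuming the mock's type is named mockDigesterBuilder, as its use elsewhere in map_test.go suggests):

func pinKeyDigests(digesterBuilder *mockDigesterBuilder, k Value, digests []Digest) {
	// Keys that share their first-level digest land in the same collision group.
	digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
}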
const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9138,12 +9141,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from front to back for i := 0; i < len(values); i++ { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[i+1:] @@ -9156,7 +9154,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision groups, 4 elements in the group. const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -9206,7 +9204,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 3 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9224,12 +9222,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from back to front. for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[:i] @@ -9285,7 +9278,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 collision groups, 2 elements in each group. const mapSize = 6 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9303,12 +9296,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[:i] @@ -9321,7 +9309,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision groups, 4 elements in the group. const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9339,12 +9327,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[:i] @@ -9357,7 +9340,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision groups, 4 elements in the group. 
const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -9406,7 +9389,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 3 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9425,12 +9408,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload value in the middle unloadValueIndex := 1 - v := values[unloadValueIndex][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[unloadValueIndex]) require.NoError(t, err) copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) @@ -9489,7 +9467,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 collision groups, 2 elements in each group. const mapSize = 6 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9507,12 +9485,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element in the middle for _, unloadValueIndex := range []int{1, 3, 5} { - v := values[unloadValueIndex][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[unloadValueIndex]) require.NoError(t, err) } @@ -9529,7 +9502,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision groups, 4 elements in the group. const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9547,12 +9520,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite value in the middle. for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { - v := values[unloadValueIndex][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[unloadValueIndex]) require.NoError(t, err) } @@ -9572,7 +9540,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Create parent map with 3 external collision groups, 4 elements in the group. const mapSize = 12 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -9622,7 +9590,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 3 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9643,18 +9611,14 @@ func TestMapLoadedValueIterator(t *testing.T) { // At this point, iterator returned first element (v). // Remove all other nested composite elements (except first element) from storage. 
- for _, element := range values[1:] { - value := element[1] - nestedArray, ok := value.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + for _, slabID := range childSlabIDs[1:] { + err := storage.Remove(slabID) require.NoError(t, err) } require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0][0], k) - valueEqual(t, typeInfoComparator, values[0][1], v) + valueEqual(t, values[0][0], k) + valueEqual(t, values[0][1], v) i++ return true, nil }) @@ -9667,16 +9631,16 @@ func TestMapLoadedValueIterator(t *testing.T) { const mapSize = 3 // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { + for childArrayIndex := 0; childArrayIndex < mapSize; childArrayIndex++ { storage := newTestPersistentStorage(t) - m, values := createMapWithSimpleAndCompositeValues( + m, values, childSlabID := createMapWithSimpleAndChildArrayValues( t, storage, address, typeInfo, mapSize, - nestedCompositeIndex, + childArrayIndex, func(i int) []Digest { return []Digest{Digest(i)} }, ) @@ -9688,12 +9652,10 @@ func TestMapLoadedValueIterator(t *testing.T) { verifyMapLoadedElements(t, m, values) // Unload composite value - v := values[nestedCompositeIndex][1].(*Array) - - err := storage.Remove(v.SlabID()) + err := storage.Remove(childSlabID) require.NoError(t, err) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] verifyMapLoadedElements(t, m, values) @@ -9724,7 +9686,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 20 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -9745,7 +9707,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 20 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9763,12 +9725,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from front to back for i := 0; i < len(values); i++ { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[i+1:] @@ -9780,7 +9737,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 20 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9798,12 +9755,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) expectedValues := values[:i] @@ -9815,7 +9767,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 20 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -9833,18 +9785,15 @@ func TestMapLoadedValueIterator(t *testing.T) { // Unload composite element in the middle for _, index := range []int{4, 14} { - - v := values[index][1] - - nestedArray, ok := v.(*Array) 
- require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[index]) require.NoError(t, err) copy(values[index:], values[index+1:]) values = values[:len(values)-1] + copy(childSlabIDs[index:], childSlabIDs[index+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] + verifyMapLoadedElements(t, m, values) } }) @@ -9853,16 +9802,16 @@ func TestMapLoadedValueIterator(t *testing.T) { const mapSize = 20 // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { + for childArrayIndex := 0; childArrayIndex < mapSize; childArrayIndex++ { storage := newTestPersistentStorage(t) - m, values := createMapWithSimpleAndCompositeValues( + m, values, childSlabID := createMapWithSimpleAndChildArrayValues( t, storage, address, typeInfo, mapSize, - nestedCompositeIndex, + childArrayIndex, func(i int) []Digest { return []Digest{Digest(i)} }, ) @@ -9873,12 +9822,10 @@ func TestMapLoadedValueIterator(t *testing.T) { verifyMapLoadedElements(t, m, values) - v := values[nestedCompositeIndex][1].(*Array) - - err := storage.Remove(v.SlabID()) + err := storage.Remove(childSlabID) require.NoError(t, err) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] verifyMapLoadedElements(t, m, values) @@ -10101,7 +10048,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 500 - m, values := createMapWithCompositeValues( + m, values, childSlabIDs := createMapWithChildArrayValues( t, storage, address, @@ -10124,17 +10071,15 @@ func TestMapLoadedValueIterator(t *testing.T) { i := r.Intn(len(values)) - v := values[i][1] - - nestedArray, ok := v.(*Array) - require.True(t, ok) - - err := storage.Remove(nestedArray.SlabID()) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) copy(values[i:], values[i+1:]) values = values[:len(values)-1] + copy(childSlabIDs[i:], childSlabIDs[i+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] + verifyMapLoadedElements(t, m, values) } }) @@ -10144,7 +10089,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 500 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -10223,7 +10168,7 @@ func TestMapLoadedValueIterator(t *testing.T) { storage := newTestPersistentStorage(t) const mapSize = 500 - m, values := createMapWithCompositeValues( + m, values, _ := createMapWithChildArrayValues( t, storage, address, @@ -10450,14 +10395,15 @@ func createMapWithSimpleValues( return m, expectedValues } -func createMapWithCompositeValues( +func createMapWithChildArrayValues( t *testing.T, storage SlabStorage, address Address, typeInfo TypeInfo, size int, newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { +) (*OrderedMap, [][2]Value, []SlabID) { + const childArraySize = 50 // Use mockDigesterBuilder to guarantee element order. 
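// The helper now returns the map, the expected [key, value] pairs (child
// arrays recorded as arrayValue), and each child array's SlabID, so tests
// can unload a child slab without downcasting an expected value to *Array.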
digesterBuilder := &mockDigesterBuilder{}

@@ -10466,35 +10412,42 @@ func createMapWithCompositeValues(
m, err := NewMap(storage, address, digesterBuilder, typeInfo)
require.NoError(t, err)

+ slabIDs := make([]SlabID, size)
expectedValues := make([][2]Value, size)
for i := 0; i < size; i++ {
- // Create nested array
- nested, err := NewArray(storage, address, typeInfo)
+ // Create child array
+ childArray, err := NewArray(storage, address, typeInfo)
require.NoError(t, err)

- for j := 0; j < 50; j++ {
- err = nested.Append(Uint64Value(j))
+ expectedChildValues := make([]Value, childArraySize)
+ for j := 0; j < childArraySize; j++ {
+ v := Uint64Value(j)
+
+ err = childArray.Append(v)
require.NoError(t, err)
+
+ expectedChildValues[j] = v
}

k := Uint64Value(i)
- v := nested
+ v := childArray

- expectedValues[i] = [2]Value{k, v}
+ expectedValues[i] = [2]Value{k, arrayValue(expectedChildValues)}
+ slabIDs[i] = childArray.SlabID()

digests := newDigests(i)
digesterBuilder.On("Digest", k).Return(mockDigester{digests})

- // Set nested array to parent
+ // Set child array to parent
existingStorable, err := m.Set(compare, hashInputProvider, k, v)
require.NoError(t, err)
require.Nil(t, existingStorable)
}

- return m, expectedValues
+ return m, expectedValues, slabIDs
}

-func createMapWithSimpleAndCompositeValues(
+func createMapWithSimpleAndChildArrayValues(
t *testing.T,
storage SlabStorage,
address Address,
@@ -10502,7 +10455,8 @@
size int,
compositeValueIndex int,
newDigests func(i int) []Digest,
-) (*OrderedMap, [][2]Value) {
+) (*OrderedMap, [][2]Value, SlabID) {
+ const childArraySize = 50

digesterBuilder := &mockDigesterBuilder{}

@@ -10510,6 +10464,7 @@
m, err := NewMap(storage, address, digesterBuilder, typeInfo)
require.NoError(t, err)

+ var slabID SlabID
values := make([][2]Value, size)
r := 'a'
for i := 0; i < size; i++ {
@@ -10520,34 +10475,46 @@
digesterBuilder.On("Digest", k).Return(mockDigester{digests})

if compositeValueIndex == i {
- // Create nested array with one element
- a, err := NewArray(storage, address, typeInfo)
+ // Create child array with childArraySize elements
+ childArray, err := NewArray(storage, address, typeInfo)
require.NoError(t, err)

- for j := 0; j < 50; j++ {
- err = a.Append(Uint64Value(j))
+ expectedChildValues := make([]Value, childArraySize)
+ for j := 0; j < childArraySize; j++ {
+ v := Uint64Value(j)
+ err = childArray.Append(v)
require.NoError(t, err)
+
+ expectedChildValues[j] = v
}

- values[i] = [2]Value{k, a}
+ values[i] = [2]Value{k, arrayValue(expectedChildValues)}
+
+ existingStorable, err := m.Set(compare, hashInputProvider, k, childArray)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ slabID = childArray.SlabID()
+
} else {
- values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))}
- }
+ v := NewStringValue(strings.Repeat(string(r), 18))
+ values[i] = [2]Value{k, v}

- existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1])
- require.NoError(t, err)
- require.Nil(t, existingStorable)
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ }
}

- return m, values
+ return m, values, slabID
}

func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) {
i := 0
err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) {
require.True(t, i < len(expectedValues))
-
valueEqual(t, typeInfoComparator, expectedValues[i][0], k) - valueEqual(t, typeInfoComparator, expectedValues[i][1], v) + valueEqual(t, expectedValues[i][0], k) + valueEqual(t, expectedValues[i][1], v) i++ return true, nil }) diff --git a/utils_test.go b/utils_test.go index 56eb2274..837fde72 100644 --- a/utils_test.go +++ b/utils_test.go @@ -315,68 +315,61 @@ func (s *InMemBaseStorage) ResetReporter() { s.segmentsTouched = make(map[SlabID]struct{}) } -func valueEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) { - switch a.(type) { +func valueEqual(t *testing.T, expected Value, actual Value) { + switch expected := expected.(type) { + case arrayValue: + actual, ok := actual.(*Array) + require.True(t, ok) + + arrayEqual(t, expected, actual) + case *Array: - arrayEqual(t, tic, a, b) + require.FailNow(t, "expected value shouldn't be *Array") + case *OrderedMap: - mapEqual(t, tic, a, b) + mapEqual(t, expected, actual) + default: - require.Equal(t, a, b) + require.Equal(t, expected, actual) } } -func arrayEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) { - array1, ok := a.(*Array) - require.True(t, ok) - - array2, ok := b.(*Array) - require.True(t, ok) - - require.True(t, tic(array1.Type(), array2.Type())) - require.Equal(t, array1.Address(), array2.Address()) - require.Equal(t, array1.Count(), array2.Count()) - require.Equal(t, array1.SlabID(), array2.SlabID()) - - iterator1, err := array1.Iterator() - require.NoError(t, err) +func arrayEqual(t *testing.T, expected arrayValue, actual *Array) { + require.Equal(t, uint64(len(expected)), actual.Count()) - iterator2, err := array2.Iterator() + iterator, err := actual.Iterator() require.NoError(t, err) + i := 0 for { - value1, err := iterator1.Next() + actualValue, err := iterator.Next() require.NoError(t, err) - value2, err := iterator2.Next() - require.NoError(t, err) - - valueEqual(t, tic, value1, value2) - - if value1 == nil || value2 == nil { + if actualValue == nil { break } + + valueEqual(t, expected[i], actualValue) + i++ } + require.Equal(t, len(expected), i) } -func mapEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) { - m1, ok := a.(*OrderedMap) +func mapEqual(t *testing.T, expected Value, actual Value) { + m1, ok := expected.(*OrderedMap) require.True(t, ok) - m2, ok := b.(*OrderedMap) + m2, ok := actual.(*OrderedMap) require.True(t, ok) - require.True(t, tic(m1.Type(), m2.Type())) require.Equal(t, m1.Address(), m2.Address()) require.Equal(t, m1.Count(), m2.Count()) require.Equal(t, m1.SlabID(), m2.SlabID()) - iterator1, err := m1.Iterator() - require.NoError(t, err) + if m1.Seed() != m2.Seed() { - if m1.Type().IsComposite() { - // Check element by key for composite type because - // composite fields can be rearranged to reuse seed and digests. 
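+ // Seeds differ: the two maps may iterate in different orders even when
+ // they hold the same elements, so each element of m1 is located in m2 by
+ // key. The else branch below handles equal seeds by comparing both
+ // iterators in lockstep.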
+ iterator1, err := m1.Iterator()
+ require.NoError(t, err)

for {
key1, value1, err := iterator1.Next()

@@ -389,22 +382,23 @@ func mapEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) {
iterator2, err := m2.Iterator()
require.NoError(t, err)

- var value2 Value
for {
- key, value, err := iterator2.Next()
+ key2, value2, err := iterator2.Next()
require.NoError(t, err)
- require.NotNil(t, key)
+ require.NotNil(t, key2)

- if reflect.DeepEqual(key, key1) {
- value2 = value
+ if reflect.DeepEqual(key1, key2) {
+ valueEqual(t, value1, value2)
break
}
}
-
- valueEqual(t, tic, value1, value2)
}
+ } else {
+ iterator1, err := m1.Iterator()
+ require.NoError(t, err)
+
iterator2, err := m2.Iterator()
require.NoError(t, err)

@@ -415,8 +409,8 @@
key2, value2, err := iterator2.Next()
require.NoError(t, err)

- valueEqual(t, tic, key1, key2)
- valueEqual(t, tic, value1, value2)
+ valueEqual(t, key1, key2)
+ valueEqual(t, value1, value2)

if key1 == nil || key2 == nil {
break
@@ -454,3 +448,19 @@
require.Equal(t, slabID.address[:], valueID[:slabAddressSize])
require.Equal(t, slabID.index[:], valueID[slabAddressSize:])
}
+
+type arrayValue []Value
+
+var _ Value = &arrayValue{}
+
+func (v arrayValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
+ panic("not reachable")
+}
+
+type mapValue map[Value]Value
+
+var _ Value = &mapValue{}
+
+func (v mapValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
+ panic("not reachable")
+}

From 00a6df8975975ad8c55ec607a566948e3b8f0788 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Tue, 26 Sep 2023 14:52:16 -0500
Subject: [PATCH 032/126] Improve tests to compare child map elements

---
 array_test.go |  66 ++---
 map_test.go   | 745 +++++++++++++++++++++++++++++++++++---------------
 utils_test.go |  77 ++----
 3 files changed, 586 insertions(+), 302 deletions(-)

diff --git a/array_test.go b/array_test.go
index 52e005db..4bd1b6fa 100644
--- a/array_test.go
+++ b/array_test.go
@@ -1482,73 +1482,75 @@ func TestArrayWithChildArrayMap(t *testing.T) {

const arraySize = 4096

- nestedTypeInfo := testTypeInfo{43}
-
+ typeInfo := testTypeInfo{42}
+ childArrayTypeInfo := testTypeInfo{43}
storage := newTestPersistentStorage(t)
-
address := Address{1, 2, 3, 4, 5, 6, 7, 8}

- nestedMaps := make([]Value, arraySize)
+ array, err := NewArray(storage, address, typeInfo)
+ require.NoError(t, err)
+
+ expectedValues := make([]Value, arraySize)
for i := uint64(0); i < arraySize; i++ {
- nested, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo)
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childArrayTypeInfo)
require.NoError(t, err)

- storable, err := nested.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*2))
+ k := Uint64Value(i)
+ v := Uint64Value(i * 2)
+ storable, err := childMap.Set(compare, hashInputProvider, k, v)
require.NoError(t, err)
require.Nil(t, storable)

- require.True(t, nested.root.IsData())
+ require.True(t, childMap.root.IsData())

- nestedMaps[i] = nested
- }
-
- typeInfo := testTypeInfo{42}
-
- array, err := NewArray(storage, address, typeInfo)
- require.NoError(t, err)
-
- for _, a := range nestedMaps {
- err := array.Append(a)
+ err = array.Append(childMap)
require.NoError(t, err)
+
+ expectedValues[i] = mapValue{k: v}
}

- verifyArray(t, storage, typeInfo, address, array, 
nestedMaps, false) + verifyArray(t, storage, typeInfo, address, array, expectedValues, false) }) t.Run("big map", func(t *testing.T) { const arraySize = 4096 + typeInfo := testTypeInfo{42} nestedTypeInfo := testTypeInfo{43} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([]Value, arraySize) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - nested, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) require.NoError(t, err) + expectedChildMapValues := mapValue{} for i := uint64(0); i < 25; i++ { - storable, err := nested.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*2)) + k := Uint64Value(i) + v := Uint64Value(i * 2) + + storable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, storable) - } - require.False(t, nested.root.IsData()) - - values[i] = nested - } + expectedChildMapValues[k] = v + } - typeInfo := testTypeInfo{42} + require.False(t, childMap.root.IsData()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - for _, a := range values { - err := array.Append(a) + err = array.Append(childMap) require.NoError(t, err) + + expectedValues[i] = expectedChildMapValues } - verifyArray(t, storage, typeInfo, address, array, values, true) + verifyArray(t, storage, typeInfo, address, array, expectedValues, true) }) } diff --git a/map_test.go b/map_test.go index 1788b391..2b0c5ec7 100644 --- a/map_test.go +++ b/map_test.go @@ -2922,21 +2922,20 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) r := 'a' for i := uint64(0); i < mapSize; i++ { - var k, v Value // Create child map childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) - k = Uint64Value(i) - v = Uint64Value(i * 2) + ck := Uint64Value(i) + cv := Uint64Value(i * 2) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue(string(r)) + k := NewStringValue(string(r)) r++ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -2946,7 +2945,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = mapValue{ck: cv} } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -3105,7 +3104,6 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) r := 'a' for i := uint64(0); i < mapSize; i++ { - var k, v Value var ti TypeInfo if i%2 == 0 { @@ -3118,15 +3116,15 @@ func TestMapEncodeDecode(t *testing.T) { childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) require.NoError(t, err) - k = Uint64Value(i) - v = Uint64Value(i * 2) + ck := Uint64Value(i) + cv := Uint64Value(i * 2) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue(string(r)) + k := NewStringValue(string(r)) r++ digesterBuilder.On("Digest", 
k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -3136,7 +3134,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = mapValue{ck: cv} } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -3294,17 +3292,15 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) r := 'a' for i := uint64(0); i < mapSize; i++ { - var k, v Value - // Create grand child map gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) - k = Uint64Value(i) - v = Uint64Value(i * 2) + gck := Uint64Value(i) + gcv := Uint64Value(i * 2) // Insert element to grand child map - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, gck, gcv) require.NoError(t, err) require.Nil(t, existingStorable) @@ -3312,14 +3308,14 @@ func TestMapEncodeDecode(t *testing.T) { childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) - k = Uint64Value(i) + ck := Uint64Value(i) // Insert grand child map to child map - existingStorable, err = childMap.Set(compare, hashInputProvider, k, gchildMap) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, gchildMap) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue(string(r)) + k := NewStringValue(string(r)) r++ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -3329,7 +3325,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = mapValue{ck: mapValue{gck: gcv}} } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -3552,8 +3548,6 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) r := 'a' for i := uint64(0); i < mapSize; i++ { - var k, v Value - var gti TypeInfo if i%2 == 0 { gti = gchildMapTypeInfo2 @@ -3565,11 +3559,11 @@ func TestMapEncodeDecode(t *testing.T) { gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gti) require.NoError(t, err) - k = Uint64Value(i) - v = Uint64Value(i * 2) + gck := Uint64Value(i) + gcv := Uint64Value(i * 2) // Insert element to grand child map - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, gck, gcv) require.NoError(t, err) require.Nil(t, existingStorable) @@ -3584,14 +3578,14 @@ func TestMapEncodeDecode(t *testing.T) { childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), cti) require.NoError(t, err) - k = Uint64Value(i) + ck := Uint64Value(i) // Insert grand child map to child map - existingStorable, err = childMap.Set(compare, hashInputProvider, k, gchildMap) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, gchildMap) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue(string(r)) + k := NewStringValue(string(r)) r++ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -3601,7 +3595,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = mapValue{ck: mapValue{gck: gcv}} } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -3823,21 +3817,20 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) r := 'a' for i 
:= uint64(0); i < mapSize; i++ { - var k, v Value // Create child map childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) - k = Uint64Value(i) - v = Uint64Value(i * 2) + ck := Uint64Value(i) + cv := Uint64Value(i * 2) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue(string(r)) + k := NewStringValue(string(r)) r++ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -3847,7 +3840,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = mapValue{ck: cv} } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -4293,7 +4286,6 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) r := 'a' for i := uint64(0); i < mapSize; i++ { - var k, v Value var ti TypeInfo switch i % 4 { @@ -4311,15 +4303,15 @@ func TestMapEncodeDecode(t *testing.T) { childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) require.NoError(t, err) - k = Uint64Value(i) - v = Uint64Value(i * 2) + ck := Uint64Value(i) + cv := Uint64Value(i * 2) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue(string(r)) + k := NewStringValue(string(r)) r++ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -4329,7 +4321,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = mapValue{ck: cv} } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -5444,6 +5436,7 @@ func TestMapEncodeDecode(t *testing.T) { childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo2) require.NoError(t, err) + expectedChildMapValues := mapValue{} for i := 0; i < 2; i++ { k := Uint64Value(i) v := NewStringValue(strings.Repeat("b", 22)) @@ -5451,11 +5444,13 @@ func TestMapEncodeDecode(t *testing.T) { existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + + expectedChildMapValues[k] = v } k := NewStringValue(strings.Repeat(string(r), 22)) v := childMap - keyValues[k] = v + keyValues[k] = expectedChildMapValues digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -5623,6 +5618,7 @@ func TestMapEncodeDecode(t *testing.T) { gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gchildTypeInfo) require.NoError(t, err) + expectedGChildMapValues := mapValue{} r := 'a' for i := 0; i < 2; i++ { k := NewStringValue(strings.Repeat(string(r), 22)) @@ -5632,6 +5628,8 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + r++ } @@ -5642,7 +5640,7 @@ func TestMapEncodeDecode(t *testing.T) { k := Uint64Value(mapSize - 1) v := childMap - keyValues[k] = v + keyValues[k] = mapValue{Uint64Value(0): expectedGChildMapValues} digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ 
-6402,21 +6400,20 @@ func TestMapEncodeDecode(t *testing.T) {
const mapSize = 2
keyValues := make(map[Value]Value, mapSize)
for i := uint64(0); i < mapSize; i++ {
- var k, v Value

// Create child map, composite with one field "uuid"
childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
require.NoError(t, err)

- k = NewStringValue("uuid")
- v = Uint64Value(i)
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)

// Insert element to child map
- existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
require.NoError(t, err)
require.Nil(t, existingStorable)

- k = Uint64Value(i)
+ k := Uint64Value(i)

digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})

@@ -6425,7 +6422,7 @@ func TestMapEncodeDecode(t *testing.T) {
require.NoError(t, err)
require.Nil(t, existingStorable)

- keyValues[k] = childMap
+ keyValues[k] = mapValue{ck: cv}
}

require.Equal(t, uint64(mapSize), parentMap.Count())

@@ -6558,29 +6555,33 @@ func TestMapEncodeDecode(t *testing.T) {
const mapSize = 2
keyValues := make(map[Value]Value, mapSize)
for i := uint64(0); i < mapSize; i++ {
- var k, v Value
+ expectedChildMapValues := mapValue{}

// Create child map, composite with one field "uuid"
childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
require.NoError(t, err)

- k = NewStringValue("uuid")
- v = Uint64Value(i)
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)

// Insert element to child map
- existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
require.NoError(t, err)
require.Nil(t, existingStorable)

- k = NewStringValue("amount")
- v = Uint64Value(i * 2)
+ expectedChildMapValues[ck] = cv
+
+ ck = NewStringValue("amount")
+ cv = Uint64Value(i * 2)

// Insert element to child map
- existingStorable, err = childMap.Set(compare, hashInputProvider, k, v)
+ existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv)
require.NoError(t, err)
require.Nil(t, existingStorable)

- k = Uint64Value(i)
+ expectedChildMapValues[ck] = cv
+
+ k := Uint64Value(i)

digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})

@@ -6589,7 +6590,7 @@ func TestMapEncodeDecode(t *testing.T) {
require.NoError(t, err)
require.Nil(t, existingStorable)

- keyValues[k] = childMap
+ keyValues[k] = expectedChildMapValues
}

require.Equal(t, uint64(mapSize), parentMap.Count())

@@ -6729,29 +6730,33 @@ func TestMapEncodeDecode(t *testing.T) {
keyValues := make(map[Value]Value, mapSize)
// fields are ordered differently because of different seed.
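// Expected values are recorded as mapValue (a plain map[Value]Value) snapshot
// of the child map's elements, rather than as the live child *OrderedMap.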
for i := uint64(0); i < mapSize; i++ { - var k, v Value + expectedChildMapValues := mapValue{} // Create child map, composite with one field "uuid" childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) - k = NewStringValue("uuid") - v = Uint64Value(i) + ck := NewStringValue("uuid") + cv := Uint64Value(i) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue("a") - v = Uint64Value(i * 2) + expectedChildMapValues[ck] = cv + + ck = NewStringValue("a") + cv = Uint64Value(i * 2) // Insert element to child map - existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = Uint64Value(i) + expectedChildMapValues[ck] = cv + + k := Uint64Value(i) digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -6760,7 +6765,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = expectedChildMapValues } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -6902,28 +6907,45 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} // Create child map childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) + ck := NewStringValue("uuid") + cv := Uint64Value(i) + // Insert first element "uuid" to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, NewStringValue("uuid"), Uint64Value(i)) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[ck] = cv + // Insert second element to child map (second element is different) switch i % 3 { case 0: - existingStorable, err = childMap.Set(compare, hashInputProvider, NewStringValue("a"), Uint64Value(i*2)) + ck = NewStringValue("a") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + case 1: - existingStorable, err = childMap.Set(compare, hashInputProvider, NewStringValue("b"), Uint64Value(i*2)) + ck = NewStringValue("b") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + case 2: - existingStorable, err = childMap.Set(compare, hashInputProvider, NewStringValue("c"), Uint64Value(i*2)) + ck = NewStringValue("c") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) } + require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[ck] = cv + k := Uint64Value(i) digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -6933,7 +6955,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = expectedChildMapValues } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -7132,31 +7154,35 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) // fields are ordered differently because of different seed. 
for i := uint64(0); i < mapSize; i++ { - var k, v Value + expectedChildMapValues := mapValue{} // Create child map, composite with one field "uuid" childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) require.NoError(t, err) - k = NewStringValue("uuid") - v = Uint64Value(i) + ck := NewStringValue("uuid") + cv := Uint64Value(i) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[ck] = cv + if i == 0 { - k = NewStringValue("a") - v = Uint64Value(i * 2) + ck = NewStringValue("a") + cv = Uint64Value(i * 2) // Insert element to child map - existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv } - k = Uint64Value(i) + k := Uint64Value(i) digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -7165,7 +7191,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = expectedChildMapValues } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -7321,6 +7347,8 @@ func TestMapEncodeDecode(t *testing.T) { keyValues := make(map[Value]Value, mapSize) // fields are ordered differently because of different seed. for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} + var ti TypeInfo if i%2 == 0 { ti = childMapTypeInfo1 @@ -7328,29 +7356,31 @@ func TestMapEncodeDecode(t *testing.T) { ti = childMapTypeInfo2 } - var k, v Value - // Create child map, composite with two field "uuid" and "a" childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) require.NoError(t, err) - k = NewStringValue("uuid") - v = Uint64Value(i) + ck := NewStringValue("uuid") + cv := Uint64Value(i) // Insert element to child map - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = NewStringValue("a") - v = Uint64Value(i * 2) + expectedChildMapValues[ck] = cv + + ck = NewStringValue("a") + cv = Uint64Value(i * 2) // Insert element to child map - existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - k = Uint64Value(i) + expectedChildMapValues[ck] = cv + + k := Uint64Value(i) digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) @@ -7359,7 +7389,7 @@ func TestMapEncodeDecode(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = childMap + keyValues[k] = expectedChildMapValues } require.Equal(t, uint64(mapSize), parentMap.Count()) @@ -10760,20 +10790,23 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. 
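// While a child map is inlined, SlabID() returns SlabIDUndefined and only
// ValueID() stays stable; the assertions in the loop below rely on that
// invariant.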
for i := 0; i < 3; i++ { - for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) require.Equal(t, uint64(i+1), childMap.Count()) + expectedChildMapValues[k] = v + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged @@ -10796,22 +10829,28 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } // Add one more element to child array which triggers inlined child array slab becomes standalone slab - for i, child := range children { + i := 0 + for childKey, child := range children { childMap := child.m valueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + require.False(t, childMap.Inlined()) require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. + i++ + expectedSlabID := valueIDToSlabID(valueID) require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged @@ -10830,10 +10869,17 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } // Remove elements from child map which triggers standalone map slab becomes inlined slab again. - for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID - keys := child.keys + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } for _, k := range keys { existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) @@ -10841,6 +10887,8 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, k) + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged @@ -10898,20 +10946,23 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. 
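// Child map contents are tracked directly in expectedKeyValues (as mapValue)
// instead of a separate child.keys slice, so every Set and Remove below also
// updates the expected state that verifyMap checks.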
for i := 0; i < 3; i++ { - for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) require.Equal(t, uint64(i+1), childMap.Count()) + expectedChildMapValues[k] = v + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged @@ -10932,22 +10983,28 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } // Add one more element to child array which triggers inlined child array slab becomes standalone slab - for i, child := range children { + i := 0 + for childKey, child := range children { childMap := child.m valueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + require.False(t, childMap.Inlined()) require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. + i++ + expectedSlabID := valueIDToSlabID(valueID) require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged @@ -10970,21 +11027,31 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. - for i, child := range children { + i = 0 + for childKey, child := range children { childMap := child.m valueID := child.valueID - keys := child.keys - lastKey := keys[len(keys)-1] - child.keys = child.keys[:len(keys)-1] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + var aKey Value + for k := range expectedChildMapValues { + aKey = k + break + } - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, lastKey) + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, lastKey, existingKey) + require.Equal(t, aKey, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, aKey) + require.Equal(t, 1+mapSize-1-i, getStoredDeltas(storage)) + i++ + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged @@ -11004,10 +11071,17 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } // Remove remaining elements from each inlined child map. 
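// Keys are snapshotted into a slice before removing, so removals can update
// the expected map as they go; removal order follows Go's randomized map
// iteration, which the assertions below do not depend on.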
- for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID - keys := child.keys + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } for _, k := range keys { existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) @@ -11015,6 +11089,8 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, k) + require.Equal(t, 1, getStoredDeltas(storage)) require.True(t, childMap.Inlined()) @@ -11069,20 +11145,23 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. for i := 0; i < 3; i++ { - for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) require.Equal(t, uint64(i+1), childMap.Count()) + expectedChildMapValues[k] = v + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged @@ -11103,19 +11182,22 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.False(t, parentMap.root.IsData()) // Add one more element to child array which triggers inlined child array slab becomes standalone slab - for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + require.False(t, childMap.Inlined()) expectedSlabID := valueIDToSlabID(valueID) @@ -11136,19 +11218,26 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. 
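// Removing any single element is enough to make the child map inlinable
// again; aKey comes from map iteration, so which element is removed is
// arbitrary.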
- for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID - keys := child.keys - lastKey := keys[len(keys)-1] - child.keys = child.keys[:len(keys)-1] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + var aKey Value + for k := range expectedChildMapValues { + aKey = k + break + } - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, lastKey) + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, lastKey, existingKey) + require.Equal(t, aKey, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, aKey) + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged @@ -11166,10 +11255,17 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.False(t, parentMap.root.IsData()) // Remove remaining elements from each inlined child map. - for _, child := range children { + for childKey, child := range children { childMap := child.m valueID := child.valueID - keys := child.keys + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } for _, k := range keys { existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) @@ -11177,6 +11273,8 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, k) + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged @@ -11247,25 +11345,38 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize := parentMap.root.ByteSize() // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. 
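// With three levels of nesting, growing the grand child map can push the
// child map past the max inline size while the grand child itself stays
// inlined; the SlabID/ValueID checks below distinguish the two cases.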
- for _, child := range children { + for childKey, child := range children { require.Equal(t, 1, len(child.children)) childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + // Grand child map is still inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -11305,25 +11416,38 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Add one more element to grand child map which triggers inlined child map slab (NOT grand child map slab) becomes standalone slab - for _, child := range children { + for childKey, child := range children { require.Equal(t, 1, len(child.children)) childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + // Grand child map is inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -11364,20 +11488,40 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. 
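// Each child map holds exactly one grand child; its key is no longer tracked
// separately, so the grand child is recovered by ranging over child.children
// and breaking after the first entry.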
- for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID - for _, k := range gchild.keys { + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + gchildKeys := make([]Value, 0, len(expectedGChildMapValues)) + for k := range expectedGChildMapValues { + gchildKeys = append(gchildKeys, k) + } + + for _, k := range gchildKeys { existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedGChildMapValues, k) + // Child map is inlined require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) @@ -11459,25 +11603,38 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize := parentMap.root.ByteSize() // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. - for _, child := range children { + for childKey, child := range children { require.Equal(t, 1, len(child.children)) childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + // Grand child map is still inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -11516,26 +11673,42 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + gchildLargeElementKeys := make(map[Value]Value) // key: child map key, value: gchild map key // Add one large element to grand child map which triggers inlined grand child map slab (NOT child map slab) becomes standalone slab - for _, child := range children { + for childKey, child := range children { require.Equal(t, 1, len(child.children)) childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, largeValueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, 
k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + + gchildLargeElementKeys[childKey] = k + // Grand child map is NOT inlined require.False(t, gchildMap.Inlined()) require.Equal(t, valueIDToSlabID(gValueID), gchildMap.SlabID()) // Slab ID is valid for not inlined slab @@ -11576,23 +11749,46 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID - // Remove the last element (large element) first to trigger grand child map being inlined again. - for i := len(gchild.keys) - 1; i >= 0; i-- { - k := gchild.keys[i] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + // Get all grand child map keys with large element key first + keys := make([]Value, 0, len(expectedGChildMapValues)) + keys = append(keys, gchildLargeElementKeys[childKey]) + for k := range expectedGChildMapValues { + if k != gchildLargeElementKeys[childKey] { + keys = append(keys, k) + } + } + + // Remove all elements (large element first) to trigger grand child map being inlined again. + for _, k := range keys { existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedGChildMapValues, k) + // Child map is inlined require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) @@ -11672,23 +11868,36 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize := parentMap.root.ByteSize() // Insert 1 elements to grand child map (both child map and grand child map are still inlined). 
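 // (Throughout these loops, expected inlined slab sizes are rebuilt from the
 // encoding constants: inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize,
 // plus singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
 // per element.)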
- for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + // Grand child map is still inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -11730,24 +11939,32 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize = parentMap.root.ByteSize() // Add 1 element to each child map so child map reaches its max size - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchild *mapInfo + for _, gv := range child.children { + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + // Grand child map is inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -11787,24 +12004,33 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Add 1 more element to each child map so child map reaches its max size - for i, child := range children { + i := 0 + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchild *mapInfo + for _, gv := range child.children { + gchild = gv + break + } + gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - child.keys = append(child.keys, k) - existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + // Grand child map is inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -11819,6 +12045,8 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.False(t, parentMap.Inlined()) require.Equal(t, (1 + i + 1), getStoredDeltas(storage)) + i++ + // Test inlined grand child slab size expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize expectedGrandChildMapSize := 
inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + @@ -11847,23 +12075,39 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentMapSize := parentMap.root.ByteSize() // Remove one element from child map which triggers standalone child map slab becomes inlined slab again. - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } gchildMap := gchild.m gValueID := gchild.valueID - // Remove one element - k := child.keys[len(child.keys)-1] - child.keys = child.keys[:len(child.keys)-1] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + var aKey Value + for k := range expectedChildMapValues { + if k != gchildKey { + aKey = k + break + } + } + + // Remove one element + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, k, existingKey) + require.Equal(t, aKey, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, aKey) + // Child map is inlined require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) @@ -11901,21 +12145,39 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // remove remaining elements from child map, except for grand child map - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } gchildMap := gchild.m gValueID := gchild.valueID - // Remove all elements, except grand child map (first element in child.keys) - for _, k := range child.keys[1:] { + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + keys := make([]Value, 0, len(expectedChildMapValues)-1) + for k := range expectedChildMapValues { + if k != gchildKey { + keys = append(keys, k) + } + } + + // Remove all elements, except grand child map + for _, k := range keys { existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, k) + // Child map is inlined require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) @@ -11993,23 +12255,35 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // Insert 1 element to grand child map // Both child map and grand child map are still inlined, but parent map's root slab is metadata slab. 
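The inlined/standalone assertions repeated in each of these loops check a single
invariant; as a condensed sketch (every identifier below is one already used in
this test file):

	// ValueID is stable across inlining transitions; SlabID is not.
	require.Equal(t, cValueID, childMap.ValueID())
	if childMap.Inlined() {
		require.Equal(t, SlabIDUndefined, childMap.SlabID())
	} else {
		require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID())
	}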
- for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + // Grand child map is still inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -12046,23 +12320,35 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // - grand child maps are inlined // - child maps are standalone // - parent map's root slab is data slab. - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } gchildMap := gchild.m gValueID := gchild.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := NewStringValue(randStr(r, valueStringSize)) - gchild.keys = append(gchild.keys, k) - existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedGChildMapValues[k] = v + // Grand child map is still inlined require.True(t, gchildMap.Inlined()) require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab @@ -12104,23 +12390,40 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // - grand child maps are inlined // - child maps are inlined // - parent map root slab is metadata slab - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID - gchild := child.children[0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } gchildMap := gchild.m gValueID := gchild.valueID - // Remove one element from grand child map - k := gchild.keys[len(gchild.keys)-1] - gchild.keys = gchild.keys[:len(gchild.keys)-1] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + var aKey Value + for k := range expectedGChildMapValues { + aKey = k + break + } - existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + // Remove one element from grand child map + existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, k, existingKey) + require.Equal(t, aKey, existingKey) require.NotNil(t, existingValue) + delete(expectedGChildMapValues, aKey) + // Child map is inlined 
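 // (Removing a grand child element shrinks the child map's encoded size back
 // under the parent's max inline limit, so the child map is re-inlined.)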
require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) @@ -12156,17 +12459,27 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // Remove all grand child element to trigger // - child maps are inlined // - parent map root slab is data slab - for _, child := range children { + for childKey, child := range children { childMap := child.m cValueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } + // Remove grand children - for _, k := range child.keys { + for _, k := range keys { existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + delete(expectedChildMapValues, k) + // Child map is inlined require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) @@ -12239,7 +12552,7 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { require.NoError(t, err) require.Nil(t, existingStorable) - expectedKeyValues[k] = childMap + expectedKeyValues[k] = mapValue{} require.True(t, childMap.Inlined()) testInlinedMapIDs(t, address, childMap) @@ -12288,17 +12601,25 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { expectedKeyValues[k] = v keysForNonChildMaps = append(keysForNonChildMaps, k) - for i, child := range children { + i := 0 + for childKey, child := range children { childMap := child.m childValueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := Uint64Value(i) + i++ + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged @@ -12327,17 +12648,25 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { delete(expectedKeyValues, k) - for i, child := range children { + i := 0 + for childKey, child := range children { childMap := child.m childValueID := child.valueID + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) v := Uint64Value(i) + i++ + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) + expectedChildMapValues[k] = v + require.True(t, childMap.Inlined()) require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged @@ -12387,7 +12716,7 @@ func createMapWithEmptyChildMap( require.NoError(t, err) require.Nil(t, existingStorable) - expectedKeyValues[k] = childMap + expectedKeyValues[k] = mapValue{} require.True(t, childMap.Inlined()) testInlinedMapIDs(t, address, childMap) @@ -12449,7 +12778,7 @@ func createMapWithEmpty2LevelChildMap( require.NoError(t, err) require.Nil(t, existingStorable) - expectedKeyValues[k] = childMap + expectedKeyValues[k] = mapValue{k: mapValue{}} require.True(t, childMap.Inlined()) testInlinedMapIDs(t, address, childMap) @@ -12478,13 +12807,12 @@ func 
createMapWithEmpty2LevelChildMap( type mapInfo struct { m *OrderedMap valueID ValueID - keys []Value - children []*mapInfo + children map[Value]*mapInfo } -func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap *OrderedMap) []*mapInfo { +func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap *OrderedMap) map[Value]*mapInfo { - children := make([]*mapInfo, 0, parentMap.Count()) + children := make(map[Value]*mapInfo) err := parentMap.IterateKeys(func(k Value) (bool, error) { if k == nil { @@ -12505,22 +12833,11 @@ func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap * testNotInlinedMapIDs(t, address, childMap) } - var childKeys []Value - err = childMap.IterateKeys(func(key Value) (bool, error) { - if key == nil { - return false, nil - } - childKeys = append(childKeys, key) - return true, nil - }) - require.NoError(t, err) - - children = append(children, &mapInfo{ + children[k] = &mapInfo{ m: childMap, valueID: childMap.ValueID(), - keys: childKeys, children: getInlinedChildMapsFromParentMap(t, address, childMap), - }) + } return true, nil }) diff --git a/utils_test.go b/utils_test.go index 837fde72..6bcd6608 100644 --- a/utils_test.go +++ b/utils_test.go @@ -22,7 +22,6 @@ import ( "flag" "fmt" "math/rand" - "reflect" "testing" "time" @@ -326,9 +325,15 @@ func valueEqual(t *testing.T, expected Value, actual Value) { case *Array: require.FailNow(t, "expected value shouldn't be *Array") - case *OrderedMap: + case mapValue: + actual, ok := actual.(*OrderedMap) + require.True(t, ok) + mapEqual(t, expected, actual) + case *OrderedMap: + require.FailNow(t, "expected value shouldn't be *OrderedMap") + default: require.Equal(t, expected, actual) } @@ -355,68 +360,28 @@ func arrayEqual(t *testing.T, expected arrayValue, actual *Array) { require.Equal(t, len(expected), i) } -func mapEqual(t *testing.T, expected Value, actual Value) { - m1, ok := expected.(*OrderedMap) - require.True(t, ok) - - m2, ok := actual.(*OrderedMap) - require.True(t, ok) - - require.Equal(t, m1.Address(), m2.Address()) - require.Equal(t, m1.Count(), m2.Count()) - require.Equal(t, m1.SlabID(), m2.SlabID()) +func mapEqual(t *testing.T, expected mapValue, actual *OrderedMap) { + require.Equal(t, uint64(len(expected)), actual.Count()) - if m1.Seed() != m2.Seed() { + iterator, err := actual.Iterator() + require.NoError(t, err) - iterator1, err := m1.Iterator() + i := 0 + for { + actualKey, actualValue, err := iterator.Next() require.NoError(t, err) - for { - key1, value1, err := iterator1.Next() - require.NoError(t, err) - - if key1 == nil { - break - } - - iterator2, err := m2.Iterator() - require.NoError(t, err) - - for { - key2, value2, err := iterator2.Next() - require.NoError(t, err) - require.NotNil(t, key2) - - if reflect.DeepEqual(key1, key2) { - valueEqual(t, value1, value2) - break - } - } + if actualKey == nil { + break } - } else { - - iterator1, err := m1.Iterator() - require.NoError(t, err) - - iterator2, err := m2.Iterator() - require.NoError(t, err) + expectedValue, exist := expected[actualKey] + require.True(t, exist) - for { - key1, value1, err := iterator1.Next() - require.NoError(t, err) - - key2, value2, err := iterator2.Next() - require.NoError(t, err) - - valueEqual(t, key1, key2) - valueEqual(t, value1, value2) - - if key1 == nil || key2 == nil { - break - } - } + valueEqual(t, expectedValue, actualValue) + i++ } + require.Equal(t, len(expected), i) } func valueIDToSlabID(vid ValueID) SlabID { From 
d3de29174cec542bb548ac8a239eb53b55cbfce7 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 18 Jul 2023 14:54:24 -0500 Subject: [PATCH 033/126] Add ReadOnly iterators and refactor other iterators This change: - Adds ReadOnly iterators that match current iterator API (except for the "ReadOnly" suffix added to some function names). - Refactors API of non-Readonly iterators because register inlining will require more parameters for MapIterator. For ReadOnly iterators, the caller is responsible for preventing changes to child containers during iteration because mutations of child containers are not guaranteed to persist. For non-ReadOnly iterators, two additional parameters are needed to update child container in parent map when child container is modified. --- array.go | 87 +++++++++++++++++++++++++++++---------------- array_bench_test.go | 4 +-- array_test.go | 48 ++++++++++++------------- cmd/stress/utils.go | 12 +++---- map.go | 75 +++++++++++++++++++++++--------------- map_test.go | 38 ++++++++++---------- utils_test.go | 4 +-- 7 files changed, 156 insertions(+), 112 deletions(-) diff --git a/array.go b/array.go index 40bb8686..b63b3e6e 100644 --- a/array.go +++ b/array.go @@ -3117,6 +3117,7 @@ type ArrayIterator struct { dataSlab *ArrayDataSlab index int remainingCount int + readOnly bool } func (i *ArrayIterator) Next() (Value, error) { @@ -3179,6 +3180,19 @@ func (a *Array) Iterator() (*ArrayIterator, error) { }, nil } +// ReadOnlyIterator returns readonly iterator for array elements. +// If elements of child containers are mutated, those changes +// are not guaranteed to persist. +func (a *Array) ReadOnlyIterator() (*ArrayIterator, error) { + iterator, err := a.Iterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Iterator(). + return nil, err + } + iterator.readOnly = true + return iterator, nil +} + func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { count := a.Count() @@ -3229,16 +3243,18 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato }, nil } -type ArrayIterationFunc func(element Value) (resume bool, err error) - -func (a *Array) Iterate(fn ArrayIterationFunc) error { - - iterator, err := a.Iterator() +func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { + iterator, err := a.RangeIterator(startIndex, endIndex) if err != nil { - // Don't need to wrap error as external error because err is already categorized by Array.Iterator(). - return err + return nil, err } + iterator.readOnly = true + return iterator, nil +} + +type ArrayIterationFunc func(element Value) (resume bool, err error) +func iterate(iterator *ArrayIterator, fn ArrayIterationFunc) error { for { value, err := iterator.Next() if err != nil { @@ -3259,33 +3275,42 @@ func (a *Array) Iterate(fn ArrayIterationFunc) error { } } -func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { +func (a *Array) Iterate(fn ArrayIterationFunc) error { + iterator, err := a.Iterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.Iterator(). 
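+ // (Errors returned by the user-supplied ArrayIterationFunc, by contrast,
+ // are wrapped as external errors inside iterate().)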
+ return err + } + return iterate(iterator, fn) +} + +func (a *Array) IterateReadOnly(fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyIterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyIterator(). + return err + } + return iterate(iterator, fn) +} +func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { iterator, err := a.RangeIterator(startIndex, endIndex) if err != nil { // Don't need to wrap error as external error because err is already categorized by Array.RangeIterator(). return err } + return iterate(iterator, fn) +} - for { - value, err := iterator.Next() - if err != nil { - // Don't need to wrap error as external error because err is already categorized by ArrayIterator.Next(). - return err - } - if value == nil { - return nil - } - resume, err := fn(value) - if err != nil { - // Wrap err as external error (if needed) because err is returned by ArrayIterationFunc callback. - return wrapErrorAsExternalErrorIfNeeded(err) - } - if !resume { - return nil - } +func (a *Array) IterateReadOnlyRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyRangeIterator(startIndex, endIndex) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyRangeIterator(). + return err } + return iterate(iterator, fn) } + func (a *Array) Count() uint64 { return uint64(a.root.Header().count) } @@ -3309,7 +3334,7 @@ func (a *Array) Type() TypeInfo { } func (a *Array) String() string { - iterator, err := a.Iterator() + iterator, err := a.ReadOnlyIterator() if err != nil { return err.Error() } @@ -3793,8 +3818,8 @@ func (i *ArrayLoadedValueIterator) Next() (Value, error) { return nil, nil } -// LoadedValueIterator returns iterator to iterate loaded array elements. -func (a *Array) LoadedValueIterator() (*ArrayLoadedValueIterator, error) { +// ReadOnlyLoadedValueIterator returns iterator to iterate loaded array elements. +func (a *Array) ReadOnlyLoadedValueIterator() (*ArrayLoadedValueIterator, error) { switch slab := a.root.(type) { case *ArrayDataSlab: @@ -3832,9 +3857,9 @@ func (a *Array) LoadedValueIterator() (*ArrayLoadedValueIterator, error) { } } -// IterateLoadedValues iterates loaded array values. -func (a *Array) IterateLoadedValues(fn ArrayIterationFunc) error { - iterator, err := a.LoadedValueIterator() +// IterateReadOnlyLoadedValues iterates loaded array values. +func (a *Array) IterateReadOnlyLoadedValues(fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyLoadedValueIterator() if err != nil { // Don't need to wrap error as external error because err is already categorized by Array.LoadedValueIterator(). 
return err diff --git a/array_bench_test.go b/array_bench_test.go index 572abff8..b8c06cd0 100644 --- a/array_bench_test.go +++ b/array_bench_test.go @@ -355,7 +355,7 @@ func benchmarkNewArrayFromAppend(b *testing.B, initialArraySize int) { for i := 0; i < b.N; i++ { copied, _ := NewArray(storage, array.Address(), array.Type()) - _ = array.Iterate(func(value Value) (bool, error) { + _ = array.IterateReadOnly(func(value Value) (bool, error) { _ = copied.Append(value) return true, nil }) @@ -379,7 +379,7 @@ func benchmarkNewArrayFromBatchData(b *testing.B, initialArraySize int) { b.StartTimer() for i := 0; i < b.N; i++ { - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(b, err) copied, _ := NewArrayFromBatchData(storage, array.Address(), array.Type(), func() (Value, error) { diff --git a/array_test.go b/array_test.go index 4bd1b6fa..599f2365 100644 --- a/array_test.go +++ b/array_test.go @@ -100,7 +100,7 @@ func _verifyArray( // Verify array elements by iterator i := 0 - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { valueEqual(t, expectedValues[i], v) i++ return true, nil @@ -679,7 +679,7 @@ func TestArrayIterate(t *testing.T) { require.NoError(t, err) i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { i++ return true, nil }) @@ -706,7 +706,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -743,7 +743,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -776,7 +776,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -812,7 +812,7 @@ func TestArrayIterate(t *testing.T) { i := uint64(0) j := uint64(1) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(j), v) i++ j += 2 @@ -838,7 +838,7 @@ func TestArrayIterate(t *testing.T) { } i := 0 - err = array.Iterate(func(_ Value) (bool, error) { + err = array.IterateReadOnly(func(_ Value) (bool, error) { if i == count/2 { return false, nil } @@ -867,7 +867,7 @@ func TestArrayIterate(t *testing.T) { testErr := errors.New("test") i := 0 - err = array.Iterate(func(_ Value) (bool, error) { + err = array.IterateReadOnly(func(_ Value) (bool, error) { if i == count/2 { return false, testErr } @@ -893,7 +893,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { count := array.Count() // If startIndex > count, IterateRange returns SliceOutOfBoundsError - err = array.IterateRange(count+1, count+1, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(count+1, count+1, func(v Value) (bool, error) { i++ return true, nil }) @@ -906,7 +906,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { require.Equal(t, uint64(0), i) // If endIndex > count, IterateRange returns SliceOutOfBoundsError - err = array.IterateRange(0, count+1, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(0, count+1, func(v Value) 
(bool, error) { i++ return true, nil }) @@ -918,7 +918,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { // If startIndex > endIndex, IterateRange returns InvalidSliceIndexError if count > 0 { - err = array.IterateRange(1, 0, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(1, 0, func(v Value) (bool, error) { i++ return true, nil }) @@ -933,7 +933,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { for startIndex := uint64(0); startIndex <= count; startIndex++ { for endIndex := startIndex; endIndex <= count; endIndex++ { i = uint64(0) - err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(startIndex, endIndex, func(v Value) (bool, error) { valueEqual(t, v, values[int(startIndex+i)]) i++ return true, nil @@ -1015,7 +1015,7 @@ func TestArrayIterateRange(t *testing.T) { startIndex := uint64(1) endIndex := uint64(5) count := endIndex - startIndex - err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + err = array.IterateReadOnlyRange(startIndex, endIndex, func(_ Value) (bool, error) { if i == count/2 { return false, nil } @@ -1044,7 +1044,7 @@ func TestArrayIterateRange(t *testing.T) { startIndex := uint64(1) endIndex := uint64(5) count := endIndex - startIndex - err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + err = array.IterateReadOnlyRange(startIndex, endIndex, func(_ Value) (bool, error) { if i == count/2 { return false, testErr } @@ -3059,7 +3059,7 @@ func TestEmptyArray(t *testing.T) { t.Run("iterate", func(t *testing.T) { i := uint64(0) - err := array.Iterate(func(v Value) (bool, error) { + err := array.IterateReadOnly(func(v Value) (bool, error) { i++ return true, nil }) @@ -3301,7 +3301,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(0), array.Count()) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) // Create a new array with new storage, new address, and original array's elements. @@ -3341,7 +3341,7 @@ func TestArrayFromBatchData(t *testing.T) { require.Equal(t, uint64(arraySize), array.Count()) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) // Create a new array with new storage, new address, and original array's elements. 
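All of these TestArrayFromBatchData hunks make the same substitution; the copy
pattern they exercise is, in condensed form (a sketch reusing only calls that
appear elsewhere in this patch):

	iter, err := array.ReadOnlyIterator()
	require.NoError(t, err)

	copied, err := NewArrayFromBatchData(storage, address, array.Type(),
		func() (Value, error) {
			// iter.Next() returns a nil Value once the array is exhausted,
			// which ends the batch.
			return iter.Next()
		})
	require.NoError(t, err)

	require.Equal(t, array.Count(), copied.Count())

A read-only iterator is sufficient here because batch copying only reads the
source elements; it never mutates them.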
@@ -3385,7 +3385,7 @@ func TestArrayFromBatchData(t *testing.T) { require.Equal(t, uint64(arraySize), array.Count()) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) address := Address{2, 3, 4, 5, 6, 7, 8, 9} @@ -3435,7 +3435,7 @@ func TestArrayFromBatchData(t *testing.T) { require.Equal(t, uint64(36), array.Count()) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) storage := newTestPersistentStorage(t) @@ -3485,7 +3485,7 @@ func TestArrayFromBatchData(t *testing.T) { require.Equal(t, uint64(36), array.Count()) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) storage := newTestPersistentStorage(t) @@ -3531,7 +3531,7 @@ func TestArrayFromBatchData(t *testing.T) { require.Equal(t, uint64(arraySize), array.Count()) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) storage := newTestPersistentStorage(t) @@ -3586,7 +3586,7 @@ func TestArrayFromBatchData(t *testing.T) { err = array.Append(v) require.NoError(t, err) - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(t, err) storage := newTestPersistentStorage(t) @@ -3942,7 +3942,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { verifyArrayLoadedElements(t, array, values) i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { + err := array.IterateReadOnlyLoadedValues(func(v Value) (bool, error) { // At this point, iterator returned first element (v). // Remove all other nested composite elements (except first element) from storage. @@ -4627,7 +4627,7 @@ func createArrayWithSimpleAndChildArrayValues( func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { + err := array.IterateReadOnlyLoadedValues(func(v Value) (bool, error) { require.True(t, i < len(expectedValues)) valueEqual(t, expectedValues[i], v) i++ diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index c75296fe..96f72584 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -132,7 +132,7 @@ func copyValue(storage *atree.PersistentSlabStorage, address atree.Address, valu } func copyArray(storage *atree.PersistentSlabStorage, address atree.Address, array *atree.Array) (*atree.Array, error) { - iterator, err := array.Iterator() + iterator, err := array.ReadOnlyIterator() if err != nil { return nil, err } @@ -149,7 +149,7 @@ func copyArray(storage *atree.PersistentSlabStorage, address atree.Address, arra } func copyMap(storage *atree.PersistentSlabStorage, address atree.Address, m *atree.OrderedMap) (*atree.OrderedMap, error) { - iterator, err := m.Iterator() + iterator, err := m.ReadOnlyIterator() if err != nil { return nil, err } @@ -260,12 +260,12 @@ func arrayEqual(a atree.Value, b atree.Value) error { return fmt.Errorf("array %s count %d != array %s count %d", array1, array1.Count(), array2, array2.Count()) } - iterator1, err := array1.Iterator() + iterator1, err := array1.ReadOnlyIterator() if err != nil { return fmt.Errorf("failed to get array1 iterator: %w", err) } - iterator2, err := array2.Iterator() + iterator2, err := array2.ReadOnlyIterator() if err != nil { return fmt.Errorf("failed to get array2 iterator: %w", err) } @@ -309,12 +309,12 @@ func mapEqual(a atree.Value, b atree.Value) error { return fmt.Errorf("map %s count %d != map %s count %d", m1, m1.Count(), m2, m2.Count()) } - iterator1, err := m1.Iterator() + iterator1, 
err := m1.ReadOnlyIterator() if err != nil { return fmt.Errorf("failed to get m1 iterator: %w", err) } - iterator2, err := m2.Iterator() + iterator2, err := m2.ReadOnlyIterator() if err != nil { return fmt.Errorf("failed to get m2 iterator: %w", err) } diff --git a/map.go b/map.go index 9daa8e93..b47ce9bf 100644 --- a/map.go +++ b/map.go @@ -5076,7 +5076,7 @@ func (m *OrderedMap) Type() TypeInfo { } func (m *OrderedMap) String() string { - iterator, err := m.Iterator() + iterator, err := m.ReadOnlyIterator() if err != nil { return err.Error() } @@ -5135,19 +5135,19 @@ func (m *MapExtraData) decrementCount() { m.Count-- } -type MapElementIterator struct { +type mapElementIterator struct { storage SlabStorage elements elements index int - nestedIterator *MapElementIterator + nestedIterator *mapElementIterator } -func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) { +func (i *mapElementIterator) next() (key MapKey, value MapValue, err error) { if i.nestedIterator != nil { - key, value, err = i.nestedIterator.Next() + key, value, err = i.nestedIterator.next() if err != nil { - // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). + // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next(). return nil, nil, err } if key != nil { @@ -5178,14 +5178,14 @@ func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) { return nil, nil, err } - i.nestedIterator = &MapElementIterator{ + i.nestedIterator = &mapElementIterator{ storage: i.storage, elements: elems, } i.index++ // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). - return i.nestedIterator.Next() + return i.nestedIterator.next() default: return nil, nil, NewSlabDataError(fmt.Errorf("unexpected element type %T during map iteration", e)) @@ -5197,8 +5197,10 @@ type MapElementIterationFunc func(Value) (resume bool, err error) type MapIterator struct { storage SlabStorage + comparator ValueComparator // TODO: use comparator and hip to update child element in parent map in register inlining. + hip HashInputProvider id SlabID - elemIterator *MapElementIterator + elemIterator *mapElementIterator } func (i *MapIterator) Next() (key Value, value Value, err error) { @@ -5215,7 +5217,7 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { } var ks, vs Storable - ks, vs, err = i.elemIterator.Next() + ks, vs, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, nil, err @@ -5256,7 +5258,7 @@ func (i *MapIterator) NextKey() (key Value, err error) { } var ks Storable - ks, _, err = i.elemIterator.Next() + ks, _, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, err @@ -5291,7 +5293,7 @@ func (i *MapIterator) NextValue() (value Value, err error) { } var vs Storable - _, vs, err = i.elemIterator.Next() + _, vs, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). 
return nil, err @@ -5329,7 +5331,7 @@ func (i *MapIterator) advance() error { i.id = dataSlab.next - i.elemIterator = &MapElementIterator{ + i.elemIterator = &mapElementIterator{ storage: i.storage, elements: dataSlab.elements, } @@ -5337,7 +5339,7 @@ func (i *MapIterator) advance() error { return nil } -func (m *OrderedMap) Iterator() (*MapIterator, error) { +func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { slab, err := firstMapDataSlab(m.Storage, m.root) if err != nil { // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). @@ -5347,18 +5349,27 @@ func (m *OrderedMap) Iterator() (*MapIterator, error) { dataSlab := slab.(*MapDataSlab) return &MapIterator{ - storage: m.Storage, - id: dataSlab.next, - elemIterator: &MapElementIterator{ + storage: m.Storage, + comparator: comparator, + hip: hip, + id: dataSlab.next, + elemIterator: &mapElementIterator{ storage: m.Storage, elements: dataSlab.elements, }, }, nil } -func (m *OrderedMap) Iterate(fn MapEntryIterationFunc) error { +// ReadOnlyIterator returns readonly iterator for map elements. +// If elements of child containers are mutated, those changes +// are not guaranteed to persist. +func (m *OrderedMap) ReadOnlyIterator() (*MapIterator, error) { + return m.Iterator(nil, nil) +} + +func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, fn MapEntryIterationFunc) error { - iterator, err := m.Iterator() + iterator, err := m.Iterator(comparator, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). return err @@ -5385,9 +5396,13 @@ func (m *OrderedMap) Iterate(fn MapEntryIterationFunc) error { } } -func (m *OrderedMap) IterateKeys(fn MapElementIterationFunc) error { +func (m *OrderedMap) IterateReadOnly(fn MapEntryIterationFunc) error { + return m.Iterate(nil, nil, fn) +} + +func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { - iterator, err := m.Iterator() + iterator, err := m.ReadOnlyIterator() if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). return err @@ -5414,9 +5429,9 @@ func (m *OrderedMap) IterateKeys(fn MapElementIterationFunc) error { } } -func (m *OrderedMap) IterateValues(fn MapElementIterationFunc) error { +func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error { - iterator, err := m.Iterator() + iterator, err := m.Iterator(comparator, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). return err @@ -5443,6 +5458,10 @@ func (m *OrderedMap) IterateValues(fn MapElementIterationFunc) error { } } +func (m *OrderedMap) IterateReadOnlyValues(fn MapElementIterationFunc) error { + return m.IterateValues(nil, nil, fn) +} + type MapPopIterationFunc func(Storable, Storable) // PopIterate iterates and removes elements backward. @@ -6040,8 +6059,8 @@ func (i *MapLoadedValueIterator) Next() (Value, Value, error) { return nil, nil, nil } -// LoadedValueIterator returns iterator to iterate loaded map elements. -func (m *OrderedMap) LoadedValueIterator() (*MapLoadedValueIterator, error) { +// ReadOnlyLoadedValueIterator returns iterator to iterate loaded map elements. 
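+// Elements that are not currently loaded in storage are skipped.
+// Mutations of values returned by this iterator are not guaranteed to persist.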
+func (m *OrderedMap) ReadOnlyLoadedValueIterator() (*MapLoadedValueIterator, error) { switch slab := m.root.(type) { case *MapDataSlab: @@ -6079,9 +6098,9 @@ func (m *OrderedMap) LoadedValueIterator() (*MapLoadedValueIterator, error) { } } -// IterateLoadedValues iterates loaded map values. -func (m *OrderedMap) IterateLoadedValues(fn MapEntryIterationFunc) error { - iterator, err := m.LoadedValueIterator() +// IterateReadOnlyLoadedValues iterates loaded map values. +func (m *OrderedMap) IterateReadOnlyLoadedValues(fn MapEntryIterationFunc) error { + iterator, err := m.ReadOnlyLoadedValueIterator() if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.LoadedValueIterator(). return err diff --git a/map_test.go b/map_test.go index 2b0c5ec7..f05110ea 100644 --- a/map_test.go +++ b/map_test.go @@ -167,7 +167,7 @@ func _verifyMap( require.Equal(t, len(expectedKeyValues), len(sortedKeys)) i := 0 - err = m.Iterate(func(k, v Value) (bool, error) { + err = m.IterateReadOnly(func(k, v Value) (bool, error) { expectedKey := sortedKeys[i] expectedValue := expectedKeyValues[expectedKey] @@ -1123,7 +1123,7 @@ func TestMapIterate(t *testing.T) { // Iterate key value pairs i = uint64(0) - err = m.Iterate(func(k Value, v Value) (resume bool, err error) { + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { valueEqual(t, sortedKeys[i], k) valueEqual(t, keyValues[k], v) i++ @@ -1135,7 +1135,7 @@ func TestMapIterate(t *testing.T) { // Iterate keys i = uint64(0) - err = m.IterateKeys(func(k Value) (resume bool, err error) { + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { valueEqual(t, sortedKeys[i], k) i++ return true, nil @@ -1146,7 +1146,7 @@ func TestMapIterate(t *testing.T) { // Iterate values i = uint64(0) - err = m.IterateValues(func(v Value) (resume bool, err error) { + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { k := sortedKeys[i] valueEqual(t, keyValues[k], v) i++ @@ -1209,7 +1209,7 @@ func TestMapIterate(t *testing.T) { // Iterate key value pairs i := uint64(0) - err = m.Iterate(func(k Value, v Value) (resume bool, err error) { + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { valueEqual(t, sortedKeys[i], k) valueEqual(t, keyValues[k], v) i++ @@ -1222,7 +1222,7 @@ func TestMapIterate(t *testing.T) { // Iterate keys i = uint64(0) - err = m.IterateKeys(func(k Value) (resume bool, err error) { + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { valueEqual(t, sortedKeys[i], k) i++ return true, nil @@ -1234,7 +1234,7 @@ func TestMapIterate(t *testing.T) { // Iterate values i = uint64(0) - err = m.IterateValues(func(v Value) (resume bool, err error) { + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { valueEqual(t, keyValues[sortedKeys[i]], v) i++ return true, nil @@ -7895,7 +7895,7 @@ func TestEmptyMap(t *testing.T) { t.Run("iterate", func(t *testing.T) { i := 0 - err := m.Iterate(func(k Value, v Value) (bool, error) { + err := m.IterateReadOnly(func(k Value, v Value) (bool, error) { i++ return true, nil }) @@ -7933,7 +7933,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(0), m.Count()) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) storage := newTestPersistentStorage(t) @@ -7980,7 +7980,7 @@ func TestMapFromBatchData(t *testing.T) { require.Equal(t, uint64(mapSize), m.Count()) - iter, err := m.Iterator() + iter, err := 
m.ReadOnlyIterator() require.NoError(t, err) var sortedKeys []Value @@ -8042,7 +8042,7 @@ func TestMapFromBatchData(t *testing.T) { require.Equal(t, uint64(mapSize), m.Count()) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) var sortedKeys []Value @@ -8107,7 +8107,7 @@ func TestMapFromBatchData(t *testing.T) { require.Equal(t, uint64(mapSize+1), m.Count()) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) var sortedKeys []Value @@ -8176,7 +8176,7 @@ func TestMapFromBatchData(t *testing.T) { require.Equal(t, uint64(mapSize+1), m.Count()) require.Equal(t, typeInfo, m.Type()) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) var sortedKeys []Value @@ -8239,7 +8239,7 @@ func TestMapFromBatchData(t *testing.T) { require.Equal(t, uint64(mapSize), m.Count()) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) storage := newTestPersistentStorage(t) @@ -8320,7 +8320,7 @@ func TestMapFromBatchData(t *testing.T) { require.Equal(t, uint64(mapSize), m.Count()) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) var sortedKeys []Value @@ -8404,7 +8404,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.Nil(t, storable) - iter, err := m.Iterator() + iter, err := m.ReadOnlyIterator() require.NoError(t, err) var sortedKeys []Value @@ -9637,7 +9637,7 @@ func TestMapLoadedValueIterator(t *testing.T) { verifyMapLoadedElements(t, m, values) i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { + err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) { // At this point, iterator returned first element (v). // Remove all other nested composite elements (except first element) from storage. 
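The early exit used above follows the usual resume convention; a minimal sketch
of read-only loaded-value iteration (names as in this test file):

	err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) {
		// Only elements whose slabs are currently loaded are visited.
		t.Logf("first loaded element: %v -> %v", k, v)
		return false, nil // resume=false stops after the first element
	})
	require.NoError(t, err)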
@@ -10541,7 +10541,7 @@ func createMapWithSimpleAndChildArrayValues( func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { + err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) { require.True(t, i < len(expectedValues)) valueEqual(t, expectedValues[i][0], k) valueEqual(t, expectedValues[i][1], v) @@ -12814,7 +12814,7 @@ func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap * children := make(map[Value]*mapInfo) - err := parentMap.IterateKeys(func(k Value) (bool, error) { + err := parentMap.IterateReadOnlyKeys(func(k Value) (bool, error) { if k == nil { return false, nil } diff --git a/utils_test.go b/utils_test.go index 6bcd6608..4584e71c 100644 --- a/utils_test.go +++ b/utils_test.go @@ -342,7 +342,7 @@ func valueEqual(t *testing.T, expected Value, actual Value) { func arrayEqual(t *testing.T, expected arrayValue, actual *Array) { require.Equal(t, uint64(len(expected)), actual.Count()) - iterator, err := actual.Iterator() + iterator, err := actual.ReadOnlyIterator() require.NoError(t, err) i := 0 @@ -363,7 +363,7 @@ func arrayEqual(t *testing.T, expected arrayValue, actual *Array) { func mapEqual(t *testing.T, expected mapValue, actual *OrderedMap) { require.Equal(t, uint64(len(expected)), actual.Count()) - iterator, err := actual.Iterator() + iterator, err := actual.ReadOnlyIterator() require.NoError(t, err) i := 0 From e88a73e60f126ba002263f2f4112a3bc968c7edb Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 29 Sep 2023 08:13:05 -0500 Subject: [PATCH 034/126] Support value mutation from non-readonly iterators --- array.go | 55 ++++++++------- array_test.go | 125 ++++++++++++++++++++++++++++++++++ map.go | 99 ++++++++++++++++++--------- map_test.go | 183 ++++++++++++++++++++++++++++++++++++++++++++++---- 4 files changed, 396 insertions(+), 66 deletions(-) diff --git a/array.go b/array.go index b63b3e6e..fde54568 100644 --- a/array.go +++ b/array.go @@ -3112,12 +3112,13 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab var emptyArrayIterator = &ArrayIterator{} type ArrayIterator struct { - storage SlabStorage - id SlabID - dataSlab *ArrayDataSlab - index int - remainingCount int - readOnly bool + array *Array + id SlabID + dataSlab *ArrayDataSlab + indexInArray int + indexInDataSlab int + remainingCount int + readOnly bool } func (i *ArrayIterator) Next() (Value, error) { @@ -3130,7 +3131,7 @@ func (i *ArrayIterator) Next() (Value, error) { return nil, nil } - slab, found, err := i.storage.Retrieve(i.id) + slab, found, err := i.array.Storage.Retrieve(i.id) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) @@ -3140,22 +3141,29 @@ func (i *ArrayIterator) Next() (Value, error) { } i.dataSlab = slab.(*ArrayDataSlab) - i.index = 0 + i.indexInDataSlab = 0 } var element Value var err error - if i.index < len(i.dataSlab.elements) { - element, err = i.dataSlab.elements[i.index].StoredValue(i.storage) + if i.indexInDataSlab < len(i.dataSlab.elements) { + element, err = i.dataSlab.elements[i.indexInDataSlab].StoredValue(i.array.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } - i.index++ + if !i.readOnly { + // Set up notification callback in child value so + // when child value is modified parent a is notified. + i.array.setCallbackWithChild(uint64(i.indexInArray), element, maxInlineArrayElementSize) + } + + i.indexInDataSlab++ + i.indexInArray++ } - if i.index >= len(i.dataSlab.elements) { + if i.indexInDataSlab >= len(i.dataSlab.elements) { i.id = i.dataSlab.next i.dataSlab = nil } @@ -3173,7 +3181,7 @@ func (a *Array) Iterator() (*ArrayIterator, error) { } return &ArrayIterator{ - storage: a.Storage, + array: a, id: slab.SlabID(), dataSlab: slab, remainingCount: int(a.Count()), @@ -3235,11 +3243,12 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato } return &ArrayIterator{ - storage: a.Storage, - id: dataSlab.SlabID(), - dataSlab: dataSlab, - index: int(index), - remainingCount: int(numberOfElements), + array: a, + id: dataSlab.SlabID(), + dataSlab: dataSlab, + indexInArray: int(startIndex), + indexInDataSlab: int(index), + remainingCount: int(numberOfElements), }, nil } @@ -3254,7 +3263,7 @@ func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (*Arra type ArrayIterationFunc func(element Value) (resume bool, err error) -func iterate(iterator *ArrayIterator, fn ArrayIterationFunc) error { +func iterateArray(iterator *ArrayIterator, fn ArrayIterationFunc) error { for { value, err := iterator.Next() if err != nil { @@ -3281,7 +3290,7 @@ func (a *Array) Iterate(fn ArrayIterationFunc) error { // Don't need to wrap error as external error because err is already categorized by Array.Iterator(). return err } - return iterate(iterator, fn) + return iterateArray(iterator, fn) } func (a *Array) IterateReadOnly(fn ArrayIterationFunc) error { @@ -3290,7 +3299,7 @@ func (a *Array) IterateReadOnly(fn ArrayIterationFunc) error { // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyIterator(). return err } - return iterate(iterator, fn) + return iterateArray(iterator, fn) } func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { @@ -3299,7 +3308,7 @@ func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterati // Don't need to wrap error as external error because err is already categorized by Array.RangeIterator(). return err } - return iterate(iterator, fn) + return iterateArray(iterator, fn) } func (a *Array) IterateReadOnlyRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { @@ -3308,7 +3317,7 @@ func (a *Array) IterateReadOnlyRange(startIndex uint64, endIndex uint64, fn Arra // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyRangeIterator(). 
return err } - return iterate(iterator, fn) + return iterateArray(iterator, fn) } func (a *Array) Count() uint64 { diff --git a/array_test.go b/array_test.go index 599f2365..c9fc2987 100644 --- a/array_test.go +++ b/array_test.go @@ -882,6 +882,67 @@ func TestArrayIterate(t *testing.T) { require.Equal(t, count/2, i) }) + + t.Run("mutation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + require.True(t, array.root.IsData()) + + sizeBeforeMutation := array.root.Header().size + + i := 0 + newElement := Uint64Value(0) + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + err := childArray.Append(newElement) + require.NoError(t, err) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[i] = expectedChildArrayValues + + i++ + + require.Equal(t, array.root.Header().size, sizeBeforeMutation+uint32(i)*newElement.ByteSize()) + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) + + verifyArray(t, storage, typeInfo, address, array, expectedValues, false) + }) } func testArrayIterateRange(t *testing.T, array *Array, values []Value) { @@ -1058,6 +1119,70 @@ func TestArrayIterateRange(t *testing.T) { require.Equal(t, testErr, externalError.Unwrap()) require.Equal(t, count/2, i) }) + + t.Run("mutation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + require.True(t, array.root.IsData()) + + sizeBeforeMutation := array.root.Header().size + + i := 0 + startIndex := uint64(1) + endIndex := array.Count() - 2 + newElement := Uint64Value(0) + err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + err := childArray.Append(newElement) + require.NoError(t, err) + + index := int(startIndex) + i + expectedChildArrayValues, ok := expectedValues[index].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[index] = expectedChildArrayValues + + i++ + + 
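+ // Each visited child array has grown in place by one inlined newElement, so
+ // the root slab size so far grows by exactly i*newElement.ByteSize() bytes.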
require.Equal(t, array.root.Header().size, sizeBeforeMutation+uint32(i)*newElement.ByteSize()) + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, endIndex-startIndex, uint64(i)) + require.True(t, array.root.IsData()) + + verifyArray(t, storage, typeInfo, address, array, expectedValues, false) + }) } func TestArrayRootSlabID(t *testing.T) { diff --git a/map.go b/map.go index b47ce9bf..f337179a 100644 --- a/map.go +++ b/map.go @@ -5196,7 +5196,7 @@ type MapEntryIterationFunc func(Value, Value) (resume bool, err error) type MapElementIterationFunc func(Value) (resume bool, err error) type MapIterator struct { - storage SlabStorage + m *OrderedMap comparator ValueComparator // TODO: use comparator and hip to update child element in parent map in register inlining. hip HashInputProvider id SlabID @@ -5223,18 +5223,23 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { return nil, nil, err } if ks != nil { - key, err = ks.StoredValue(i.storage) + key, err = ks.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value") } - value, err = vs.StoredValue(i.storage) + value, err = vs.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } + if i.comparator != nil && i.hip != nil { + maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize())) + i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize) + } + return key, value, nil } @@ -5264,7 +5269,7 @@ func (i *MapIterator) NextKey() (key Value, err error) { return nil, err } if ks != nil { - key, err = ks.StoredValue(i.storage) + key, err = ks.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value") @@ -5292,19 +5297,30 @@ func (i *MapIterator) NextValue() (value Value, err error) { } } - var vs Storable - _, vs, err = i.elemIterator.next() + var ks, vs Storable + ks, vs, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, err } if vs != nil { - value, err = vs.StoredValue(i.storage) + value, err = vs.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } + if i.comparator != nil && i.hip != nil { + key, err := ks.StoredValue(i.m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") + } + + maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize())) + i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize) + } + return value, nil } @@ -5315,7 +5331,7 @@ func (i *MapIterator) NextValue() (value Value, err error) { } func (i *MapIterator) advance() error { - slab, found, err := i.storage.Retrieve(i.id) + slab, found, err := i.m.Storage.Retrieve(i.id) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) @@ -5332,14 +5348,14 @@ func (i *MapIterator) advance() error { i.id = dataSlab.next i.elemIterator = &mapElementIterator{ - storage: i.storage, + storage: i.m.Storage, elements: dataSlab.elements, } return nil } -func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { +func (m *OrderedMap) iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { slab, err := firstMapDataSlab(m.Storage, m.root) if err != nil { // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). @@ -5349,7 +5365,7 @@ func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) dataSlab := slab.(*MapDataSlab) return &MapIterator{ - storage: m.Storage, + m: m, comparator: comparator, hip: hip, id: dataSlab.next, @@ -5360,21 +5376,22 @@ func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) }, nil } +func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { + if comparator == nil || hip == nil { + return nil, NewUserError(fmt.Errorf("failed to create MapIterator: ValueComparator or HashInputProvider is nil")) + } + return m.iterator(comparator, hip) +} + // ReadOnlyIterator returns readonly iterator for map elements. // If elements of child containers are mutated, those changes // are not guaranteed to persist. func (m *OrderedMap) ReadOnlyIterator() (*MapIterator, error) { - return m.Iterator(nil, nil) + return m.iterator(nil, nil) } -func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, fn MapEntryIterationFunc) error { - - iterator, err := m.Iterator(comparator, hip) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). - return err - } - +func iterateMap(iterator *MapIterator, fn MapEntryIterationFunc) error { + var err error var key, value Value for { key, value, err = iterator.Next() @@ -5396,8 +5413,22 @@ func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, } } +func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, fn MapEntryIterationFunc) error { + iterator, err := m.Iterator(comparator, hip) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). + return err + } + return iterateMap(iterator, fn) +} + func (m *OrderedMap) IterateReadOnly(fn MapEntryIterationFunc) error { - return m.Iterate(nil, nil, fn) + iterator, err := m.ReadOnlyIterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). 
+ return err + } + return iterateMap(iterator, fn) } func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { @@ -5429,14 +5460,8 @@ func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { } } -func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error { - - iterator, err := m.Iterator(comparator, hip) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). - return err - } - +func iterateMapValues(iterator *MapIterator, fn MapElementIterationFunc) error { + var err error var value Value for { value, err = iterator.NextValue() @@ -5458,8 +5483,22 @@ func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProv } } +func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error { + iterator, err := m.Iterator(comparator, hip) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). + return err + } + return iterateMapValues(iterator, fn) +} + func (m *OrderedMap) IterateReadOnlyValues(fn MapElementIterationFunc) error { - return m.IterateValues(nil, nil, fn) + iterator, err := m.ReadOnlyIterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). + return err + } + return iterateMapValues(iterator, fn) } type MapPopIterationFunc func(Storable, Storable) diff --git a/map_test.go b/map_test.go index f05110ea..8f7c5951 100644 --- a/map_test.go +++ b/map_test.go @@ -128,7 +128,7 @@ func verifyMap( typeInfo TypeInfo, address Address, m *OrderedMap, - keyValues map[Value]Value, + keyValues mapValue, sortedKeys []Value, hasNestedArrayMapElement bool, ) { @@ -1084,6 +1084,48 @@ func TestMapRemove(t *testing.T) { func TestMapIterate(t *testing.T) { + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // Iterate key value pairs + i := 0 + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate keys + i = 0 + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate values + i = 0 + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + verifyMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + t.Run("no collision", func(t *testing.T) { const ( mapSize = 2048 @@ -1200,13 +1242,9 @@ func TestMapIterate(t *testing.T) { } } - t.Log("created map of unique key value pairs") - // Sort keys by digest sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - t.Log("sorted keys by digests") - // Iterate key value pairs i := uint64(0) err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { @@ -1218,8 +1256,6 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated key value pairs") - // Iterate keys i = uint64(0) err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { @@ -1230,8 
+1266,6 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated keys") - // Iterate values i = uint64(0) err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { @@ -1242,10 +1276,134 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated values") - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) + + t.Run("mutation", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + elementSize := digestSize + singleElementPrefixSize + Uint64Value(0).ByteSize() + NewStringValue(randStr(r, valueStringSize)).ByteSize() + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, 0, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys = append(sortedKeys, k) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + sizeBeforeMutation := m.root.Header().size + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + i++ + + require.Equal(t, m.root.Header().size, sizeBeforeMutation+uint32(i)*elementSize) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + sizeAfterInsertionMutation := m.root.Header().size + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, 
existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + i++ + + require.Equal(t, m.root.Header().size, sizeAfterInsertionMutation-uint32(i)*elementSize) + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + for k := range keyValues { + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + delete(expectedChildMapValues, Uint64Value(0)) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) } func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { @@ -7662,9 +7820,8 @@ func TestMapPopIterate(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) err = storage.Commit() From a75e3886e2987a452487a5ed02559c284a4b096c Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 1 Oct 2023 13:26:41 -0500 Subject: [PATCH 035/126] Add more comments about how atree inlining works --- array.go | 50 +++++++++++++++++++++++++++++++++++++++++--------- map.go | 30 +++++++++++++++++++++++++----- 2 files changed, 66 insertions(+), 14 deletions(-) diff --git a/array.go b/array.go index 40bb8686..841c38b6 100644 --- a/array.go +++ b/array.go @@ -2709,8 +2709,8 @@ func (a *Array) setParentUpdater(f parentUpdater) { a.parentUpdater = f } -// setCallbackWithChild sets up callback function with child value so -// parent array a can be notified when child value is modified. +// setCallbackWithChild sets up callback function with child value (child) +// so parent array (a) can be notified when child value is modified. func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64) { c, ok := child.(mutableValueNotifier) if !ok { @@ -2784,7 +2784,7 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 }) } -// notifyParentIfNeeded calls parent updater if this array is a child value. +// notifyParentIfNeeded calls parent updater if this array (a) is a child element in another container. func (a *Array) notifyParentIfNeeded() error { if a.parentUpdater == nil { return nil @@ -2805,8 +2805,8 @@ func (a *Array) Get(i uint64) (Value, error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } - // Set up notification callback in child value so - // when child value is modified parent a is notified. + // As a parent, this array (a) sets up notification callback with child + // value (v) so this array can be notified when child value is modified. a.setCallbackWithChild(i, v, maxInlineArrayElementSize) return v, nil @@ -2839,13 +2839,28 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { } } + // This array (a) is a parent to the new child (value), and this array + // can also be a child in another container. + // + // As a parent, this array needs to setup notification callback with + // the new child value, so it can be notified when child is modified. + // + // If this array is a child, it needs to notify its parent because its + // content (maybe also its size) is changed by this "Set" operation. 
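+	//
+	// For example (illustrative): replacing a large element with a small one
+	// can shrink this array enough to become inlinable again, which is one
+	// reason the parent must be notified below.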
+ + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by setting new child. err = a.notifyParentIfNeeded() if err != nil { return nil, err } - // Set up notification callback in child value so - // when child value is modified parent a is notified. + // As a parent, this array sets up notification callback with child value + // so this array can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this array notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. a.setCallbackWithChild(index, value, maxInlineArrayElementSize) return existingStorable, nil @@ -2870,13 +2885,28 @@ func (a *Array) Insert(index uint64, value Value) error { a.incrementIndexFrom(index) + // This array (a) is a parent to the new child (value), and this array + // can also be a child in another container. + // + // As a parent, this array needs to setup notification callback with + // the new child value, so it can be notified when child is modified. + // + // If this array is a child, it needs to notify its parent because its + // content (also its size) is changed by this "Insert" operation. + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by inserting new child. err = a.notifyParentIfNeeded() if err != nil { return err } - // Set up notification callback in child value so - // when child value is modified parent a is notified. + // As a parent, this array sets up notification callback with child value + // so this array can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this array notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. a.setCallbackWithChild(index, value, maxInlineArrayElementSize) return nil @@ -2903,6 +2933,8 @@ func (a *Array) Remove(index uint64) (Storable, error) { a.decrementIndexFrom(index) + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by removing element. err = a.notifyParentIfNeeded() if err != nil { return nil, err diff --git a/map.go b/map.go index 9daa8e93..27a485ab 100644 --- a/map.go +++ b/map.go @@ -4618,8 +4618,8 @@ func (m *OrderedMap) setParentUpdater(f parentUpdater) { m.parentUpdater = f } -// setCallbackWithChild sets up callback function with child value so -// parent map m can be notified when child value is modified. +// setCallbackWithChild sets up callback function with child value (child) +// so parent map (m) can be notified when child value is modified. func (m *OrderedMap) setCallbackWithChild( comparator ValueComparator, hip HashInputProvider, @@ -4689,7 +4689,8 @@ func (m *OrderedMap) setCallbackWithChild( }) } -// notifyParentIfNeeded calls parent updater if this map is a child value. +// notifyParentIfNeeded calls parent updater if this map (m) is a child +// element in another container. 
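+//
+// For example (illustrative): when an inlined child map outgrows the max
+// inline size enforced by its parent, this notification is what lets the
+// parent re-store the child as a separate slab referenced by SlabID.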
 func (m *OrderedMap) notifyParentIfNeeded() error {
 	if m.parentUpdater == nil {
 		return nil
@@ -4724,8 +4725,9 @@ func (m *OrderedMap) Get(comparator ValueComparator, hip HashInputProvider, key
 		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
 	}
 
+	// As a parent, this map (m) sets up notification callback with child
+	// value (v) so this map can be notified when child value is modified.
 	maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize()))
-
 	m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize)
 
 	return v, nil
@@ -4800,13 +4802,29 @@
 		}
 	}
 
+	// This map (m) is a parent to the new child (value), and this map
+	// can also be a child in another container.
+	//
+	// As a parent, this map needs to setup notification callback with
+	// the new child value, so it can be notified when child is modified.
+	//
+	// If this map is a child, it needs to notify its parent because its
+	// content (maybe also its size) is changed by this "Set" operation.
+
+	// If this map is a child, it notifies parent by invoking callback because
+	// this map is changed by setting new child.
 	err = m.notifyParentIfNeeded()
 	if err != nil {
 		return nil, err
 	}
 
+	// As a parent, this map sets up notification callback with child value
+	// so this map can be notified when child value is modified.
+	//
+	// Setting up notification with new child value can happen at any time
+	// (either before or after this map notifies its parent) because
+	// setting up notification doesn't trigger any read/write ops on parent or child.
 	maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize()))
-
 	m.setCallbackWithChild(comparator, hip, key, value, maxInlineSize)
 
 	return existingValue, nil
@@ -4858,6 +4876,8 @@
 		}
 	}
 
+	// If this map is a child, it notifies parent by invoking callback because
+	// this map is changed by removing element.
 	err = m.notifyParentIfNeeded()
 	if err != nil {
 		return nil, nil, err

From 042eb686fc19f48fe3e78a69b3cbe6de979a4001 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Sun, 1 Oct 2023 18:51:29 -0500
Subject: [PATCH 036/126] Uninline slab when it is overwritten or removed

Currently, Set() and Remove() return the overwritten or removed
storable. If that storable is an inlined slab, it is not stored in
storage (because it was removed from its parent slab, which is in
storage), so any future changes to it would be lost. On the other hand,
if the overwritten or removed storable is a SlabIDStorable, any future
changes to it can still be persisted because it is in its own slab in
storage.

This inconsistency (not merged or deployed yet) could cause data loss
or unexpected behavior if deployed.

This commit uninlines inlined slabs that are overwritten or removed,
stores them in storage, and returns their SlabIDs as storables to the
caller.
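Illustrative sketch (not part of this patch): with this change, a caller that
removes a previously-inlined child container receives a SlabIDStorable for the
now-standalone slab. The helper below is hypothetical; it only uses APIs that
appear in this patch (Array.Remove, SlabIDStorable, StoredValue,
SlabStorage.Remove) and mirrors how the new tests handle returned storables.

// handleRemovedChild is a hypothetical helper sketching caller-side handling
// of the storable returned by Array.Remove after this commit.
func handleRemovedChild(storage SlabStorage, parent *Array, index uint64) error {
	removedStorable, err := parent.Remove(index)
	if err != nil {
		return err
	}

	id, ok := removedStorable.(SlabIDStorable)
	if !ok {
		return nil // removed element was a plain (non-container) storable
	}

	// The removed child container was uninlined into its own slab, so it can
	// still be loaded, and any future changes to it can be persisted.
	child, err := id.StoredValue(storage)
	if err != nil {
		return err
	}
	_ = child // use child as needed ...

	// ... or, if the child is no longer needed, free its slab explicitly.
	return storage.Remove(SlabID(id))
}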
--- array.go | 146 +++++++++++--- array_test.go | 478 +++++++++++++++++++++++++++++++++++++++++++++ map.go | 132 ++++++++++--- map_test.go | 528 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1229 insertions(+), 55 deletions(-) diff --git a/array.go b/array.go index 841c38b6..55ddb93a 100644 --- a/array.go +++ b/array.go @@ -906,6 +906,56 @@ func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool { return uint64(inlinedSize) <= maxInlineSize } +// inline converts not-inlined ArrayDataSlab to inlined ArrayDataSlab and removes it from storage. +func (a *ArrayDataSlab) inline(storage SlabStorage) error { + if a.inlined { + return NewFatalError(fmt.Errorf("failed to inline ArrayDataSlab %s: it is inlined already", a.header.slabID)) + } + + id := a.header.slabID + + // Remove slab from storage because it is going to be inlined. + err := storage.Remove(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Update data slab size as inlined slab. + a.header.size = a.header.size - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + + // Update data slab inlined status. + a.inlined = true + + return nil +} + +// uninline converts an inlined ArrayDataSlab to uninlined ArrayDataSlab and stores it in storage. +func (a *ArrayDataSlab) uninline(storage SlabStorage) error { + if !a.inlined { + return NewFatalError(fmt.Errorf("failed to un-inline ArrayDataSlab %s: it is not inlined", a.header.slabID)) + } + + // Update data slab size + a.header.size = a.header.size - + inlinedArrayDataSlabPrefixSize + + arrayRootDataSlabPrefixSize + + // Update data slab inlined status + a.inlined = false + + // Store slab in storage + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } + + return nil +} + func (a *ArrayDataSlab) hasPointer() bool { for _, e := range a.elements { if hasPointer(e) { @@ -2740,7 +2790,7 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 // Set child value with parent array using updated index. // Set() calls c.Storable() which returns inlined or not-inlined child storable. - existingValueStorable, err := a.Set(index, c) + existingValueStorable, err := a.set(index, c) if err != nil { return err } @@ -2813,6 +2863,34 @@ func (a *Array) Get(i uint64) (Value, error) { } func (a *Array) Set(index uint64, value Value) (Storable, error) { + existingStorable, err := a.set(index, value) + if err != nil { + return nil, err + } + + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. + // This is to prevent potential data loss because the overwritten inlined slab was not in + // storage and any future changes to it would have been lost. 
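+	// (Illustrative note: *ArrayDataSlab and *MapDataSlab are the inlined-slab
+	// storables handled here; each is promoted to a standalone slab and
+	// returned to the caller as a SlabIDStorable.)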
+	switch s := existingStorable.(type) {
+	case *ArrayDataSlab:
+		err = s.uninline(a.Storage)
+		if err != nil {
+			return nil, err
+		}
+		existingStorable = SlabIDStorable(s.header.slabID)
+
+	case *MapDataSlab:
+		err = s.uninline(a.Storage)
+		if err != nil {
+			return nil, err
+		}
+		existingStorable = SlabIDStorable(s.header.slabID)
+	}
+
+	return existingStorable, nil
+}
+
+func (a *Array) set(index uint64, value Value) (Storable, error) {
 	existingStorable, err := a.root.Set(a.Storage, a.Address(), index, value)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by ArraySlab.Set().
 		return nil, err
 	}
@@ -2825,7 +2903,6 @@
 			// Don't need to wrap error as external error because err is already categorized by Array.splitRoot().
 			return nil, err
 		}
-
 		return existingStorable, nil
 	}
 
 	if !a.root.IsData() {
@@ -2879,8 +2956,11 @@
 	}
 
 	if a.root.IsFull() {
-		// Don't need to wrap error as external error because err is already categorized by Array.splitRoot().
-		return a.splitRoot()
+		err = a.splitRoot()
+		if err != nil {
+			// Don't need to wrap error as external error because err is already categorized by Array.splitRoot().
+			return err
+		}
 	}
 
 	a.incrementIndexFrom(index)
@@ -2913,6 +2993,34 @@
 }
 
 func (a *Array) Remove(index uint64) (Storable, error) {
+	storable, err := a.remove(index)
+	if err != nil {
+		return nil, err
+	}
+
+	// If removed storable is an inlined slab, uninline the slab and store it in storage.
+	// This is to prevent potential data loss because the removed inlined slab was not in
+	// storage and any future changes to it would have been lost.
+	switch s := storable.(type) {
+	case *ArrayDataSlab:
+		err = s.uninline(a.Storage)
+		if err != nil {
+			return nil, err
+		}
+		storable = SlabIDStorable(s.header.slabID)
+
+	case *MapDataSlab:
+		err = s.uninline(a.Storage)
+		if err != nil {
+			return nil, err
+		}
+		storable = SlabIDStorable(s.header.slabID)
+	}
+
+	return storable, nil
+}
+
+func (a *Array) remove(index uint64) (Storable, error) {
 	storable, err := a.root.Remove(a.Storage, index)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by ArraySlab.Remove().
@@ -3088,23 +3196,11 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab
 		return nil, NewFatalError(fmt.Errorf("unexpected inlinable array slab type %T", a.root))
 	}
 
-	rootID := rootDataSlab.header.slabID
-
-	// Remove root slab from storage because it is going to be inlined.
-	err := a.Storage.Remove(rootID)
+	err := rootDataSlab.inline(a.Storage)
 	if err != nil {
-		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
-		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", rootID))
+		return nil, err
 	}
 
-	// Update root data slab size as inlined slab.
-	rootDataSlab.header.size = rootDataSlab.header.size -
-		arrayRootDataSlabPrefixSize +
-		inlinedArrayDataSlabPrefixSize
-
-	// Update root data slab inlined status.
- rootDataSlab.inlined = true - return rootDataSlab, nil case !inlinable && inlined: @@ -3119,19 +3215,9 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab return nil, NewFatalError(fmt.Errorf("unexpected inlined array slab type %T", a.root)) } - // Update root data slab size - rootDataSlab.header.size = rootDataSlab.header.size - - inlinedArrayDataSlabPrefixSize + - arrayRootDataSlabPrefixSize - - // Update root data slab inlined status. - rootDataSlab.inlined = false - - // Store root slab in storage - err := a.Storage.Store(rootDataSlab.header.slabID, a.root) + err := rootDataSlab.uninline(a.Storage) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.SlabID())) + return nil, err } return SlabIDStorable(a.SlabID()), nil diff --git a/array_test.go b/array_test.go index 4bd1b6fa..fd7c8d44 100644 --- a/array_test.go +++ b/array_test.go @@ -6520,3 +6520,481 @@ func getStoredDeltas(storage *PersistentSlabStorage) int { } return count } + +func TestArraySetReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("child array is not inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues = append(expectedChildValues, v) + + if !childArray.Inlined() { + break + } + } + + expectedValues = append(expectedValues, expectedChildValues) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite existing child array value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + + expectedValues[i] = Uint64Value(0) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + }) + + t.Run("child array is inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // 
Overwrite existing child array value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + expectedValues[i] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + }) + + t.Run("child map is not inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childMap) + require.NoError(t, err) + + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) + + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + + if !childMap.Inlined() { + break + } + } + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite existing child map value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + expectedValues[i] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + }) + + t.Run("child map is inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + err = parentArray.Append(childMap) + require.NoError(t, err) + + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) + + // Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite existing child map value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, 
err) + + valueEqual(t, expectedValues[i], child) + + expectedValues[i] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + }) +} + +func TestArrayRemoveReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("child array is not inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues = append(expectedChildValues, v) + + if !childArray.Inlined() { + break + } + } + + expectedValues = append(expectedValues, expectedChildValues) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove child array value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyEmptyArray(t, storage, typeInfo, address, parentArray) + }) + + t.Run("child array is inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove child array value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyEmptyArray(t, storage, typeInfo, address, parentArray) + }) + + t.Run("child map is not inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childMap) + require.NoError(t, err) + + expectedChildValues := make(mapValue) + 
expectedValues = append(expectedValues, expectedChildValues) + + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + + if !childMap.Inlined() { + break + } + } + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove child map value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyEmptyArray(t, storage, typeInfo, address, parentArray) + }) + + t.Run("child map is inlined", func(t *testing.T) { + const arraySize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + err = parentArray.Append(childMap) + require.NoError(t, err) + + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) + + // Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + } + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove child map value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyEmptyArray(t, storage, typeInfo, address, parentArray) + }) +} diff --git a/map.go b/map.go index 27a485ab..848442f1 100644 --- a/map.go +++ b/map.go @@ -3060,6 +3060,52 @@ func (m *MapDataSlab) Inlinable(maxInlineSize uint64) bool { return uint64(inlinedSize) <= maxInlineSize } +// inline converts not-inlined MapDataSlab to inlined MapDataSlab and removes it from storage. +func (m *MapDataSlab) inline(storage SlabStorage) error { + if m.inlined { + return NewFatalError(fmt.Errorf("failed to inline MapDataSlab %s: it is inlined already", m.header.slabID)) + } + + id := m.header.slabID + + // Remove slab from storage because it is going to be inlined. + err := storage.Remove(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Update data slab size from not inlined to inlined + m.header.size = inlinedMapDataSlabPrefixSize + m.elements.Size() + + // Update data slab inlined status. 
+	m.inlined = true
+
+	return nil
+}
+
+// uninline converts an inlined MapDataSlab to uninlined MapDataSlab and stores it in storage.
+func (m *MapDataSlab) uninline(storage SlabStorage) error {
+	if !m.inlined {
+		return NewFatalError(fmt.Errorf("failed to uninline MapDataSlab %s: it is not inlined", m.header.slabID))
+	}
+
+	// Update data slab size from inlined to not inlined.
+	m.header.size = mapRootDataSlabPrefixSize + m.elements.Size()
+
+	// Update data slab inlined status.
+	m.inlined = false
+
+	// Store slab in storage
+	err := storage.Store(m.header.slabID, m)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+		return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID))
+	}
+
+	return nil
+}
+
 func elementsStorables(elems elements, childStorables []Storable) []Storable {
 
 	switch v := elems.(type) {
@@ -4646,7 +4692,7 @@ func (m *OrderedMap) setCallbackWithChild(
 
 		// Set child value with parent map using same key.
 		// Set() calls c.Storable() which returns inlined or not-inlined child storable.
-		existingValueStorable, err := m.Set(comparator, hip, key, c)
+		existingValueStorable, err := m.set(comparator, hip, key, c)
 		if err != nil {
 			return err
 		}
@@ -4755,6 +4801,34 @@ func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key
 }
 
 func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) {
+	storable, err := m.set(comparator, hip, key, value)
+	if err != nil {
+		return nil, err
+	}
+
+	// If overwritten storable is an inlined slab, uninline the slab and store it in storage.
+	// This is to prevent potential data loss because the overwritten inlined slab was not in
+	// storage and any future changes to it would have been lost.
+	switch s := storable.(type) {
+	case *ArrayDataSlab:
+		err = s.uninline(m.Storage)
+		if err != nil {
+			return nil, err
+		}
+		storable = SlabIDStorable(s.header.slabID)
+
+	case *MapDataSlab:
+		err = s.uninline(m.Storage)
+		if err != nil {
+			return nil, err
+		}
+		storable = SlabIDStorable(s.header.slabID)
+	}
+
+	return storable, nil
+}
+
+func (m *OrderedMap) set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) {
 
 	keyDigest, err := m.digesterBuilder.Digest(hip, key)
 	if err != nil {
@@ -4790,7 +4864,6 @@
 			// Don't need to wrap error as external error because err is already categorized by OrderedMap.promoteChildAsNewRoot().
 			return nil, err
 		}
-
 		return existingValue, nil
 		}
 	}
@@ -4831,6 +4904,34 @@
 }
 
 func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) {
+	keyStorable, valueStorable, err := m.remove(comparator, hip, key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// If removed storable is an inlined slab, uninline the slab and store it in storage.
+	// This is to prevent potential data loss because the removed inlined slab was not in
+	// storage and any future changes to it would have been lost.
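+	// (Illustrative note: as in Array.Remove, an inlined child slab is
+	// promoted to a standalone slab below so the returned value storable
+	// remains usable and persistable.)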
+ switch s := valueStorable.(type) { + case *ArrayDataSlab: + err = s.uninline(m.Storage) + if err != nil { + return nil, nil, err + } + valueStorable = SlabIDStorable(s.header.slabID) + + case *MapDataSlab: + err = s.uninline(m.Storage) + if err != nil { + return nil, nil, err + } + valueStorable = SlabIDStorable(s.header.slabID) + } + + return keyStorable, valueStorable, nil +} + +func (m *OrderedMap) remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { keyDigest, err := m.digesterBuilder.Digest(hip, key) if err != nil { @@ -4864,7 +4965,6 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k // Don't need to wrap error as external error because err is already categorized by OrderedMap.promoteChildAsNewRoot(). return nil, nil, err } - return k, v, nil } } @@ -5031,21 +5131,11 @@ func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (S return nil, NewFatalError(fmt.Errorf("unexpected inlinable map slab type %T", m.root)) } - rootID := rootDataSlab.header.slabID - - // Remove root slab from storage because it is going to be inlined. - err := m.Storage.Remove(rootID) + err := rootDataSlab.inline(m.Storage) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", rootID)) + return nil, err } - // Update root data slab size from not inlined to inlined - rootDataSlab.header.size = inlinedMapDataSlabPrefixSize + rootDataSlab.elements.Size() - - // Update root data slab inlined status. - rootDataSlab.inlined = true - return rootDataSlab, nil case !inlinable && inlined: @@ -5060,17 +5150,9 @@ func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (S return nil, NewFatalError(fmt.Errorf("unexpected inlined map slab type %T", m.root)) } - // Update root data slab size from inlined to not inlined. - rootDataSlab.header.size = mapRootDataSlabPrefixSize + rootDataSlab.elements.Size() - - // Update root data slab inlined status. - rootDataSlab.inlined = false - - // Store root slab in storage - err := m.Storage.Store(m.SlabID(), m.root) + err := rootDataSlab.uninline(m.Storage) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.SlabID())) + return nil, err } return SlabIDStorable(m.SlabID()), nil diff --git a/map_test.go b/map_test.go index 2b0c5ec7..095f9f00 100644 --- a/map_test.go +++ b/map_test.go @@ -12478,6 +12478,24 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.Equal(t, k, existingKey) require.NotNil(t, existingValue) + // Grand child map is returned as SlabIDStorable, even if it was stored inlined in the parent. 
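+			// (Illustrative note: the uninlined grandchild is loaded back below to
+			// verify its contents before its slab is removed from storage.)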
+ id, ok := existingValue.(SlabIDStorable) + require.True(t, ok) + + v, err := id.StoredValue(storage) + require.NoError(t, err) + + gchildMap, ok := v.(*OrderedMap) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[k].(mapValue) + require.True(t, ok) + + valueEqual(t, expectedGChildMapValues, gchildMap) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + delete(expectedChildMapValues, k) // Child map is inlined @@ -12845,3 +12863,513 @@ func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap * return children } + +func TestMapSetReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("child array is not inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues = append(expectedChildValues, v) + + if !childArray.Inlined() { + break + } + } + + expectedKeyValues[k] = expectedChildValues + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Overwrite existing child array value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + + expectedKeyValues[k] = Uint64Value(0) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("child array is inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + expectedKeyValues[k] = arrayValue{v} + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Overwrite existing child array value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + 
require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + expectedKeyValues[k] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("child map is not inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues + + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + + if !childMap.Inlined() { + break + } + } + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Overwrite existing child map value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + expectedKeyValues[k] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("child map is inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues + + // Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Overwrite existing child map value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + 
require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + expectedKeyValues[k] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) +} + +func TestMapRemoveReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("child array is not inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues = append(expectedChildValues, v) + + if !childArray.Inlined() { + break + } + } + + expectedKeyValues[k] = expectedChildValues + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove child array value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + + delete(expectedKeyValues, k) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("child array is inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + expectedKeyValues[k] = arrayValue{v} + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove child array value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + delete(expectedKeyValues, k) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("child map is not inlined", 
func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues + + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + + if !childMap.Inlined() { + break + } + } + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove child map value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + delete(expectedKeyValues, k) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("child map is inlined", func(t *testing.T) { + const mapSize = 2 + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(map[Value]Value) + + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues + + // Insert one element to child map so child map stays inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove child map value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + delete(expectedKeyValues, k) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) +} From f17354c4f3fdaa9d922a56b8a08677871a3a856f Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sun, 1 Oct 2023
19:31:03 -0500 Subject: [PATCH 037/126] Refactor inlining functions --- array.go | 64 ++++++++++++++++++++++++++------------------------------ map.go | 61 +++++++++++++++++++++---------------------- 2 files changed, 58 insertions(+), 67 deletions(-) diff --git a/array.go b/array.go index 55ddb93a..6c80cd6b 100644 --- a/array.go +++ b/array.go @@ -165,6 +165,8 @@ type ArraySlab interface { Inlined() bool Inlinable(maxInlineSize uint64) bool + Inline(SlabStorage) error + Uninline(SlabStorage) error } // Array is a heterogeneous variable-size array, storing any type of values @@ -906,8 +908,8 @@ func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool { return uint64(inlinedSize) <= maxInlineSize } -// inline converts not-inlined ArrayDataSlab to inlined ArrayDataSlab and removes it from storage. -func (a *ArrayDataSlab) inline(storage SlabStorage) error { +// Inline converts not-inlined ArrayDataSlab to inlined ArrayDataSlab and removes it from storage. +func (a *ArrayDataSlab) Inline(storage SlabStorage) error { if a.inlined { return NewFatalError(fmt.Errorf("failed to inline ArrayDataSlab %s: it is inlined already", a.header.slabID)) } @@ -932,8 +934,8 @@ return nil } -// uninline converts an inlined ArrayDataSlab to uninlined ArrayDataSlab and stores it in storage. -func (a *ArrayDataSlab) uninline(storage SlabStorage) error { +// Uninline converts an inlined ArrayDataSlab to uninlined ArrayDataSlab and stores it in storage. +func (a *ArrayDataSlab) Uninline(storage SlabStorage) error { if !a.inlined { return NewFatalError(fmt.Errorf("failed to un-inline ArrayDataSlab %s: it is not inlined", a.header.slabID)) } @@ -2584,6 +2586,14 @@ func (a *ArrayMetaDataSlab) Inlinable(_ uint64) bool { return false } +func (a *ArrayMetaDataSlab) Inline(_ SlabStorage) error { + return NewFatalError(fmt.Errorf("failed to inline ArrayMetaDataSlab %s: ArrayMetaDataSlab can't be inlined", a.header.slabID)) +} + +func (a *ArrayMetaDataSlab) Uninline(_ SlabStorage) error { + return NewFatalError(fmt.Errorf("failed to uninline ArrayMetaDataSlab %s: ArrayMetaDataSlab is already uninlined", a.header.slabID)) +} + func (a *ArrayMetaDataSlab) IsData() bool { return false } @@ -2872,19 +2882,19 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost. switch s := existingStorable.(type) { - case *ArrayDataSlab: - err = s.uninline(a.Storage) + case ArraySlab: + err = s.Uninline(a.Storage) if err != nil { return nil, err } - existingStorable = SlabIDStorable(s.header.slabID) + existingStorable = SlabIDStorable(s.SlabID()) - case *MapDataSlab: - err = s.uninline(a.Storage) + case MapSlab: + err = s.Uninline(a.Storage) if err != nil { return nil, err } - existingStorable = SlabIDStorable(s.header.slabID) + existingStorable = SlabIDStorable(s.SlabID()) } return existingStorable, nil @@ -3002,19 +3012,19 @@ func (a *Array) Remove(index uint64) (Storable, error) { // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost.
switch s := storable.(type) { - case *ArrayDataSlab: - err = s.uninline(a.Storage) + case ArraySlab: + err = s.Uninline(a.Storage) if err != nil { return nil, err } - storable = SlabIDStorable(s.header.slabID) + storable = SlabIDStorable(s.SlabID()) - case *MapDataSlab: - err = s.uninline(a.Storage) + case MapSlab: + err = s.Uninline(a.Storage) if err != nil { return nil, err } - storable = SlabIDStorable(s.header.slabID) + storable = SlabIDStorable(s.SlabID()) } return storable, nil @@ -3189,33 +3199,19 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab // Root slab is inlinable and was NOT inlined. // Inline root data slab. - - // Inlineable root slab must be data slab. - rootDataSlab, ok := a.root.(*ArrayDataSlab) - if !ok { - return nil, NewFatalError(fmt.Errorf("unexpected inlinable array slab type %T", a.root)) - } - - err := rootDataSlab.inline(a.Storage) + err := a.root.Inline(a.Storage) if err != nil { return nil, err } - return rootDataSlab, nil + return a.root, nil case !inlinable && inlined: // Root slab is NOT inlinable and was previously inlined. - // Un-inline root slab. - - // Inlined root slab must be data slab. - rootDataSlab, ok := a.root.(*ArrayDataSlab) - if !ok { - return nil, NewFatalError(fmt.Errorf("unexpected inlined array slab type %T", a.root)) - } - - err := rootDataSlab.uninline(a.Storage) + // Uninline root slab. + err := a.root.Uninline(a.Storage) if err != nil { return nil, err } diff --git a/map.go b/map.go index 848442f1..3a8d855e 100644 --- a/map.go +++ b/map.go @@ -363,6 +363,8 @@ type MapSlab interface { Inlined() bool Inlinable(maxInlineSize uint64) bool + Inline(SlabStorage) error + Uninline(SlabStorage) error } // OrderedMap is an ordered map of key-value pairs; keys can be any hashable type @@ -3061,7 +3063,7 @@ func (m *MapDataSlab) Inlinable(maxInlineSize uint64) bool { } // inline converts not-inlined MapDataSlab to inlined MapDataSlab and removes it from storage. -func (m *MapDataSlab) inline(storage SlabStorage) error { +func (m *MapDataSlab) Inline(storage SlabStorage) error { if m.inlined { return NewFatalError(fmt.Errorf("failed to inline MapDataSlab %s: it is inlined already", m.header.slabID)) } @@ -3085,7 +3087,7 @@ } // uninline converts an inlined MapDataSlab to uninlined MapDataSlab and stores it in storage. -func (m *MapDataSlab) uninline(storage SlabStorage) error { +func (m *MapDataSlab) Uninline(storage SlabStorage) error { if !m.inlined { return NewFatalError(fmt.Errorf("failed to uninline MapDataSlab %s: it is not inlined", m.header.slabID)) } @@ -3781,6 +3783,14 @@ func (m *MapMetaDataSlab) Inlinable(_ uint64) bool { return false } +func (m *MapMetaDataSlab) Inline(_ SlabStorage) error { + return NewFatalError(fmt.Errorf("failed to inline MapMetaDataSlab %s: MapMetaDataSlab can't be inlined", m.header.slabID)) +} + +func (m *MapMetaDataSlab) Uninline(_ SlabStorage) error { + return NewFatalError(fmt.Errorf("failed to uninline MapMetaDataSlab %s: MapMetaDataSlab is already uninlined", m.header.slabID)) +} + func (m *MapMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) { if m.extraData == nil { return nil, NewNotValueError(m.SlabID()) } @@ -4810,19 +4820,19 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost.
switch s := storable.(type) { - case *ArrayDataSlab: - err = s.uninline(m.Storage) + case ArraySlab: + err = s.Uninline(m.Storage) if err != nil { return nil, err } - storable = SlabIDStorable(s.header.slabID) + storable = SlabIDStorable(s.SlabID()) - case *MapDataSlab: - err = s.uninline(m.Storage) + case MapSlab: + err = s.Uninline(m.Storage) if err != nil { return nil, err } - storable = SlabIDStorable(s.header.slabID) + storable = SlabIDStorable(s.SlabID()) } return storable, nil @@ -4913,19 +4923,19 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost. switch s := valueStorable.(type) { - case *ArrayDataSlab: - err = s.uninline(m.Storage) + case ArraySlab: + err = s.Uninline(m.Storage) if err != nil { return nil, nil, err } - valueStorable = SlabIDStorable(s.header.slabID) + valueStorable = SlabIDStorable(s.SlabID()) - case *MapDataSlab: - err = s.uninline(m.Storage) + case MapSlab: + err = s.Uninline(m.Storage) if err != nil { return nil, nil, err } - valueStorable = SlabIDStorable(s.header.slabID) + valueStorable = SlabIDStorable(s.SlabID()) } return keyStorable, valueStorable, nil @@ -5124,33 +5134,18 @@ func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (S // Root slab is inlinable and was NOT inlined. // Inline root data slab. - - // Inlineable root slab must be data slab. - rootDataSlab, ok := m.root.(*MapDataSlab) - if !ok { - return nil, NewFatalError(fmt.Errorf("unexpected inlinable map slab type %T", m.root)) - } - - err := rootDataSlab.inline(m.Storage) + err := m.root.Inline(m.Storage) if err != nil { return nil, err } - return rootDataSlab, nil + return m.root, nil case !inlinable && inlined: - // Root slab is NOT inlinable and was inlined. - // Un-inline root slab. - - // Inlined root slab must be data slab. - rootDataSlab, ok := m.root.(*MapDataSlab) - if !ok { - return nil, NewFatalError(fmt.Errorf("unexpected inlined map slab type %T", m.root)) - } - - err := rootDataSlab.uninline(m.Storage) + // Uninline root slab. + err := m.root.Uninline(m.Storage) if err != nil { return nil, err } From 1a2e69ffbb9d83693b11375bde873fe93cb40773 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:20 -0500 Subject: [PATCH 038/126] Prune Array.mutableElementIndex in Set and Remove This commit prunes Array.mutableElementIndex by: - deleting overwritten element from mutableElementIndex - deleting removed element from mutableElementIndex While at it, add more checks for index in mutableElementIndex: - can't exceed number of array elements - can't be less than 0 For context, Array.mutableElementIndex contains mutable element's updated index in parent array. When parent array is modified, index in mutableElementIndex is updated. 
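As an illustration of this pruning rule, here is a minimal standalone Go sketch; the ValueID type and the pruneIndex helper below are simplified stand-ins invented for this example, not the actual atree API:

    package main

    import "fmt"

    // ValueID is a simplified stand-in for atree's 16-byte value identifier.
    type ValueID [16]byte

    // pruneIndex applies the pruning rule: delete the overwritten or removed
    // element's ValueID from the index map, unless the new value is a
    // container with the same ValueID (its index entry is still valid).
    func pruneIndex(index map[ValueID]uint64, overwritten ValueID, newID *ValueID) {
        if newID == nil || *newID != overwritten {
            delete(index, overwritten)
        }
    }

    func main() {
        index := map[ValueID]uint64{{0: 1}: 0, {0: 2}: 1}

        // Element with ValueID {0: 1} is overwritten by a non-container
        // value, so its entry is pruned from the index.
        pruneIndex(index, ValueID{0: 1}, nil)
        fmt.Println(len(index)) // prints 1
    }

Keeping the entry when the new value has the same ValueID avoids discarding a still-valid index for a container that is merely re-set in place.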
--- array.go | 52 ++++++++++++- array_test.go | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++ storage.go | 2 + 3 files changed, 252 insertions(+), 4 deletions(-) diff --git a/array.go b/array.go index 6c80cd6b..c0706563 100644 --- a/array.go +++ b/array.go @@ -2737,27 +2737,35 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) { } // TODO: maybe optimize this -func (a *Array) incrementIndexFrom(index uint64) { +func (a *Array) incrementIndexFrom(index uint64) error { // Although range loop over Go map is not deterministic, it is OK // to use here because this operation is free of side-effect and // leads to the same results independent of map order. for id, i := range a.mutableElementIndex { if i >= index { + if a.mutableElementIndex[id]+1 >= a.Count() { + return NewFatalError(fmt.Errorf("failed to increment index of ValueID %s in array %s: new index exceeds array count", id, a.ValueID())) + } a.mutableElementIndex[id]++ } } + return nil } // TODO: maybe optimize this -func (a *Array) decrementIndexFrom(index uint64) { +func (a *Array) decrementIndexFrom(index uint64) error { // Although range loop over Go map is not deterministic, it is OK // to use here because this operation is free of side-effect and // leads to the same results independent of map order. for id, i := range a.mutableElementIndex { if i > index { + if a.mutableElementIndex[id] <= 0 { + return NewFatalError(fmt.Errorf("failed to decrement index of ValueID %s in array %s: new index < 0", id, a.ValueID())) + } a.mutableElementIndex[id]-- } } + return nil } func (a *Array) getIndexByValueID(id ValueID) (uint64, bool) { @@ -2878,6 +2886,8 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { return nil, err } + var existingValueID ValueID + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost. @@ -2888,6 +2898,7 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { return nil, err } existingStorable = SlabIDStorable(s.SlabID()) + existingValueID = slabIDToValueID(s.SlabID()) case MapSlab: err = s.Uninline(a.Storage) @@ -2895,6 +2906,20 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { return nil, err } existingStorable = SlabIDStorable(s.SlabID()) + existingValueID = slabIDToValueID(s.SlabID()) + + case SlabIDStorable: + existingValueID = slabIDToValueID(SlabID(s)) + } + + // Remove overwritten array/map's ValueID from mutableElementIndex if: + // - new value isn't array/map, or + // - new value is array/map with different value ID + if existingValueID != emptyValueID { + newValue, ok := value.(mutableValueNotifier) + if !ok || existingValueID != newValue.ValueID() { + delete(a.mutableElementIndex, existingValueID) + } } return existingStorable, nil @@ -2973,7 +2998,10 @@ func (a *Array) Insert(index uint64, value Value) error { } } - a.incrementIndexFrom(index) + err = a.incrementIndexFrom(index) + if err != nil { + return err + } // This array (a) is a parent to the new child (value), and this array // can also be a child in another container. 
@@ -3019,12 +3047,25 @@ func (a *Array) Remove(index uint64) (Storable, error) { } storable = SlabIDStorable(s.SlabID()) + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(s.SlabID()) + delete(a.mutableElementIndex, removedValueID) + case MapSlab: err = s.Uninline(a.Storage) if err != nil { return nil, err } storable = SlabIDStorable(s.SlabID()) + + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(s.SlabID()) + delete(a.mutableElementIndex, removedValueID) + + case SlabIDStorable: + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(SlabID(s)) + delete(a.mutableElementIndex, removedValueID) } return storable, nil @@ -3049,7 +3090,10 @@ func (a *Array) remove(index uint64) (Storable, error) { } } - a.decrementIndexFrom(index) + err = a.decrementIndexFrom(index) + if err != nil { + return nil, err + } // If this array is a child, it notifies parent by invoking callback because // this array is changed by removing element. diff --git a/array_test.go b/array_test.go index fd7c8d44..45d7a2f3 100644 --- a/array_test.go +++ b/array_test.go @@ -4726,6 +4726,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Create an array with empty child array as element. parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) require.Equal(t, uint64(arraySize), parentArray.Count()) require.True(t, parentArray.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. @@ -4778,6 +4779,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -4801,6 +4805,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove elements from child array which triggers standalone array slab becomes inlined slab again. 
@@ -4823,6 +4830,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -4847,6 +4857,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) children := make([]struct { @@ -4904,6 +4917,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize += vSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -4940,6 +4956,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -4973,6 +4992,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize += expectedInlinedSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5004,6 +5026,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize -= vSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5032,6 +5057,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) children := make([]struct { @@ -5084,6 +5112,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5115,6 +5146,9 @@ func 
TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5144,6 +5178,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5176,6 +5213,9 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5215,6 +5255,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Get inlined child array @@ -5288,6 +5331,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5326,6 +5374,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove elements from grand child array which triggers standalone child array slab becomes inlined slab again. 
@@ -5366,6 +5419,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5392,6 +5450,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Get inlined child array @@ -5465,6 +5526,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5505,6 +5571,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize = arrayRootDataSlabPrefixSize + expectedStandaloneSlabSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove elements from grand child array which triggers standalone child array slab becomes inlined slab again. 
@@ -5544,6 +5615,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5599,6 +5675,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + vSize*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) type arrayInfo struct { @@ -5685,6 +5764,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize += vSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5724,6 +5808,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5771,6 +5860,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize += expectedInlinedChildSize + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -5819,6 +5913,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize -= vSize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5872,6 +5971,9 @@ func 
TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) type arrayInfo struct { @@ -5958,6 +6060,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -6000,6 +6107,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedChildSlabSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize require.Equal(t, expectedInlinedChildSlabSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6048,6 +6160,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedStandaloneChildSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize require.Equal(t, expectedStandaloneChildSlabSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6095,6 +6212,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6144,6 +6266,11 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + 
require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -6179,6 +6306,9 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) children := make([]*struct { @@ -6247,6 +6377,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6288,6 +6422,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6324,6 +6462,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } }) @@ -6365,6 +6507,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6406,6 +6552,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } @@ -6442,6 +6592,10 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize require.Equal(t, expectedInlinedSize, 
childArray.root.ByteSize()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } }) @@ -6561,6 +6715,9 @@ func TestArraySetReturnedValue(t *testing.T) { expectedValues = append(expectedValues, expectedChildValues) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child array value @@ -6581,6 +6738,9 @@ func TestArraySetReturnedValue(t *testing.T) { require.NoError(t, err) expectedValues[i] = Uint64Value(0) + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) } verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) @@ -6615,6 +6775,9 @@ func TestArraySetReturnedValue(t *testing.T) { expectedValues = append(expectedValues, arrayValue{v}) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child array value @@ -6637,6 +6800,9 @@ func TestArraySetReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) @@ -6681,6 +6847,9 @@ func TestArraySetReturnedValue(t *testing.T) { } } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child map value @@ -6703,6 +6872,9 @@ func TestArraySetReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) @@ -6740,6 +6912,9 @@ func TestArraySetReturnedValue(t *testing.T) { expectedChildValues[k] = v } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child map value @@ -6762,6 +6937,9 @@ func TestArraySetReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) } @@ -6806,6 +6984,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { expectedValues = append(expectedValues, expectedChildValues) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child array value @@ -6825,6 +7006,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + 
verifyEmptyArray(t, storage, typeInfo, address, parentArray) }) @@ -6857,6 +7041,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { expectedValues = append(expectedValues, arrayValue{v}) } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child array value @@ -6876,6 +7063,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + verifyEmptyArray(t, storage, typeInfo, address, parentArray) }) @@ -6920,6 +7110,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { } } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child map value @@ -6939,6 +7132,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + verifyEmptyArray(t, storage, typeInfo, address, parentArray) }) @@ -6976,6 +7172,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { expectedChildValues[k] = v } + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child map value @@ -6995,6 +7194,9 @@ func TestArrayRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + verifyEmptyArray(t, storage, typeInfo, address, parentArray) }) } diff --git a/storage.go b/storage.go index 4c5a450a..95725315 100644 --- a/storage.go +++ b/storage.go @@ -39,6 +39,8 @@ const LedgerBaseStorageSlabPrefix = "$" // resource tracking, etc. type ValueID [16]byte +var emptyValueID = ValueID{} + func slabIDToValueID(sid SlabID) ValueID { var id ValueID copy(id[:], sid.address[:]) From 63ea7a74dd58db15fc871c40f4c3c47772dc8c81 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 11:28:34 -0500 Subject: [PATCH 039/126] No-op on parentUpdater() if child isn't in parent This commit checks if child is in parent container at adjusted index or under the same key in parentUpdater() before modifying parent container. If child is no longer part of parent, parentUpdater() returns with no-op, and this callback is set to nil. This is to handle outdated child reference modifying parent after it is removed. --- array.go | 61 +++++++++++++++++++------ array_test.go | 115 +++++++++++++++++++++++++++++++++++++++++++++++ map.go | 60 +++++++++++++++++++++---- map_test.go | 122 ++++++++++++++++++++++++++++++++++++++++++++++++++ value.go | 2 +- 5 files changed, 337 insertions(+), 23 deletions(-) diff --git a/array.go b/array.go index c0706563..c6f47872 100644 --- a/array.go +++ b/array.go @@ -2790,27 +2790,51 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 // Index i will be updated with array operations, which affects element index. a.mutableElementIndex[vid] = i - c.setParentUpdater(func() error { + c.setParentUpdater(func() (found bool, err error) { // Avoid unnecessary write operation on parent container. 
// Child value was stored as SlabIDStorable (not inlined) in parent container, // and continues to be stored as SlabIDStorable (still not inlinable), // so no update to parent container is needed. if !c.Inlined() && !c.Inlinable(maxInlineSize) { - return nil + return true, nil } - // Get latest index by child value ID. - index, exist := a.getIndexByValueID(vid) + // Get latest adjusted index by child value ID. + adjustedIndex, exist := a.getIndexByValueID(vid) if !exist { - return NewFatalError(fmt.Errorf("failed to get index for child element with value id %s", vid)) + return false, nil + } + + storable, err := a.root.Get(a.Storage, adjustedIndex) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). + return false, err + } + + // Verify retrieved element is either SlabIDStorable or Slab, with identical value ID. + switch x := storable.(type) { + case SlabIDStorable: + sid := SlabID(x) + if !vid.equal(sid) { + return false, nil + } + + case Slab: + sid := x.SlabID() + if !vid.equal(sid) { + return false, nil + } + + default: + return false, nil } // Set child value with parent array using updated index. // Set() calls c.Storable() which returns inlined or not-inlined child storable. - existingValueStorable, err := a.set(index, c) + existingValueStorable, err := a.set(adjustedIndex, c) if err != nil { - return err + return false, err } // Verify overwritten storable has identical value ID. @@ -2819,7 +2843,7 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 case SlabIDStorable: sid := SlabID(x) if !vid.equal(sid) { - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", sid, @@ -2829,7 +2853,7 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 case Slab: sid := x.SlabID() if !vid.equal(sid) { - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", sid, @@ -2837,18 +2861,18 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 } case nil: - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten value is nil")) default: - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten value is wrong type %T", existingValueStorable)) } - return nil + return true, nil }) } @@ -2857,7 +2881,18 @@ func (a *Array) notifyParentIfNeeded() error { if a.parentUpdater == nil { return nil } - return a.parentUpdater() + + // If parentUpdater() doesn't find child array (a), then no-op on parent container + // and unset parentUpdater callback in child array. This can happen when child + // array is an outdated reference (removed or overwritten in parent container). 
+ found, err := a.parentUpdater() + if err != nil { + return err + } + if !found { + a.parentUpdater = nil + } + return nil } func (a *Array) Get(i uint64) (Value, error) { diff --git a/array_test.go b/array_test.go index 45d7a2f3..6026f5d8 100644 --- a/array_test.go +++ b/array_test.go @@ -7200,3 +7200,118 @@ func TestArrayRemoveReturnedValue(t *testing.T) { verifyEmptyArray(t, storage, typeInfo, address, parentArray) }) } + +func TestArrayWithOutdatedCallback(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("overwritten child array", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // Insert child array to parent array + err = parentArray.Append(childArray) + require.NoError(t, err) + + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite child array value from parent + valueStorable, err := parentArray.Set(0, Uint64Value(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[0], child) + + expectedValues[0] = Uint64Value(0) + + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) + + // modify overwritten child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + // childArray.parentUpdater is nil after callback is invoked. + require.Nil(t, childArray.parentUpdater) + + // No-op on parent + valueEqual(t, expectedValues, parentArray) + }) + + t.Run("removed child array", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValues arrayValue + + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // Insert child array to parent array + err = parentArray.Append(childArray) + require.NoError(t, err) + + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove child array value from parent + valueStorable, err := parentArray.Remove(0) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedValues[0], child) + + expectedValues = arrayValue{} + + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) + + // modify removed child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + // childArray.parentUpdater is nil after callback is invoked. 
+ require.Nil(t, childArray.parentUpdater) + + // No-op on parent + valueEqual(t, expectedValues, parentArray) + }) +} diff --git a/map.go b/map.go index 3a8d855e..d9ce5e54 100644 --- a/map.go +++ b/map.go @@ -4690,21 +4690,51 @@ func (m *OrderedMap) setCallbackWithChild( vid := c.ValueID() - c.setParentUpdater(func() error { + c.setParentUpdater(func() (found bool, err error) { // Avoid unnecessary write operation on parent container. // Child value was stored as SlabIDStorable (not inlined) in parent container, // and continues to be stored as SlabIDStorable (still not inlinable), // so no update to parent container is needed. if !c.Inlined() && !c.Inlinable(maxInlineSize) { - return nil + return true, nil + } + + // Retrieve element value under the same key and + // verify retrieved value is this child (c). + _, valueStorable, err := m.get(comparator, hip, key) + if err != nil { + var knf *KeyNotFoundError + if errors.As(err, &knf) { + return false, nil + } + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Get(). + return false, err + } + + // Verify retrieved element value is either SlabIDStorable or Slab, with identical value ID. + switch x := valueStorable.(type) { + case SlabIDStorable: + sid := SlabID(x) + if !vid.equal(sid) { + return false, nil + } + + case Slab: + sid := x.SlabID() + if !vid.equal(sid) { + return false, nil + } + + default: + return false, nil } // Set child value with parent map using same key. // Set() calls c.Storable() which returns inlined or not-inlined child storable. existingValueStorable, err := m.set(comparator, hip, key, c) if err != nil { - return err + return false, err } // Verify overwritten storable has identical value ID. @@ -4713,7 +4743,7 @@ func (m *OrderedMap) setCallbackWithChild( case SlabIDStorable: sid := SlabID(x) if !vid.equal(sid) { - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", sid, @@ -4723,7 +4753,7 @@ func (m *OrderedMap) setCallbackWithChild( case Slab: sid := x.SlabID() if !vid.equal(sid) { - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", sid, @@ -4731,17 +4761,18 @@ func (m *OrderedMap) setCallbackWithChild( } case nil: - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten value is nil")) default: - return NewFatalError( + return false, NewFatalError( fmt.Errorf( "failed to reset child value in parent updater callback: overwritten value is wrong type %T", existingValueStorable)) } - return nil + + return true, nil }) } @@ -4751,7 +4782,18 @@ func (m *OrderedMap) notifyParentIfNeeded() error { if m.parentUpdater == nil { return nil } - return m.parentUpdater() + + // If parentUpdater() doesn't find child map (m), then no-op on parent container + // and unset parentUpdater callback in child map. This can happen when child + // map is an outdated reference (removed or overwritten in parent container). 
+ found, err := m.parentUpdater() + if err != nil { + return err + } + if !found { + m.parentUpdater = nil + } + return nil } func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key Value) (bool, error) { diff --git a/map_test.go b/map_test.go index 095f9f00..5be038b8 100644 --- a/map_test.go +++ b/map_test.go @@ -13373,3 +13373,125 @@ func TestMapRemoveReturnedValue(t *testing.T) { verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) } + +func TestMapWithOutdatedCallback(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("overwritten child array", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(mapValue) + + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + k := Uint64Value(0) + + // Insert child array to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedKeyValues[k] = arrayValue{v} + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Overwrite child array value from parent + valueStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + expectedKeyValues[k] = Uint64Value(0) + + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) + + // modify overwritten child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + // childArray.parentUpdater is nil after callback is invoked. + require.Nil(t, childArray.parentUpdater) + + // No-op on parent + valueEqual(t, expectedKeyValues, parentMap) + }) + + t.Run("removed child array", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + expectedKeyValues := make(mapValue) + + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + k := Uint64Value(0) + + // Insert child array to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedKeyValues[k] = arrayValue{v} + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove child array value from parent + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) + + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) + + delete(expectedKeyValues, k) + + // childArray.parentUpdater isn't nil before callback is invoked. 
+ require.NotNil(t, childArray.parentUpdater) + + // modify removed child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + // childArray.parentUpdater is nil after callback is invoked. + require.Nil(t, childArray.parentUpdater) + + // No-op on parent + valueEqual(t, expectedKeyValues, parentMap) + }) +} diff --git a/value.go b/value.go index ec590c0c..c8be86e3 100644 --- a/value.go +++ b/value.go @@ -26,7 +26,7 @@ type ValueComparator func(SlabStorage, Value, Storable) (bool, error) type StorableComparator func(Storable, Storable) bool -type parentUpdater func() error +type parentUpdater func() (found bool, err error) // mutableValueNotifier is an interface that allows mutable child value to notify and update parent. type mutableValueNotifier interface { From 53a716fab97a3987400691772e52c4ac08df1d97 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 14:51:21 -0500 Subject: [PATCH 040/126] Replace magic numbers with constants --- array.go | 24 ++++++++++++++++++++++-- map.go | 6 +++++- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/array.go b/array.go index c6f47872..2e4a3426 100644 --- a/array.go +++ b/array.go @@ -61,14 +61,34 @@ const ( // 32 is faster than 24 and 40. linearScanThreshold = 32 + // inlined tag number size: CBOR tag number CBORTagInlinedArray or CBORTagInlinedMap + inlinedTagNumSize = 2 + + // inlined CBOR array head size: CBOR array head of 3 elements (extra data index, value id, elements) + inlinedCBORArrayHeadSize = 1 + + // inlined extra data index size: CBOR positive number encoded in 2 bytes [0, 255] (fixed-size for easy computation) + inlinedExtraDataIndexSize = 2 + + // inlined CBOR byte string head size for value ID: CBOR byte string head for byte string of 8 bytes + inlinedCBORValueIDHeadSize = 1 + + // inlined value id size: encoded in 8 bytes + inlinedValueIDSize = 8 + // inlined array data slab prefix size: // tag number (2 bytes) + // 3-element array head (1 byte) + - // extra data ref index (2 bytes) [0, 255] + + // extra data index (2 bytes) [0, 255] + // value ID index head (1 byte) + // value ID index (8 bytes) + // element array head (3 bytes) - inlinedArrayDataSlabPrefixSize = 2 + 1 + 2 + 1 + 8 + arrayDataSlabElementHeadSize + inlinedArrayDataSlabPrefixSize = inlinedTagNumSize + + inlinedCBORArrayHeadSize + + inlinedExtraDataIndexSize + + inlinedCBORValueIDHeadSize + + inlinedValueIDSize + + arrayDataSlabElementHeadSize ) type ArraySlabHeader struct { diff --git a/map.go b/map.go index d9ce5e54..8b7fa854 100644 --- a/map.go +++ b/map.go @@ -93,7 +93,11 @@ const ( // extra data ref index (2 bytes) [0, 255] + // value index head (1 byte) + // value index (8 bytes) - inlinedMapDataSlabPrefixSize = 2 + 1 + 2 + 1 + 8 + inlinedMapDataSlabPrefixSize = inlinedTagNumSize + + inlinedCBORArrayHeadSize + + inlinedExtraDataIndexSize + + inlinedCBORValueIDHeadSize + + inlinedValueIDSize ) // MaxCollisionLimitPerDigest is the noncryptographic hash collision limit From 7a2aeb15104cd5702f23a1ea5c6cfe555b3a9729 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 15:03:25 -0500 Subject: [PATCH 041/126] Use unsafe.Sizeof instead of magic number Previously, the constant expression used hardcoded numbers. 
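As a sketch of the idea (illustrative, not taken from this commit), and
assuming Address and SlabIndex are both 8-byte arrays as elsewhere in this
patch series: unsafe.Sizeof of a composite literal is a compile-time
constant, so it is legal as an array length:

	package main

	import (
		"fmt"
		"unsafe"
	)

	type Address [8]byte
	type SlabIndex [8]byte

	// The array length is derived from the component types instead of
	// hardcoding 16, so it stays correct if either component changes.
	type ValueID [unsafe.Sizeof(Address{}) + unsafe.Sizeof(SlabIndex{})]byte

	func main() {
		fmt.Println(len(ValueID{})) // 16, same as the old magic number
	}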
--- storage.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/storage.go b/storage.go
index 95725315..42ca7490 100644
--- a/storage.go
+++ b/storage.go
@@ -25,6 +25,7 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"unsafe"

 	"github.com/fxamacker/cbor/v2"
 )
@@ -37,20 +38,20 @@ const LedgerBaseStorageSlabPrefix = "$"
 // By contrast, SlabID is affected by inlining because it identifies
 // a slab in storage. Given this, ValueID should be used for
 // resource tracking, etc.
-type ValueID [16]byte
+type ValueID [unsafe.Sizeof(Address{}) + unsafe.Sizeof(SlabIndex{})]byte

 var emptyValueID = ValueID{}

 func slabIDToValueID(sid SlabID) ValueID {
 	var id ValueID
-	copy(id[:], sid.address[:])
-	copy(id[8:], sid.index[:])
+	n := copy(id[:], sid.address[:])
+	copy(id[n:], sid.index[:])
 	return id
 }

 func (vid ValueID) equal(sid SlabID) bool {
-	return bytes.Equal(vid[:8], sid.address[:]) &&
-		bytes.Equal(vid[8:], sid.index[:])
+	return bytes.Equal(vid[:len(sid.address)], sid.address[:]) &&
+		bytes.Equal(vid[len(sid.address):], sid.index[:])
 }

 func (vid ValueID) String() string {

From e6fa347db4ae96c0c28e8091793d09a3a562c41b Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Mon, 2 Oct 2023 15:20:23 -0500
Subject: [PATCH 042/126] Add comment for potential overlapping tag nums in
 Cadence

Currently, Atree uses CBOR tag numbers [247, 255] and grows downwards.
Cadence uses CBOR tag numbers [128, 224] and grows upwards.
There MUST not be any overlap.

This commit adds a comment warning about potential overlap.

We can be more proactive about this by dividing up the remaining
available tag numbers between Cadence and Atree.

NOTE: A similar comment will be added to Cadence repo at
github.com/onflow/cadence when the next atree-cadence integration
PR is opened.
---
 storable.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/storable.go b/storable.go
index 52892575..e8228d56 100644
--- a/storable.go
+++ b/storable.go
@@ -67,6 +67,11 @@ func hasPointer(storable Storable) bool {
 }

 const (
+	// WARNING: tag numbers defined here in github.com/onflow/atree
+	// MUST not overlap with tag numbers used by Cadence internal value encoding.
+	// As of Oct. 2, 2023, Cadence uses tag numbers from 128 to 224.
+	// See runtime/interpreter/encode.go at github.com/onflow/cadence.
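+	//
+	// A hypothetical compile-time guard (an illustration, not part of this
+	// commit) could assert that the lowest Atree tag number stays above
+	// Cadence's range, since converting a negative constant to uint8 is a
+	// compile-time error:
+	//
+	//	const _ = uint8(CBORTagInlinedArrayExtraData - 225)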
+ CBORTagInlinedArrayExtraData = 247 CBORTagInlinedMapExtraData = 248 CBORTagInlinedCompositeExtraData = 249 From 03acd499492eadd274209dfd14657fe04387bf5d Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 16:02:30 -0500 Subject: [PATCH 043/126] Create Array.mutableElementIndex lazily --- array.go | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/array.go b/array.go index 2e4a3426..a5612558 100644 --- a/array.go +++ b/array.go @@ -123,9 +123,8 @@ func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { return nil, NewNotValueError(a.SlabID()) } return &Array{ - Storage: storage, - root: a, - mutableElementIndex: make(map[ValueID]uint64), + Storage: storage, + root: a, }, nil } @@ -152,9 +151,8 @@ func (a *ArrayMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) { return nil, NewNotValueError(a.SlabID()) } return &Array{ - Storage: storage, - root: a, - mutableElementIndex: make(map[ValueID]uint64), + Storage: storage, + root: a, }, nil } @@ -214,6 +212,8 @@ type Array struct { // mutableElementIndex tracks index of mutable element, such as Array and OrderedMap. // This is needed by mutable element to properly update itself through parentUpdater. + // WARNING: since mutableElementIndex is created lazily, we need to create mutableElementIndex + // if it is nil before adding/updating elements. Range, delete, and read are no-ops on nil Go map. // TODO: maybe optimize by replacing map to get faster updates. mutableElementIndex map[ValueID]uint64 } @@ -2727,9 +2727,8 @@ func NewArray(storage SlabStorage, address Address, typeInfo TypeInfo) (*Array, } return &Array{ - Storage: storage, - root: root, - mutableElementIndex: make(map[ValueID]uint64), + Storage: storage, + root: root, }, nil } @@ -2750,9 +2749,8 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) { } return &Array{ - Storage: storage, - root: root, - mutableElementIndex: make(map[ValueID]uint64), + Storage: storage, + root: root, }, nil } @@ -2807,6 +2805,11 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 vid := c.ValueID() + // mutableElementIndex is lazily initialized. + if a.mutableElementIndex == nil { + a.mutableElementIndex = make(map[ValueID]uint64) + } + // Index i will be updated with array operations, which affects element index. 
a.mutableElementIndex[vid] = i @@ -3802,9 +3805,8 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn } return &Array{ - Storage: storage, - root: root, - mutableElementIndex: make(map[ValueID]uint64), + Storage: storage, + root: root, }, nil } From 90be7ac4d23abbaa3128dbd076aa2338a9b12ae5 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 16:07:33 -0500 Subject: [PATCH 044/126] Refactor to use SlabIndex instead of [8]byte --- array.go | 2 +- map.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/array.go b/array.go index a5612558..d6f2efa7 100644 --- a/array.go +++ b/array.go @@ -634,7 +634,7 @@ func DecodeInlinedArrayStorable( len(b))) } - var index [8]byte + var index SlabIndex copy(index[:], b) slabID := NewSlabID(parentSlabID.address, index) diff --git a/map.go b/map.go index 8b7fa854..ea83d641 100644 --- a/map.go +++ b/map.go @@ -2479,10 +2479,10 @@ func DecodeInlinedCompositeStorable( len(b))) } - var index [8]byte + var index SlabIndex copy(index[:], b) - slabID := NewSlabID(parentSlabID.address, SlabIndex(index)) + slabID := NewSlabID(parentSlabID.address, index) // Decode values elemCount, err := dec.DecodeArrayHead() @@ -2618,10 +2618,10 @@ func DecodeInlinedMapStorable( len(b))) } - var index [8]byte + var index SlabIndex copy(index[:], b) - slabID := NewSlabID(parentSlabID.address, SlabIndex(index)) + slabID := NewSlabID(parentSlabID.address, index) // Decode elements elements, err := newElementsFromData(dec, decodeStorable, parentSlabID, inlinedExtraData) From f3fdb2d2112958d111c76131fc7a2e125f1b3437 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 16:12:25 -0500 Subject: [PATCH 045/126] Replace magic number with constant --- array.go | 6 ++++-- map.go | 8 ++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/array.go b/array.go index d6f2efa7..5b9b72a5 100644 --- a/array.go +++ b/array.go @@ -89,6 +89,8 @@ const ( inlinedCBORValueIDHeadSize + inlinedValueIDSize + arrayDataSlabElementHeadSize + + maxInlinedExtraDataIndex = 255 ) type ArraySlabHeader struct { @@ -697,9 +699,9 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedEx extraDataIndex := inlinedTypeInfo.addArrayExtraData(a.extraData) - if extraDataIndex > 255 { + if extraDataIndex > maxInlinedExtraDataIndex { return NewEncodingError( - fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit 255", extraDataIndex)) + fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) } var err error diff --git a/map.go b/map.go index ea83d641..563eb9b9 100644 --- a/map.go +++ b/map.go @@ -2820,8 +2820,8 @@ func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedE extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData) - if extraDataIndex > 255 { - return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit 255", extraDataIndex)) + if extraDataIndex > maxInlinedExtraDataIndex { + return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) } var err error @@ -2885,9 +2885,9 @@ func encodeAsInlinedComposite( return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached composite type %d", len(keys), len(cachedKeys))) } - if extraDataIndex > 255 { + if 
extraDataIndex > maxInlinedExtraDataIndex { // This should never happen because of slab size. - return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit 255", extraDataIndex)) + return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) } var err error From 2775ff5671e1c34e8a5ce99a40fc01738e83138d Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 16:20:57 -0500 Subject: [PATCH 046/126] Refactor to use same variable in type switches --- array.go | 12 ++++++------ map.go | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/array.go b/array.go index 5b9b72a5..7801acf4 100644 --- a/array.go +++ b/array.go @@ -2838,15 +2838,15 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 } // Verify retrieved element is either SlabIDStorable or Slab, with identical value ID. - switch x := storable.(type) { + switch storable := storable.(type) { case SlabIDStorable: - sid := SlabID(x) + sid := SlabID(storable) if !vid.equal(sid) { return false, nil } case Slab: - sid := x.SlabID() + sid := storable.SlabID() if !vid.equal(sid) { return false, nil } @@ -2864,9 +2864,9 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 // Verify overwritten storable has identical value ID. - switch x := existingValueStorable.(type) { + switch existingValueStorable := existingValueStorable.(type) { case SlabIDStorable: - sid := SlabID(x) + sid := SlabID(existingValueStorable) if !vid.equal(sid) { return false, NewFatalError( fmt.Errorf( @@ -2876,7 +2876,7 @@ func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64 } case Slab: - sid := x.SlabID() + sid := existingValueStorable.SlabID() if !vid.equal(sid) { return false, NewFatalError( fmt.Errorf( diff --git a/map.go b/map.go index 563eb9b9..6f4a7fb1 100644 --- a/map.go +++ b/map.go @@ -4717,15 +4717,15 @@ func (m *OrderedMap) setCallbackWithChild( } // Verify retrieved element value is either SlabIDStorable or Slab, with identical value ID. - switch x := valueStorable.(type) { + switch valueStorable := valueStorable.(type) { case SlabIDStorable: - sid := SlabID(x) + sid := SlabID(valueStorable) if !vid.equal(sid) { return false, nil } case Slab: - sid := x.SlabID() + sid := valueStorable.SlabID() if !vid.equal(sid) { return false, nil } @@ -4743,9 +4743,9 @@ func (m *OrderedMap) setCallbackWithChild( // Verify overwritten storable has identical value ID. 
- switch x := existingValueStorable.(type) { + switch existingValueStorable := existingValueStorable.(type) { case SlabIDStorable: - sid := SlabID(x) + sid := SlabID(existingValueStorable) if !vid.equal(sid) { return false, NewFatalError( fmt.Errorf( @@ -4755,7 +4755,7 @@ func (m *OrderedMap) setCallbackWithChild( } case Slab: - sid := x.SlabID() + sid := existingValueStorable.SlabID() if !vid.equal(sid) { return false, NewFatalError( fmt.Errorf( From 836eb70922e82d00723b90ebf022275be5fb74a9 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 17:23:22 -0500 Subject: [PATCH 047/126] Reuse buffer from pool when encoding elements --- array.go | 26 +++++++++++++++++++++++--- map.go | 11 ++++++----- 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/array.go b/array.go index 7801acf4..80b5cc80 100644 --- a/array.go +++ b/array.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "strings" + "sync" "github.com/fxamacker/cbor/v2" ) @@ -220,6 +221,23 @@ type Array struct { mutableElementIndex map[ValueID]uint64 } +var bufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(int(maxThreshold)) + return e + }, +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func putBuffer(e *bytes.Buffer) { + e.Reset() + bufferPool.Put(e) +} + var _ Value = &Array{} var _ mutableValueNotifier = &Array{} @@ -775,9 +793,11 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { inlinedTypes := newInlinedExtraData() - // TODO: maybe use a buffer pool - var elementBuf bytes.Buffer - elementEnc := NewEncoder(&elementBuf, enc.encMode) + // Get a buffer from a pool to encode elements. + elementBuf := getBuffer() + defer putBuffer(elementBuf) + + elementEnc := NewEncoder(elementBuf, enc.encMode) err := a.encodeElements(elementEnc, inlinedTypes) if err != nil { diff --git a/map.go b/map.go index 6f4a7fb1..3aec8eeb 100644 --- a/map.go +++ b/map.go @@ -19,7 +19,6 @@ package atree import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -2683,9 +2682,11 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { inlinedTypes := newInlinedExtraData() - // TODO: maybe use a buffer pool - var buf bytes.Buffer - elemEnc := NewEncoder(&buf, enc.encMode) + // Get a buffer from a pool to encode elements. 
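+	// Note: getBuffer/putBuffer (defined in array.go) share one sync.Pool:
+	// the pool's New pre-grows each buffer to maxThreshold, and putBuffer
+	// resets the buffer before returning it, so it is safe to reuse here.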
+ elementBuf := getBuffer() + defer putBuffer(elementBuf) + + elemEnc := NewEncoder(elementBuf, enc.encMode) err := m.encodeElements(elemEnc, inlinedTypes) if err != nil { @@ -2763,7 +2764,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode elements - err = enc.CBOR.EncodeRawBytes(buf.Bytes()) + err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes()) if err != nil { return NewEncodingError(err) } From 4b3c26cb860e418238e7791842ec746248a46f79 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 2 Oct 2023 18:21:27 -0500 Subject: [PATCH 048/126] Rename composite to compactMap --- array_debug.go | 14 ++++----- map.go | 52 ++++++++++++++++----------------- map_debug.go | 10 +++---- map_test.go | 30 +++++++++---------- storable.go | 14 ++++----- storable_test.go | 4 +-- typeinfo.go | 76 ++++++++++++++++++++++++------------------------ 7 files changed, 100 insertions(+), 100 deletions(-) diff --git a/array_debug.go b/array_debug.go index 91392c70..22b69cb4 100644 --- a/array_debug.go +++ b/array_debug.go @@ -563,14 +563,14 @@ func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error { } // Extra check: encoded data size == header.size - // This check is skipped for slabs with inlined composite because + // This check is skipped for slabs with inlined compact map because // encoded size and slab size differ for inlined composites. // For inlined composites, digests and field keys are encoded in - // composite extra data section for reuse, and only composite field + // compact map extra data section for reuse, and only compact map field // values are encoded in non-extra data section. - // This reduces encoding size because composite values of the same - // composite type can reuse encoded type info, seed, digests, and field names. - // TODO: maybe add size check for slabs with inlined composite by decoding entire slab. + // This reduces encoding size because compact map values of the same + // compact map type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined compact map by decoding entire slab. inlinedComposite, err := hasInlinedComposite(data) if err != nil { // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). @@ -832,7 +832,7 @@ func hasInlinedComposite(data []byte) (bool, error) { data = data[len(b):] } - // Parse inlined extra data to find composite extra data. + // Parse inlined extra data to find compact map extra data. dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) count, err := dec.DecodeArrayHead() if err != nil { @@ -844,7 +844,7 @@ func hasInlinedComposite(data []byte) (bool, error) { if err != nil { return false, NewDecodingError(err) } - if tagNum == CBORTagInlinedCompositeExtraData { + if tagNum == CBORTagInlinedCompactMapExtraData { return true, nil } err = dec.Skip() diff --git a/map.go b/map.go index 3aec8eeb..f361119b 100644 --- a/map.go +++ b/map.go @@ -2410,8 +2410,8 @@ func newMapDataSlabFromDataV1( }, nil } -// DecodeInlinedCompositeStorable decodes inlined composite data. Encoding is -// version 1 with CBOR tag having tag number CBORTagInlinedComposite, and tag contant +// DecodeInlinedCompactMapStorable decodes inlined compact map data. 
Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedCompactMap, and tag content
+// as 3-element array:
 //
 // - index of inlined extra data
 // - value ID index
 // - values (array of elements)
 //
 // NOTE: This function doesn't decode tag number because tag number is decoded
 // in the caller and decoder only contains tag content.
-func DecodeInlinedCompositeStorable(
+func DecodeInlinedCompactMapStorable(
 	dec *cbor.StreamDecoder,
 	decodeStorable StorableDecoder,
 	parentSlabID SlabID,
@@ -2439,7 +2439,7 @@ func DecodeInlinedCompositeStorable(
 	if arrayCount != inlinedMapDataSlabArrayCount {
 		return nil, NewDecodingError(
 			fmt.Errorf(
-				"failed to decode inlined composite, expect array of %d elements, got %d elements",
+				"failed to decode inlined compact map data, expect array of %d elements, got %d elements",
 				inlinedMapDataSlabArrayCount,
 				arrayCount))
 	}
@@ -2452,16 +2452,16 @@ func DecodeInlinedCompositeStorable(
 	if extraDataIndex >= uint64(len(inlinedExtraData)) {
 		return nil, NewDecodingError(
 			fmt.Errorf(
-				"failed to decode inlined composite: inlined extra data index %d exceeds number of inlined extra data %d",
+				"failed to decode inlined compact map data: inlined extra data index %d exceeds number of inlined extra data %d",
 				extraDataIndex, len(inlinedExtraData)))
 	}

-	extraData, ok := inlinedExtraData[extraDataIndex].(*compositeExtraData)
+	extraData, ok := inlinedExtraData[extraDataIndex].(*compactMapExtraData)
 	if !ok {
 		return nil, NewDecodingError(
 			fmt.Errorf(
-				"failed to decode inlined composite: expect *compositeExtraData, got %T",
+				"failed to decode inlined compact map data: expect *compactMapExtraData, got %T",
 				inlinedExtraData[extraDataIndex]))
 	}

@@ -2473,7 +2473,7 @@ func DecodeInlinedCompositeStorable(
 	if len(b) != slabIndexSize {
 		return nil, NewDecodingError(
 			fmt.Errorf(
-				"failed to decode inlined composite: expect %d bytes for slab index, got %d bytes",
+				"failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes",
 				slabIndexSize,
 				len(b)))
 	}

@@ -2492,12 +2492,12 @@ func DecodeInlinedCompositeStorable(
 	if elemCount != uint64(len(extraData.keys)) {
 		return nil, NewDecodingError(
 			fmt.Errorf(
-				"failed to decode composite values: got %d, expect %d",
+				"failed to decode compact map values: got %d, expect %d",
 				elemCount,
 				extraData.mapExtraData.Count))
 	}

-	// Make a copy of digests because extraData is shared by all inlined composite referring to the same type.
+	// Make a copy of digests because extraData is shared by all inlined compact map data referring to the same type.
hkeys := make([]Digest, len(extraData.hkeys)) copy(hkeys, extraData.hkeys) @@ -2592,7 +2592,7 @@ func DecodeInlinedMapStorable( if extraDataIndex >= uint64(len(inlinedExtraData)) { return nil, NewDecodingError( fmt.Errorf( - "failed to decode inlined composite: inlined extra data index %d exceeds number of inlined extra data %d", + "failed to decode inlined compact map data: inlined extra data index %d exceeds number of inlined extra data %d", extraDataIndex, len(inlinedExtraData))) } @@ -2612,7 +2612,7 @@ func DecodeInlinedMapStorable( if len(b) != slabIndexSize { return nil, NewDecodingError( fmt.Errorf( - "failed to decode inlined composite: expect %d bytes for slab index, got %d bytes", + "failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes", slabIndexSize, len(b))) } @@ -2810,8 +2810,8 @@ func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtr fmt.Errorf("failed to encode standalone map data slab as inlined")) } - if hkeys, keys, values, ok := m.canBeEncodedAsComposite(); ok { - return encodeAsInlinedComposite(enc, m.header.slabID, m.extraData, hkeys, keys, values, inlinedTypeInfo) + if hkeys, keys, values, ok := m.canBeEncodedAsCompactMap(); ok { + return encodeAsInlinedCompactMap(enc, m.header.slabID, m.extraData, hkeys, keys, values, inlinedTypeInfo) } return m.encodeAsInlinedMap(enc, inlinedTypeInfo) @@ -2869,8 +2869,8 @@ func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedE return nil } -// encodeAsInlinedComposite encodes hkeys, keys, and values as inlined composite value. -func encodeAsInlinedComposite( +// encodeAsInlinedCompactMap encodes hkeys, keys, and values as inlined compact map value. +func encodeAsInlinedCompactMap( enc *Encoder, slabID SlabID, extraData *MapExtraData, @@ -2880,10 +2880,10 @@ func encodeAsInlinedComposite( inlinedTypeInfo *inlinedExtraData, ) error { - extraDataIndex, cachedKeys := inlinedTypeInfo.addCompositeExtraData(extraData, hkeys, keys) + extraDataIndex, cachedKeys := inlinedTypeInfo.addCompactMapExtraData(extraData, hkeys, keys) if len(keys) != len(cachedKeys) { - return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached composite type %d", len(keys), len(cachedKeys))) + return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached compact map type %d", len(keys), len(cachedKeys))) } if extraDataIndex > maxInlinedExtraDataIndex { @@ -2896,7 +2896,7 @@ func encodeAsInlinedComposite( // Encode tag number and array head of 3 elements err = enc.CBOR.EncodeRawBytes([]byte{ // tag number - 0xd8, CBORTagInlinedComposite, + 0xd8, CBORTagInlinedCompactMap, // array head of 3 elements 0x83, }) @@ -2920,8 +2920,8 @@ func encodeAsInlinedComposite( return NewEncodingError(err) } - // element 2: composite values in the order of cachedKeys - err = encodeCompositeValues(enc, cachedKeys, keys, values, inlinedTypeInfo) + // element 2: compact map values in the order of cachedKeys + err = encodeCompactMapValues(enc, cachedKeys, keys, values, inlinedTypeInfo) if err != nil { return NewEncodingError(err) } @@ -2934,8 +2934,8 @@ func encodeAsInlinedComposite( return nil } -// encodeCompositeValues encodes composite values as an array of values ordered by cachedKeys. -func encodeCompositeValues( +// encodeCompactMapValues encodes compact values as an array of values ordered by cachedKeys. 
+func encodeCompactMapValues( enc *Encoder, cachedKeys []ComparableStorable, keys []ComparableStorable, @@ -2983,12 +2983,12 @@ func encodeCompositeValues( return nil } -// canBeEncodedAsComposite returns true if: +// canBeEncodedAsCompactMap returns true if: // - map data slab is inlined -// - map is composite type +// - map type is composite type // - no collision elements // - keys are stored inline (not in a separate slab) -func (m *MapDataSlab) canBeEncodedAsComposite() ([]Digest, []ComparableStorable, []Storable, bool) { +func (m *MapDataSlab) canBeEncodedAsCompactMap() ([]Digest, []ComparableStorable, []Storable, bool) { if !m.inlined { return nil, nil, nil, false } diff --git a/map_debug.go b/map_debug.go index 59094005..1d9a8fea 100644 --- a/map_debug.go +++ b/map_debug.go @@ -967,14 +967,14 @@ func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error { } // Extra check: encoded data size == header.size - // This check is skipped for slabs with inlined composite because + // This check is skipped for slabs with inlined compact map because // encoded size and slab size differ for inlined composites. // For inlined composites, digests and field keys are encoded in - // composite extra data section for reuse, and only composite field + // compact map extra data section for reuse, and only compact map field // values are encoded in non-extra data section. - // This reduces encoding size because composite values of the same - // composite type can reuse encoded type info, seed, digests, and field names. - // TODO: maybe add size check for slabs with inlined composite by decoding entire slab. + // This reduces encoding size because compact map values of the same + // compact map type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined compact map by decoding entire slab. inlinedComposite, err := hasInlinedComposite(data) if err != nil { // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). 
diff --git a/map_test.go b/map_test.go index 5be038b8..ef64e67b 100644 --- a/map_test.go +++ b/map_test.go @@ -6488,7 +6488,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 0 0xd8, 0xa4, 0x00, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -6505,7 +6505,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 2 0xd8, 0xa4, 0x01, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -6658,7 +6658,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 0 0xd8, 0xa4, 0x00, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -6677,7 +6677,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 2 0xd8, 0xa4, 0x01, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -6835,7 +6835,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 0 0xd8, 0xa4, 0x00, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -6854,7 +6854,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 2 0xd8, 0xa4, 0x01, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7063,7 +7063,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 0 0xd8, 0xa4, 0x00, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7082,7 +7082,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 1 0xd8, 0xa4, 0x01, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7101,7 +7101,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 2 0xd8, 0xa4, 0x02, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7276,7 +7276,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 0 0xd8, 0xa4, 0x00, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7295,7 +7295,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 2 0xd8, 0xa4, 0x01, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7479,7 +7479,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 0 0xd8, 0xa4, 0x00, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7498,7 +7498,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 1 0xd8, 0xa4, 0x01, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7517,7 +7517,7 @@ func TestMapEncodeDecode(t 
*testing.T) { 0x82, // key: 2 0xd8, 0xa4, 0x02, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, @@ -7536,7 +7536,7 @@ func TestMapEncodeDecode(t *testing.T) { 0x82, // key: 3 0xd8, 0xa4, 0x03, - // value: inlined composite (tag: CBORTagInlinedComposite) + // value: inlined composite (tag: CBORTagInlinedCompactMap) 0xd8, 0xfc, // array of 3 elements 0x83, diff --git a/storable.go b/storable.go index e8228d56..02888130 100644 --- a/storable.go +++ b/storable.go @@ -38,7 +38,7 @@ type Storable interface { } // ComparableStorable is an interface that supports comparison and cloning of Storable. -// This is only used for composite keys. +// This is only used for compact keys. type ComparableStorable interface { Storable @@ -72,13 +72,13 @@ const ( // As of Oct. 2, 2023, Cadence uses tag numbers from 128 to 224. // See runtime/interpreter/encode.go at github.com/onflow/cadence. - CBORTagInlinedArrayExtraData = 247 - CBORTagInlinedMapExtraData = 248 - CBORTagInlinedCompositeExtraData = 249 + CBORTagInlinedArrayExtraData = 247 + CBORTagInlinedMapExtraData = 248 + CBORTagInlinedCompactMapExtraData = 249 - CBORTagInlinedArray = 250 - CBORTagInlinedMap = 251 - CBORTagInlinedComposite = 252 + CBORTagInlinedArray = 250 + CBORTagInlinedMap = 251 + CBORTagInlinedCompactMap = 252 CBORTagInlineCollisionGroup = 253 CBORTagExternalCollisionGroup = 254 diff --git a/storable_test.go b/storable_test.go index 4cd52b6f..12b732f2 100644 --- a/storable_test.go +++ b/storable_test.go @@ -480,8 +480,8 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID, inlinedExtraData []Extra case CBORTagInlinedMap: return DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData) - case CBORTagInlinedComposite: - return DecodeInlinedCompositeStorable(dec, decodeStorable, id, inlinedExtraData) + case CBORTagInlinedCompactMap: + return DecodeInlinedCompactMapStorable(dec, decodeStorable, id, inlinedExtraData) case CBORTagSlabID: return DecodeSlabIDStorable(dec) diff --git a/typeinfo.go b/typeinfo.go index 7f0a34bc..6702135a 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -45,26 +45,26 @@ type ExtraData interface { Encode(enc *Encoder) error } -// compositeExtraData is used for inlining composite values. -// compositeExtraData includes hkeys and keys with map extra data +// compactMapExtraData is used for inlining compact values. +// compactMapExtraData includes hkeys and keys with map extra data // because hkeys and keys are the same in order and content for -// all values with the same composite type and map seed. -type compositeExtraData struct { +// all values with the same compact type and map seed. 
+type compactMapExtraData struct { mapExtraData *MapExtraData hkeys []Digest // hkeys is ordered by mapExtraData.Seed keys []ComparableStorable // keys is ordered by mapExtraData.Seed } -var _ ExtraData = &compositeExtraData{} +var _ ExtraData = &compactMapExtraData{} -const compositeExtraDataLength = 3 +const compactMapExtraDataLength = 3 -func (c *compositeExtraData) isExtraData() bool { +func (c *compactMapExtraData) isExtraData() bool { return true } -func (c *compositeExtraData) Encode(enc *Encoder) error { - err := enc.CBOR.EncodeArrayHead(compositeExtraDataLength) +func (c *compactMapExtraData) Encode(enc *Encoder) error { + err := enc.CBOR.EncodeArrayHead(compactMapExtraDataLength) if err != nil { return NewEncodingError(err) } @@ -115,21 +115,21 @@ func (c *compositeExtraData) Encode(enc *Encoder) error { return nil } -func newCompositeExtraData( +func newCompactMapExtraData( dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder, decodeStorable StorableDecoder, -) (*compositeExtraData, error) { +) (*compactMapExtraData, error) { length, err := dec.DecodeArrayHead() if err != nil { return nil, NewDecodingError(err) } - if length != compositeExtraDataLength { + if length != compactMapExtraDataLength { return nil, NewDecodingError( fmt.Errorf( - "composite extra data has invalid length %d, want %d", + "compact extra data has invalid length %d, want %d", length, arrayExtraDataLength, )) @@ -166,7 +166,7 @@ func newCompositeExtraData( if keyCount != uint64(digestCount) { return nil, NewDecodingError( fmt.Errorf( - "decoding composite key failed: number of keys %d is different from number of digests %d", + "decoding compact map key failed: number of keys %d is different from number of digests %d", keyCount, digestCount)) } @@ -178,37 +178,37 @@ func newCompositeExtraData( keys := make([]ComparableStorable, keyCount) for i := uint64(0); i < keyCount; i++ { - // Decode composite key + // Decode compact map key key, err := decodeStorable(dec, SlabIDUndefined, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") } - compositeKey, ok := key.(ComparableStorable) + compactMapKey, ok := key.(ComparableStorable) if !ok { return nil, NewDecodingError(fmt.Errorf("failed to decode key's storable: got %T, expect ComparableStorable", key)) } - keys[i] = compositeKey + keys[i] = compactMapKey } - return &compositeExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil + return &compactMapExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil } -type compositeTypeInfo struct { +type compactMapTypeInfo struct { index int keys []ComparableStorable } type inlinedExtraData struct { - extraData []ExtraData - compositeTypes map[string]compositeTypeInfo - arrayTypes map[string]int + extraData []ExtraData + compactMapTypes map[string]compactMapTypeInfo + arrayTypes map[string]int } func newInlinedExtraData() *inlinedExtraData { return &inlinedExtraData{ - compositeTypes: make(map[string]compositeTypeInfo), - arrayTypes: make(map[string]int), + compactMapTypes: make(map[string]compactMapTypeInfo), + arrayTypes: make(map[string]int), } } @@ -229,8 +229,8 @@ func (ied *inlinedExtraData) Encode(enc *Encoder) error { case *MapExtraData: tagNum = CBORTagInlinedMapExtraData - case *compositeExtraData: - tagNum = CBORTagInlinedCompositeExtraData + case *compactMapExtraData: + tagNum = CBORTagInlinedCompactMapExtraData default: return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraData)) @@ -293,8 +293,8 @@ func newInlinedExtraDataFromData( return nil, nil, err } - case CBORTagInlinedCompositeExtraData: - inlinedExtraData[i], err = newCompositeExtraData(dec, decodeTypeInfo, decodeStorable) + case CBORTagInlinedCompactMapExtraData: + inlinedExtraData[i], err = newCompactMapExtraData(dec, decodeTypeInfo, decodeStorable) if err != nil { return nil, nil, err } @@ -331,30 +331,30 @@ func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int { return index } -// addCompositeExtraData returns index of deduplicated composite extra data. -// Composite extra data is deduplicated by TypeInfo.ID() with sorted field names. -func (ied *inlinedExtraData) addCompositeExtraData( +// addCompactMapExtraData returns index of deduplicated compact map extra data. +// Compact map extra data is deduplicated by TypeInfo.ID() with sorted field names. +func (ied *inlinedExtraData) addCompactMapExtraData( data *MapExtraData, digests []Digest, keys []ComparableStorable, ) (int, []ComparableStorable) { - id := makeCompositeTypeID(data.TypeInfo, keys) - info, exist := ied.compositeTypes[id] + id := makeCompactMapTypeID(data.TypeInfo, keys) + info, exist := ied.compactMapTypes[id] if exist { return info.index, info.keys } - compositeData := &compositeExtraData{ + compactMapData := &compactMapExtraData{ mapExtraData: data, hkeys: digests, keys: keys, } index := len(ied.extraData) - ied.extraData = append(ied.extraData, compositeData) + ied.extraData = append(ied.extraData, compactMapData) - ied.compositeTypes[id] = compositeTypeInfo{ + ied.compactMapTypes[id] = compactMapTypeInfo{ keys: keys, index: index, } @@ -366,8 +366,8 @@ func (ied *inlinedExtraData) empty() bool { return len(ied.extraData) == 0 } -// makeCompositeTypeID returns id of concatenated t.ID() with sorted names with "," as separator. -func makeCompositeTypeID(t TypeInfo, names []ComparableStorable) string { +// makeCompactMapTypeID returns id of concatenated t.ID() with sorted names with "," as separator. 
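+// For example (illustrative, not from this commit): a type whose ID() is "A"
+// with field names "x" and "y" would yield the ID "A,x,y".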
+func makeCompactMapTypeID(t TypeInfo, names []ComparableStorable) string { const separator = "," if len(names) == 1 { From 3f87ec5af875c15a841d113b42eab86775b7ba7f Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 3 Oct 2023 12:04:40 -0500 Subject: [PATCH 049/126] Check duplicate SlabID in inlined slabs in tests This commit adds an extra check in exported validation functions that are only used for tests in Atree but can also be used by Cadence. --- array_debug.go | 24 ++- array_test.go | 332 ++++++++++++++++++------------------- map.go | 4 +- map_debug.go | 48 ++++-- map_test.go | 440 ++++++++++++++++++++++++------------------------- 5 files changed, 440 insertions(+), 408 deletions(-) diff --git a/array_debug.go b/array_debug.go index 22b69cb4..18b88556 100644 --- a/array_debug.go +++ b/array_debug.go @@ -168,6 +168,10 @@ func DumpArraySlabs(a *Array) ([]string, error) { type TypeInfoComparator func(TypeInfo, TypeInfo) bool func VerifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { + return verifyArray(a, address, typeInfo, tic, hip, inlineEnabled, map[SlabID]struct{}{}) +} + +func verifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { // Verify array address (independent of array inlined status) if address != a.Address() { return NewFatalError(fmt.Errorf("array address %v, got %v", address, a.Address())) @@ -210,7 +214,7 @@ func VerifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoCompa } // Verify array slabs - computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifySlab(a.root, 0, nil, []SlabID{}, []SlabID{}) + computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifySlab(a.root, 0, nil, []SlabID{}, []SlabID{}, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return err @@ -245,6 +249,7 @@ func (v *arrayVerifier) verifySlab( headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -253,6 +258,13 @@ func (v *arrayVerifier) verifySlab( ) { id := slab.Header().slabID + // Verify SlabID is unique + if _, exist := slabIDs[id]; exist { + return 0, nil, nil, NewFatalError(fmt.Errorf("found duplicate slab ID %s", id)) + } + + slabIDs[id] = struct{}{} + // Verify slab address (independent of array inlined status) if v.address != id.address { return 0, nil, nil, NewFatalError(fmt.Errorf("array slab address %v, got %v", v.address, id.address)) @@ -298,10 +310,10 @@ func (v *arrayVerifier) verifySlab( switch slab := slab.(type) { case *ArrayDataSlab: - return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs) + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, slabIDs) case *ArrayMetaDataSlab: - return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs) + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, slabIDs) default: return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) @@ -313,6 +325,7 @@ func (v *arrayVerifier) verifyDataSlab( level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -402,7 +415,7 @@ func (v *arrayVerifier) verifyDataSlab( } // Verify element - err = verifyValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled) + err = verifyValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyValue(). return 0, nil, nil, fmt.Errorf( @@ -420,6 +433,7 @@ func (v *arrayVerifier) verifyMetaDataSlab( level int, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, ) ( elementCount uint32, _dataSlabIDs []SlabID, @@ -467,7 +481,7 @@ func (v *arrayVerifier) verifyMetaDataSlab( // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return 0, nil, nil, err diff --git a/array_test.go b/array_test.go index 6026f5d8..0cdf231f 100644 --- a/array_test.go +++ b/array_test.go @@ -29,27 +29,27 @@ import ( "github.com/stretchr/testify/require" ) -func verifyEmptyArrayV0( +func testEmptyArrayV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, array *Array, ) { - verifyArrayV0(t, storage, typeInfo, address, array, nil, false) + testArrayV0(t, storage, typeInfo, address, array, nil, false) } -func verifyEmptyArray( +func testEmptyArray( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, array *Array, ) { - verifyArray(t, storage, typeInfo, address, array, nil, false) + testArray(t, storage, typeInfo, address, array, nil, false) } -func verifyArrayV0( +func testArrayV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -58,10 +58,10 @@ func verifyArrayV0( values []Value, hasNestedArrayMapElement bool, ) { - _verifyArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, false) + _testArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, false) } -func verifyArray( +func testArray( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -70,11 +70,11 @@ func verifyArray( values []Value, hasNestedArrayMapElement bool, ) { - _verifyArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, true) + _testArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, true) } -// verifyArray verifies array elements and validates serialization and in-memory slab tree. -func _verifyArray( +// _testArray tests array elements, serialization, and in-memory slab tree. +func _testArray( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -195,7 +195,7 @@ func TestArrayAppendAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArraySetAndGet(t *testing.T) { @@ -218,7 +218,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -233,7 +233,7 @@ func TestArraySetAndGet(t *testing.T) { valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) // This tests slabs splitting and root slab reassignment caused by Set operation. @@ -264,7 +264,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -279,7 +279,7 @@ func TestArraySetAndGet(t *testing.T) { valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) // This tests slabs merging and root slab reassignment caused by Set operation. 
@@ -311,7 +311,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -326,7 +326,7 @@ func TestArraySetAndGet(t *testing.T) { valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -361,7 +361,7 @@ func TestArraySetAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -389,7 +389,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert-last", func(t *testing.T) { @@ -411,7 +411,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert", func(t *testing.T) { @@ -444,7 +444,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -478,7 +478,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -526,11 +526,11 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, arraySize-i-1, array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values[i+1:], false) + testArray(t, storage, typeInfo, address, array, values[i+1:], false) } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("remove-last", func(t *testing.T) { @@ -573,11 +573,11 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, uint64(i), array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values[:i], false) + testArray(t, storage, typeInfo, address, array, values[:i], false) } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("remove", func(t *testing.T) { @@ -626,13 +626,13 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, uint64(len(values)), array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } } require.Equal(t, arraySize/2, len(values)) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -664,7 +664,7 @@ func TestArrayRemove(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBounds) require.ErrorAs(t, userError, &indexOutOfBounds) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, 
address, array, values, false) }) } @@ -1138,7 +1138,7 @@ func TestArraySetRandomValues(t *testing.T) { valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArrayInsertRandomValues(t *testing.T) { @@ -1168,7 +1168,7 @@ func TestArrayInsertRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert-last", func(t *testing.T) { @@ -1193,7 +1193,7 @@ func TestArrayInsertRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert-random", func(t *testing.T) { @@ -1221,7 +1221,7 @@ func TestArrayInsertRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -1251,7 +1251,7 @@ func TestArrayRemoveRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) // Remove n elements at random index for i := uint64(0); i < arraySize; i++ { @@ -1273,7 +1273,7 @@ func TestArrayRemoveRandomValues(t *testing.T) { } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) } func testArrayAppendSetInsertRemoveRandomValues( @@ -1393,7 +1393,7 @@ func TestArrayAppendSetInsertRemoveRandomValues(t *testing.T) { address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArrayWithChildArrayMap(t *testing.T) { @@ -1434,7 +1434,7 @@ func TestArrayWithChildArrayMap(t *testing.T) { expectedValues[i] = arrayValue{v} } - verifyArray(t, storage, typeInfo, address, array, expectedValues, false) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) t.Run("big array", func(t *testing.T) { @@ -1475,7 +1475,7 @@ func TestArrayWithChildArrayMap(t *testing.T) { expectedValues[i] = arrayValue(expectedChildArrayValues) } - verifyArray(t, storage, typeInfo, address, array, expectedValues, true) + testArray(t, storage, typeInfo, address, array, expectedValues, true) }) t.Run("small map", func(t *testing.T) { @@ -1509,7 +1509,7 @@ func TestArrayWithChildArrayMap(t *testing.T) { expectedValues[i] = mapValue{k: v} } - verifyArray(t, storage, typeInfo, address, array, expectedValues, false) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) t.Run("big map", func(t *testing.T) { @@ -1550,7 +1550,7 @@ func TestArrayWithChildArrayMap(t *testing.T) { expectedValues[i] = expectedChildMapValues } - verifyArray(t, storage, typeInfo, address, array, expectedValues, true) + testArray(t, storage, typeInfo, address, array, expectedValues, true) }) } @@ -1596,7 +1596,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage, arraySlabID) require.NoError(t, err) - verifyEmptyArrayV0(t, storage, typeInfo, address, array) + testEmptyArrayV0(t, storage, typeInfo, address, array) }) t.Run("dataslab as root", func(t *testing.T) { 
@@ -1642,7 +1642,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage, arraySlabID) require.NoError(t, err) - verifyArrayV0(t, storage, typeInfo, address, array, values, false) + testArrayV0(t, storage, typeInfo, address, array, values, false) }) t.Run("metadataslab as root", func(t *testing.T) { @@ -1778,7 +1778,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage2, arraySlabID) require.NoError(t, err) - verifyArrayV0(t, storage2, typeInfo, address, array, values, false) + testArrayV0(t, storage2, typeInfo, address, array, values, false) }) } @@ -1823,7 +1823,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyEmptyArray(t, storage2, typeInfo, address, array2) + testEmptyArray(t, storage2, typeInfo, address, array2) }) t.Run("root dataslab", func(t *testing.T) { @@ -1869,7 +1869,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + testArray(t, storage2, typeInfo, address, array2, values, false) }) t.Run("root metadata slab", func(t *testing.T) { @@ -1981,7 +1981,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + testArray(t, storage2, typeInfo, address, array2, values, false) }) // Same type info is reused. @@ -2055,7 +2055,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) // Different type info are encoded. @@ -2140,7 +2140,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) // Same type info is reused. 
@@ -2228,7 +2228,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("root data slab, multiple levels of inlined array of different type", func(t *testing.T) { @@ -2337,7 +2337,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("root metadata slab, inlined array of same type", func(t *testing.T) { @@ -2476,7 +2476,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("root metadata slab, inlined array of different type", func(t *testing.T) { @@ -2627,7 +2627,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("has pointers", func(t *testing.T) { @@ -2789,7 +2789,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) t.Run("has pointers in inlined slab", func(t *testing.T) { @@ -2972,7 +2972,7 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, expectedValues, false) + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) } @@ -2991,7 +2991,7 @@ func TestArrayEncodeDecodeRandomValues(t *testing.T) { array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) // Decode data to new storage storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) @@ -3000,7 +3000,7 @@ func TestArrayEncodeDecodeRandomValues(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + testArray(t, storage2, typeInfo, address, array2, values, false) } func TestEmptyArray(t *testing.T) { @@ -3109,7 +3109,7 @@ func TestArrayStringElement(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) stats, err := GetArrayStats(array) require.NoError(t, err) @@ -3142,7 +3142,7 @@ func TestArrayStringElement(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) stats, err := GetArrayStats(array) require.NoError(t, err) @@ -3189,7 +3189,7 @@ func 
TestArrayStoredValue(t *testing.T) { array2, ok := value.(*Array) require.True(t, ok) - verifyArray(t, storage, typeInfo, address, array2, values, false) + testArray(t, storage, typeInfo, address, array2, values, false) } else { require.Equal(t, 1, errorCategorizationCount(err)) var fatalError *FatalError @@ -3219,7 +3219,7 @@ func TestArrayPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(0), i) - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("root-dataslab", func(t *testing.T) { @@ -3251,7 +3251,7 @@ func TestArrayPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, arraySize, i) - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("root-metaslab", func(t *testing.T) { @@ -3285,7 +3285,7 @@ func TestArrayPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, arraySize, i) - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) } @@ -3317,7 +3317,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, copied.SlabID(), array.SlabID()) - verifyEmptyArray(t, storage, typeInfo, address, copied) + testEmptyArray(t, storage, typeInfo, address, copied) }) t.Run("root-dataslab", func(t *testing.T) { @@ -3358,7 +3358,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, copied.SlabID(), array.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + testArray(t, storage, typeInfo, address, copied, values, false) }) t.Run("root-metaslab", func(t *testing.T) { @@ -3401,7 +3401,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + testArray(t, storage, typeInfo, address, copied, values, false) }) t.Run("rebalance two data slabs", func(t *testing.T) { @@ -3451,7 +3451,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + testArray(t, storage, typeInfo, address, copied, values, false) }) t.Run("merge two data slabs", func(t *testing.T) { @@ -3501,7 +3501,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + testArray(t, storage, typeInfo, address, copied, values, false) }) t.Run("random", func(t *testing.T) { @@ -3548,7 +3548,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + testArray(t, storage, typeInfo, address, copied, values, false) }) t.Run("data slab too large", func(t *testing.T) { @@ -3602,7 +3602,7 @@ func TestArrayFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + testArray(t, storage, typeInfo, address, copied, values, false) }) } @@ -3630,7 +3630,7 @@ func TestArrayNestedStorables(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, true) + testArray(t, storage, typeInfo, address, array, values, true) } func 
TestArrayMaxInlineElement(t *testing.T) { @@ -3662,7 +3662,7 @@ func TestArrayMaxInlineElement(t *testing.T) { // (for rounding when computing max inline array element size). require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArrayString(t *testing.T) { @@ -3821,7 +3821,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, nil) + testArrayLoadedElements(t, array, nil) }) t.Run("root data slab with simple values", func(t *testing.T) { @@ -3834,7 +3834,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) }) t.Run("root data slab with composite values", func(t *testing.T) { @@ -3848,7 +3848,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1+arraySize, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) }) t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { @@ -3862,7 +3862,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1+arraySize, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element from front to back for i := 0; i < len(values); i++ { @@ -3872,7 +3872,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + testArrayLoadedElements(t, array, expectedValues) } }) @@ -3887,7 +3887,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1+arraySize, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { @@ -3897,7 +3897,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) + testArrayLoadedElements(t, array, expectedValues) } }) @@ -3912,7 +3912,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1+arraySize, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element in the middle unloadValueIndex := 1 @@ -3925,7 +3925,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) values = values[:len(values)-1] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) }) t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { @@ -3939,7 +3939,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 1+arraySize, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, 
array, values) + testArrayLoadedElements(t, array, values) i := 0 err := array.IterateLoadedValues(func(v Value) (bool, error) { @@ -3975,7 +3975,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 2, len(storage.deltas)) require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element err := storage.Remove(childSlabID) @@ -3984,7 +3984,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -3998,7 +3998,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 3, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) }) t.Run("root metadata slab with composite values", func(t *testing.T) { @@ -4012,7 +4012,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 3+arraySize, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) }) t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { @@ -4026,7 +4026,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 3+arraySize, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element from front to back for i := 0; i < len(childSlabIDs); i++ { @@ -4036,7 +4036,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + testArrayLoadedElements(t, array, expectedValues) } }) @@ -4051,7 +4051,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 3+arraySize, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element from back to front for i := len(childSlabIDs) - 1; i >= 0; i-- { @@ -4061,7 +4061,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) + testArrayLoadedElements(t, array, expectedValues) } }) @@ -4076,7 +4076,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 3+arraySize, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite element in the middle for _, index := range []int{4, 14} { @@ -4089,7 +4089,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { copy(values[index:], values[index+1:]) values = values[:len(values)-1] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4107,7 +4107,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 3+1, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) // Unload composite value err := storage.Remove(childSlabID) @@ -4116,7 +4116,7 @@ 
func TestArrayLoadedValueIterator(t *testing.T) { copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4130,7 +4130,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) require.True(t, ok) @@ -4145,7 +4145,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { values = values[childHeader.count:] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4159,7 +4159,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) require.True(t, ok) @@ -4174,7 +4174,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { values = values[:len(values)-int(childHeader.count)] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4188,7 +4188,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) require.True(t, ok) @@ -4204,7 +4204,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:]) values = values[:array.Count()-uint64(childHeader.count)] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) }) t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { @@ -4229,7 +4229,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { values = values[childHeader.count:] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4255,7 +4255,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { values = values[childHeader.count:] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4271,7 +4271,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.True(t, len(storage.deltas) > 1+arraySize) require.True(t, getArrayMetaDataSlabCount(storage) > 1) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) r := newRand(t) @@ -4291,7 +4291,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { copy(childSlabIDs[i:], childSlabIDs[i+1:]) childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } }) @@ -4307,7 +4307,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.True(t, len(storage.deltas) > 1+arraySize) require.True(t, getArrayMetaDataSlabCount(storage) > 1) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) require.True(t, ok) @@ -4354,7 +4354,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) values 
= values[:len(values)-slabInfoToUnload.count] - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } require.Equal(t, 0, len(values)) @@ -4372,7 +4372,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { require.True(t, len(storage.deltas) > 1+arraySize) require.True(t, getArrayMetaDataSlabCount(storage) > 1) - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) type slabInfo struct { id SlabID @@ -4500,7 +4500,7 @@ func TestArrayLoadedValueIterator(t *testing.T) { values = values[:len(values)-slabInfoToBeRemoved.count] } - verifyArrayLoadedElements(t, array, values) + testArrayLoadedElements(t, array, values) } require.Equal(t, 0, len(values)) @@ -4625,7 +4625,7 @@ func createArrayWithSimpleAndChildArrayValues( return array, expectedValues, childSlabID } -func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { +func testArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { i := 0 err := array.IterateLoadedValues(func(v Value) (bool, error) { require.True(t, i < len(expectedValues)) @@ -4735,7 +4735,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Get inlined child array e, err := parentArray.Get(0) @@ -4782,7 +4782,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Add one more element to child array which triggers inlined child array slab becomes standalone slab @@ -4808,7 +4808,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove elements from child array which triggers standalone array slab becomes inlined slab again. 
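The removal loop below drives the reverse transition. The criterion it exercises, sketched with the methods this patch adds to ArraySlab (the helper itself is illustrative):

    // a child array can be inlined again only once the whole array fits in
    // its root data slab and that slab's inlined encoding is within the
    // size limit enforced by the parent
    func canInlineAgain(child ArraySlab, maxInlineSize uint64) bool {
        return child.IsData() && child.Inlinable(maxInlineSize)
    }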
for childArray.Count() > 0 { @@ -4833,7 +4833,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } require.Equal(t, uint64(0), childArray.Count()) @@ -4860,7 +4860,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) children := make([]struct { array *Array @@ -4920,7 +4920,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -4959,7 +4959,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Remove one element from child array which triggers standalone array slab becomes inlined slab again. @@ -4995,7 +4995,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Remove remaining elements from inlined child array @@ -5029,7 +5029,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5060,7 +5060,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) children := make([]struct { array *Array @@ -5115,7 +5115,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5149,7 +5149,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, 
parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Parent array has one data slab and all child arrays are not inlined. @@ -5181,7 +5181,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Parent array has 1 meta data slab and 2 data slabs. @@ -5216,7 +5216,7 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5258,7 +5258,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Get inlined child array e, err := parentArray.Get(0) @@ -5336,7 +5336,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Add one more element to grand child array which triggers inlined child array slab (NOT grand child array slab) becomes standalone slab @@ -5379,7 +5379,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove elements from grand child array which triggers standalone child array slab becomes inlined slab again. 
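The "(NOT grand child array slab)" comments above capture the key subtlety of nested inlining: the inline size limit is enforced by each immediate parent, and an inlined child's encoded size includes any grand children inlined within it. With illustrative numbers: if the limit were 100 bytes, a grand child encoding to 60 bytes still fits inside its child, but a child whose own prefix and elements bring the total to 110 bytes no longer fits inside the parent, so it is the child slab that becomes standalone and is referenced by SlabID, while the grand child stays inlined within it.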
for gchildArray.Count() > 0 { @@ -5424,7 +5424,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } require.Equal(t, uint64(0), gchildArray.Count()) @@ -5453,7 +5453,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Get inlined child array e, err := parentArray.Get(0) @@ -5531,7 +5531,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Add one more element to grand child array which triggers inlined grand child array slab (NOT child array slab) becomes standalone slab @@ -5576,7 +5576,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove elements from grand child array which triggers standalone child array slab becomes inlined slab again. 
for gchildArray.Count() > 0 { @@ -5620,7 +5620,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } require.Equal(t, uint64(0), gchildArray.Count()) @@ -5678,7 +5678,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) type arrayInfo struct { array *Array @@ -5769,7 +5769,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5813,7 +5813,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because the child array is no longer inlined. 
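Condensed, the transition sequence these subtests walk through looks like the following (method names from this patch and the existing atree API; error handling elided, and the exact element counts depend on the slab size thresholds):

    child, _ := NewArray(storage, address, typeInfo)
    _ = parentArray.Append(child) // a small child array is stored inline in the parent

    for child.Inlined() {
        _ = child.Append(Uint64Value(0)) // growing past the parent's inline limit...
    }
    // ...turns the child into a standalone slab, referenced by SlabID

    for !child.Inlined() && child.Count() > 0 {
        _, _ = child.Remove(child.Count() - 1) // shrinking re-inlines it
    }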
@@ -5865,7 +5865,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) @@ -5918,7 +5918,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -5974,7 +5974,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { // Test parent array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) type arrayInfo struct { array *Array @@ -6065,7 +6065,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -6112,7 +6112,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because the child array is no longer inlined. @@ -6165,7 +6165,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Parent array has one root data slab and 4 grand child arrays with standalone root data slabs. @@ -6217,7 +6217,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Parent array has 1 metadata slab and two data slabs; all child and grand child arrays are inlined. 
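Because an inlined child is physically part of its parent's encoding, every byte added at the deepest level must bubble up through each enclosing slab; that is what the repeated ByteSize assertions in these subtests pin down. A self-contained toy model of the propagation (not atree's actual types):

    type node struct {
        parent *node
        size   int // encoded size, including inlined children
    }

    func (n *node) grow(delta int) {
        n.size += delta
        if n.parent != nil {
            n.parent.grow(delta) // an inlined child's bytes are also the parent's bytes
        }
    }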
@@ -6271,7 +6271,7 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } @@ -6309,7 +6309,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) children := make([]*struct { array *Array @@ -6381,7 +6381,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // insert value at index 2, so only second child array index is moved by +1 @@ -6426,7 +6426,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // insert value at index 4, so none of child array indexes are affected. @@ -6466,7 +6466,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } }) @@ -6511,7 +6511,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Remove value at index 1, so only second child array index is moved by -1 @@ -6556,7 +6556,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } // Remove value at index 2 (last element), so none of child array indexes are affected. 
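The index bookkeeping behind these cases: mutableElementIndex maps a mutable element's ValueID to its current position in the parent, so inserting or removing at position i must shift every tracked index at or beyond i. A sketch of the insert-side adjustment (illustrative helper, using the patch's ValueID type):

    // after the parent inserts at position insertedAt, every tracked
    // mutable element at or beyond that position moves up by one
    func shiftIndexesAfterInsert(index map[ValueID]uint64, insertedAt uint64) {
        for id, i := range index {
            if i >= insertedAt {
                index[id] = i + 1
            }
        }
    }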
@@ -6596,7 +6596,7 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) { require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } }) } @@ -6718,7 +6718,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child array value for i := 0; i < arraySize; i++ { @@ -6743,7 +6743,7 @@ func TestArraySetReturnedValue(t *testing.T) { require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) } - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) t.Run("child array is inlined", func(t *testing.T) { @@ -6778,7 +6778,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child array value for i := 0; i < arraySize; i++ { @@ -6803,7 +6803,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) t.Run("child map is not inlined", func(t *testing.T) { @@ -6850,7 +6850,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child map value for i := 0; i < arraySize; i++ { @@ -6875,7 +6875,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) t.Run("child map is inlined", func(t *testing.T) { @@ -6915,7 +6915,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite existing child map value for i := 0; i < arraySize; i++ { @@ -6940,7 +6940,7 @@ func TestArraySetReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + 
testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) } @@ -6987,7 +6987,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child array value for i := 0; i < arraySize; i++ { @@ -7009,7 +7009,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.Equal(t, 0, len(parentArray.mutableElementIndex)) - verifyEmptyArray(t, storage, typeInfo, address, parentArray) + testEmptyArray(t, storage, typeInfo, address, parentArray) }) t.Run("child array is inlined", func(t *testing.T) { @@ -7044,7 +7044,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child array value for i := 0; i < arraySize; i++ { @@ -7066,7 +7066,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.Equal(t, 0, len(parentArray.mutableElementIndex)) - verifyEmptyArray(t, storage, typeInfo, address, parentArray) + testEmptyArray(t, storage, typeInfo, address, parentArray) }) t.Run("child map is not inlined", func(t *testing.T) { @@ -7113,7 +7113,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child map value for i := 0; i < arraySize; i++ { @@ -7135,7 +7135,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.Equal(t, 0, len(parentArray.mutableElementIndex)) - verifyEmptyArray(t, storage, typeInfo, address, parentArray) + testEmptyArray(t, storage, typeInfo, address, parentArray) }) t.Run("child map is inlined", func(t *testing.T) { @@ -7175,7 +7175,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child map value for i := 0; i < arraySize; i++ { @@ -7197,7 +7197,7 @@ func TestArrayRemoveReturnedValue(t *testing.T) { // Test array's mutableElementIndex require.Equal(t, 0, len(parentArray.mutableElementIndex)) - verifyEmptyArray(t, storage, typeInfo, address, parentArray) + testEmptyArray(t, storage, typeInfo, address, parentArray) }) } @@ -7230,7 +7230,7 @@ func TestArrayWithOutdatedCallback(t *testing.T) { expectedValues = append(expectedValues, arrayValue{v}) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Overwrite child array value from parent valueStorable, err := parentArray.Set(0, Uint64Value(0)) @@ -7285,7 +7285,7 @@ func TestArrayWithOutdatedCallback(t *testing.T) { expectedValues = 
append(expectedValues, arrayValue{v}) - verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) // Remove child array value from parent valueStorable, err := parentArray.Remove(0) diff --git a/map.go b/map.go index f361119b..e3144822 100644 --- a/map.go +++ b/map.go @@ -2505,7 +2505,7 @@ func DecodeInlinedCompactMapStorable( size := uint32(hkeyElementsPrefixSize) elems := make([]element, elemCount) for i := 0; i < int(elemCount); i++ { - value, err := decodeStorable(dec, parentSlabID, inlinedExtraData) + value, err := decodeStorable(dec, slabID, inlinedExtraData) if err != nil { return nil, err } @@ -2623,7 +2623,7 @@ func DecodeInlinedMapStorable( slabID := NewSlabID(parentSlabID.address, index) // Decode elements - elements, err := newElementsFromData(dec, decodeStorable, parentSlabID, inlinedExtraData) + elements, err := newElementsFromData(dec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). return nil, err diff --git a/map_debug.go b/map_debug.go index 1d9a8fea..3b444350 100644 --- a/map_debug.go +++ b/map_debug.go @@ -247,6 +247,10 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { } func VerifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { + return verifyMap(m, address, typeInfo, tic, hip, inlineEnabled, map[SlabID]struct{}{}) +} + +func verifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { // Verify map address (independent of map inlined status) if address != m.Address() { @@ -297,7 +301,7 @@ func VerifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoCo } computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := v.verifySlab( - m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) + m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return err @@ -357,6 +361,7 @@ func (v *mapVerifier) verifySlab( dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -367,6 +372,13 @@ func (v *mapVerifier) verifySlab( id := slab.Header().slabID + // Verify SlabID is unique + if _, exist := slabIDs[id]; exist { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("found duplicate slab ID %s", id)) + } + + slabIDs[id] = struct{}{} + // Verify slab address (independent of map inlined status) if v.address != id.address { return 0, nil, nil, nil, NewFatalError(fmt.Errorf("map slab address %v, got %v", v.address, id.address)) @@ -413,10 +425,10 @@ func (v *mapVerifier) verifySlab( switch slab := slab.(type) { case *MapDataSlab: - return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) case *MapMetaDataSlab: - return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys) + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) default: return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) @@ -429,6 +441,7 @@ func (v *mapVerifier) verifyDataSlab( dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -443,7 +456,7 @@ func (v *mapVerifier) verifyDataSlab( } // Verify data slab's elements - elementCount, elementSize, err := v.verifyElements(id, dataSlab.elements, 0, nil) + elementCount, elementSize, err := v.verifyElements(id, dataSlab.elements, 0, nil, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyElements(). return 0, nil, nil, nil, err @@ -508,6 +521,7 @@ func (v *mapVerifier) verifyMetaDataSlab( dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -547,7 +561,7 @@ func (v *mapVerifier) verifyMetaDataSlab( // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return 0, nil, nil, nil, err @@ -600,6 +614,7 @@ func (v *mapVerifier) verifyElements( elements elements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -608,9 +623,9 @@ func (v *mapVerifier) verifyElements( switch elems := elements.(type) { case *hkeyElements: - return v.verifyHkeyElements(id, elems, digestLevel, hkeyPrefixes) + return v.verifyHkeyElements(id, elems, digestLevel, hkeyPrefixes, slabIDs) case *singleElements: - return v.verifySingleElements(id, elems, digestLevel, hkeyPrefixes) + return v.verifySingleElements(id, elems, digestLevel, hkeyPrefixes, slabIDs) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } @@ -621,6 +636,7 @@ func (v *mapVerifier) verifyHkeyElements( elements *hkeyElements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -687,7 +703,7 @@ func (v *mapVerifier) verifyHkeyElements( return 0, 0, err } - count, size, err := v.verifyElements(id, group, digestLevel+1, hkeys) + count, size, err := v.verifyElements(id, group, digestLevel+1, hkeys, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyElements(). return 0, 0, err @@ -710,7 +726,7 @@ func (v *mapVerifier) verifyHkeyElements( case *singleElement: // Verify element - computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeys) + computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeys, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifySingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -745,6 +761,7 @@ func (v *mapVerifier) verifySingleElements( elements *singleElements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -763,7 +780,7 @@ func (v *mapVerifier) verifySingleElements( for _, e := range elements.elems { // Verify element - computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeyPrefixes) + computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeyPrefixes, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifySingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) @@ -797,6 +814,7 @@ func (v *mapVerifier) verifySingleElements( func (v *mapVerifier) verifySingleElement( e *singleElement, digests []Digest, + slabIDs map[SlabID]struct{}, ) ( size uint32, digestMaxLevel uint, @@ -834,7 +852,7 @@ func (v *mapVerifier) verifySingleElement( return 0, 0, NewFatalError(fmt.Errorf("element %s key shouldn't be inlined array or map", e)) } - err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled) + err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyValue(). return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err) @@ -870,7 +888,7 @@ func (v *mapVerifier) verifySingleElement( } } - err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled) + err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) @@ -902,12 +920,12 @@ func (v *mapVerifier) verifySingleElement( return computedSize, digest.Levels(), nil } -func verifyValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { +func verifyValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { switch v := value.(type) { case *Array: - return VerifyArray(v, address, typeInfo, tic, hip, inlineEnabled) + return verifyArray(v, address, typeInfo, tic, hip, inlineEnabled, slabIDs) case *OrderedMap: - return VerifyMap(v, address, typeInfo, tic, hip, inlineEnabled) + return verifyMap(v, address, typeInfo, tic, hip, inlineEnabled, slabIDs) } return nil } diff --git a/map_test.go b/map_test.go index ef64e67b..6cdb0144 100644 --- a/map_test.go +++ b/map_test.go @@ -89,27 +89,27 @@ func (h *errorDigesterBuilder) Digest(_ HashInputProvider, _ Value) (Digester, e return nil, h.err } -func verifyEmptyMapV0( +func testEmptyMapV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, m *OrderedMap, ) { - verifyMapV0(t, storage, typeInfo, address, m, nil, nil, false) + testMapV0(t, storage, typeInfo, address, m, nil, nil, false) } -func verifyEmptyMap( +func testEmptyMap( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, m *OrderedMap, ) { - verifyMap(t, storage, typeInfo, address, m, nil, nil, false) + testMap(t, storage, typeInfo, address, m, nil, nil, false) } -func verifyMapV0( +func testMapV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -119,10 +119,10 @@ func verifyMapV0( sortedKeys []Value, hasNestedArrayMapElement bool, ) { - _verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, false) + _testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, false) } -func verifyMap( +func testMap( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -132,12 +132,12 @@ func verifyMap( sortedKeys []Value, hasNestedArrayMapElement bool, ) { - _verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, true) + _testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, true) } -// verifyMap verifies map elements and validates serialization and in-memory slab tree. +// _testMap verifies map elements and validates serialization and in-memory slab tree. // It also verifies elements ordering if sortedKeys is not nil. 
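Taken together, the map_debug.go hunks above thread one shared slabIDs set through verifySlab, verifyElements, and verifySingleElement, and verifyValue hands the same set to nested arrays and maps, so a slab ID reachable from two places anywhere in the object graph is reported. The essence of the new check, condensed from the added lines:

    seen := map[SlabID]struct{}{} // shared across the whole verification walk
    checkUnique := func(id SlabID) error {
        if _, ok := seen[id]; ok {
            return NewFatalError(fmt.Errorf("found duplicate slab ID %s", id))
        }
        seen[id] = struct{}{}
        return nil
    }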
-func _verifyMap( +func _testMap( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -311,7 +311,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("replicate keys", func(t *testing.T) { @@ -363,7 +363,7 @@ func TestMapSetAndGet(t *testing.T) { keyValues[k] = newValue } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("random key and value", func(t *testing.T) { @@ -398,7 +398,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("unique keys with hash collision", func(t *testing.T) { @@ -447,7 +447,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("replicate keys with hash collision", func(t *testing.T) { @@ -511,7 +511,7 @@ func TestMapSetAndGet(t *testing.T) { keyValues[k] = newValue } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -548,7 +548,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision", func(t *testing.T) { @@ -591,7 +591,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision group", func(t *testing.T) { @@ -634,7 +634,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -800,7 +800,7 @@ func TestMapRemove(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, tc.keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, tc.keyValues, nil, false) count := len(tc.keyValues) @@ -816,7 +816,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) } @@ -901,7 +901,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) + testMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) // Remove remaining elements for k, v := range nonCollisionKeyValues { @@ -915,7 +915,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("collision with data root", func(t *testing.T) { @@ -984,7 +984,7 @@ func TestMapRemove(t *testing.T) { 
require.Equal(t, uint64(count), m.Count()) } - verifyMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) + testMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) // Remove remaining elements for k, v := range nonCollisionKeyValues { @@ -998,7 +998,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("no collision key not found", func(t *testing.T) { @@ -1034,7 +1034,7 @@ func TestMapRemove(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision key not found", func(t *testing.T) { @@ -1078,7 +1078,7 @@ func TestMapRemove(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -1156,7 +1156,7 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(mapSize), i) - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) t.Run("collision", func(t *testing.T) { @@ -1244,7 +1244,7 @@ func TestMapIterate(t *testing.T) { t.Log("iterated values") - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) } @@ -1310,7 +1310,7 @@ func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLeve require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) stats, err := GetMapStats(m) require.NoError(t, err) @@ -1340,7 +1340,7 @@ func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLeve } } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) } func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { @@ -1382,7 +1382,7 @@ func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Remove all elements for k, v := range keyValues { @@ -1408,7 +1408,7 @@ func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) } } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) } func TestMapHashCollision(t *testing.T) { @@ -1557,7 +1557,7 @@ func TestMapSetRemoveRandomValues(t *testing.T) { m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) } func TestMapDecodeV0(t *testing.T) { @@ -1614,7 +1614,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, NewDefaultDigesterBuilder()) require.NoError(t, err) - verifyEmptyMapV0(t, storage, typeInfo, address, decodedMap) + testEmptyMapV0(t, storage, typeInfo, address, decodedMap) }) t.Run("dataslab as 
root", func(t *testing.T) { @@ -1687,7 +1687,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("has pointer no collision", func(t *testing.T) { @@ -1900,7 +1900,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMapV0(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 1 level", func(t *testing.T) { @@ -2074,7 +2074,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 2 levels", func(t *testing.T) { @@ -2298,7 +2298,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("external collision", func(t *testing.T) { @@ -2517,7 +2517,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) } @@ -2584,7 +2584,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, NewDefaultDigesterBuilder()) require.NoError(t, err) - verifyEmptyMap(t, storage2, typeInfo, address, decodedMap) + testEmptyMap(t, storage2, typeInfo, address, decodedMap) }) t.Run("dataslab as root", func(t *testing.T) { @@ -2673,7 +2673,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("has inlined array", func(t *testing.T) { @@ -2900,7 +2900,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("root data slab, inlined child map of same type", func(t *testing.T) { @@ -3081,7 +3081,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("root data slab, inlined child map of different type", func(t *testing.T) { @@ -3270,7 +3270,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + 
testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("root data slab, multiple levels of inlined child map of same type", func(t *testing.T) { @@ -3523,7 +3523,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("root data slab, multiple levels of inlined child map of different type", func(t *testing.T) { @@ -3795,7 +3795,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("root metadata slab, inlined child map of same type", func(t *testing.T) { @@ -4261,7 +4261,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("root metadata slab, inlined child map of different type", func(t *testing.T) { @@ -4742,7 +4742,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 1 level", func(t *testing.T) { @@ -4929,7 +4929,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 2 levels", func(t *testing.T) { @@ -5166,7 +5166,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("external collision", func(t *testing.T) { @@ -5396,7 +5396,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("pointer to child map", func(t *testing.T) { @@ -5575,7 +5575,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("pointer to grand child map", func(t *testing.T) { @@ -5801,7 +5801,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("pointer to 
child array", func(t *testing.T) { @@ -6050,7 +6050,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("pointer to grand child array", func(t *testing.T) { @@ -6238,7 +6238,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("pointer to storable slab", func(t *testing.T) { @@ -6534,7 +6534,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("same composite with two fields (same order)", func(t *testing.T) { @@ -6708,7 +6708,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("same composite with two fields (different order)", func(t *testing.T) { @@ -6885,7 +6885,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("same composite with different fields", func(t *testing.T) { @@ -7132,7 +7132,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("same composite with different number of fields", func(t *testing.T) { @@ -7324,7 +7324,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("different composite", func(t *testing.T) { @@ -7567,7 +7567,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) } @@ -7584,7 +7584,7 @@ func TestMapEncodeDecodeRandomValues(t *testing.T) { m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Create a new storage with encoded data from base storage storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) @@ -7593,7 +7593,7 @@ func TestMapEncodeDecodeRandomValues(t *testing.T) { m2, err := NewMapWithRootID(storage2, 
m.SlabID(), m.digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) } func TestMapStoredValue(t *testing.T) { @@ -7643,7 +7643,7 @@ func TestMapStoredValue(t *testing.T) { m2, ok := value.(*OrderedMap) require.True(t, ok) - verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m2, keyValues, nil, false) } else { require.Equal(t, 1, errorCategorizationCount(err)) var fatalError *FatalError @@ -7679,7 +7679,7 @@ func TestMapPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(0), i) - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("root-dataslab", func(t *testing.T) { @@ -7730,7 +7730,7 @@ func TestMapPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, i) - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("root-metaslab", func(t *testing.T) { @@ -7786,7 +7786,7 @@ func TestMapPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, i) - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("collision", func(t *testing.T) { @@ -7855,7 +7855,7 @@ func TestMapPopIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, i) - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) } @@ -7954,7 +7954,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, copied.SlabID(), m.SlabID()) - verifyEmptyMap(t, storage, typeInfo, address, copied) + testEmptyMap(t, storage, typeInfo, address, copied) }) t.Run("root-dataslab", func(t *testing.T) { @@ -8015,7 +8015,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, copied.SlabID(), m.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) t.Run("root-metaslab", func(t *testing.T) { @@ -8074,7 +8074,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) t.Run("rebalance two data slabs", func(t *testing.T) { @@ -8139,7 +8139,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) t.Run("merge two data slabs", func(t *testing.T) { @@ -8208,7 +8208,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) t.Run("random", func(t *testing.T) { @@ -8271,7 +8271,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) 
t.Run("collision", func(t *testing.T) { @@ -8353,7 +8353,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) t.Run("data slab too large", func(t *testing.T) { @@ -8435,7 +8435,7 @@ func TestMapFromBatchData(t *testing.T) { require.NoError(t, err) require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) }) } @@ -8468,7 +8468,7 @@ func TestMapNestedStorables(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) + testMap(t, storage, typeInfo, address, m, keyValues, nil, true) }) t.Run("Array", func(t *testing.T) { @@ -8506,7 +8506,7 @@ func TestMapNestedStorables(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) + testMap(t, storage, typeInfo, address, m, keyValues, nil, true) }) } @@ -8540,7 +8540,7 @@ func TestMapMaxInlineElement(t *testing.T) { // slab id size (next slab id is omitted in root slab) require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) } func TestMapString(t *testing.T) { @@ -8822,7 +8822,7 @@ func TestMaxCollisionLimitPerDigest(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Insert elements exceeding collision limits collisionKeyValues := make(map[Value]Value, mapSize) @@ -8847,7 +8847,7 @@ func TestMaxCollisionLimitPerDigest(t *testing.T) { } // Verify that no new elements exceeding collision limit inserted - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Update elements within collision limits for k := range keyValues { @@ -8858,7 +8858,7 @@ func TestMaxCollisionLimitPerDigest(t *testing.T) { require.NotNil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision limit > 0", func(t *testing.T) { @@ -8896,7 +8896,7 @@ func TestMaxCollisionLimitPerDigest(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Insert elements exceeding collision limits collisionKeyValues := make(map[Value]Value, mapSize) @@ -8921,7 +8921,7 @@ func TestMaxCollisionLimitPerDigest(t *testing.T) { } // Verify that no new elements exceeding collision limit inserted - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Update elements within collision limits for k := range keyValues { @@ -8932,7 +8932,7 @@ func TestMaxCollisionLimitPerDigest(t *testing.T) { require.NotNil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -8956,7 +8956,7 @@ func 
TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, nil) + testMapLoadedElements(t, m, nil) }) t.Run("root data slab with simple values", func(t *testing.T) { @@ -8976,7 +8976,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with composite values", func(t *testing.T) { @@ -8997,7 +8997,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with composite values in collision group", func(t *testing.T) { @@ -9019,7 +9019,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with composite values in external collision group", func(t *testing.T) { @@ -9041,7 +9041,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { @@ -9062,7 +9062,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from front to back. for i := 0; i < len(values); i++ { @@ -9070,7 +9070,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9085,7 +9085,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload external key from front to back. for i := 0; i < len(values); i++ { @@ -9113,7 +9113,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9136,7 +9136,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from front to back. 
for i := 0; i < len(values); i++ { @@ -9144,7 +9144,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9167,7 +9167,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from front to back for i := 0; i < len(values); i++ { @@ -9175,7 +9175,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9198,7 +9198,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload external collision group slab from front to back @@ -9226,7 +9226,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i*4+4:] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9248,7 +9248,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from back to front. for i := len(values) - 1; i >= 0; i-- { @@ -9256,7 +9256,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9271,7 +9271,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from back to front. 
for i := len(values) - 1; i >= 0; i-- { @@ -9299,7 +9299,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9322,7 +9322,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { @@ -9330,7 +9330,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9353,7 +9353,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { @@ -9361,7 +9361,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9384,7 +9384,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload external slabs from back to front var externalCollisionSlabIDs []SlabID @@ -9411,7 +9411,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i*4] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9433,7 +9433,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload value in the middle unloadValueIndex := 1 @@ -9444,7 +9444,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) values = values[:len(values)-1] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { @@ -9458,7 +9458,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload key in the middle. 
unloadValueIndex := 1 @@ -9489,7 +9489,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) values = values[:len(values)-1] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { @@ -9511,7 +9511,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element in the middle for _, unloadValueIndex := range []int{1, 3, 5} { @@ -9524,7 +9524,7 @@ func TestMapLoadedValueIterator(t *testing.T) { values[2], values[4], } - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) }) t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { @@ -9546,7 +9546,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite value in the middle. for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { @@ -9562,7 +9562,7 @@ func TestMapLoadedValueIterator(t *testing.T) { values[8], values[10], } - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) }) t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { @@ -9584,7 +9584,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+3+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload external slabs in the middle. 
var externalCollisionSlabIDs []SlabID @@ -9613,7 +9613,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(values[4:], values[8:]) values = values[:8] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { @@ -9634,7 +9634,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 1+mapSize, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) i := 0 err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { @@ -9679,7 +9679,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 2, len(storage.deltas)) require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite value err := storage.Remove(childSlabID) @@ -9688,7 +9688,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } }) @@ -9709,7 +9709,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root metadata slab with composite values", func(t *testing.T) { @@ -9730,7 +9730,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4+mapSize, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { @@ -9751,7 +9751,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4+mapSize, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from front to back for i := 0; i < len(values); i++ { @@ -9759,7 +9759,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9781,7 +9781,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4+mapSize, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element from back to front for i := len(values) - 1; i >= 0; i-- { @@ -9789,7 +9789,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.NoError(t, err) expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -9811,7 +9811,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4+mapSize, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) // Unload composite element in the middle for _, index := range []int{4, 14} { @@ -9824,7 +9824,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(childSlabIDs[index:], childSlabIDs[index+1:]) childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - verifyMapLoadedElements(t, m, 
values) + testMapLoadedElements(t, m, values) } }) @@ -9850,7 +9850,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 5, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) err := storage.Remove(childSlabID) require.NoError(t, err) @@ -9858,7 +9858,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(values[childArrayIndex:], values[childArrayIndex+1:]) values = values[:len(values)-1] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } }) @@ -9880,7 +9880,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) @@ -9902,7 +9902,7 @@ func TestMapLoadedValueIterator(t *testing.T) { values = values[count:] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } }) @@ -9924,7 +9924,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) @@ -9946,7 +9946,7 @@ func TestMapLoadedValueIterator(t *testing.T) { values = values[:len(values)-int(count)] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } }) @@ -9968,7 +9968,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.Equal(t, 4, len(storage.deltas)) require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) @@ -9996,7 +9996,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) values = values[:m.Count()-uint64(countAtIndex1)] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) }) t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { @@ -10034,7 +10034,7 @@ func TestMapLoadedValueIterator(t *testing.T) { expectedValues = values[int(nextChildHeader.firstKey):] } - verifyMapLoadedElements(t, m, expectedValues) + testMapLoadedElements(t, m, expectedValues) } }) @@ -10069,7 +10069,7 @@ func TestMapLoadedValueIterator(t *testing.T) { // Use firstKey to deduce number of elements in slabs. 
values = values[:childHeader.firstKey] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } }) @@ -10092,7 +10092,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.True(t, len(storage.deltas) > 1+mapSize) require.True(t, getMapMetaDataSlabCount(storage) > 1) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) r := newRand(t) @@ -10110,7 +10110,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(childSlabIDs[i:], childSlabIDs[i+1:]) childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } }) @@ -10133,7 +10133,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.True(t, len(storage.deltas) > 1+mapSize) require.True(t, getMapMetaDataSlabCount(storage) > 1) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) @@ -10187,7 +10187,7 @@ func TestMapLoadedValueIterator(t *testing.T) { copy(dataSlabInfos[index:], dataSlabInfos[index+1:]) dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } require.Equal(t, 0, len(values)) @@ -10212,7 +10212,7 @@ func TestMapLoadedValueIterator(t *testing.T) { require.True(t, len(storage.deltas) > 1+mapSize) require.True(t, getMapMetaDataSlabCount(storage) > 1) - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) type slabInfo struct { id SlabID @@ -10348,7 +10348,7 @@ func TestMapLoadedValueIterator(t *testing.T) { values = values[:len(values)-slabInfoToBeRemoved.count] } - verifyMapLoadedElements(t, m, values) + testMapLoadedElements(t, m, values) } require.Equal(t, 0, len(values)) @@ -10539,7 +10539,7 @@ func createMapWithSimpleAndChildArrayValues( return m, values, slabID } -func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { +func testMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { i := 0 err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { require.True(t, i < len(expectedValues)) @@ -10599,7 +10599,7 @@ func TestMaxInlineMapValueSize(t *testing.T) { // Both key and value are stored in map slab. require.Equal(t, 1, len(storage.deltas)) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("max size key", func(t *testing.T) { @@ -10637,7 +10637,7 @@ func TestMaxInlineMapValueSize(t *testing.T) { // Key is stored in map slab, while value is stored separately in storable slab. require.Equal(t, 2, len(storage.deltas)) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("large key", func(t *testing.T) { @@ -10677,7 +10677,7 @@ func TestMaxInlineMapValueSize(t *testing.T) { // Key is stored in separate storable slabs, while value is stored in map slab. require.Equal(t, 2, len(storage.deltas)) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -10784,7 +10784,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) @@ -10824,7 +10824,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentElementSize*mapSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -10865,7 +10865,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentElementSize*mapSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } // Remove elements from child map, which triggers the standalone map slab to become an inlined slab again. @@ -10904,7 +10904,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentElementSize*mapSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -10938,7 +10938,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) @@ -10978,7 +10978,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += expectedChildElementSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -11021,7 +11021,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There is more than 1 stored slab because child map is no longer inlined. @@ -11067,7 +11067,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += expectedInlinedMapSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } // Remove remaining elements from each inlined child map. 
@@ -11105,7 +11105,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize -= expectedChildElementSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -11139,7 +11139,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) @@ -11172,7 +11172,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -11209,7 +11209,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } // Parent map has one root data slab. @@ -11247,7 +11247,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } // Parent map has one metadata slab + 2 data slabs. @@ -11284,7 +11284,7 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -11337,7 +11337,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) require.Equal(t, mapSize, len(children)) @@ -11407,13 +11407,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += expectedGrandChildElementSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Add one more element to grand child map, which triggers the inlined child map slab (NOT the grand child map slab) to become a standalone slab for childKey, child := range children { @@ -11479,13 +11479,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(SlabID{}).ByteSize() require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child map is not inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove elements from grand child map, which triggers the standalone child map slab to become an inlined slab again. for childKey, child := range children { @@ -11550,7 +11550,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentElementSize*uint32(parentMap.Count()) require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.Equal(t, uint64(0), gchildMap.Count()) @@ -11561,7 +11561,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers grand child map slab to become standalone slab", func(t *testing.T) { @@ -11595,7 +11595,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) require.Equal(t, mapSize, len(children)) @@ -11665,13 +11665,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += expectedGrandChildElementSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) gchildLargeElementKeys := make(map[Value]Value) // key: child map key, value: gchild map key // Add one large element to grand child map, which triggers the inlined grand child map slab (NOT the child map slab) to become a standalone slab for childKey, child := range children { @@ -11740,13 +11740,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child map is not inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove elements from grand child map, which triggers the standalone child map slab to become an inlined slab again. for childKey, child := range children { @@ -11817,7 +11817,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentElementSize*uint32(parentMap.Count()) require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.Equal(t, uint64(0), gchildMap.Count()) @@ -11828,7 +11828,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("parent is root data slab, two child maps, one grand child map each, changes to child map triggers child map slab to become standalone slab", func(t *testing.T) { @@ -11860,7 +11860,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) require.Equal(t, mapSize, len(children)) @@ -11928,13 +11928,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += expectedGrandChildElementSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) expectedParentSize = parentMap.root.ByteSize() @@ -11995,13 +11995,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentSize += digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Add 1 more element to each child map so child map reaches its max size i := 0 @@ -12059,7 +12059,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*2 + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.True(t, parentMap.root.IsData()) @@ -12070,7 +12070,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { (singleElementPrefixSize+digestSize+encodedKeySize+slabIDStorableSize)*mapSize require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) expectedParentMapSize := parentMap.root.ByteSize() @@ -12135,14 +12135,14 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentMapSize = expectedParentMapSize - slabIDStorableSize + expectedChildMapSize require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.Equal(t, uint64(mapSize), parentMap.Count()) require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. 
- verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // remove remaining elements from child map, except for grand child map for childKey, child := range children { @@ -12205,7 +12205,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentMapSize -= digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.Equal(t, uint64(1), gchildMap.Count()) @@ -12216,7 +12216,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("parent is root metadata slab, with four child maps, each child map has grand child maps", func(t *testing.T) { @@ -12248,7 +12248,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) require.Equal(t, mapSize, len(children)) @@ -12306,7 +12306,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.False(t, parentMap.Inlined()) @@ -12314,7 +12314,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // There is 3 stored slab: parent metadata slab with 2 data slabs (all child and grand child maps are inlined) require.Equal(t, 3, getStoredDeltas(storage)) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Insert 1 element to grand child map // - grand child maps are inlined @@ -12371,7 +12371,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.False(t, parentMap.Inlined()) @@ -12384,7 +12384,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedParentElementSize*uint32(parentMap.Count()) require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove one 
element from grand child map to trigger child map inlined again. // - grand child maps are inlined @@ -12447,14 +12447,14 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } require.Equal(t, uint64(mapSize), parentMap.Count()) require.False(t, parentMap.root.IsData()) require.Equal(t, 3, getStoredDeltas(storage)) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove all grand child element to trigger // - child maps are inlined @@ -12503,7 +12503,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.Equal(t, SlabIDUndefined, childMap.SlabID()) require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) @@ -12516,7 +12516,7 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) expectedParentMapSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + @@ -12589,7 +12589,7 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { require.True(t, parentMap.root.IsData()) require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
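The SlabID/ValueID assertions here capture the identity invariant behind inlining: a value's ValueID never changes as it moves between inlined and standalone storage, while its SlabID is only meaningful when it occupies its own slab. A hedged sketch of checking that invariant through the exported API, assuming SlabIDUndefined and ValueID are exported as these tests use them:

import (
	"fmt"

	"github.com/onflow/atree"
)

// Sketch: the identity invariant asserted above. An inlined child map has
// no slab of its own, so its SlabID is undefined; its ValueID, however,
// must remain stable across inline/uninline transitions.
func checkInlinedIdentity(childMap *atree.OrderedMap, wantValueID atree.ValueID) error {
	if childMap.Inlined() && childMap.SlabID() != atree.SlabIDUndefined {
		return fmt.Errorf("inlined map has unexpected slab ID %s", childMap.SlabID())
	}
	if childMap.ValueID() != wantValueID {
		return fmt.Errorf("value ID changed: got %v, want %v", childMap.ValueID(), wantValueID)
	}
	return nil
}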
- verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) children := getInlinedChildMapsFromParentMap(t, address, parentMap) require.Equal(t, mapSize, len(children)) @@ -12648,7 +12648,7 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } @@ -12695,7 +12695,7 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { expectedChildElementSize*uint32(childMap.Count()) require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } }) @@ -12907,7 +12907,7 @@ func TestMapSetReturnedValue(t *testing.T) { expectedKeyValues[k] = expectedChildValues } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Overwrite existing child array value for k := range expectedKeyValues { @@ -12929,7 +12929,7 @@ func TestMapSetReturnedValue(t *testing.T) { expectedKeyValues[k] = Uint64Value(0) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("child array is inlined", func(t *testing.T) { @@ -12964,7 +12964,7 @@ func TestMapSetReturnedValue(t *testing.T) { expectedKeyValues[k] = arrayValue{v} } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Overwrite existing child array value for k := range expectedKeyValues { @@ -12986,7 +12986,7 @@ func TestMapSetReturnedValue(t *testing.T) { require.NoError(t, err) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("child map is not inlined", func(t *testing.T) { @@ -13033,7 +13033,7 @@ func TestMapSetReturnedValue(t *testing.T) { } } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Overwrite existing child map value for k := range expectedKeyValues { @@ -13055,7 +13055,7 @@ func TestMapSetReturnedValue(t *testing.T) { require.NoError(t, err) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("child map is inlined", func(t *testing.T) { @@ -13093,7 +13093,7 @@ func TestMapSetReturnedValue(t *testing.T) { expectedChildValues[k] = v } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Overwrite existing child map value for k := range expectedKeyValues { @@ -13115,7 +13115,7 @@ func TestMapSetReturnedValue(t *testing.T) { require.NoError(t, err) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, 
parentMap, expectedKeyValues, nil, true) }) } @@ -13162,7 +13162,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { expectedKeyValues[k] = expectedChildValues } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove child array value for k := range expectedKeyValues { @@ -13184,7 +13184,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { delete(expectedKeyValues, k) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("child array is inlined", func(t *testing.T) { @@ -13219,7 +13219,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { expectedKeyValues[k] = arrayValue{v} } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove child array value for k := range expectedKeyValues { @@ -13241,7 +13241,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("child map is not inlined", func(t *testing.T) { @@ -13288,7 +13288,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { } } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove child map value for k := range expectedKeyValues { @@ -13310,7 +13310,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) t.Run("child map is inlined", func(t *testing.T) { @@ -13348,7 +13348,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { expectedChildValues[k] = v } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove child map value for k := range expectedKeyValues { @@ -13370,7 +13370,7 @@ func TestMapRemoveReturnedValue(t *testing.T) { require.NoError(t, err) } - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) } @@ -13406,7 +13406,7 @@ func TestMapWithOutdatedCallback(t *testing.T) { expectedKeyValues[k] = arrayValue{v} - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Overwrite child array value from parent valueStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) @@ -13464,7 +13464,7 @@ func TestMapWithOutdatedCallback(t *testing.T) { expectedKeyValues[k] = arrayValue{v} - verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) // Remove child array value from parent keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) From 903fc13b83fe1a325ef49be472e393983710ec8e Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 3 Oct 
2023 12:37:55 -0500 Subject: [PATCH 050/126] Create Go maps in inlinedExtraData lazily --- map.go | 6 +++--- typeinfo.go | 13 +++++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/map.go b/map.go index e3144822..b67674cb 100644 --- a/map.go +++ b/map.go @@ -2502,7 +2502,7 @@ func DecodeInlinedCompactMapStorable( copy(hkeys, extraData.hkeys) // Decode values - size := uint32(hkeyElementsPrefixSize) + elementsSize := uint32(hkeyElementsPrefixSize) elems := make([]element, elemCount) for i := 0; i < int(elemCount); i++ { value, err := decodeStorable(dec, slabID, inlinedExtraData) @@ -2517,7 +2517,7 @@ func DecodeInlinedCompactMapStorable( elem := &singleElement{key, value, elemSize} elems[i] = elem - size += digestSize + elem.Size() + elementsSize += digestSize + elem.Size() } // Create hkeyElements @@ -2525,7 +2525,7 @@ func DecodeInlinedCompactMapStorable( hkeys: hkeys, elems: elems, level: 0, - size: size, + size: elementsSize, } header := MapSlabHeader{ diff --git a/typeinfo.go b/typeinfo.go index 6702135a..cabb1469 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -206,10 +206,7 @@ type inlinedExtraData struct { } func newInlinedExtraData() *inlinedExtraData { - return &inlinedExtraData{ - compactMapTypes: make(map[string]compactMapTypeInfo), - arrayTypes: make(map[string]int), - } + return &inlinedExtraData{} } // Encode encodes inlined extra data as CBOR array. @@ -311,6 +308,10 @@ func newInlinedExtraDataFromData( // Array extra data is deduplicated by array type info ID because array // extra data only contains type info. func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { + if ied.arrayTypes == nil { + ied.arrayTypes = make(map[string]int) + } + id := data.TypeInfo.ID() index, exist := ied.arrayTypes[id] if exist { @@ -339,6 +340,10 @@ func (ied *inlinedExtraData) addCompactMapExtraData( keys []ComparableStorable, ) (int, []ComparableStorable) { + if ied.compactMapTypes == nil { + ied.compactMapTypes = make(map[string]compactMapTypeInfo) + } + id := makeCompactMapTypeID(data.TypeInfo, keys) info, exist := ied.compactMapTypes[id] if exist { From 8d02bbcd024cf77a14b6feb31ca7a4aadec330c7 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 3 Oct 2023 15:49:57 -0500 Subject: [PATCH 051/126] Rename variables for clarity --- map.go | 48 +++++++++++++++---------------- map_test.go | 80 +++++++++++++++++++++++++-------------------------- utils_test.go | 6 ++-- 3 files changed, 67 insertions(+), 67 deletions(-) diff --git a/map.go b/map.go index b67674cb..349faf28 100644 --- a/map.go +++ b/map.go @@ -135,7 +135,7 @@ type element interface { hip HashInputProvider, key Value, value Value, - ) (newElem element, keyStorable MapKey, existingValue MapValue, err error) + ) (newElem element, keyStorable MapKey, existingMapValueStorable MapValue, err error) // Remove returns matched key, value, and updated element. // Updated element may be nil, modified, or a different type of element. 
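The lazy-allocation change in PATCH 050 above leans on a basic Go property: reading from a nil map is safe and behaves as an empty map, so allocation can be deferred until the first write instead of happening in the constructor. A standalone sketch of the same pattern, with a hypothetical type name:

// Sketch of the lazy-allocation pattern from PATCH 050: reading a nil map
// is safe in Go (it acts as empty), so the map is only allocated on the
// first write.
type dedupIndex struct {
	arrayTypes map[string]int // nil until first use
}

func (d *dedupIndex) add(id string, next int) int {
	if index, exists := d.arrayTypes[id]; exists { // reading a nil map is fine
		return index
	}
	if d.arrayTypes == nil {
		d.arrayTypes = make(map[string]int) // allocate on first write
	}
	d.arrayTypes[id] = next
	return next
}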
@@ -654,7 +654,7 @@ func (e *singleElement) Set( // Key matches, overwrite existing value if equal { - existingValue := e.value + existingMapValueStorable := e.value valueStorable, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(e.key.ByteSize()))) if err != nil { @@ -664,7 +664,7 @@ func (e *singleElement) Set( e.value = valueStorable e.size = singleElementPrefixSize + e.key.ByteSize() + e.value.ByteSize() - return e, e.key, existingValue, nil + return e, e.key, existingMapValueStorable, nil } // Hash collision detected @@ -822,7 +822,7 @@ func (e *inlineCollisionGroup) Set( } hkey, _ := digester.Digest(level) - keyStorable, existingValue, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). return nil, nil, nil, err @@ -863,11 +863,11 @@ func (e *inlineCollisionGroup) Set( return &externalCollisionGroup{ slabID: id, size: externalCollisionGroupPrefixSize + SlabIDStorable(id).ByteSize(), - }, keyStorable, existingValue, nil + }, keyStorable, existingMapValueStorable, nil } } - return e, keyStorable, existingValue, nil + return e, keyStorable, existingMapValueStorable, nil } // Remove returns key, value, and updated element if key is found. @@ -1021,12 +1021,12 @@ func (e *externalCollisionGroup) Set( } hkey, _ := digester.Digest(level) - keyStorable, existingValue, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). return nil, nil, nil, err } - return e, keyStorable, existingValue, nil + return e, keyStorable, existingMapValueStorable, nil } // Remove returns key, value, and updated element if key is found. @@ -1498,7 +1498,7 @@ func (e *hkeyElements) Set( } } - elem, keyStorable, existingValue, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + elem, keyStorable, existingMapValueStorable, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). 
return nil, nil, err @@ -1515,7 +1515,7 @@ func (e *hkeyElements) Set( } e.size = size - return keyStorable, existingValue, nil + return keyStorable, existingMapValueStorable, nil } // No matching hkey @@ -2017,8 +2017,8 @@ func (e *singleElements) Set( } if equal { - existingKey := elem.key - existingValue := elem.value + existingKeyStorable := elem.key + existingValueStorable := elem.value vs, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(elem.key.ByteSize()))) if err != nil { @@ -2038,7 +2038,7 @@ func (e *singleElements) Set( } e.size = size - return existingKey, existingValue, nil + return existingKeyStorable, existingValueStorable, nil } } @@ -3177,7 +3177,7 @@ func (m *MapDataSlab) Set( value Value, ) (MapKey, MapValue, error) { - keyStorable, existingValue, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). return nil, nil, err @@ -3198,7 +3198,7 @@ func (m *MapDataSlab) Set( } } - return keyStorable, existingValue, nil + return keyStorable, existingMapValueStorable, nil } func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -3888,7 +3888,7 @@ func (m *MapMetaDataSlab) Set( return nil, nil, err } - keyStorable, existingValue, err := child.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := child.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). return nil, nil, err @@ -3907,7 +3907,7 @@ func (m *MapMetaDataSlab) Set( // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.SplitChildSlab(). return nil, nil, err } - return keyStorable, existingValue, nil + return keyStorable, existingMapValueStorable, nil } if underflowSize, underflow := child.IsUnderflow(); underflow { @@ -3916,7 +3916,7 @@ func (m *MapMetaDataSlab) Set( // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.MergeOrRebalanceChildSlab(). return nil, nil, err } - return keyStorable, existingValue, nil + return keyStorable, existingMapValueStorable, nil } err = storage.Store(m.header.slabID, m) @@ -3924,7 +3924,7 @@ func (m *MapMetaDataSlab) Set( // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) } - return keyStorable, existingValue, nil + return keyStorable, existingMapValueStorable, nil } func (m *MapMetaDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -4902,13 +4902,13 @@ func (m *OrderedMap) set(comparator ValueComparator, hip HashInputProvider, key return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) } - keyStorable, existingValue, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). return nil, err } - if existingValue == nil { + if existingMapValueStorable == nil { m.root.ExtraData().incrementCount() } @@ -4957,7 +4957,7 @@ func (m *OrderedMap) set(comparator ValueComparator, hip HashInputProvider, key maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) m.setCallbackWithChild(comparator, hip, key, value, maxInlineSize) - return existingValue, nil + return existingMapValueStorable, nil } func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { @@ -5724,12 +5724,12 @@ func NewMapFromBatchData( prevElem := elements.elems[lastElementIndex] prevElemSize := prevElem.Size() - elem, _, existingValue, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value) + elem, _, existingMapValueStorable, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). 
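The renamed existingMapValueStorable keeps one meaning through every layer it passes: nil means the key was freshly inserted (so the element count is incremented), and non-nil is the storable of the value that was overwritten. A small usage sketch against OrderedMap.Set, with a hypothetical helper name and a caller-supplied comparator and hash input provider as elsewhere in this package:

import "github.com/onflow/atree"

// Sketch: consuming the existing-value return of OrderedMap.Set. A nil
// storable signals a fresh key; non-nil is the overwritten value.
func setAndReport(
	m *atree.OrderedMap,
	comparator atree.ValueComparator,
	hip atree.HashInputProvider,
	key, value atree.Value,
) (overwrote bool, err error) {
	existingMapValueStorable, err := m.Set(comparator, hip, key, value)
	if err != nil {
		return false, err
	}
	return existingMapValueStorable != nil, nil
}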
return nil, err } - if existingValue != nil { + if existingMapValueStorable != nil { return nil, NewDuplicateKeyError(key) } diff --git a/map_test.go b/map_test.go index 6cdb0144..9cdf0b11 100644 --- a/map_test.go +++ b/map_test.go @@ -7882,15 +7882,15 @@ func TestEmptyMap(t *testing.T) { }) t.Run("remove", func(t *testing.T) { - existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) + existingMapKeyStorable, existingMapValueStorable, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) require.Equal(t, 1, errorCategorizationCount(err)) var userError *UserError var keyNotFoundError *KeyNotFoundError require.ErrorAs(t, err, &userError) require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, existingKey) - require.Nil(t, existingValue) + require.Nil(t, existingMapKeyStorable) + require.Nil(t, existingMapValueStorable) }) t.Run("iterate", func(t *testing.T) { @@ -10882,10 +10882,10 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } for _, k := range keys { - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, k) @@ -11041,10 +11041,10 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { break } - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, aKey) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, aKey, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, aKey) @@ -11084,10 +11084,10 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } for _, k := range keys { - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, k) @@ -11231,10 +11231,10 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { break } - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, aKey) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, aKey, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, aKey) @@ -11268,10 +11268,10 @@ func TestChildMapInlinabilityInParentMap(t *testing.T) { } for _, k := range keys { - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, k) 
@@ -11515,10 +11515,10 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { } for _, k := range gchildKeys { - existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + existingMapKey, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKey) + require.NotNil(t, existingMapValueStorable) delete(expectedGChildMapValues, k) @@ -11782,10 +11782,10 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // Remove all elements (large element first) to trigger grand child map being inlined again. for _, k := range keys { - existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedGChildMapValues, k) @@ -12101,10 +12101,10 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { } // Remove one element - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, aKey) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, aKey, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, aKey) @@ -12171,10 +12171,10 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // Remove all elements, except grand child map for _, k := range keys { - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedChildMapValues, k) @@ -12417,10 +12417,10 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { } // Remove one element from grand child map - existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, aKey) + existingMapKeyStorable, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Equal(t, aKey, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedGChildMapValues, aKey) @@ -12473,13 +12473,13 @@ func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { // Remove grand children for _, k := range keys { - existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.Equal(t, k, existingKey) - require.NotNil(t, existingValue) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) // Grand child map is returned as SlabIDStorable, even if it was stored inlined in the parent. 
- id, ok := existingValue.(SlabIDStorable) + id, ok := existingMapValueStorable.(SlabIDStorable) require.True(t, ok) v, err := id.StoredValue(storage) @@ -12659,10 +12659,10 @@ func TestChildMapWhenParentMapIsModified(t *testing.T) { for _, k := range keysForNonChildMaps { - existingKey, existingValue, err := parentMap.Remove(compare, hashInputProvider, k) + existingMapKeyStorable, existingMapValueStorable, err := parentMap.Remove(compare, hashInputProvider, k) require.NoError(t, err) - require.NotNil(t, existingKey) - require.NotNil(t, existingValue) + require.NotNil(t, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) delete(expectedKeyValues, k) diff --git a/utils_test.go b/utils_test.go index 6bcd6608..3388d4bb 100644 --- a/utils_test.go +++ b/utils_test.go @@ -147,12 +147,12 @@ func (i testCompositeTypeInfo) Equal(other TypeInfo) bool { } func typeInfoComparator(a, b TypeInfo) bool { - switch x := a.(type) { + switch a := a.(type) { case testTypeInfo: - return x.Equal(b) + return a.Equal(b) case testCompositeTypeInfo: - return x.Equal(b) + return a.Equal(b) default: return false From 2a6091adbe34147e6c63ce1e713ce04369bfb160 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 3 Oct 2023 17:51:28 -0500 Subject: [PATCH 052/126] Add more comments --- array.go | 6 +++--- map.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/array.go b/array.go index 80b5cc80..286cb289 100644 --- a/array.go +++ b/array.go @@ -2972,7 +2972,7 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost. switch s := existingStorable.(type) { - case ArraySlab: + case ArraySlab: // inlined array slab err = s.Uninline(a.Storage) if err != nil { return nil, err @@ -2980,7 +2980,7 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { existingStorable = SlabIDStorable(s.SlabID()) existingValueID = slabIDToValueID(s.SlabID()) - case MapSlab: + case MapSlab: // inlined map slab err = s.Uninline(a.Storage) if err != nil { return nil, err @@ -2988,7 +2988,7 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { existingStorable = SlabIDStorable(s.SlabID()) existingValueID = slabIDToValueID(s.SlabID()) - case SlabIDStorable: + case SlabIDStorable: // uninlined slab existingValueID = slabIDToValueID(SlabID(s)) } diff --git a/map.go b/map.go index 349faf28..e1225e46 100644 --- a/map.go +++ b/map.go @@ -4867,14 +4867,14 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key // This is to prevent potential data loss because the overwritten inlined slab was not in // storage and any future changes to it would have been lost. 
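The newly commented switch in PATCH 052 is the data-loss guard for overwrites: an existing value that comes back as an inlined slab lives nowhere in storage, so it is uninlined into its own slab before being returned as a SlabIDStorable. A condensed sketch of the dispatch, with a hypothetical helper name and assuming the exported ArraySlab/MapSlab interfaces expose Uninline as used above:

import "github.com/onflow/atree"

// Condensed sketch of the overwrite guard commented in PATCH 052: inlined
// slabs are written out as standalone slabs before being handed back, so
// future changes to the returned value are not lost.
func uninlineIfNeeded(storage atree.SlabStorage, existing atree.Storable) (atree.Storable, error) {
	switch s := existing.(type) {
	case atree.ArraySlab: // inlined array slab
		if err := s.Uninline(storage); err != nil {
			return nil, err
		}
		return atree.SlabIDStorable(s.SlabID()), nil
	case atree.MapSlab: // inlined map slab
		if err := s.Uninline(storage); err != nil {
			return nil, err
		}
		return atree.SlabIDStorable(s.SlabID()), nil
	default: // already a SlabID reference or a scalar storable
		return existing, nil
	}
}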
switch s := storable.(type) { - case ArraySlab: + case ArraySlab: // inlined array slab err = s.Uninline(m.Storage) if err != nil { return nil, err } storable = SlabIDStorable(s.SlabID()) - case MapSlab: + case MapSlab: // inlined map slab err = s.Uninline(m.Storage) if err != nil { return nil, err From c07907da6b3b736d77d0bcd8cef920ad9e20ff08 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 3 Oct 2023 18:05:50 -0500 Subject: [PATCH 053/126] Add MapIterator.CanMutate() predicate function --- map.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/map.go b/map.go index 314aa04d..0c4e2732 100644 --- a/map.go +++ b/map.go @@ -5341,7 +5341,7 @@ type MapElementIterationFunc func(Value) (resume bool, err error) type MapIterator struct { m *OrderedMap - comparator ValueComparator // TODO: use comparator and hip to update child element in parent map in register inlining. + comparator ValueComparator hip HashInputProvider id SlabID elemIterator *mapElementIterator @@ -5379,7 +5379,7 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } - if i.comparator != nil && i.hip != nil { + if i.CanMutate() { maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize())) i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize) } @@ -5454,7 +5454,7 @@ func (i *MapIterator) NextValue() (value Value, err error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } - if i.comparator != nil && i.hip != nil { + if i.CanMutate() { key, err := ks.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. @@ -5520,11 +5520,19 @@ func (m *OrderedMap) iterator(comparator ValueComparator, hip HashInputProvider) }, nil } +func (i *MapIterator) CanMutate() bool { + return i.comparator != nil && i.hip != nil +} + func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { - if comparator == nil || hip == nil { + iterator, err := m.iterator(comparator, hip) + if err != nil { + return nil, err + } + if !iterator.CanMutate() { return nil, NewUserError(fmt.Errorf("failed to create MapIterator: ValueComparator or HashInputProvider is nil")) } - return m.iterator(comparator, hip) + return iterator, nil } // ReadOnlyIterator returns readonly iterator for map elements. From f6711898a763dc74205d6d541131c8eb9cba7483 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 3 Oct 2023 18:10:47 -0500 Subject: [PATCH 054/126] Add ArrayIterator.CanMutate() predicate function --- array.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/array.go b/array.go index c0e44238..52e9c22e 100644 --- a/array.go +++ b/array.go @@ -3359,6 +3359,10 @@ type ArrayIterator struct { readOnly bool } +func (i *ArrayIterator) CanMutate() bool { + return !i.readOnly +} + func (i *ArrayIterator) Next() (Value, error) { if i.remainingCount == 0 { return nil, nil @@ -3391,7 +3395,7 @@ func (i *ArrayIterator) Next() (Value, error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } - if !i.readOnly { + if i.CanMutate() { // Set up notification callback in child value so // when child value is modified parent a is notified. 
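Both CanMutate predicates answer the same question from different state: a map iterator can hand out mutable child values only when it carries a comparator and hash input provider, and an array iterator only when it was not created read-only. A usage sketch for the map side, with a hypothetical helper name and assuming Next returns a nil key at the end of iteration:

import "github.com/onflow/atree"

// Sketch: a mutable map iteration after PATCH 053. Iterator() fails fast
// if comparator or hip is nil, which is exactly the CanMutate condition.
func iterateMutable(m *atree.OrderedMap, comparator atree.ValueComparator, hip atree.HashInputProvider) error {
	iter, err := m.Iterator(comparator, hip)
	if err != nil {
		return err
	}
	for {
		key, value, err := iter.Next()
		if err != nil {
			return err
		}
		if key == nil { // assumed end-of-iteration sentinel
			return nil
		}
		_ = value // child values returned here notify the parent when mutated
	}
}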
i.array.setCallbackWithChild(uint64(i.indexInArray), element, maxInlineArrayElementSize) From 5eb2db841ec2ff99076f8714fcd8b7fca06e288f Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 4 Oct 2023 16:22:48 -0500 Subject: [PATCH 055/126] Make smoke tests remove slabs inside inlined slabs --- cmd/stress/utils.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 96f72584..978a56f8 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -192,12 +192,6 @@ func removeValue(storage *atree.PersistentSlabStorage, value atree.Value) error } func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storable) error { - sid, ok := storable.(atree.SlabIDStorable) - if !ok { - return nil - } - - id := atree.SlabID(sid) value, err := storable.StoredValue(storage) if err != nil { @@ -205,8 +199,6 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl } switch v := value.(type) { - case StringValue: - return storage.Remove(id) case *atree.Array: err := v.PopIterate(func(storable atree.Storable) { _ = removeStorable(storage, storable) @@ -214,7 +206,6 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl if err != nil { return err } - return storage.Remove(id) case *atree.OrderedMap: err := v.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) { @@ -224,11 +215,13 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl if err != nil { return err } - return storage.Remove(id) + } - default: - return fmt.Errorf("failed to remove storable: storable type %T isn't supported", v) + if sid, ok := storable.(atree.SlabIDStorable); ok { + return storage.Remove(atree.SlabID(sid)) } + + return nil } func valueEqual(a atree.Value, b atree.Value) error { From e61ba40911637de3aa425e9d7bbeaf5413821bab Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 4 Oct 2023 17:11:22 -0500 Subject: [PATCH 056/126] Refactor smoke test --- cmd/stress/array.go | 87 +++++++++++++++++++++---------------- cmd/stress/main.go | 6 +-- cmd/stress/map.go | 55 +++++++++--------------- cmd/stress/typeinfo.go | 97 ++++++++++++++++++++++++++++++++++++------ cmd/stress/utils.go | 4 +- 5 files changed, 158 insertions(+), 91 deletions(-) diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 699bf60a..6fcb20a3 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -113,13 +113,14 @@ func (status *arrayStatus) Write() { func testArray( storage *atree.PersistentSlabStorage, address atree.Address, - typeInfo atree.TypeInfo, maxLength uint64, status *arrayStatus, minHeapAllocMiB uint64, maxHeapAllocMiB uint64, ) { + typeInfo := newArrayTypeInfo() + // Create new array array, err := atree.NewArray(storage, address, typeInfo) if err != nil { @@ -127,8 +128,8 @@ func testArray( return } - // values contains array elements in the same order. It is used to check data loss. - values := make([]atree.Value, 0, maxLength) + // expectedValues contains array elements in the same order. It is used to check data loss. 
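Put back together, the removeStorable refactor in PATCH 055 has this shape: recurse into any container value first, and only afterwards remove the top-level slab, and only if the storable actually references one. That ordering is what lets it reach standalone slabs nested under inlined children. A condensed sketch with a hypothetical name:

import "github.com/onflow/atree"

// Condensed shape of removeStorable after PATCH 055 (errors from the inner
// removals are discarded, as in the original).
func removeStorableSketch(storage *atree.PersistentSlabStorage, storable atree.Storable) error {
	value, err := storable.StoredValue(storage)
	if err != nil {
		return err
	}

	switch v := value.(type) {
	case *atree.Array:
		err = v.PopIterate(func(s atree.Storable) {
			_ = removeStorableSketch(storage, s)
		})
	case *atree.OrderedMap:
		err = v.PopIterate(func(k, val atree.Storable) {
			_ = removeStorableSketch(storage, k)
			_ = removeStorableSketch(storage, val)
		})
	}
	if err != nil {
		return err
	}

	// Only storables that reference a standalone slab own storage to free.
	if sid, ok := storable.(atree.SlabIDStorable); ok {
		return storage.Remove(atree.SlabID(sid))
	}
	return nil
}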
+ expectedValues := make([]atree.Value, 0, maxLength) reduceHeapAllocs := false @@ -214,7 +215,7 @@ func testArray( } // Append to values - values = append(values, copiedValue) + expectedValues = append(expectedValues, copiedValue) // Append to array err = array.Append(v) @@ -248,10 +249,10 @@ func testArray( return } - oldV := values[k] + oldExpectedValue := expectedValues[k] // Update values - values[k] = copiedValue + expectedValues[k] = copiedValue // Update array existingStorable, err := array.Set(uint64(k), v) @@ -267,9 +268,9 @@ func testArray( return } - err = valueEqual(oldV, existingValue) + err = valueEqual(oldExpectedValue, existingValue) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldV, err) + fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) return } @@ -280,9 +281,9 @@ func testArray( return } - err = removeValue(storage, oldV) + err = removeValue(storage, oldExpectedValue) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", oldV, err) + fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", oldExpectedValue, err) return } @@ -309,11 +310,11 @@ func testArray( // Update values if k == int(array.Count()) { - values = append(values, copiedValue) + expectedValues = append(expectedValues, copiedValue) } else { - values = append(values, nil) - copy(values[k+1:], values[k:]) - values[k] = copiedValue + expectedValues = append(expectedValues, nil) + copy(expectedValues[k+1:], expectedValues[k:]) + expectedValues[k] = copiedValue } // Update array @@ -335,12 +336,12 @@ func testArray( k := r.Intn(int(array.Count())) - oldV := values[k] + oldExpectedValue := expectedValues[k] // Update values - copy(values[k:], values[k+1:]) - values[len(values)-1] = nil - values = values[:len(values)-1] + copy(expectedValues[k:], expectedValues[k+1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] // Update array existingStorable, err := array.Remove(uint64(k)) @@ -356,9 +357,9 @@ func testArray( return } - err = valueEqual(oldV, existingValue) + err = valueEqual(oldExpectedValue, existingValue) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldV, err) + fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) return } @@ -369,9 +370,9 @@ func testArray( return } - err = removeValue(storage, oldV) + err = removeValue(storage, oldExpectedValue) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied removed value %s: %s", oldV, err) + fmt.Fprintf(os.Stderr, "Failed to remove copied removed value %s: %s", oldExpectedValue, err) return } @@ -380,7 +381,7 @@ func testArray( } // Check array elements against values after every op - err = checkArrayDataLoss(array, values) + err = checkArrayDataLoss(array, expectedValues) if err != nil { fmt.Fprintln(os.Stderr, err) return @@ -388,26 +389,38 @@ func testArray( if opCount >= 100 { opCount = 0 - rootIDs, err := atree.CheckStorageHealth(storage, -1) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return - } - ids := make([]atree.SlabID, 0, len(rootIDs)) - for id := range rootIDs { - // filter out root ids with empty address - if !id.HasTempAddress() { - ids = append(ids, id) - } - } - if len(ids) != 1 || ids[0] != array.SlabID() { - fmt.Fprintf(os.Stderr, "root slab ids %v in storage, want %s\n", ids, array.SlabID()) + if !checkStorageHealth(storage, 
array.SlabID()) { return } } } } +func checkStorageHealth(storage *atree.PersistentSlabStorage, rootSlabID atree.SlabID) bool { + rootIDs, err := atree.CheckStorageHealth(storage, -1) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + + // Filter out slabs with temp address because + // child array/map values used for data loss check is stored with temp address. + ids := make([]atree.SlabID, 0, len(rootIDs)) + for id := range rootIDs { + // filter out root ids with empty address + if !id.HasTempAddress() { + ids = append(ids, id) + } + } + + if len(ids) != 1 || ids[0] != rootSlabID { + fmt.Fprintf(os.Stderr, "root slab ids %v in storage, want %s\n", ids, rootSlabID) + return false + } + + return true +} + func checkArrayDataLoss(array *atree.Array, values []atree.Value) error { // Check array has the same number of elements as values diff --git a/cmd/stress/main.go b/cmd/stress/main.go index c74e677a..de44f90e 100644 --- a/cmd/stress/main.go +++ b/cmd/stress/main.go @@ -129,8 +129,6 @@ func main() { decodeTypeInfo, ) - typeInfo := testTypeInfo{value: 123} - address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8} switch typ { @@ -142,7 +140,7 @@ func main() { go updateStatus(sigc, status) - testArray(storage, address, typeInfo, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) + testArray(storage, address, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) case "map": fmt.Printf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", minHeapAllocMiB, maxHeapAllocMiB) @@ -151,7 +149,7 @@ func main() { go updateStatus(sigc, status) - testMap(storage, address, typeInfo, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) + testMap(storage, address, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) } } diff --git a/cmd/stress/map.go b/cmd/stress/map.go index 13f222c2..fbf9cff4 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -97,12 +97,12 @@ func (status *mapStatus) Write() { func testMap( storage *atree.PersistentSlabStorage, address atree.Address, - typeInfo atree.TypeInfo, maxLength uint64, status *mapStatus, minHeapAllocMiB uint64, maxHeapAllocMiB uint64, ) { + typeInfo := newMapTypeInfo() m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), typeInfo) if err != nil { @@ -110,8 +110,8 @@ func testMap( return } - // elements contains generated keys and values. It is used to check data loss. - elements := make(map[atree.Value]atree.Value, maxLength) + // expectedValues contains generated keys and values. It is used to check data loss. + expectedValues := make(map[atree.Value]atree.Value, maxLength) // keys contains generated keys. It is used to select random keys for removal. 
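The extracted checkStorageHealth helper wraps atree.CheckStorageHealth, which walks storage and returns the reachable root slab IDs; the stress test then requires exactly one non-temporary root. A direct usage sketch of the underlying call, with a hypothetical helper name:

import "github.com/onflow/atree"

// Sketch: collecting persistent root slabs. Roots under the temporary
// address belong to the copied values the stress test keeps for data-loss
// comparison, so they are filtered out before the single-root check.
func persistentRoots(storage *atree.PersistentSlabStorage) ([]atree.SlabID, error) {
	// -1 is passed as above; assumed to skip the expected-root-count check.
	rootIDs, err := atree.CheckStorageHealth(storage, -1)
	if err != nil {
		return nil, err
	}
	ids := make([]atree.SlabID, 0, len(rootIDs))
	for id := range rootIDs {
		if !id.HasTempAddress() {
			ids = append(ids, id)
		}
	}
	return ids, nil
}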
keys := make([]atree.Value, 0, maxLength) @@ -148,7 +148,7 @@ func testMap( storage.DropDeltas() storage.DropCache() - elements = make(map[atree.Value]atree.Value, maxLength) + expectedValues = make(map[atree.Value]atree.Value, maxLength) // Load root slab from storage and cache it in read cache rootID := m.SlabID() @@ -213,15 +213,15 @@ func testMap( return } - oldV := elements[copiedKey] + oldExpectedValue := expectedValues[copiedKey] // Update keys - if oldV == nil { + if oldExpectedValue == nil { keys = append(keys, copiedKey) } // Update elements - elements[copiedKey] = copiedValue + expectedValues[copiedKey] = copiedValue // Update map existingStorable, err := m.Set(compare, hashInputProvider, k, v) @@ -231,13 +231,13 @@ func testMap( } // Compare old value from map with old value from elements - if (oldV == nil) && (existingStorable != nil) { + if (oldExpectedValue == nil) && (existingStorable != nil) { fmt.Fprintf(os.Stderr, "Set returned storable %s, want nil", existingStorable) return } - if (oldV != nil) && (existingStorable == nil) { - fmt.Fprintf(os.Stderr, "Set returned nil, want %s", oldV) + if (oldExpectedValue != nil) && (existingStorable == nil) { + fmt.Fprintf(os.Stderr, "Set returned nil, want %s", oldExpectedValue) return } @@ -249,9 +249,9 @@ func testMap( return } - err = valueEqual(oldV, existingValue) + err = valueEqual(oldExpectedValue, existingValue) if err != nil { - fmt.Fprintf(os.Stderr, "Set() returned wrong existing value %s, want %s", existingValue, oldV) + fmt.Fprintf(os.Stderr, "Set() returned wrong existing value %s, want %s", existingValue, oldExpectedValue) return } @@ -261,7 +261,7 @@ func testMap( return } - err = removeValue(storage, oldV) + err = removeValue(storage, oldExpectedValue) if err != nil { fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", existingValue, err) return @@ -269,7 +269,7 @@ func testMap( } // Update status - status.incSet(oldV == nil) + status.incSet(oldExpectedValue == nil) case mapRemoveOp: if m.Count() == 0 { @@ -281,10 +281,10 @@ func testMap( index := r.Intn(len(keys)) k := keys[index] - oldV := elements[k] + oldExpectedValue := expectedValues[k] // Update elements - delete(elements, k) + delete(expectedValues, k) // Update keys copy(keys[index:], keys[index+1:]) @@ -318,9 +318,9 @@ func testMap( return } - err = valueEqual(oldV, existingValue) + err = valueEqual(oldExpectedValue, existingValue) if err != nil { - fmt.Fprintf(os.Stderr, "Remove() returned wrong existing value %s, want %s", existingValueStorable, oldV) + fmt.Fprintf(os.Stderr, "Remove() returned wrong existing value %s, want %s", existingValueStorable, oldExpectedValue) return } @@ -342,7 +342,7 @@ func testMap( return } - err = removeValue(storage, oldV) + err = removeValue(storage, oldExpectedValue) if err != nil { fmt.Fprintf(os.Stderr, "Failed to remove copied value %s: %s", existingValue, err) return @@ -353,7 +353,7 @@ func testMap( } // Check map elements against elements after every op - err = checkMapDataLoss(m, elements) + err = checkMapDataLoss(m, expectedValues) if err != nil { fmt.Fprintln(os.Stderr, err) return @@ -361,20 +361,7 @@ func testMap( if opCount >= 100 { opCount = 0 - rootIDs, err := atree.CheckStorageHealth(storage, -1) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return - } - ids := make([]atree.SlabID, 0, len(rootIDs)) - for id := range rootIDs { - // filter out root ids with empty address - if !id.HasTempAddress() { - ids = append(ids, id) - } - } - if len(ids) != 1 || ids[0] != m.SlabID() { - 
fmt.Fprintf(os.Stderr, "root slab ids %v in storage, want %s\n", ids, m.SlabID()) + if !checkStorageHealth(storage, m.SlabID()) { return } } diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go index ec78239f..6a1fafb1 100644 --- a/cmd/stress/typeinfo.go +++ b/cmd/stress/typeinfo.go @@ -26,38 +26,107 @@ import ( "github.com/fxamacker/cbor/v2" ) -type testTypeInfo struct { - value uint64 +const ( + maxArrayTypeValue = 10 + maxMapTypeValue = 10 + + arrayTypeTagNum = 246 + mapTypeTagNum = 245 +) + +type arrayTypeInfo struct { + value int +} + +func newArrayTypeInfo() arrayTypeInfo { + return arrayTypeInfo{value: r.Intn(maxArrayTypeValue)} +} + +var _ atree.TypeInfo = arrayTypeInfo{} + +func (i arrayTypeInfo) Copy() atree.TypeInfo { + return i +} + +func (i arrayTypeInfo) IsComposite() bool { + return false +} + +func (i arrayTypeInfo) ID() string { + return fmt.Sprintf("array(%d)", i) +} + +func (i arrayTypeInfo) Encode(e *cbor.StreamEncoder) error { + err := e.EncodeTagHead(arrayTypeTagNum) + if err != nil { + return err + } + return e.EncodeInt64(int64(i.value)) +} + +func (i arrayTypeInfo) Equal(other atree.TypeInfo) bool { + otherArrayTypeInfo, ok := other.(arrayTypeInfo) + return ok && i.value == otherArrayTypeInfo.value +} + +type mapTypeInfo struct { + value int } -var _ atree.TypeInfo = testTypeInfo{} +var _ atree.TypeInfo = mapTypeInfo{} -func (i testTypeInfo) Copy() atree.TypeInfo { +func newMapTypeInfo() mapTypeInfo { + return mapTypeInfo{value: r.Intn(maxMapTypeValue)} +} + +func (i mapTypeInfo) Copy() atree.TypeInfo { return i } -func (i testTypeInfo) IsComposite() bool { +func (i mapTypeInfo) IsComposite() bool { return false } -func (i testTypeInfo) ID() string { - return fmt.Sprintf("uint64(%d)", i) +func (i mapTypeInfo) ID() string { + return fmt.Sprintf("map(%d)", i) } -func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error { - return e.EncodeUint64(i.value) +func (i mapTypeInfo) Encode(e *cbor.StreamEncoder) error { + err := e.EncodeTagHead(mapTypeTagNum) + if err != nil { + return err + } + return e.EncodeInt64(int64(i.value)) } -func (i testTypeInfo) Equal(other atree.TypeInfo) bool { - otherTestTypeInfo, ok := other.(testTypeInfo) - return ok && i.value == otherTestTypeInfo.value +func (i mapTypeInfo) Equal(other atree.TypeInfo) bool { + otherMapTypeInfo, ok := other.(mapTypeInfo) + return ok && i.value == otherMapTypeInfo.value } func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) { - value, err := dec.DecodeUint64() + num, err := dec.DecodeTagNumber() if err != nil { return nil, err } + switch num { + case arrayTypeTagNum: + value, err := dec.DecodeInt64() + if err != nil { + return nil, err + } + + return arrayTypeInfo{value: int(value)}, nil - return testTypeInfo{value: value}, nil + case mapTypeTagNum: + value, err := dec.DecodeInt64() + if err != nil { + return nil, err + } + + return mapTypeInfo{value: int(value)}, nil + + default: + return nil, fmt.Errorf("failed to decode type info with tag number %d", num) + } } diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 978a56f8..5026abcc 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -343,7 +343,7 @@ func mapEqual(a atree.Value, b atree.Value) error { // newArray creates atree.Array with random elements of specified size and nested level func newArray(storage *atree.PersistentSlabStorage, address atree.Address, length int, nestedLevel int) (*atree.Array, error) { - typeInfo := testTypeInfo{value: 123} + typeInfo := newArrayTypeInfo() array, err := 
atree.NewArray(storage, address, typeInfo) if err != nil { @@ -385,7 +385,7 @@ func newArray(storage *atree.PersistentSlabStorage, address atree.Address, lengt // newMap creates atree.OrderedMap with random elements of specified size and nested level func newMap(storage *atree.PersistentSlabStorage, address atree.Address, length int, nestedLevel int) (*atree.OrderedMap, error) { - typeInfo := testTypeInfo{value: 123} + typeInfo := newMapTypeInfo() m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), typeInfo) if err != nil { From 0c6f63120a92c4bb3b809d7db93d8d3925fb6f89 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 4 Oct 2023 17:54:42 -0500 Subject: [PATCH 057/126] Make smoke test encode and decode slabs every 100x --- cmd/stress/array.go | 10 ++++++++++ cmd/stress/map.go | 10 ++++++++++ cmd/stress/storable.go | 11 ++++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 6fcb20a3..1207600e 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -392,6 +392,16 @@ func testArray( if !checkStorageHealth(storage, array.SlabID()) { return } + + // Commit slabs to storage so slabs are encoded and then decoded at next op. + err = storage.FastCommit(runtime.NumCPU()) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) + return + } + + // Drop cache after commit to force slab decoding at next op. + storage.DropCache() } } } diff --git a/cmd/stress/map.go b/cmd/stress/map.go index fbf9cff4..e67ecfe1 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -364,6 +364,16 @@ func testMap( if !checkStorageHealth(storage, m.SlabID()) { return } + + // Commit slabs to storage so slabs are encoded and then decoded at next op. + err = storage.FastCommit(runtime.NumCPU()) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) + return + } + + // Drop cache after commit to force slab decoding at next op. 
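The commit-then-drop pair that PATCH 057 adds turns every 100th operation into a full codec round trip: FastCommit encodes all dirty slabs into base storage, and dropping the read cache forces the next access to decode them back. As a standalone helper with a hypothetical name:

import (
	"runtime"

	"github.com/onflow/atree"
)

// Sketch of the round trip from PATCH 057: encode everything dirty, then
// drop the cache so the next op must decode slabs from base storage.
func forceEncodeDecodeRoundTrip(storage *atree.PersistentSlabStorage) error {
	if err := storage.FastCommit(runtime.NumCPU()); err != nil {
		return err
	}
	storage.DropCache()
	return nil
}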
+ storage.DropCache() } } } diff --git a/cmd/stress/storable.go b/cmd/stress/storable.go index a2bdf1da..0aaf1aa4 100644 --- a/cmd/stress/storable.go +++ b/cmd/stress/storable.go @@ -413,7 +413,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, id atree.SlabID, inlinedExtraData []atree.ExtraData) (atree.Storable, error) { t, err := dec.NextType() if err != nil { return nil, err @@ -435,6 +435,15 @@ func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData switch tagNumber { + case atree.CBORTagInlinedArray: + return atree.DecodeInlinedArrayStorable(dec, decodeStorable, id, inlinedExtraData) + + case atree.CBORTagInlinedMap: + return atree.DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData) + + case atree.CBORTagInlinedCompactMap: + return atree.DecodeInlinedCompactMapStorable(dec, decodeStorable, id, inlinedExtraData) + case atree.CBORTagSlabID: return atree.DecodeSlabIDStorable(dec) From c59afb888e4cec0f2861325635423ce3b48a02a1 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 4 Oct 2023 18:17:59 -0500 Subject: [PATCH 058/126] Make smoke test create more child arrays/maps --- cmd/stress/utils.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 5026abcc..308319d7 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -40,8 +40,12 @@ const ( uint64Type smallStringType largeStringType - arrayType - mapType + arrayType1 + arrayType2 + arrayType3 + mapType1 + mapType2 + mapType3 maxValueType ) @@ -84,10 +88,10 @@ func generateValue(storage *atree.PersistentSlabStorage, address atree.Address, case largeStringType: slen := r.Intn(125) + 1024 return NewStringValue(randStr(slen)), nil - case arrayType: + case arrayType1, arrayType2, arrayType3: length := r.Intn(maxNestedArraySize) return newArray(storage, address, length, nestedLevels) - case mapType: + case mapType1, mapType2, mapType3: length := r.Intn(maxNestedMapSize) return newMap(storage, address, length, nestedLevels) default: From acf4b707de2b4f48b2687a6db00fd47c90075643 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 4 Oct 2023 19:15:37 -0500 Subject: [PATCH 059/126] Add "slabcheck" flag to smoke test This commit adds flag "slabcheck" to enable checking in-memory and serialized slabs. This flag is off by default because it can take a long time to run. --- cmd/stress/array.go | 49 +++++++++++++++++++++-------- cmd/stress/main.go | 75 +++++++++++++++++++++++++-------------------- cmd/stress/map.go | 52 ++++++++++++++++++++++--------- 3 files changed, 114 insertions(+), 62 deletions(-) diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 1207600e..68cb530e 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -21,6 +21,7 @@ package main import ( "fmt" "os" + "reflect" "runtime" "sync" "time" @@ -113,10 +114,7 @@ func (status *arrayStatus) Write() { func testArray( storage *atree.PersistentSlabStorage, address atree.Address, - maxLength uint64, status *arrayStatus, - minHeapAllocMiB uint64, - maxHeapAllocMiB uint64, ) { typeInfo := newArrayTypeInfo() @@ -129,7 +127,7 @@ func testArray( } // expectedValues contains array elements in the same order. It is used to check data loss. 
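The storable.go change above shows the callback contract that register inlining adds: an application's storable decoder now receives the slab ID and the inlined extra data, and must forward both to atree's helpers when it encounters an inlined-container tag. A sketch trimmed to the tag dispatch, with a hypothetical function name (the full decoder above also handles non-tag CBOR types such as strings):

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
	"github.com/onflow/atree"
)

// Sketch of an application-side storable decoder after PATCH 057, reduced
// to the tag dispatch. Inlined containers are decoded through atree's
// helpers, passing along the decoder callback, slab ID, and inlined extra
// data.
func appDecodeStorable(dec *cbor.StreamDecoder, id atree.SlabID, inlinedExtraData []atree.ExtraData) (atree.Storable, error) {
	tagNumber, err := dec.DecodeTagNumber()
	if err != nil {
		return nil, err
	}
	switch tagNumber {
	case atree.CBORTagInlinedArray:
		return atree.DecodeInlinedArrayStorable(dec, appDecodeStorable, id, inlinedExtraData)
	case atree.CBORTagInlinedMap:
		return atree.DecodeInlinedMapStorable(dec, appDecodeStorable, id, inlinedExtraData)
	case atree.CBORTagInlinedCompactMap:
		return atree.DecodeInlinedCompactMapStorable(dec, appDecodeStorable, id, inlinedExtraData)
	case atree.CBORTagSlabID:
		return atree.DecodeSlabIDStorable(dec)
	default:
		return nil, fmt.Errorf("unexpected tag number %d", tagNumber)
	}
}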
- expectedValues := make([]atree.Value, 0, maxLength) + expectedValues := make([]atree.Value, 0, flagMaxLength) reduceHeapAllocs := false @@ -141,10 +139,10 @@ func testArray( runtime.ReadMemStats(&m) allocMiB := m.Alloc / 1024 / 1024 - if !reduceHeapAllocs && allocMiB > maxHeapAllocMiB { + if !reduceHeapAllocs && allocMiB > flagMaxHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, removing elements to reduce allocs...\n", allocMiB) reduceHeapAllocs = true - } else if reduceHeapAllocs && allocMiB < minHeapAllocMiB { + } else if reduceHeapAllocs && allocMiB < flagMinHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, resuming random operation...\n", allocMiB) reduceHeapAllocs = false } @@ -179,20 +177,20 @@ func testArray( fmt.Printf("\nHeapAlloc is %d MiB after cleanup and forced gc\n", allocMiB) // Prevent infinite loop that doesn't do useful work. - if allocMiB > maxHeapAllocMiB { + if allocMiB > flagMaxHeapAllocMiB { // This shouldn't happen unless there's a memory leak. fmt.Fprintf( os.Stderr, "Exiting because allocMiB %d > maxMapHeapAlloMiB %d with empty map\n", allocMiB, - maxHeapAllocMiB) + flagMaxHeapAllocMiB) return } } nextOp := r.Intn(maxArrayOp) - if array.Count() == maxLength || reduceHeapAllocs { + if array.Count() == flagMaxLength || reduceHeapAllocs { nextOp = arrayRemoveOp } @@ -431,15 +429,15 @@ func checkStorageHealth(storage *atree.PersistentSlabStorage, rootSlabID atree.S return true } -func checkArrayDataLoss(array *atree.Array, values []atree.Value) error { +func checkArrayDataLoss(array *atree.Array, expectedValues []atree.Value) error { // Check array has the same number of elements as values - if array.Count() != uint64(len(values)) { - return fmt.Errorf("Count() %d != len(values) %d", array.Count(), len(values)) + if array.Count() != uint64(len(expectedValues)) { + return fmt.Errorf("Count() %d != len(values) %d", array.Count(), len(expectedValues)) } // Check every element - for i, v := range values { + for i, v := range expectedValues { convertedValue, err := array.Get(uint64(i)) if err != nil { return fmt.Errorf("failed to get element at %d: %w", i, err) @@ -450,5 +448,30 @@ func checkArrayDataLoss(array *atree.Array, values []atree.Value) error { } } + if flagCheckSlabEnabled { + typeInfoComparator := func(a atree.TypeInfo, b atree.TypeInfo) bool { + return a.ID() == b.ID() + } + + err := atree.VerifyArray(array, array.Address(), array.Type(), typeInfoComparator, hashInputProvider, true) + if err != nil { + return err + } + + err = atree.VerifyArraySerialization( + array, + cborDecMode, + cborEncMode, + decodeStorable, + decodeTypeInfo, + func(a, b atree.Storable) bool { + return reflect.DeepEqual(a, b) + }, + ) + if err != nil { + return err + } + } + return nil } diff --git a/cmd/stress/main.go b/cmd/stress/main.go index de44f90e..b1d4f747 100644 --- a/cmd/stress/main.go +++ b/cmd/stress/main.go @@ -70,25 +70,45 @@ func updateStatus(sigc <-chan os.Signal, status Status) { } } -func main() { +var cborEncMode = func() cbor.EncMode { + encMode, err := cbor.EncOptions{}.EncMode() + if err != nil { + panic(fmt.Sprintf("Failed to create CBOR encoding mode: %s", err)) + } + return encMode +}() + +var cborDecMode = func() cbor.DecMode { + decMode, err := cbor.DecOptions{}.DecMode() + if err != nil { + panic(fmt.Sprintf("Failed to create CBOR decoding mode: %s\n", err)) + } + return decMode +}() + +var ( + flagType string + flagCheckSlabEnabled bool + flagMaxLength uint64 + flagSeedHex string + flagMinHeapAllocMiB, flagMaxHeapAllocMiB uint64 +) - var typ string - var 
maxLength uint64 - var seedHex string - var minHeapAllocMiB, maxHeapAllocMiB uint64 +func main() { - flag.StringVar(&typ, "type", "array", "array or map") - flag.Uint64Var(&maxLength, "maxlen", 10_000, "max number of elements") - flag.StringVar(&seedHex, "seed", "", "seed for prng in hex (default is Unix time)") - flag.Uint64Var(&minHeapAllocMiB, "minheap", 1000, "min HeapAlloc in MiB to stop extra removal of elements") - flag.Uint64Var(&maxHeapAllocMiB, "maxheap", 2000, "max HeapAlloc in MiB to trigger extra removal of elements") + flag.StringVar(&flagType, "type", "array", "array or map") + flag.BoolVar(&flagCheckSlabEnabled, "slabcheck", false, "in memory and serialized slab check") + flag.Uint64Var(&flagMaxLength, "maxlen", 10_000, "max number of elements") + flag.StringVar(&flagSeedHex, "seed", "", "seed for prng in hex (default is Unix time)") + flag.Uint64Var(&flagMinHeapAllocMiB, "minheap", 1000, "min HeapAlloc in MiB to stop extra removal of elements") + flag.Uint64Var(&flagMaxHeapAllocMiB, "maxheap", 2000, "max HeapAlloc in MiB to trigger extra removal of elements") flag.Parse() var seed int64 - if len(seedHex) != 0 { + if len(flagSeedHex) != 0 { var err error - seed, err = strconv.ParseInt(strings.ReplaceAll(seedHex, "0x", ""), 16, 64) + seed, err = strconv.ParseInt(strings.ReplaceAll(flagSeedHex, "0x", ""), 16, 64) if err != nil { panic("Failed to parse seed flag (hex string)") } @@ -96,9 +116,9 @@ func main() { r = newRand(seed) - typ = strings.ToLower(typ) + flagType = strings.ToLower(flagType) - if typ != "array" && typ != "map" { + if flagType != "array" && flagType != "map" { fmt.Fprintf(os.Stderr, "Please specify type as either \"array\" or \"map\"") return } @@ -106,50 +126,37 @@ func main() { sigc := make(chan os.Signal, 1) signal.Notify(sigc, os.Interrupt, syscall.SIGTERM) - // Create storage - encMode, err := cbor.EncOptions{}.EncMode() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create CBOR encoding mode: %s\n", err) - return - } - - decMode, err := cbor.DecOptions{}.DecMode() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create CBOR decoding mode: %s\n", err) - return - } - baseStorage := NewInMemBaseStorage() storage := atree.NewPersistentSlabStorage( baseStorage, - encMode, - decMode, + cborEncMode, + cborDecMode, decodeStorable, decodeTypeInfo, ) address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8} - switch typ { + switch flagType { case "array": - fmt.Printf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", minHeapAllocMiB, maxHeapAllocMiB) + fmt.Printf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) status := newArrayStatus() go updateStatus(sigc, status) - testArray(storage, address, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) + testArray(storage, address, status) case "map": - fmt.Printf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", minHeapAllocMiB, maxHeapAllocMiB) + fmt.Printf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) status := newMapStatus() go updateStatus(sigc, status) - testMap(storage, address, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) + testMap(storage, address, status) } } diff --git a/cmd/stress/map.go b/cmd/stress/map.go index e67ecfe1..d113d3e4 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -21,6 +21,7 @@ package main import ( "fmt" "os" + "reflect" "runtime" "sync" "time" @@ 
-97,10 +98,7 @@ func (status *mapStatus) Write() { func testMap( storage *atree.PersistentSlabStorage, address atree.Address, - maxLength uint64, status *mapStatus, - minHeapAllocMiB uint64, - maxHeapAllocMiB uint64, ) { typeInfo := newMapTypeInfo() @@ -111,10 +109,10 @@ func testMap( } // expectedValues contains generated keys and values. It is used to check data loss. - expectedValues := make(map[atree.Value]atree.Value, maxLength) + expectedValues := make(map[atree.Value]atree.Value, flagMaxLength) // keys contains generated keys. It is used to select random keys for removal. - keys := make([]atree.Value, 0, maxLength) + keys := make([]atree.Value, 0, flagMaxLength) reduceHeapAllocs := false @@ -126,10 +124,10 @@ func testMap( runtime.ReadMemStats(&ms) allocMiB := ms.Alloc / 1024 / 1024 - if !reduceHeapAllocs && allocMiB > maxHeapAllocMiB { + if !reduceHeapAllocs && allocMiB > flagMaxHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, removing elements to reduce allocs...\n", allocMiB) reduceHeapAllocs = true - } else if reduceHeapAllocs && allocMiB < minHeapAllocMiB { + } else if reduceHeapAllocs && allocMiB < flagMinHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, resuming random operation...\n", allocMiB) reduceHeapAllocs = false } @@ -148,7 +146,7 @@ func testMap( storage.DropDeltas() storage.DropCache() - expectedValues = make(map[atree.Value]atree.Value, maxLength) + expectedValues = make(map[atree.Value]atree.Value, flagMaxLength) // Load root slab from storage and cache it in read cache rootID := m.SlabID() @@ -166,20 +164,20 @@ func testMap( fmt.Printf("\nHeapAlloc is %d MiB after cleanup and forced gc\n", allocMiB) // Prevent infinite loop that doesn't do useful work. - if allocMiB > maxHeapAllocMiB { + if allocMiB > flagMaxHeapAllocMiB { // This shouldn't happen unless there's a memory leak. 
fmt.Fprintf( os.Stderr, "Exiting because allocMiB %d > maxMapHeapAlloMiB %d with empty map\n", allocMiB, - maxHeapAllocMiB) + flagMaxHeapAllocMiB) return } } nextOp := r.Intn(maxMapOp) - if m.Count() == maxLength || reduceHeapAllocs { + if m.Count() == flagMaxLength || reduceHeapAllocs { nextOp = mapRemoveOp } @@ -378,15 +376,15 @@ func testMap( } } -func checkMapDataLoss(m *atree.OrderedMap, elements map[atree.Value]atree.Value) error { +func checkMapDataLoss(m *atree.OrderedMap, expectedValues map[atree.Value]atree.Value) error { // Check map has the same number of elements as elements - if m.Count() != uint64(len(elements)) { - return fmt.Errorf("Count() %d != len(values) %d", m.Count(), len(elements)) + if m.Count() != uint64(len(expectedValues)) { + return fmt.Errorf("Count() %d != len(values) %d", m.Count(), len(expectedValues)) } // Check every element - for k, v := range elements { + for k, v := range expectedValues { convertedValue, err := m.Get(compare, hashInputProvider, k) if err != nil { return fmt.Errorf("failed to get element with key %s: %w", k, err) @@ -397,5 +395,29 @@ func checkMapDataLoss(m *atree.OrderedMap, elements map[atree.Value]atree.Value) } } + if flagCheckSlabEnabled { + typeInfoComparator := func(a atree.TypeInfo, b atree.TypeInfo) bool { + return a.ID() == b.ID() + } + + err := atree.VerifyMap(m, m.Address(), m.Type(), typeInfoComparator, hashInputProvider, true) + if err != nil { + return err + } + + err = atree.VerifyMapSerialization( + m, + cborDecMode, + cborEncMode, + decodeStorable, + decodeTypeInfo, + func(a, b atree.Storable) bool { + return reflect.DeepEqual(a, b) + }, + ) + if err != nil { + return err + } + } return nil } From b739b509ea56fbb9a697bc02866f55c3439d469a Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:14:00 -0500 Subject: [PATCH 060/126] Refactor smoke test --- cmd/stress/array.go | 86 ++++------- cmd/stress/map.go | 74 +++------ cmd/stress/utils.go | 367 ++++++++++++++++++-------------------------- 3 files changed, 202 insertions(+), 325 deletions(-) diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 68cb530e..650d24ca 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -127,7 +127,7 @@ func testArray( } // expectedValues contains array elements in the same order. It is used to check data loss. 
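 	// arrayValue below is the named []atree.Value type this commit introduces in
 	// utils.go; the named type lets valueEqual dispatch on the kind of expected
 	// value instead of comparing two storage-backed containers.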
- expectedValues := make([]atree.Value, 0, flagMaxLength) + expectedValues := make(arrayValue, 0, flagMaxLength) reduceHeapAllocs := false @@ -200,25 +200,19 @@ func testArray( opCount++ nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) + fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err) return } - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err) - return - } - - // Append to values - expectedValues = append(expectedValues, copiedValue) + // Append to expectedValues + expectedValues = append(expectedValues, expectedValue) // Append to array - err = array.Append(v) + err = array.Append(value) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to append %s: %s", v, err) + fmt.Fprintf(os.Stderr, "Failed to append %s: %s", value, err) return } @@ -232,30 +226,24 @@ func testArray( continue } - k := r.Intn(int(array.Count())) + index := r.Intn(int(array.Count())) nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) + fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err) return } - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err) - return - } + oldExpectedValue := expectedValues[index] - oldExpectedValue := expectedValues[k] - - // Update values - expectedValues[k] = copiedValue + // Update expectedValues + expectedValues[index] = expectedValue // Update array - existingStorable, err := array.Set(uint64(k), v) + existingStorable, err := array.Set(uint64(index), value) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", v, k, err) + fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", value, index, err) return } @@ -279,46 +267,34 @@ func testArray( return } - err = removeValue(storage, oldExpectedValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", oldExpectedValue, err) - return - } - // Update status status.incSet() case arrayInsertOp: opCount++ - k := r.Intn(int(array.Count() + 1)) + index := r.Intn(int(array.Count() + 1)) nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) + fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err) return } - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err) - return - } - - // Update values - if k == int(array.Count()) { - expectedValues = append(expectedValues, copiedValue) + // Update expectedValues + if index == int(array.Count()) { + expectedValues = append(expectedValues, expectedValue) } else { expectedValues = append(expectedValues, nil) - copy(expectedValues[k+1:], expectedValues[k:]) - expectedValues[k] = copiedValue + copy(expectedValues[index+1:], expectedValues[index:]) + 
expectedValues[index] = expectedValue } // Update array - err = array.Insert(uint64(k), v) + err = array.Insert(uint64(index), value) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to insert %s into index %d: %s", v, k, err) + fmt.Fprintf(os.Stderr, "Failed to insert %s into index %d: %s", value, index, err) return } @@ -336,7 +312,7 @@ func testArray( oldExpectedValue := expectedValues[k] - // Update values + // Update expectedValues copy(expectedValues[k:], expectedValues[k+1:]) expectedValues[len(expectedValues)-1] = nil expectedValues = expectedValues[:len(expectedValues)-1] @@ -368,18 +344,12 @@ func testArray( return } - err = removeValue(storage, oldExpectedValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied removed value %s: %s", oldExpectedValue, err) - return - } - // Update status status.incRemove() } // Check array elements against values after every op - err = checkArrayDataLoss(array, expectedValues) + err = checkArrayDataLoss(expectedValues, array) if err != nil { fmt.Fprintln(os.Stderr, err) return @@ -429,7 +399,7 @@ func checkStorageHealth(storage *atree.PersistentSlabStorage, rootSlabID atree.S return true } -func checkArrayDataLoss(array *atree.Array, expectedValues []atree.Value) error { +func checkArrayDataLoss(expectedValues arrayValue, array *atree.Array) error { // Check array has the same number of elements as values if array.Count() != uint64(len(expectedValues)) { diff --git a/cmd/stress/map.go b/cmd/stress/map.go index d113d3e4..e42a5955 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -109,7 +109,7 @@ func testMap( } // expectedValues contains generated keys and values. It is used to check data loss. - expectedValues := make(map[atree.Value]atree.Value, flagMaxLength) + expectedValues := make(mapValue, flagMaxLength) // keys contains generated keys. It is used to select random keys for removal. 
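 	// A Go map cannot be indexed positionally, so this parallel slice is what
 	// lets the remove op pick a uniformly random existing key.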
keys := make([]atree.Value, 0, flagMaxLength)
@@ -186,45 +186,33 @@ func testMap(
 	case mapSetOp1, mapSetOp2, mapSetOp3:
 		opCount++
 
-		k, err := randomKey()
+		expectedKey, key, err := randomKey()
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to generate random key %s: %s", k, err)
+			fmt.Fprintf(os.Stderr, "Failed to generate random key %s: %s", key, err)
 			return
 		}
 
 		nestedLevels := r.Intn(maxNestedLevels)
-		v, err := randomValue(storage, address, nestedLevels)
+		expectedValue, value, err := randomValue(storage, address, nestedLevels)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err)
+			fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err)
 			return
 		}
 
-		copiedKey, err := copyValue(storage, atree.Address{}, k)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to copy random key %s: %s", k, err)
-			return
-		}
-
-		copiedValue, err := copyValue(storage, atree.Address{}, v)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", k, err)
-			return
-		}
-
-		oldExpectedValue := expectedValues[copiedKey]
+		oldExpectedValue, keyExist := expectedValues[expectedKey]
 
 		// Update keys
-		if oldExpectedValue == nil {
-			keys = append(keys, copiedKey)
+		if !keyExist {
+			keys = append(keys, expectedKey)
 		}
 
-		// Update elements
-		expectedValues[copiedKey] = copiedValue
+		// Update expectedValues
+		expectedValues[expectedKey] = expectedValue
 
 		// Update map
-		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+		existingStorable, err := m.Set(compare, hashInputProvider, key, value)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", v, k, err)
+			fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", value, key, err)
 			return
 		}
 
@@ -258,12 +246,6 @@ func testMap(
 				fmt.Fprintf(os.Stderr, "Failed to remove map storable element %s: %s", existingStorable, err)
 				return
 			}
-
-			err = removeValue(storage, oldExpectedValue)
-			if err != nil {
-				fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", existingValue, err)
-				return
-			}
 		}
 
 		// Update status
@@ -277,12 +259,12 @@ func testMap(
 		opCount++
 
 		index := r.Intn(len(keys))
-		k := keys[index]
+		key := keys[index]
 
-		oldExpectedValue := expectedValues[k]
+		oldExpectedValue := expectedValues[key]
 
-		// Update elements
-		delete(expectedValues, k)
+		// Update expectedValues
+		delete(expectedValues, key)
 
 		// Update keys
 		copy(keys[index:], keys[index+1:])
@@ -290,9 +272,9 @@ func testMap(
 		keys = keys[:len(keys)-1]
 
 		// Update map
-		existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, k)
+		existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, key)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to remove element with key %s: %s", k, err)
+			fmt.Fprintf(os.Stderr, "Failed to remove element with key %s: %s", key, err)
 			return
 		}
 
@@ -303,9 +285,9 @@ func testMap(
 			return
 		}
 
-		err = valueEqual(k, existingKeyValue)
+		err = valueEqual(key, existingKeyValue)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "Remove() returned wrong existing key %s, want %s", existingKeyStorable, k)
+			fmt.Fprintf(os.Stderr, "Remove() returned wrong existing key %s, want %s", existingKeyStorable, key)
 			return
 		}
 
@@ -334,24 +316,12 @@ func testMap(
 			return
 		}
 
-		err = removeValue(storage, k)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to remove copied key %s: %s", k, err)
-			return
-		}
-
-		err = removeValue(storage, oldExpectedValue)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to remove copied value %s: %s", existingValue, err)
-			return
-		}
-
 		// Update status
 		status.incRemove()
 	}
 
 	// Check map elements against elements after every op
-	err = checkMapDataLoss(m, expectedValues)
+	err = checkMapDataLoss(expectedValues, m)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err)
 		return
@@ -376,7 +346,7 @@ func testMap(
 	}
 }
 
-func checkMapDataLoss(m *atree.OrderedMap, expectedValues map[atree.Value]atree.Value) error {
+func checkMapDataLoss(expectedValues mapValue, m *atree.OrderedMap) error {
 
 	// Check map has the same number of elements as elements
 	if m.Count() != uint64(len(expectedValues)) {
diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go
index 308319d7..6a5c635f 100644
--- a/cmd/stress/utils.go
+++ b/cmd/stress/utils.go
@@ -72,39 +72,62 @@ func randStr(n int) string {
 	return string(runes)
 }
 
-func generateValue(storage *atree.PersistentSlabStorage, address atree.Address, valueType int, nestedLevels int) (atree.Value, error) {
+func generateValue(
+	storage *atree.PersistentSlabStorage,
+	address atree.Address,
+	valueType int,
+	nestedLevels int,
+) (expected atree.Value, actual atree.Value, err error) {
 	switch valueType {
 	case uint8Type:
-		return Uint8Value(r.Intn(255)), nil
+		v := Uint8Value(r.Intn(255))
+		return v, v, nil
+
 	case uint16Type:
-		return Uint16Value(r.Intn(6535)), nil
+		v := Uint16Value(r.Intn(6535))
+		return v, v, nil
+
 	case uint32Type:
-		return Uint32Value(r.Intn(4294967295)), nil
+		v := Uint32Value(r.Intn(4294967295))
+		return v, v, nil
+
 	case uint64Type:
-		return Uint64Value(r.Intn(1844674407370955161)), nil
+		v := Uint64Value(r.Intn(1844674407370955161))
+		return v, v, nil
+
 	case smallStringType:
 		slen := r.Intn(125)
-		return NewStringValue(randStr(slen)), nil
+		v := NewStringValue(randStr(slen))
+		return v, v, nil
+
 	case largeStringType:
 		slen := r.Intn(125) + 1024
-		return NewStringValue(randStr(slen)), nil
+		v := NewStringValue(randStr(slen))
+		return v, v, nil
+
 	case arrayType1, arrayType2, arrayType3:
 		length := r.Intn(maxNestedArraySize)
 		return newArray(storage, address, length, nestedLevels)
+
 	case mapType1, mapType2, mapType3:
 		length := r.Intn(maxNestedMapSize)
 		return newMap(storage, address, length, nestedLevels)
+
 	default:
-		return Uint8Value(r.Intn(255)), nil
+		return nil, nil, fmt.Errorf("unexpected random value type %d", valueType)
 	}
 }
 
-func randomKey() (atree.Value, error) {
+func randomKey() (atree.Value, atree.Value, error) {
 	t := r.Intn(largeStringType + 1)
 	return generateValue(nil, atree.Address{}, t, 0)
 }
 
-func randomValue(storage *atree.PersistentSlabStorage, address atree.Address, nestedLevels int) (atree.Value, error) {
+func randomValue(
+	storage *atree.PersistentSlabStorage,
+	address atree.Address,
+	nestedLevels int,
+) (expected atree.Value, actual atree.Value, err error) {
 	var t int
 	if nestedLevels <= 0 {
 		t = r.Intn(largeStringType + 1)
@@ -114,87 +137,6 @@ func randomValue(storage *atree.PersistentSlabStorage, address atree.Address, ne
 	return generateValue(storage, address, t, nestedLevels)
 }
 
-func copyValue(storage *atree.PersistentSlabStorage, address atree.Address, value atree.Value) (atree.Value, error) {
-	switch v := value.(type) {
-	case Uint8Value:
-		return Uint8Value(uint8(v)), nil
-	case Uint16Value:
-		return Uint16Value(uint16(v)), nil
-	case Uint32Value:
-		return Uint32Value(uint32(v)), nil
-	case Uint64Value:
-		return Uint64Value(uint64(v)), nil
-	case StringValue:
-		return NewStringValue(v.str), nil
-	case *atree.Array:
-		return copyArray(storage, address, v)
-	case *atree.OrderedMap:
-		return copyMap(storage, address, v)
-	default:
-		
return nil, fmt.Errorf("failed to copy value: value type %T isn't supported", v) - } -} - -func copyArray(storage *atree.PersistentSlabStorage, address atree.Address, array *atree.Array) (*atree.Array, error) { - iterator, err := array.ReadOnlyIterator() - if err != nil { - return nil, err - } - return atree.NewArrayFromBatchData(storage, address, array.Type(), func() (atree.Value, error) { - v, err := iterator.Next() - if err != nil { - return nil, err - } - if v == nil { - return nil, nil - } - return copyValue(storage, address, v) - }) -} - -func copyMap(storage *atree.PersistentSlabStorage, address atree.Address, m *atree.OrderedMap) (*atree.OrderedMap, error) { - iterator, err := m.ReadOnlyIterator() - if err != nil { - return nil, err - } - return atree.NewMapFromBatchData( - storage, - address, - atree.NewDefaultDigesterBuilder(), - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (atree.Value, atree.Value, error) { - k, v, err := iterator.Next() - if err != nil { - return nil, nil, err - } - if k == nil { - return nil, nil, nil - } - copiedKey, err := copyValue(storage, address, k) - if err != nil { - return nil, nil, err - } - copiedValue, err := copyValue(storage, address, v) - if err != nil { - return nil, nil, err - } - return copiedKey, copiedValue, nil - }) -} - -func removeValue(storage *atree.PersistentSlabStorage, value atree.Value) error { - switch v := value.(type) { - case *atree.Array: - return removeStorable(storage, atree.SlabIDStorable(v.SlabID())) - case *atree.OrderedMap: - return removeStorable(storage, atree.SlabIDStorable(v.SlabID())) - } - return nil -} - func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storable) error { value, err := storable.StoredValue(storage) @@ -228,230 +170,209 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl return nil } -func valueEqual(a atree.Value, b atree.Value) error { - switch a.(type) { +func valueEqual(expected atree.Value, actual atree.Value) error { + switch expected := expected.(type) { + case arrayValue: + actual, ok := actual.(*atree.Array) + if !ok { + return fmt.Errorf("failed to convert actual value to *Array, got %T", actual) + } + + return arrayEqual(expected, actual) + case *atree.Array: - return arrayEqual(a, b) + return fmt.Errorf("expected value shouldn't be *Array") + + case mapValue: + actual, ok := actual.(*atree.OrderedMap) + if !ok { + return fmt.Errorf("failed to convert actual value to *OrderedMap, got %T", actual) + } + + return mapEqual(expected, actual) + case *atree.OrderedMap: - return mapEqual(a, b) + return fmt.Errorf("expected value shouldn't be *OrderedMap") + default: - if !reflect.DeepEqual(a, b) { - return fmt.Errorf("value %s (%T) != value %s (%T)", a, a, b, b) + if !reflect.DeepEqual(expected, actual) { + return fmt.Errorf("expected value %v (%T) != actual value %v (%T)", expected, expected, actual, actual) } } + return nil } -func arrayEqual(a atree.Value, b atree.Value) error { - array1, ok := a.(*atree.Array) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.Array", a, a) - } - - array2, ok := b.(*atree.Array) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.Array", b, b) - } - - if array1.Count() != array2.Count() { - return fmt.Errorf("array %s count %d != array %s count %d", array1, array1.Count(), array2, array2.Count()) - } - - iterator1, err := array1.ReadOnlyIterator() - if err != nil { - return fmt.Errorf("failed to get array1 iterator: %w", err) +func arrayEqual(expected 
arrayValue, actual *atree.Array) error { + if uint64(len(expected)) != actual.Count() { + return fmt.Errorf("array count %d != expected count %d", actual.Count(), len(expected)) } - iterator2, err := array2.ReadOnlyIterator() + iterator, err := actual.ReadOnlyIterator() if err != nil { - return fmt.Errorf("failed to get array2 iterator: %w", err) + return fmt.Errorf("failed to get array iterator: %w", err) } + i := 0 for { - value1, err := iterator1.Next() + actualValue, err := iterator.Next() if err != nil { - return fmt.Errorf("iterator1.Next() error: %w", err) + return fmt.Errorf("iterator.Next() error: %w", err) } - value2, err := iterator2.Next() - if err != nil { - return fmt.Errorf("iterator2.Next() error: %w", err) + if actualValue == nil { + break } - err = valueEqual(value1, value2) - if err != nil { - return fmt.Errorf("array elements are different: %w", err) + if i >= len(expected) { + return fmt.Errorf("more elements from array iterator than expected") } - if value1 == nil || value2 == nil { - break + err = valueEqual(expected[i], actualValue) + if err != nil { + return fmt.Errorf("array elements are different: %w", err) } - } - return nil -} - -func mapEqual(a atree.Value, b atree.Value) error { - m1, ok := a.(*atree.OrderedMap) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.OrderedMap", a, a) + i++ } - m2, ok := b.(*atree.OrderedMap) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.OrderedMap", b, b) + if i != len(expected) { + return fmt.Errorf("got %d iterated array elements, expect %d values", i, len(expected)) } - if m1.Count() != m2.Count() { - return fmt.Errorf("map %s count %d != map %s count %d", m1, m1.Count(), m2, m2.Count()) - } + return nil +} - iterator1, err := m1.ReadOnlyIterator() - if err != nil { - return fmt.Errorf("failed to get m1 iterator: %w", err) +func mapEqual(expected mapValue, actual *atree.OrderedMap) error { + if uint64(len(expected)) != actual.Count() { + return fmt.Errorf("map count %d != expected count %d", actual.Count(), len(expected)) } - iterator2, err := m2.ReadOnlyIterator() + iterator, err := actual.ReadOnlyIterator() if err != nil { - return fmt.Errorf("failed to get m2 iterator: %w", err) + return fmt.Errorf("failed to get map iterator: %w", err) } + i := 0 for { - key1, value1, err := iterator1.Next() + actualKey, actualValue, err := iterator.Next() if err != nil { - return fmt.Errorf("iterator1.Next() error: %w", err) + return fmt.Errorf("iterator.Next() error: %w", err) } - key2, value2, err := iterator2.Next() - if err != nil { - return fmt.Errorf("iterator2.Next() error: %w", err) + if actualKey == nil { + break } - err = valueEqual(key1, key2) - if err != nil { - return fmt.Errorf("map keys are different: %w", err) + expectedValue, exist := expected[actualKey] + if !exist { + return fmt.Errorf("failed to find key %v in expected values", actualKey) } - err = valueEqual(value1, value2) + err = valueEqual(expectedValue, actualValue) if err != nil { return fmt.Errorf("map values are different: %w", err) } - if key1 == nil || key2 == nil { - break - } + i++ + } + + if i != len(expected) { + return fmt.Errorf("got %d iterated map elements, expect %d values", i, len(expected)) } return nil } // newArray creates atree.Array with random elements of specified size and nested level -func newArray(storage *atree.PersistentSlabStorage, address atree.Address, length int, nestedLevel int) (*atree.Array, error) { +func newArray( + storage *atree.PersistentSlabStorage, + address atree.Address, + length int, + 
nestedLevel int, +) (arrayValue, *atree.Array, error) { + typeInfo := newArrayTypeInfo() array, err := atree.NewArray(storage, address, typeInfo) if err != nil { - return nil, fmt.Errorf("failed to create new array: %w", err) + return nil, nil, fmt.Errorf("failed to create new array: %w", err) } - values := make([]atree.Value, length) + expectedValues := make(arrayValue, length) for i := 0; i < length; i++ { - value, err := randomValue(storage, address, nestedLevel-1) + expectedValue, value, err := randomValue(storage, address, nestedLevel-1) if err != nil { - return nil, err + return nil, nil, err } - copedValue, err := copyValue(storage, atree.Address{}, value) - if err != nil { - return nil, err - } - values[i] = copedValue + + expectedValues[i] = expectedValue + err = array.Append(value) if err != nil { - return nil, err + return nil, nil, err } } - err = checkArrayDataLoss(array, values) + err = checkArrayDataLoss(expectedValues, array) if err != nil { - return nil, err + return nil, nil, err } - for _, v := range values { - err := removeValue(storage, v) - if err != nil { - return nil, err - } - } - - return array, nil + return expectedValues, array, nil } // newMap creates atree.OrderedMap with random elements of specified size and nested level -func newMap(storage *atree.PersistentSlabStorage, address atree.Address, length int, nestedLevel int) (*atree.OrderedMap, error) { +func newMap( + storage *atree.PersistentSlabStorage, + address atree.Address, + length int, + nestedLevel int, +) (mapValue, *atree.OrderedMap, error) { + typeInfo := newMapTypeInfo() m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), typeInfo) if err != nil { - return nil, fmt.Errorf("failed to create new map: %w", err) + return nil, nil, fmt.Errorf("failed to create new map: %w", err) } - elements := make(map[atree.Value]atree.Value, length) - - for i := 0; i < length; i++ { - k, err := randomKey() - if err != nil { - return nil, err - } + expectedValues := make(mapValue, length) - copiedKey, err := copyValue(storage, atree.Address{}, k) + for m.Count() < uint64(length) { + expectedKey, key, err := randomKey() if err != nil { - return nil, err + return nil, nil, err } - v, err := randomValue(storage, address, nestedLevel-1) + expectedValue, value, err := randomValue(storage, address, nestedLevel-1) if err != nil { - return nil, err + return nil, nil, err } - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - return nil, err - } - - elements[copiedKey] = copiedValue + expectedValues[expectedKey] = expectedValue - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + existingStorable, err := m.Set(compare, hashInputProvider, key, value) if err != nil { - return nil, err + return nil, nil, err } if existingStorable != nil { // Delete overwritten element err = removeStorable(storage, existingStorable) if err != nil { - return nil, fmt.Errorf("failed to remove storable element %s: %w", existingStorable, err) + return nil, nil, fmt.Errorf("failed to remove storable element %s: %w", existingStorable, err) } } } - err = checkMapDataLoss(m, elements) + err = checkMapDataLoss(expectedValues, m) if err != nil { - return nil, err + return nil, nil, err } - for k, v := range elements { - err := removeValue(storage, k) - if err != nil { - return nil, err - } - err = removeValue(storage, v) - if err != nil { - return nil, err - } - } - - return m, nil + return expectedValues, m, nil } type InMemBaseStorage struct { @@ -539,3 +460,19 @@ func (s 
*InMemBaseStorage) SegmentsTouched() int {
 
 func (s *InMemBaseStorage) ResetReporter() {
 	// not needed
 }
+
+type arrayValue []atree.Value
+
+var _ atree.Value = &arrayValue{}
+
+func (v arrayValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) {
+	panic("not reachable")
+}
+
+type mapValue map[atree.Value]atree.Value
+
+var _ atree.Value = &mapValue{}
+
+func (v mapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) {
+	panic("not reachable")
+}

From ff9d5a9edbac80d12e4b7c0a0d37139dcb7216f4 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Thu, 5 Oct 2023 12:23:46 -0500
Subject: [PATCH 061/126] Refactor smoke test

---
 cmd/stress/utils.go | 51 ++++++++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 21 deletions(-)

diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go
index 6a5c635f..22885f0f 100644
--- a/cmd/stress/utils.go
+++ b/cmd/stress/utils.go
@@ -40,13 +40,13 @@ const (
 	uint64Type
 	smallStringType
 	largeStringType
-	arrayType1
-	arrayType2
-	arrayType3
-	mapType1
-	mapType2
-	mapType3
-	maxValueType
+	maxSimpleValueType
+)
+
+const (
+	arrayType int = iota
+	mapType
+	maxContainerValueType
 )
 
 var (
@@ -72,11 +72,8 @@ func randStr(n int) string {
 	return string(runes)
 }
 
-func generateValue(
-	storage *atree.PersistentSlabStorage,
-	address atree.Address,
+func generateSimpleValue(
 	valueType int,
-	nestedLevels int,
 ) (expected atree.Value, actual atree.Value, err error) {
 	switch valueType {
 	case uint8Type:
@@ -105,22 +102,34 @@ func generateValue(
 		v := NewStringValue(randStr(slen))
 		return v, v, nil
 
-	case arrayType1, arrayType2, arrayType3:
+	default:
+		return nil, nil, fmt.Errorf("unexpected random simple value type %d", valueType)
+	}
+}
+
+func generateContainerValue(
+	valueType int,
+	storage *atree.PersistentSlabStorage,
+	address atree.Address,
+	nestedLevels int,
+) (expected atree.Value, actual atree.Value, err error) {
+	switch valueType {
+	case arrayType:
 		length := r.Intn(maxNestedArraySize)
 		return newArray(storage, address, length, nestedLevels)
 
-	case mapType1, mapType2, mapType3:
+	case mapType:
 		length := r.Intn(maxNestedMapSize)
 		return newMap(storage, address, length, nestedLevels)
 
 	default:
-		return nil, nil, fmt.Errorf("unexpected random value type %d", valueType)
+		return nil, nil, fmt.Errorf("unexpected random container value type %d", valueType)
 	}
 }
 
 func randomKey() (atree.Value, atree.Value, error) {
-	t := r.Intn(largeStringType + 1)
-	return generateValue(nil, atree.Address{}, t, 0)
+	t := r.Intn(maxSimpleValueType)
+	return generateSimpleValue(t)
 }
 
 func randomValue(
@@ -128,13 +137,13 @@ func randomValue(
 	address atree.Address,
 	nestedLevels int,
 ) (expected atree.Value, actual atree.Value, err error) {
-	var t int
 	if nestedLevels <= 0 {
-		t = r.Intn(largeStringType + 1)
-	} else {
-		t = r.Intn(maxValueType)
+		t := r.Intn(maxSimpleValueType)
+		return generateSimpleValue(t)
 	}
-	return generateValue(storage, address, t, nestedLevels)
+
+	t := r.Intn(maxContainerValueType)
+	return generateContainerValue(t, storage, address, nestedLevels)
 }

From a10af613b62ca112d49538828145f8831fbb4dd7 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Thu, 5 Oct 2023 14:48:18 -0500
Subject: [PATCH 062/126] Refactor smoke test

---
 cmd/stress/array.go | 310 +++++++++++++++++++++-----------------------
 cmd/stress/utils.go |  10 +-
 2 files changed, 153 insertions(+), 167 deletions(-)

diff --git a/cmd/stress/array.go b/cmd/stress/array.go
index 650d24ca..dc74634d 100644
--- a/cmd/stress/array.go
+++ b/cmd/stress/array.go
@@ -76,35 +76,25 @@ func (status *arrayStatus) String() string {
 	)
 }
 
-func (status *arrayStatus) incAppend() {
+func (status *arrayStatus) incOp(op int, count uint64) {
 	status.lock.Lock()
 	defer status.lock.Unlock()
 
-	status.appendOps++
-	status.count++
-}
-
-func (status *arrayStatus) incSet() {
-	status.lock.Lock()
-	defer status.lock.Unlock()
+	switch op {
+	case arrayAppendOp:
+		status.appendOps++
 
-	status.setOps++
-}
-
-func (status *arrayStatus) incInsert() {
-	status.lock.Lock()
-	defer status.lock.Unlock()
+	case arrayInsertOp:
+		status.insertOps++
 
-	status.insertOps++
-	status.count++
-}
+	case arraySetOp:
+		status.setOps++
 
-func (status *arrayStatus) incRemove() {
-	status.lock.Lock()
-	defer status.lock.Unlock()
+	case arrayRemoveOp:
+		status.removeOps++
+	}
 
-	status.removeOps++
-	status.count--
+	status.count = count
 }
 
 func (status *arrayStatus) Write() {
@@ -188,190 +178,186 @@ func testArray(
 			}
 		}
 
-		nextOp := r.Intn(maxArrayOp)
-
+		var forceRemove bool
 		if array.Count() == flagMaxLength || reduceHeapAllocs {
-			nextOp = arrayRemoveOp
+			forceRemove = true
 		}
 
-		switch nextOp {
+		var nextOp int
+		expectedValues, nextOp, err = modifyArray(expectedValues, array, maxNestedLevels, forceRemove)
+		if err != nil {
+			fmt.Fprint(os.Stderr, err.Error())
+			return
+		}
 
-		case arrayAppendOp:
-			opCount++
+		opCount++
 
-			nestedLevels := r.Intn(maxNestedLevels)
-			expectedValue, value, err := randomValue(storage, address, nestedLevels)
-			if err != nil {
-				fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err)
-				return
-			}
+		// Update status
+		status.incOp(nextOp, array.Count())
 
-			// Append to expectedValues
-			expectedValues = append(expectedValues, expectedValue)
+		// Check array elements against values after every op
+		err = checkArrayDataLoss(expectedValues, array)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			return
+		}
 
-			// Append to array
-			err = array.Append(value)
-			if err != nil {
-				fmt.Fprintf(os.Stderr, "Failed to append %s: %s", value, err)
+		if opCount >= 100 {
+			opCount = 0
+			if !checkStorageHealth(storage, array.SlabID()) {
 				return
 			}
 
-			// Update status
-			status.incAppend()
-
-		case arraySetOp:
-			opCount++
-
-			if array.Count() == 0 {
-				continue
-			}
-
-			index := r.Intn(int(array.Count()))
-
-			nestedLevels := r.Intn(maxNestedLevels)
-			expectedValue, value, err := randomValue(storage, address, nestedLevels)
+			// Commit slabs to storage so slabs are encoded and then decoded at next op.
+			err = storage.FastCommit(runtime.NumCPU())
 			if err != nil {
-				fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err)
+				fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err)
 				return
 			}
 
-			oldExpectedValue := expectedValues[index]
+			// Drop cache after commit to force slab decoding at next op.
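+			// DropCache only discards the in-memory slab cache; FastCommit above has
+			// already persisted pending changes, so no data is lost by dropping it.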
+ storage.DropCache() + } + } +} - // Update expectedValues - expectedValues[index] = expectedValue +func modifyArray( + expectedValues arrayValue, + array *atree.Array, + maxNestedLevels int, + forceRemove bool, +) (arrayValue, int, error) { - // Update array - existingStorable, err := array.Set(uint64(index), value) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", value, index, err) - return - } + storage := array.Storage + address := array.Address() - // Compare overwritten value from array with overwritten value from values - existingValue, err := existingStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err) - return - } + var nextOp int + if forceRemove { + if array.Count() == 0 { + return nil, 0, fmt.Errorf("failed to force remove array elements because there is no element") + } + nextOp = arrayRemoveOp + } else { + if array.Count() == 0 { + nextOp = arrayAppendOp + } else { + nextOp = r.Intn(maxArrayOp) + } + } - err = valueEqual(oldExpectedValue, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) - return - } + switch nextOp { + case arrayAppendOp: + nestedLevels := r.Intn(maxNestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err) + } - // Delete overwritten element from storage - err = removeStorable(storage, existingStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove storable %s: %s", existingStorable, err) - return - } + // Update expectedValues + expectedValues = append(expectedValues, expectedValue) - // Update status - status.incSet() + // Update array + err = array.Append(value) + if err != nil { + return nil, 0, fmt.Errorf("failed to append %s: %s", value, err) + } - case arrayInsertOp: - opCount++ + case arraySetOp: + nestedLevels := r.Intn(maxNestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err) + } - index := r.Intn(int(array.Count() + 1)) + index := r.Intn(int(array.Count())) - nestedLevels := r.Intn(maxNestedLevels) - expectedValue, value, err := randomValue(storage, address, nestedLevels) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err) - return - } + oldExpectedValue := expectedValues[index] - // Update expectedValues - if index == int(array.Count()) { - expectedValues = append(expectedValues, expectedValue) - } else { - expectedValues = append(expectedValues, nil) - copy(expectedValues[index+1:], expectedValues[index:]) - expectedValues[index] = expectedValue - } + // Update expectedValues + expectedValues[index] = expectedValue - // Update array - err = array.Insert(uint64(index), value) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to insert %s into index %d: %s", value, index, err) - return - } + // Update array + existingStorable, err := array.Set(uint64(index), value) + if err != nil { + return nil, 0, fmt.Errorf("failed to set %s at index %d: %s", value, index, err) + } - // Update status - status.incInsert() + // Compare overwritten value from array with overwritten value from expectedValues + existingValue, err := existingStorable.StoredValue(storage) + if err != nil { + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", 
existingStorable, err) + } - case arrayRemoveOp: - if array.Count() == 0 { - continue - } + err = valueEqual(oldExpectedValue, existingValue) + if err != nil { + return nil, 0, fmt.Errorf("failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) + } - opCount++ + // Delete overwritten element from storage + err = removeStorable(storage, existingStorable) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove storable %s: %s", existingStorable, err) + } - k := r.Intn(int(array.Count())) + case arrayInsertOp: + nestedLevels := r.Intn(maxNestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err) + } - oldExpectedValue := expectedValues[k] + index := r.Intn(int(array.Count() + 1)) - // Update expectedValues - copy(expectedValues[k:], expectedValues[k+1:]) - expectedValues[len(expectedValues)-1] = nil - expectedValues = expectedValues[:len(expectedValues)-1] + // Update expectedValues + if index == int(array.Count()) { + expectedValues = append(expectedValues, expectedValue) + } else { + expectedValues = append(expectedValues, nil) + copy(expectedValues[index+1:], expectedValues[index:]) + expectedValues[index] = expectedValue + } - // Update array - existingStorable, err := array.Remove(uint64(k)) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove element at index %d: %s", k, err) - return - } + // Update array + err = array.Insert(uint64(index), value) + if err != nil { + return nil, 0, fmt.Errorf("failed to insert %s into index %d: %s", value, index, err) + } - // Compare removed value from array with removed value from values - existingValue, err := existingStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err) - return - } + case arrayRemoveOp: + index := r.Intn(int(array.Count())) - err = valueEqual(oldExpectedValue, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) - return - } + oldExpectedValue := expectedValues[index] - // Delete removed element from storage - err = removeStorable(storage, existingStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove element %s: %s", existingStorable, err) - return - } + // Update expectedValues + copy(expectedValues[index:], expectedValues[index+1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // Update status - status.incRemove() + // Update array + existingStorable, err := array.Remove(uint64(index)) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove element at index %d: %s", index, err) } - // Check array elements against values after every op - err = checkArrayDataLoss(expectedValues, array) + // Compare removed value from array with removed value from values + existingValue, err := existingStorable.StoredValue(storage) if err != nil { - fmt.Fprintln(os.Stderr, err) - return + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) } - if opCount >= 100 { - opCount = 0 - if !checkStorageHealth(storage, array.SlabID()) { - return - } - - // Commit slabs to storage so slabs are encoded and then decoded at next op. 
- err = storage.FastCommit(runtime.NumCPU()) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) - return - } + err = valueEqual(oldExpectedValue, existingValue) + if err != nil { + return nil, 0, fmt.Errorf("failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) + } - // Drop cache after commit to force slab decoding at next op. - storage.DropCache() + // Delete removed element from storage + err = removeStorable(storage, existingStorable) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove element %s: %s", existingStorable, err) } } + + return expectedValues, nextOp, nil } func checkStorageHealth(storage *atree.PersistentSlabStorage, rootSlabID atree.SlabID) bool { diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 22885f0f..4f5acfc5 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -109,7 +109,7 @@ func generateSimpleValue( func generateContainerValue( valueType int, - storage *atree.PersistentSlabStorage, + storage atree.SlabStorage, address atree.Address, nestedLevels int, ) (expected atree.Value, actual atree.Value, err error) { @@ -133,7 +133,7 @@ func randomKey() (atree.Value, atree.Value, error) { } func randomValue( - storage *atree.PersistentSlabStorage, + storage atree.SlabStorage, address atree.Address, nestedLevels int, ) (expected atree.Value, actual atree.Value, err error) { @@ -146,7 +146,7 @@ func randomValue( return generateContainerValue(t, storage, address, nestedLevels) } -func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storable) error { +func removeStorable(storage atree.SlabStorage, storable atree.Storable) error { value, err := storable.StoredValue(storage) if err != nil { @@ -295,7 +295,7 @@ func mapEqual(expected mapValue, actual *atree.OrderedMap) error { // newArray creates atree.Array with random elements of specified size and nested level func newArray( - storage *atree.PersistentSlabStorage, + storage atree.SlabStorage, address atree.Address, length int, nestedLevel int, @@ -334,7 +334,7 @@ func newArray( // newMap creates atree.OrderedMap with random elements of specified size and nested level func newMap( - storage *atree.PersistentSlabStorage, + storage atree.SlabStorage, address atree.Address, length int, nestedLevel int, From e38ad5a1e0ad975e0dc3f75b3f6320fe28064a2d Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 5 Oct 2023 17:02:48 -0500 Subject: [PATCH 063/126] Refactor smoke test --- cmd/stress/map.go | 278 +++++++++++++++++++++++----------------------- 1 file changed, 142 insertions(+), 136 deletions(-) diff --git a/cmd/stress/map.go b/cmd/stress/map.go index e42a5955..c9478ed5 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -29,8 +29,10 @@ import ( "github.com/onflow/atree" ) +type mapOpType int + const ( - mapSetOp1 = iota + mapSetOp1 mapOpType = iota mapSetOp2 mapSetOp3 mapRemoveOp @@ -72,23 +74,19 @@ func (status *mapStatus) String() string { ) } -func (status *mapStatus) incSet(newValue bool) { +func (status *mapStatus) incOp(op mapOpType, count uint64) { status.lock.Lock() defer status.lock.Unlock() - status.setOps++ + switch op { + case mapSetOp1, mapSetOp2, mapSetOp3: + status.setOps++ - if newValue { - status.count++ + case mapRemoveOp: + status.removeOps++ } -} -func (status *mapStatus) incRemove() { - status.lock.Lock() - defer status.lock.Unlock() - - status.removeOps++ - status.count-- + status.count = count } func (status *mapStatus) Write() { @@ -175,175 +173,183 @@ 
func testMap( } } - nextOp := r.Intn(maxMapOp) - + var forceRemove bool if m.Count() == flagMaxLength || reduceHeapAllocs { - nextOp = mapRemoveOp + forceRemove = true } - switch nextOp { - - case mapSetOp1, mapSetOp2, mapSetOp3: - opCount++ + var nextOp mapOpType + expectedValues, keys, nextOp, err = modifyMap(expectedValues, keys, m, maxNestedLevels, forceRemove) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + return + } - expectedKey, key, err := randomKey() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random key %s: %s", key, err) - return - } + opCount++ - nestedLevels := r.Intn(maxNestedLevels) - expectedValue, value, err := randomValue(storage, address, nestedLevels) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", value, err) - return - } + // Update status + status.incOp(nextOp, m.Count()) - oldExpectedValue, keyExist := expectedValues[expectedKey] + // Check map elements against elements after every op + err = checkMapDataLoss(expectedValues, m) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } - // Update keys - if !keyExist { - keys = append(keys, expectedKey) + if opCount >= 100 { + opCount = 0 + if !checkStorageHealth(storage, m.SlabID()) { + return } - // Update expectedValues - expectedValues[expectedKey] = expectedValue - - // Update map - existingStorable, err := m.Set(compare, hashInputProvider, key, value) + // Commit slabs to storage so slabs are encoded and then decoded at next op. + err = storage.FastCommit(runtime.NumCPU()) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", value, key, err) + fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) return } - // Compare old value from map with old value from elements - if (oldExpectedValue == nil) && (existingStorable != nil) { - fmt.Fprintf(os.Stderr, "Set returned storable %s, want nil", existingStorable) - return - } + // Drop cache after commit to force slab decoding at next op. 
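+			// Re-decoding after DropCache also exercises decodeStorable, including
+			// the inlined array/map/compact-map tag cases added earlier in this series.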
+ storage.DropCache() + } + } +} - if (oldExpectedValue != nil) && (existingStorable == nil) { - fmt.Fprintf(os.Stderr, "Set returned nil, want %s", oldExpectedValue) - return - } +func modifyMap( + expectedValues mapValue, + keys []atree.Value, + m *atree.OrderedMap, + maxNestedLevels int, + forceRemove bool, +) (mapValue, []atree.Value, mapOpType, error) { + + storage := m.Storage + address := m.Address() + + var nextOp mapOpType + if forceRemove { + if m.Count() == 0 { + return nil, nil, 0, fmt.Errorf("failed to force remove map elements because there is no element") + } + nextOp = mapRemoveOp + } else { + if m.Count() == 0 { + nextOp = mapSetOp1 + } else { + nextOp = mapOpType(r.Intn(int(maxMapOp))) + } + } - if existingStorable != nil { - - existingValue, err := existingStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err) - return - } - - err = valueEqual(oldExpectedValue, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Set() returned wrong existing value %s, want %s", existingValue, oldExpectedValue) - return - } - - err = removeStorable(storage, existingStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove map storable element %s: %s", existingStorable, err) - return - } - } + switch nextOp { + case mapSetOp1, mapSetOp2, mapSetOp3: - // Update status - status.incSet(oldExpectedValue == nil) + expectedKey, key, err := randomKey() + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to generate random key %s: %s", key, err) + } - case mapRemoveOp: - if m.Count() == 0 { - continue - } + nestedLevels := r.Intn(maxNestedLevels) + expectedValue, value, err := randomValue(storage, address, nestedLevels) + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err) + } - opCount++ + oldExpectedValue, keyExist := expectedValues[expectedKey] - index := r.Intn(len(keys)) - key := keys[index] + // Update keys + if !keyExist { + keys = append(keys, expectedKey) + } - oldExpectedValue := expectedValues[key] + // Update expectedValues + expectedValues[expectedKey] = expectedValue - // Update unexpectedValues - delete(expectedValues, key) + // Update map + existingStorable, err := m.Set(compare, hashInputProvider, key, value) + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to set %s at index %d: %s", value, key, err) + } - // Update keys - copy(keys[index:], keys[index+1:]) - keys[len(keys)-1] = nil - keys = keys[:len(keys)-1] + // Compare old value from map with old value from elements + if (oldExpectedValue == nil) != (existingStorable == nil) { + return nil, nil, 0, fmt.Errorf("Set returned storable %s != expected %s", existingStorable, oldExpectedValue) + } - // Update map - existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, key) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove element with key %s: %s", key, err) - return - } + if existingStorable != nil { - // Compare removed key from map with removed key from elements - existingKeyValue, err := existingKeyStorable.StoredValue(storage) + existingValue, err := existingStorable.StoredValue(storage) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingKeyStorable, err) - return + return nil, nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) } - err = valueEqual(key, existingKeyValue) + err = valueEqual(oldExpectedValue, existingValue) if err != nil { - 
fmt.Fprintf(os.Stderr, "Remove() returned wrong existing key %s, want %s", existingKeyStorable, key) - return + return nil, nil, 0, fmt.Errorf("Set() returned wrong existing value %s, want %s", existingValue, oldExpectedValue) } - // Compare removed value from map with removed value from elements - existingValue, err := existingValueStorable.StoredValue(storage) + // Delete removed element from storage + err = removeStorable(storage, existingStorable) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingValueStorable, err) - return + return nil, nil, 0, fmt.Errorf("failed to remove map storable element %s: %s", existingStorable, err) } + } - err = valueEqual(oldExpectedValue, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Remove() returned wrong existing value %s, want %s", existingValueStorable, oldExpectedValue) - return - } + case mapRemoveOp: + index := r.Intn(len(keys)) + key := keys[index] - err = removeStorable(storage, existingKeyStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove key %s: %s", existingKeyStorable, err) - return - } + oldExpectedValue := expectedValues[key] - err = removeStorable(storage, existingValueStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove value %s: %s", existingValueStorable, err) - return - } + // Update expectedValues + delete(expectedValues, key) - // Update status - status.incRemove() + // Update keys + copy(keys[index:], keys[index+1:]) + keys[len(keys)-1] = nil + keys = keys[:len(keys)-1] + + // Update map + existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, key) + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to remove element with key %s: %s", key, err) } - // Check map elements against elements after every op - err = checkMapDataLoss(expectedValues, m) + // Compare removed key from map with removed key from elements + existingKeyValue, err := existingKeyStorable.StoredValue(storage) if err != nil { - fmt.Fprintln(os.Stderr, err) - return + return nil, nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingKeyStorable, err) } - if opCount >= 100 { - opCount = 0 - if !checkStorageHealth(storage, m.SlabID()) { - return - } + err = valueEqual(key, existingKeyValue) + if err != nil { + return nil, nil, 0, fmt.Errorf("Remove() returned wrong existing key %s, want %s", existingKeyStorable, key) + } - // Commit slabs to storage so slabs are encoded and then decoded at next op. - err = storage.FastCommit(runtime.NumCPU()) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) - return - } + // Compare removed value from map with removed value from elements + existingValue, err := existingValueStorable.StoredValue(storage) + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingValueStorable, err) + } - // Drop cache after commit to force slab decoding at next op. 
-			storage.DropCache()
+	err = valueEqual(oldExpectedValue, existingValue)
+	if err != nil {
+		return nil, nil, 0, fmt.Errorf("Remove() returned wrong existing value %s, want %s", existingValueStorable, oldExpectedValue)
+	}
+
+	// Delete removed element from storage
+	err = removeStorable(storage, existingKeyStorable)
+	if err != nil {
+		return nil, nil, 0, fmt.Errorf("failed to remove key %s: %s", existingKeyStorable, err)
+	}
+
+	err = removeStorable(storage, existingValueStorable)
+	if err != nil {
+		return nil, nil, 0, fmt.Errorf("failed to remove value %s: %s", existingValueStorable, err)
 		}
 	}
+
+	return expectedValues, keys, nextOp, nil
 }
 
 func checkMapDataLoss(expectedValues mapValue, m *atree.OrderedMap) error {

From 25dbc3e454fe5d6d560e05ac4f328252fbaae64c Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 6 Oct 2023 10:12:44 -0500
Subject: [PATCH 064/126] Make smoke test support child array/map mutation

This commit smoke tests mutation of child containers:
- existing child retrieved from parent array/map
- new child appended/inserted to parent array
- existing child set in parent array/map
- new child set in parent map

---
 cmd/stress/array.go | 279 ++++++++++++++++++++++++++++++++++++--------
 cmd/stress/main.go  |  16 ++-
 cmd/stress/map.go   | 206 +++++++++++++++++++++++---------
 cmd/stress/utils.go |   2 +-
 4 files changed, 397 insertions(+), 106 deletions(-)

diff --git a/cmd/stress/array.go b/cmd/stress/array.go
index dc74634d..3e217a0a 100644
--- a/cmd/stress/array.go
+++ b/cmd/stress/array.go
@@ -29,11 +29,17 @@ import (
 	"github.com/onflow/atree"
 )
 
+type arrayOpType int
+
 const (
-	arrayAppendOp = iota
+	arrayAppendOp arrayOpType = iota
 	arrayInsertOp
 	arraySetOp
 	arrayRemoveOp
+	arrayMutateChildContainerAfterGet
+	arrayMutateChildContainerAfterAppend
+	arrayMutateChildContainerAfterInsert
+	arrayMutateChildContainerAfterSet
 
 	maxArrayOp
 )
@@ -44,10 +50,14 @@ type arrayStatus struct {
 
 	count uint64 // number of elements in array
 
-	appendOps uint64
-	insertOps uint64
-	setOps    uint64
-	removeOps uint64
+	appendOps                          uint64
+	insertOps                          uint64
+	setOps                             uint64
+	removeOps                          uint64
+	mutateChildContainerAfterGetOps    uint64
+	mutateChildContainerAfterAppendOps uint64
+	mutateChildContainerAfterInsertOps uint64
+	mutateChildContainerAfterSetOps    uint64
 }
 
 var _ Status = &arrayStatus{}
@@ -65,7 +75,7 @@ func (status *arrayStatus) String() string {
 	var m runtime.MemStats
 	runtime.ReadMemStats(&m)
 
-	return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d appends, %d sets, %d inserts, %d removes",
+	return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d appends, %d sets, %d inserts, %d removes, %d Get mutations, %d Append mutations, %d Insert mutations, %d Set mutations",
 		duration.Truncate(time.Second).String(),
 		m.Alloc/1024/1024,
 		status.count,
@@ -73,10 +83,14 @@ func (status *arrayStatus) String() string {
 		status.setOps,
 		status.insertOps,
 		status.removeOps,
+		status.mutateChildContainerAfterGetOps,
+		status.mutateChildContainerAfterAppendOps,
+		status.mutateChildContainerAfterInsertOps,
+		status.mutateChildContainerAfterSetOps,
 	)
 }
 
-func (status *arrayStatus) incOp(op int, count uint64) {
+func (status *arrayStatus) incOp(op arrayOpType, count uint64) {
 	status.lock.Lock()
 	defer status.lock.Unlock()
 
@@ -92,6 +106,18 @@ func (status *arrayStatus) incOp(op arrayOpType, count uint64) {
 
 	case arrayRemoveOp:
 		status.removeOps++
+
+	case 
arrayMutateChildContainerAfterAppend: + status.mutateChildContainerAfterAppendOps++ + + case arrayMutateChildContainerAfterInsert: + status.mutateChildContainerAfterInsertOps++ + + case arrayMutateChildContainerAfterSet: + status.mutateChildContainerAfterSetOps++ } status.count = count @@ -183,8 +209,8 @@ func testArray( forceRemove = true } - var nextOp int - expectedValues, nextOp, err = modifyArray(expectedValues, array, maxNestedLevels, forceRemove) + var prevOp arrayOpType + expectedValues, prevOp, err = modifyArray(expectedValues, array, maxNestedLevels, forceRemove) if err != nil { fmt.Fprint(os.Stderr, err.Error()) return @@ -193,7 +219,7 @@ func testArray( opCount++ // Update status - status.incOp(nextOp, array.Count()) + status.incOp(prevOp, array.Count()) // Check array elements against values after every op err = checkArrayDataLoss(expectedValues, array) @@ -221,52 +247,116 @@ func testArray( } } -func modifyArray( +func nextArrayOp( expectedValues arrayValue, array *atree.Array, - maxNestedLevels int, + nestedLevels int, forceRemove bool, -) (arrayValue, int, error) { +) (arrayOpType, error) { - storage := array.Storage - address := array.Address() - - var nextOp int if forceRemove { if array.Count() == 0 { - return nil, 0, fmt.Errorf("failed to force remove array elements because there is no element") + return 0, fmt.Errorf("failed to force remove array elements because array has no elements") } - nextOp = arrayRemoveOp - } else { - if array.Count() == 0 { - nextOp = arrayAppendOp - } else { - nextOp = r.Intn(maxArrayOp) + return arrayRemoveOp, nil + } + + if array.Count() == 0 { + return arrayAppendOp, nil + } + + for { + nextOp := arrayOpType(r.Intn(int(maxArrayOp))) + + switch nextOp { + case arrayMutateChildContainerAfterAppend, + arrayMutateChildContainerAfterInsert, + arrayMutateChildContainerAfterSet: + + if nestedLevels-1 > 0 { + return nextOp, nil + } + + // New child container can't be created because next nestedLevels is 0. + // Try another array operation. + + case arrayMutateChildContainerAfterGet: + if hasChildContainerInArray(expectedValues) { + return nextOp, nil + } + + // Array doesn't have child container, try another array operation. 
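+			// (This retry loop terminates with probability 1: the plain append,
+			// insert, set, and remove ops always return via the default case.)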
+
+		default:
+			return nextOp, nil
+		}
+	}
+}
+
+func modifyArray(
+	expectedValues arrayValue,
+	array *atree.Array,
+	nestedLevels int,
+	forceRemove bool,
+) (arrayValue, arrayOpType, error) {
+
+	storage := array.Storage
+	address := array.Address()
+
+	nextOp, err := nextArrayOp(expectedValues, array, nestedLevels, forceRemove)
+	if err != nil {
+		return nil, 0, err
+	}
 
 	switch nextOp {
-	case arrayAppendOp:
-		nestedLevels := r.Intn(maxNestedLevels)
-		expectedValue, value, err := randomValue(storage, address, nestedLevels)
+	case arrayAppendOp, arrayMutateChildContainerAfterAppend:
+
+		var nextNestedLevels int
+
+		if nextOp == arrayAppendOp {
+			nextNestedLevels = r.Intn(nestedLevels)
+		} else { // arrayMutateChildContainerAfterAppend
+			nextNestedLevels = nestedLevels - 1
+		}
+
+		// Create new child
+		expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels)
 		if err != nil {
-			return nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err)
+			return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err)
 		}
 
 		// Update expectedValues
-		expectedValues = append(expectedValues, expectedValue)
+		expectedValues = append(expectedValues, expectedChildValue)
 
 		// Update array
-		err = array.Append(value)
+		err = array.Append(child)
 		if err != nil {
-			return nil, 0, fmt.Errorf("failed to append %s: %s", value, err)
+			return nil, 0, fmt.Errorf("failed to append %s: %s", child, err)
 		}
 
-	case arraySetOp:
-		nestedLevels := r.Intn(maxNestedLevels)
-		expectedValue, value, err := randomValue(storage, address, nestedLevels)
+		if nextOp == arrayMutateChildContainerAfterAppend {
+			index := len(expectedValues) - 1
+
+			expectedValues[index], err = modifyContainer(expectedValues[index], child, nextNestedLevels)
+			if err != nil {
+				return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err)
+			}
+		}
+
+	case arraySetOp, arrayMutateChildContainerAfterSet:
+
+		var nextNestedLevels int
+
+		if nextOp == arraySetOp {
+			nextNestedLevels = r.Intn(nestedLevels)
+		} else { // arrayMutateChildContainerAfterSet
+			nextNestedLevels = nestedLevels - 1
+		}
+
+		// Create new child
+		expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels)
 		if err != nil {
-			return nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err)
+			return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err)
 		}
 
 		index := r.Intn(int(array.Count()))
@@ -274,12 +364,12 @@ func modifyArray(
 		oldExpectedValue := expectedValues[index]
 
 		// Update expectedValues
-		expectedValues[index] = expectedValue
+		expectedValues[index] = expectedChildValue
 
 		// Update array
-		existingStorable, err := array.Set(uint64(index), value)
+		existingStorable, err := array.Set(uint64(index), child)
 		if err != nil {
-			return nil, 0, fmt.Errorf("failed to set %s at index %d: %s", value, index, err)
+			return nil, 0, fmt.Errorf("failed to set %s at index %d: %s", child, index, err)
 		}
 
 		// Compare overwritten value from array with overwritten value from expectedValues
@@ -299,28 +389,51 @@ func modifyArray(
 			return nil, 0, fmt.Errorf("failed to remove storable %s: %s", existingStorable, err)
 		}
 
-	case arrayInsertOp:
-		nestedLevels := r.Intn(maxNestedLevels)
-		expectedValue, value, err := randomValue(storage, address, nestedLevels)
+		if nextOp == arrayMutateChildContainerAfterSet {
+			expectedValues[index], err = modifyContainer(expectedValues[index], child, nextNestedLevels)
+			if err != nil {
+				return nil, 0, fmt.Errorf("failed to modify child container at 
index %d: %w", index, err)
+			}
+		}
+
+	case arrayInsertOp, arrayMutateChildContainerAfterInsert:
+
+		var nextNestedLevels int
+
+		if nextOp == arrayInsertOp {
+			nextNestedLevels = r.Intn(nestedLevels)
+		} else { // arrayMutateChildContainerAfterInsert
+			nextNestedLevels = nestedLevels - 1
+		}
+
+		// Create new child
+		expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels)
 		if err != nil {
-			return nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err)
+			return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err)
 		}
 
 		index := r.Intn(int(array.Count() + 1))
 
 		// Update expectedValues
 		if index == int(array.Count()) {
-			expectedValues = append(expectedValues, expectedValue)
+			expectedValues = append(expectedValues, expectedChildValue)
 		} else {
 			expectedValues = append(expectedValues, nil)
 			copy(expectedValues[index+1:], expectedValues[index:])
-			expectedValues[index] = expectedValue
+			expectedValues[index] = expectedChildValue
 		}
 
 		// Update array
-		err = array.Insert(uint64(index), value)
+		err = array.Insert(uint64(index), child)
 		if err != nil {
-			return nil, 0, fmt.Errorf("failed to insert %s into index %d: %s", value, index, err)
+			return nil, 0, fmt.Errorf("failed to insert %s into index %d: %s", child, index, err)
+		}
+
+		if nextOp == arrayMutateChildContainerAfterInsert {
+			expectedValues[index], err = modifyContainer(expectedValues[index], child, nextNestedLevels)
+			if err != nil {
+				return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err)
+			}
 		}
 
 	case arrayRemoveOp:
@@ -355,11 +468,85 @@ func modifyArray(
 		if err != nil {
 			return nil, 0, fmt.Errorf("failed to remove element %s: %s", existingStorable, err)
 		}
+
+	case arrayMutateChildContainerAfterGet:
+		index, found := getRandomChildContainerIndexInArray(expectedValues)
+		if !found {
+			// arrayMutateChildContainerAfterGet op can't be performed because there isn't any child container in this array.
+			// Try another array operation.
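+			// Note: nextArrayOp only returns this op after checking
+			// hasChildContainerInArray(expectedValues), so this branch is
+			// defensive and not expected to be reached.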
+ return modifyArray(expectedValues, array, nestedLevels, forceRemove) + } + + child, err := array.Get(uint64(index)) + if err != nil { + return nil, 0, fmt.Errorf("failed to get element from array at index %d: %s", index, err) + } + + expectedValues[index], err = modifyContainer(expectedValues[index], child, nestedLevels-1) + if err != nil { + return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err) + } } return expectedValues, nextOp, nil } +func modifyContainer(expectedValue atree.Value, value atree.Value, nestedLevels int) (expected atree.Value, err error) { + + switch value := value.(type) { + case *atree.Array: + expectedArrayValue, ok := expectedValue.(arrayValue) + if !ok { + return nil, fmt.Errorf("failed to get expected value of type arrayValue: got %T", expectedValue) + } + + expectedValue, _, err = modifyArray(expectedArrayValue, value, nestedLevels, false) + if err != nil { + return nil, err + } + + case *atree.OrderedMap: + expectedMapValue, ok := expectedValue.(mapValue) + if !ok { + return nil, fmt.Errorf("failed to get expected value of type mapValue: got %T", expectedValue) + } + + expectedValue, _, err = modifyMap(expectedMapValue, value, nestedLevels, false) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("failed to get container: got %T", value) + } + + return expectedValue, nil +} + +func hasChildContainerInArray(expectedValues arrayValue) bool { + for _, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + return true + } + } + return false +} + +func getRandomChildContainerIndexInArray(expectedValues arrayValue) (index int, found bool) { + indexes := make([]int, 0, len(expectedValues)) + for i, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + indexes = append(indexes, i) + } + } + if len(indexes) == 0 { + return 0, false + } + return indexes[r.Intn(len(indexes))], true +} + func checkStorageHealth(storage *atree.PersistentSlabStorage, rootSlabID atree.SlabID) bool { rootIDs, err := atree.CheckStorageHealth(storage, -1) if err != nil { diff --git a/cmd/stress/main.go b/cmd/stress/main.go index b1d4f747..11f26f06 100644 --- a/cmd/stress/main.go +++ b/cmd/stress/main.go @@ -141,7 +141,13 @@ func main() { switch flagType { case "array": - fmt.Printf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + var msg string + if flagCheckSlabEnabled { + msg = fmt.Sprintf("Starting array stress test with slab check, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } else { + msg = fmt.Sprintf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } + fmt.Println(msg) status := newArrayStatus() @@ -150,7 +156,13 @@ func main() { testArray(storage, address, status) case "map": - fmt.Printf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + var msg string + if flagCheckSlabEnabled { + msg = fmt.Sprintf("Starting map stress test with slab check, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } else { + msg = fmt.Sprintf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } + fmt.Println(msg) status := newMapStatus() diff --git a/cmd/stress/map.go b/cmd/stress/map.go index 
c9478ed5..2c9d318d 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -36,6 +36,8 @@ const ( mapSetOp2 mapSetOp3 mapRemoveOp + mapMutateChildContainerAfterGet + mapMutateChildContainerAfterSet maxMapOp ) @@ -46,8 +48,10 @@ type mapStatus struct { count uint64 // number of elements in map - setOps uint64 - removeOps uint64 + setOps uint64 + removeOps uint64 + mutateChildContainerAfterGetOps uint64 + mutateChildContainerAfterSetOps uint64 } var _ Status = &mapStatus{} @@ -65,12 +69,14 @@ func (status *mapStatus) String() string { var m runtime.MemStats runtime.ReadMemStats(&m) - return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d sets, %d removes", + return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d sets, %d removes, %d Get mutations, %d Set mutations", duration.Truncate(time.Second).String(), m.Alloc/1024/1024, status.count, status.setOps, status.removeOps, + status.mutateChildContainerAfterGetOps, + status.mutateChildContainerAfterSetOps, ) } @@ -84,6 +90,12 @@ func (status *mapStatus) incOp(op mapOpType, count uint64) { case mapRemoveOp: status.removeOps++ + + case mapMutateChildContainerAfterGet: + status.mutateChildContainerAfterGetOps++ + + case mapMutateChildContainerAfterSet: + status.mutateChildContainerAfterSetOps++ } status.count = count @@ -109,9 +121,6 @@ func testMap( // expectedValues contains generated keys and values. It is used to check data loss. expectedValues := make(mapValue, flagMaxLength) - // keys contains generated keys. It is used to select random keys for removal. - keys := make([]atree.Value, 0, flagMaxLength) - reduceHeapAllocs := false opCount := uint64(0) @@ -178,8 +187,8 @@ func testMap( forceRemove = true } - var nextOp mapOpType - expectedValues, keys, nextOp, err = modifyMap(expectedValues, keys, m, maxNestedLevels, forceRemove) + var prevOp mapOpType + expectedValues, prevOp, err = modifyMap(expectedValues, m, maxNestedLevels, forceRemove) if err != nil { fmt.Fprint(os.Stderr, err.Error()) return @@ -188,7 +197,7 @@ func testMap( opCount++ // Update status - status.incOp(nextOp, m.Count()) + status.incOp(prevOp, m.Count()) // Check map elements against elements after every op err = checkMapDataLoss(expectedValues, m) @@ -216,140 +225,223 @@ func testMap( } } -func modifyMap( +func nextMapOp( expectedValues mapValue, - keys []atree.Value, m *atree.OrderedMap, - maxNestedLevels int, + nestedLevels int, forceRemove bool, -) (mapValue, []atree.Value, mapOpType, error) { +) (mapOpType, error) { - storage := m.Storage - address := m.Address() - - var nextOp mapOpType if forceRemove { if m.Count() == 0 { - return nil, nil, 0, fmt.Errorf("failed to force remove map elements because there is no element") + return 0, fmt.Errorf("failed to force remove map elements because map has no elements") } - nextOp = mapRemoveOp - } else { - if m.Count() == 0 { - nextOp = mapSetOp1 - } else { - nextOp = mapOpType(r.Intn(int(maxMapOp))) + return mapRemoveOp, nil + } + + if m.Count() == 0 { + return mapSetOp1, nil + } + + for { + nextOp := mapOpType(r.Intn(int(maxMapOp))) + + switch nextOp { + case mapMutateChildContainerAfterSet: + if nestedLevels-1 > 0 { + return nextOp, nil + } + + // New child container can't be created because next nestedLevels is 0. + // Try another map operation. + + case mapMutateChildContainerAfterGet: + if hasChildContainerInMap(expectedValues) { + return nextOp, nil + } + + // Map doesn't have child container, try another map operation. 
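+			// (This retry loop terminates with probability 1: the plain set and
+			// remove ops always return via the default case.)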
+ + default: + return nextOp, nil } } +} + +func modifyMap( + expectedValues mapValue, + m *atree.OrderedMap, + nestedLevels int, + forceRemove bool, +) (mapValue, mapOpType, error) { + + storage := m.Storage + address := m.Address() + + nextOp, err := nextMapOp(expectedValues, m, nestedLevels, forceRemove) + if err != nil { + return nil, 0, err + } switch nextOp { - case mapSetOp1, mapSetOp2, mapSetOp3: + case mapSetOp1, mapSetOp2, mapSetOp3, mapMutateChildContainerAfterSet: + + var nextNestedLevels int + + if nextOp == mapMutateChildContainerAfterSet { + nextNestedLevels = nestedLevels - 1 + } else { // mapSetOp1, mapSetOp2, mapSetOp3 + nextNestedLevels = r.Intn(nestedLevels) + } expectedKey, key, err := randomKey() if err != nil { - return nil, nil, 0, fmt.Errorf("failed to generate random key %s: %s", key, err) + return nil, 0, fmt.Errorf("failed to generate random key %s: %s", key, err) } - nestedLevels := r.Intn(maxNestedLevels) - expectedValue, value, err := randomValue(storage, address, nestedLevels) + expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to generate random value %s: %s", value, err) + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err) } - oldExpectedValue, keyExist := expectedValues[expectedKey] - - // Update keys - if !keyExist { - keys = append(keys, expectedKey) - } + oldExpectedValue := expectedValues[expectedKey] // Update expectedValues - expectedValues[expectedKey] = expectedValue + expectedValues[expectedKey] = expectedChildValue // Update map - existingStorable, err := m.Set(compare, hashInputProvider, key, value) + existingStorable, err := m.Set(compare, hashInputProvider, key, child) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to set %s at index %d: %s", value, key, err) + return nil, 0, fmt.Errorf("failed to set %s at index %d: %s", child, key, err) } // Compare old value from map with old value from elements if (oldExpectedValue == nil) != (existingStorable == nil) { - return nil, nil, 0, fmt.Errorf("Set returned storable %s != expected %s", existingStorable, oldExpectedValue) + return nil, 0, fmt.Errorf("Set returned storable %s != expected %s", existingStorable, oldExpectedValue) } if existingStorable != nil { existingValue, err := existingStorable.StoredValue(storage) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) } err = valueEqual(oldExpectedValue, existingValue) if err != nil { - return nil, nil, 0, fmt.Errorf("Set() returned wrong existing value %s, want %s", existingValue, oldExpectedValue) + return nil, 0, fmt.Errorf("Set() returned wrong existing value %s, want %s", existingValue, oldExpectedValue) } // Delete removed element from storage err = removeStorable(storage, existingStorable) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to remove map storable element %s: %s", existingStorable, err) + return nil, 0, fmt.Errorf("failed to remove map storable element %s: %s", existingStorable, err) + } + } + + if nextOp == mapMutateChildContainerAfterSet { + expectedValues[expectedKey], err = modifyContainer(expectedValues[expectedKey], child, nextNestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to modify child container at key %s: %w", expectedKey, err) } } case mapRemoveOp: - index := r.Intn(len(keys)) - key := keys[index] + // Use for-range on Go 
map to get random key + var key atree.Value + for k := range expectedValues { + key = k + break + } oldExpectedValue := expectedValues[key] // Update expectedValues delete(expectedValues, key) - // Update keys - copy(keys[index:], keys[index+1:]) - keys[len(keys)-1] = nil - keys = keys[:len(keys)-1] - // Update map existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, key) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to remove element with key %s: %s", key, err) + return nil, 0, fmt.Errorf("failed to remove element with key %s: %s", key, err) } // Compare removed key from map with removed key from elements existingKeyValue, err := existingKeyStorable.StoredValue(storage) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingKeyStorable, err) + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingKeyStorable, err) } err = valueEqual(key, existingKeyValue) if err != nil { - return nil, nil, 0, fmt.Errorf("Remove() returned wrong existing key %s, want %s", existingKeyStorable, key) + return nil, 0, fmt.Errorf("Remove() returned wrong existing key %s, want %s", existingKeyStorable, key) } // Compare removed value from map with removed value from elements existingValue, err := existingValueStorable.StoredValue(storage) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingValueStorable, err) + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingValueStorable, err) } err = valueEqual(oldExpectedValue, existingValue) if err != nil { - return nil, nil, 0, fmt.Errorf("Remove() returned wrong existing value %s, want %s", existingValueStorable, oldExpectedValue) + return nil, 0, fmt.Errorf("Remove() returned wrong existing value %s, want %s", existingValueStorable, oldExpectedValue) } // Delete removed element from storage err = removeStorable(storage, existingKeyStorable) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to remove key %s: %s", existingKeyStorable, err) + return nil, 0, fmt.Errorf("failed to remove key %s: %s", existingKeyStorable, err) } err = removeStorable(storage, existingValueStorable) if err != nil { - return nil, nil, 0, fmt.Errorf("failed to remove value %s: %s", existingValueStorable, err) + return nil, 0, fmt.Errorf("failed to remove value %s: %s", existingValueStorable, err) + } + + case mapMutateChildContainerAfterGet: + key, found := getRandomChildContainerKeyInMap(expectedValues) + if !found { + // mapMutateChildContainerAfterGet op can't be performed because there isn't any child container in this map. + // Try another map operation. 
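+			// Note: nextMapOp only returns this op after checking
+			// hasChildContainerInMap(expectedValues), so this branch is
+			// defensive and not expected to be reached.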
+ return modifyMap(expectedValues, m, nestedLevels, forceRemove) + } + + child, err := m.Get(compare, hashInputProvider, key) + if err != nil { + return nil, 0, fmt.Errorf("failed to get element from map at key %s: %s", key, err) + } + + expectedValues[key], err = modifyContainer(expectedValues[key], child, nestedLevels-1) + if err != nil { + return nil, 0, fmt.Errorf("failed to modify child container at key %s: %w", key, err) } } - return expectedValues, keys, nextOp, nil + return expectedValues, nextOp, nil +} + +func hasChildContainerInMap(expectedValues mapValue) bool { + for _, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + return true + } + } + return false +} + +func getRandomChildContainerKeyInMap(expectedValues mapValue) (key atree.Value, found bool) { + keys := make([]atree.Value, 0, len(expectedValues)) + for k, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + keys = append(keys, k) + } + } + if len(keys) == 0 { + return nil, false + } + return keys[r.Intn(len(keys))], true } func checkMapDataLoss(expectedValues mapValue, m *atree.OrderedMap) error { diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 4f5acfc5..90a276cd 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -98,7 +98,7 @@ func generateSimpleValue( return v, v, nil case largeStringType: - slen := r.Intn(125) + 1024 + slen := r.Intn(125) + 1024/2 v := NewStringValue(randStr(slen)) return v, v, nil From 11f0faad093e669494d4bcbac6cdcd3839bb26f5 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:57:16 -0500 Subject: [PATCH 065/126] Refactor smoke test --- cmd/stress/array.go | 72 ++++++++++++++++++++++++++------------------- cmd/stress/main.go | 2 ++ cmd/stress/map.go | 61 ++++++++++++++++++++------------------ cmd/stress/utils.go | 17 ++++++++--- 4 files changed, 89 insertions(+), 63 deletions(-) diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 3e217a0a..1a8e94d3 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -90,7 +90,7 @@ func (status *arrayStatus) String() string { ) } -func (status *arrayStatus) incOp(op arrayOpType, count uint64) { +func (status *arrayStatus) incOp(op arrayOpType, newTotalCount uint64) { status.lock.Lock() defer status.lock.Unlock() @@ -120,7 +120,7 @@ func (status *arrayStatus) incOp(op arrayOpType, count uint64) { status.mutateChildContainerAfterSetOps++ } - status.count = count + status.count = newTotalCount } func (status *arrayStatus) Write() { @@ -147,7 +147,7 @@ func testArray( reduceHeapAllocs := false - opCount := uint64(0) + opCountForStorageHealthCheck := uint64(0) var m runtime.MemStats @@ -216,7 +216,7 @@ func testArray( return } - opCount++ + opCountForStorageHealthCheck++ // Update status status.incOp(prevOp, array.Count()) @@ -228,8 +228,9 @@ func testArray( return } - if opCount >= 100 { - opCount = 0 + if opCountForStorageHealthCheck >= flagMinOpsForStorageHealthCheck { + opCountForStorageHealthCheck = 0 + if !checkStorageHealth(storage, array.SlabID()) { return } @@ -313,10 +314,13 @@ func modifyArray( var nextNestedLevels int - if nextOp == arrayAppendOp { + switch nextOp { + case arrayAppendOp: nextNestedLevels = r.Intn(nestedLevels) - } else { // arrayMutateChildContainerAfterAppend + case arrayMutateChildContainerAfterAppend: nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") } // Create new chid child @@ -347,10 +351,13 @@ func modifyArray( var nextNestedLevels int - if nextOp == 
arraySetOp { + switch nextOp { + case arraySetOp: nextNestedLevels = r.Intn(nestedLevels) - } else { // arrayMutateChildContainerAfterSet + case arrayMutateChildContainerAfterSet: nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") } // Create new child child @@ -400,10 +407,13 @@ func modifyArray( var nextNestedLevels int - if nextOp == arrayInsertOp { + switch nextOp { + case arrayInsertOp: nextNestedLevels = r.Intn(nestedLevels) - } else { // arrayMutateChildContainerAfterInsert + case arrayMutateChildContainerAfterInsert: nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") } // Create new child child @@ -592,25 +602,7 @@ func checkArrayDataLoss(expectedValues arrayValue, array *atree.Array) error { } if flagCheckSlabEnabled { - typeInfoComparator := func(a atree.TypeInfo, b atree.TypeInfo) bool { - return a.ID() == b.ID() - } - - err := atree.VerifyArray(array, array.Address(), array.Type(), typeInfoComparator, hashInputProvider, true) - if err != nil { - return err - } - - err = atree.VerifyArraySerialization( - array, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - func(a, b atree.Storable) bool { - return reflect.DeepEqual(a, b) - }, - ) + err := checkArraySlab(array) if err != nil { return err } @@ -618,3 +610,21 @@ func checkArrayDataLoss(expectedValues arrayValue, array *atree.Array) error { return nil } + +func checkArraySlab(array *atree.Array) error { + err := atree.VerifyArray(array, array.Address(), array.Type(), typeInfoComparator, hashInputProvider, true) + if err != nil { + return err + } + + return atree.VerifyArraySerialization( + array, + cborDecMode, + cborEncMode, + decodeStorable, + decodeTypeInfo, + func(a, b atree.Storable) bool { + return reflect.DeepEqual(a, b) + }, + ) +} diff --git a/cmd/stress/main.go b/cmd/stress/main.go index 11f26f06..e93364c6 100644 --- a/cmd/stress/main.go +++ b/cmd/stress/main.go @@ -92,12 +92,14 @@ var ( flagMaxLength uint64 flagSeedHex string flagMinHeapAllocMiB, flagMaxHeapAllocMiB uint64 + flagMinOpsForStorageHealthCheck uint64 ) func main() { flag.StringVar(&flagType, "type", "array", "array or map") flag.BoolVar(&flagCheckSlabEnabled, "slabcheck", false, "in memory and serialized slab check") + flag.Uint64Var(&flagMinOpsForStorageHealthCheck, "minOpsForStorageHealthCheck", 100, "number of operations for storage health check") flag.Uint64Var(&flagMaxLength, "maxlen", 10_000, "max number of elements") flag.StringVar(&flagSeedHex, "seed", "", "seed for prng in hex (default is Unix time)") flag.Uint64Var(&flagMinHeapAllocMiB, "minheap", 1000, "min HeapAlloc in MiB to stop extra removal of elements") diff --git a/cmd/stress/map.go b/cmd/stress/map.go index 2c9d318d..c560bb5a 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -80,7 +80,7 @@ func (status *mapStatus) String() string { ) } -func (status *mapStatus) incOp(op mapOpType, count uint64) { +func (status *mapStatus) incOp(op mapOpType, newTotalCount uint64) { status.lock.Lock() defer status.lock.Unlock() @@ -98,7 +98,7 @@ func (status *mapStatus) incOp(op mapOpType, count uint64) { status.mutateChildContainerAfterSetOps++ } - status.count = count + status.count = newTotalCount } func (status *mapStatus) Write() { @@ -123,7 +123,7 @@ func testMap( reduceHeapAllocs := false - opCount := uint64(0) + opCountForStorageHealthCheck := uint64(0) var ms runtime.MemStats @@ -194,7 +194,7 @@ func testMap( return } - opCount++ + opCountForStorageHealthCheck++ // Update status status.incOp(prevOp, m.Count()) @@ -206,8 +206,9 
@@ func testMap( return } - if opCount >= 100 { - opCount = 0 + if opCountForStorageHealthCheck >= flagMinOpsForStorageHealthCheck { + opCountForStorageHealthCheck = 0 + if !checkStorageHealth(storage, m.SlabID()) { return } @@ -288,10 +289,13 @@ func modifyMap( var nextNestedLevels int - if nextOp == mapMutateChildContainerAfterSet { - nextNestedLevels = nestedLevels - 1 - } else { // mapSetOp1, mapSetOp2, mapSetOp3 + switch nextOp { + case mapSetOp1, mapSetOp2, mapSetOp3: nextNestedLevels = r.Intn(nestedLevels) + case mapMutateChildContainerAfterSet: + nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") } expectedKey, key, err := randomKey() @@ -464,28 +468,29 @@ func checkMapDataLoss(expectedValues mapValue, m *atree.OrderedMap) error { } if flagCheckSlabEnabled { - typeInfoComparator := func(a atree.TypeInfo, b atree.TypeInfo) bool { - return a.ID() == b.ID() - } - - err := atree.VerifyMap(m, m.Address(), m.Type(), typeInfoComparator, hashInputProvider, true) - if err != nil { - return err - } - - err = atree.VerifyMapSerialization( - m, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - func(a, b atree.Storable) bool { - return reflect.DeepEqual(a, b) - }, - ) + err := checkMapSlab(m) if err != nil { return err } } + return nil } + +func checkMapSlab(m *atree.OrderedMap) error { + err := atree.VerifyMap(m, m.Address(), m.Type(), typeInfoComparator, hashInputProvider, true) + if err != nil { + return err + } + + return atree.VerifyMapSerialization( + m, + cborDecMode, + cborEncMode, + decodeStorable, + decodeTypeInfo, + func(a, b atree.Storable) bool { + return reflect.DeepEqual(a, b) + }, + ) +} diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 90a276cd..cd143bd4 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -20,6 +20,7 @@ package main import ( "fmt" + "math" "math/rand" "reflect" "time" @@ -77,19 +78,19 @@ func generateSimpleValue( ) (expected atree.Value, actual atree.Value, err error) { switch valueType { case uint8Type: - v := Uint8Value(r.Intn(255)) + v := Uint8Value(r.Intn(math.MaxUint8)) // 255 return v, v, nil case uint16Type: - v := Uint16Value(r.Intn(6535)) + v := Uint16Value(r.Intn(math.MaxUint16)) // 65535 return v, v, nil case uint32Type: - v := Uint32Value(r.Intn(4294967295)) + v := Uint32Value(r.Intn(math.MaxUint32)) // 4294967295 return v, v, nil case uint64Type: - v := Uint64Value(r.Intn(1844674407370955161)) + v := Uint64Value(r.Intn(math.MaxInt)) // 9_223_372_036_854_775_807 return v, v, nil case smallStringType: @@ -470,6 +471,8 @@ func (s *InMemBaseStorage) ResetReporter() { // not needed } +// arrayValue is an atree.Value that represents an array of atree.Value. +// It's used to test elements of atree.Array. type arrayValue []atree.Value var _ atree.Value = &arrayValue{} @@ -478,6 +481,8 @@ func (v arrayValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.St panic("not reachable") } +// mapValue is an atree.Value that represents a map of atree.Value. +// It's used to test elements of atree.OrderedMap. 
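+// Keys must have Go-comparable dynamic types because mapValue is an ordinary Go map.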
type mapValue map[atree.Value]atree.Value var _ atree.Value = &mapValue{} @@ -485,3 +490,7 @@ var _ atree.Value = &mapValue{} func (v mapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) { panic("not reachable") } + +var typeInfoComparator = func(a atree.TypeInfo, b atree.TypeInfo) bool { + return a.ID() == b.ID() +} From 3145ef47e2e25800c5c9f52774fe51d3e1d2e3ba Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:08:42 -0500 Subject: [PATCH 066/126] Remove unused and outdated demo program --- cmd/main/main.go | 196 ----------------------------------------------- 1 file changed, 196 deletions(-) delete mode 100644 cmd/main/main.go diff --git a/cmd/main/main.go b/cmd/main/main.go deleted file mode 100644 index f94db511..00000000 --- a/cmd/main/main.go +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Atree - Scalable Arrays and Ordered Maps - * - * Copyright 2021 Dapper Labs, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "flag" - "fmt" - - "github.com/onflow/atree" - - "github.com/fxamacker/cbor/v2" -) - -const cborTagUInt64Value = 164 - -type Uint64Value uint64 - -var _ atree.Value = Uint64Value(0) -var _ atree.Storable = Uint64Value(0) - -func (v Uint64Value) ChildStorables() []atree.Storable { - return nil -} - -func (v Uint64Value) StoredValue(_ atree.SlabStorage) (atree.Value, error) { - return v, nil -} - -func (v Uint64Value) Storable(_ atree.SlabStorage, _ atree.Address, _ uint64) (atree.Storable, error) { - return v, nil -} - -// Encode encodes UInt64Value as -// -// cbor.Tag{ -// Number: cborTagUInt64Value, -// Content: uint64(v), -// } -func (v Uint64Value) Encode(enc *atree.Encoder) error { - err := enc.CBOR.EncodeRawBytes([]byte{ - // tag number - 0xd8, cborTagUInt64Value, - }) - if err != nil { - return err - } - return enc.CBOR.EncodeUint64(uint64(v)) -} - -// TODO: cache size -func (v Uint64Value) ByteSize() uint32 { - // tag number (2 bytes) + encoded content - return 2 + atree.GetUintCBORSize(uint64(v)) -} - -func (v Uint64Value) String() string { - return fmt.Sprintf("%d", uint64(v)) -} - -type testTypeInfo struct { - value uint64 -} - -var _ atree.TypeInfo = testTypeInfo{} - -func (i testTypeInfo) Copy() atree.TypeInfo { - return i -} - -func (testTypeInfo) IsComposite() bool { - return false -} - -func (i testTypeInfo) ID() string { - return fmt.Sprintf("uint64(%d)", i.value) -} - -func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error { - return e.EncodeUint64(i.value) -} - -func (i testTypeInfo) Equal(other atree.TypeInfo) bool { - otherTestTypeInfo, ok := other.(testTypeInfo) - return ok && i.value == otherTestTypeInfo.value -} - -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { - tagNumber, err := dec.DecodeTagNumber() - if err != nil { - return nil, err - } - - switch tagNumber { - case atree.CBORTagSlabID: - return atree.DecodeSlabIDStorable(dec) - - case 
cborTagUInt64Value:
-		n, err := dec.DecodeUint64()
-		if err != nil {
-			return nil, err
-		}
-		return Uint64Value(n), nil
-
-	default:
-		return nil, fmt.Errorf("invalid tag number %d", tagNumber)
-	}
-}
-
-// TODO: implement different slab size for metadata slab and data slab.
-func main() {
-	var slabSize uint64
-	var numElements uint64
-	var verbose bool
-
-	flag.Uint64Var(&slabSize, "size", 1024, "slab size in bytes")
-	flag.Uint64Var(&numElements, "count", 500, "number of elements in array")
-	flag.BoolVar(&verbose, "verbose", false, "verbose output")
-
-	flag.Parse()
-
-	minThreshold, maxThreshold, _, _ := atree.SetThreshold(slabSize)
-
-	fmt.Printf(
-		"Inserting %d elements (uint64) into array with slab size %d, min size %d, and max size %d ...\n",
-		numElements,
-		slabSize,
-		minThreshold,
-		maxThreshold,
-	)
-
-	encMode, err := cbor.EncOptions{}.EncMode()
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	decMode, err := cbor.DecOptions{}.DecMode()
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	storage := atree.NewBasicSlabStorage(encMode, decMode, decodeStorable, decodeTypeInfo)
-
-	typeInfo := testTypeInfo{}
-
-	address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8}
-
-	array, err := atree.NewArray(storage, address, typeInfo)
-
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	for i := uint64(0); i < numElements; i++ {
-		err := array.Append(Uint64Value(i))
-		if err != nil {
-			fmt.Println(err)
-			return
-		}
-	}
-
-	stats, err := atree.GetArrayStats(array)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	fmt.Printf("%+v\n", stats)
-
-	if verbose {
-		fmt.Printf("\n\n=========== array layout ===========\n")
-		atree.PrintArray(array)
-	}
-}
-
-func decodeTypeInfo(_ *cbor.StreamDecoder) (atree.TypeInfo, error) {
-	return testTypeInfo{}, nil
-}

From 108dc31d37845dfbc163c0311d5fd6bb2e9e6e1a Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Tue, 10 Oct 2023 13:50:19 -0500
Subject: [PATCH 067/126] Add composite types to smoke test

---
 cmd/stress/typeinfo.go | 121 ++++++++++++++++++++++++++++++++++++++++-
 cmd/stress/utils.go    |  48 ++++++++++++++++
 map_debug.go           |  14 ++++-
 3 files changed, 179 insertions(+), 4 deletions(-)

diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go
index 6a1fafb1..5caa03b6 100644
--- a/cmd/stress/typeinfo.go
+++ b/cmd/stress/typeinfo.go
@@ -30,8 +30,9 @@ const (
 	maxArrayTypeValue = 10
 	maxMapTypeValue   = 10
 
-	arrayTypeTagNum = 246
-	mapTypeTagNum   = 245
+	arrayTypeTagNum     = 246
+	mapTypeTagNum       = 245
+	compositeTypeTagNum = 244
 )
 
 type arrayTypeInfo struct {
@@ -104,6 +105,83 @@ func (i mapTypeInfo) Equal(other atree.TypeInfo) bool {
 	return ok && i.value == otherMapTypeInfo.value
 }
 
+var compositeFieldNames = []string{"a", "b", "c"}
+
+type compositeTypeInfo struct {
+	fieldStartIndex int // inclusive start index of fieldNames
+	fieldEndIndex   int // exclusive end index of fieldNames
+}
+
+var _ atree.TypeInfo = compositeTypeInfo{}
+
+// newCompositeTypeInfo creates one of 10 possible compositeTypeInfo values at random.
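+// Field names are a contiguous slice of compositeFieldNames, selected by an
+// inclusive start index and an exclusive end index.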
+// 10 possible composites:
+// - ID: composite(0_0), field names: []
+// - ID: composite(0_1), field names: ["a"]
+// - ID: composite(0_2), field names: ["a", "b"]
+// - ID: composite(0_3), field names: ["a", "b", "c"]
+// - ID: composite(1_1), field names: []
+// - ID: composite(1_2), field names: ["b"]
+// - ID: composite(1_3), field names: ["b", "c"]
+// - ID: composite(2_2), field names: []
+// - ID: composite(2_3), field names: ["c"]
+// - ID: composite(3_3), field names: []
+func newCompositeTypeInfo() compositeTypeInfo {
+	// startIndex is [0, 3]
+	startIndex := r.Intn(len(compositeFieldNames) + 1)
+
+	// count is [0, 3]
+	count := r.Intn(len(compositeFieldNames) - startIndex + 1)
+
+	endIndex := startIndex + count
+	if endIndex > len(compositeFieldNames) {
+		panic("not reachable")
+	}
+
+	return compositeTypeInfo{fieldStartIndex: startIndex, fieldEndIndex: endIndex}
+}
+
+func (i compositeTypeInfo) getFieldNames() []string {
+	return compositeFieldNames[i.fieldStartIndex:i.fieldEndIndex]
+}
+
+func (i compositeTypeInfo) Copy() atree.TypeInfo {
+	return i
+}
+
+func (i compositeTypeInfo) IsComposite() bool {
+	return true
+}
+
+func (i compositeTypeInfo) ID() string {
+	return fmt.Sprintf("composite(%d_%d)", i.fieldStartIndex, i.fieldEndIndex)
+}
+
+func (i compositeTypeInfo) Encode(e *cbor.StreamEncoder) error {
+	err := e.EncodeTagHead(compositeTypeTagNum)
+	if err != nil {
+		return err
+	}
+	err = e.EncodeArrayHead(2)
+	if err != nil {
+		return err
+	}
+	err = e.EncodeInt64(int64(i.fieldStartIndex))
+	if err != nil {
+		return err
+	}
+	return e.EncodeInt64(int64(i.fieldEndIndex))
+}
+
+func (i compositeTypeInfo) Equal(other atree.TypeInfo) bool {
+	otherCompositeTypeInfo, ok := other.(compositeTypeInfo)
+	if !ok {
+		return false
+	}
+	return i.fieldStartIndex == otherCompositeTypeInfo.fieldStartIndex &&
+		i.fieldEndIndex == otherCompositeTypeInfo.fieldEndIndex
+}
+
 func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) {
 	num, err := dec.DecodeTagNumber()
 	if err != nil {
@@ -126,6 +204,45 @@ func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) {
 
 		return mapTypeInfo{value: int(value)}, nil
 
+	case compositeTypeTagNum:
+		count, err := dec.DecodeArrayHead()
+		if err != nil {
+			return nil, err
+		}
+		if count != 2 {
+			return nil, fmt.Errorf(
+				"failed to decode composite type info: expect 2 elements, got %d elements",
+				count,
+			)
+		}
+
+		startIndex, err := dec.DecodeInt64()
+		if err != nil {
+			return nil, err
+		}
+
+		endIndex, err := dec.DecodeInt64()
+		if err != nil {
+			return nil, err
+		}
+
+		if endIndex < startIndex {
+			return nil, fmt.Errorf(
+				"failed to decode composite type info: endIndex %d < startIndex %d",
+				endIndex,
+				startIndex,
+			)
+		}
+
+		if endIndex > int64(len(compositeFieldNames)) {
+			return nil, fmt.Errorf(
+				"failed to decode composite type info: endIndex %d > len(compositeFieldNames) %d",
+				endIndex,
+				len(compositeFieldNames))
+		}
+
+		return compositeTypeInfo{fieldStartIndex: int(startIndex), fieldEndIndex: int(endIndex)}, nil
+
 	default:
 		return nil, fmt.Errorf("failed to decode type info with tag number %d", num)
 	}
diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go
index cd143bd4..42283c2d 100644
--- a/cmd/stress/utils.go
+++ b/cmd/stress/utils.go
@@ -47,6 +47,7 @@ const (
 	arrayType int = iota
 	mapType
+	compositeType
 
 	maxContainerValueType
 )
@@ -123,6 +124,9 @@ func generateContainerValue(
 		length := r.Intn(maxNestedMapSize)
 		return newMap(storage, address, length, nestedLevels)
 
+	case compositeType:
+		return 
newComposite(storage, address, nestedLevels)
+
 	default:
 		return nil, nil, fmt.Errorf("unexpected random container value type %d", valueType)
 	}
 }
@@ -385,6 +389,50 @@ func newMap(
 	return expectedValues, m, nil
 }
 
+// newComposite creates an atree.OrderedMap with elements of a random composite type and nested level
+func newComposite(
+	storage atree.SlabStorage,
+	address atree.Address,
+	nestedLevel int,
+) (mapValue, *atree.OrderedMap, error) {
+
+	compositeType := newCompositeTypeInfo()
+
+	m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), compositeType)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create new map: %w", err)
+	}
+
+	expectedValues := make(mapValue)
+
+	for _, name := range compositeType.getFieldNames() {
+
+		expectedKey, key := NewStringValue(name), NewStringValue(name)
+
+		expectedValue, value, err := randomValue(storage, address, nestedLevel-1)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		expectedValues[expectedKey] = expectedValue
+
+		existingStorable, err := m.Set(compare, hashInputProvider, key, value)
+		if err != nil {
+			return nil, nil, err
+		}
+		if existingStorable != nil {
+			return nil, nil, fmt.Errorf("failed to create new map of composite type: found duplicate field name %s", name)
+		}
+	}
+
+	err = checkMapDataLoss(expectedValues, m)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return expectedValues, m, nil
+}
+
 type InMemBaseStorage struct {
 	segments     map[atree.SlabID][]byte
 	storageIndex map[atree.Address]atree.SlabIndex
diff --git a/map_debug.go b/map_debug.go
index 3b444350..81edfce5 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -1351,8 +1351,18 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error {
 		return NewFatalError(fmt.Errorf("has extra data is %t, want %t", actual == nil, expected == nil))
 	}
 
-	if !reflect.DeepEqual(*expected, *actual) {
-		return NewFatalError(fmt.Errorf("extra data %+v is wrong, want %+v", *actual, *expected))
+	if !reflect.DeepEqual(expected.TypeInfo, actual.TypeInfo) {
+		return NewFatalError(fmt.Errorf("map extra data type %+v is wrong, want %+v", actual.TypeInfo, expected.TypeInfo))
+	}
+
+	if expected.Count != actual.Count {
+		return NewFatalError(fmt.Errorf("map extra data count %d is wrong, want %d", actual.Count, expected.Count))
+	}
+
+	if !expected.TypeInfo.IsComposite() {
+		if expected.Seed != actual.Seed {
+			return NewFatalError(fmt.Errorf("map extra data seed %d is wrong, want %d", actual.Seed, expected.Seed))
+		}
 	}
 
 	return nil

From aaa47cc22914855c3ec82fe61ae1da77fc83f86f Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 18 Oct 2023 14:05:40 -0500
Subject: [PATCH 068/126] Rename TypeInfo.ID() to TypeInfo.Identifier()

---
 cmd/stress/typeinfo.go | 6 +++---
 cmd/stress/utils.go    | 2 +-
 typeinfo.go            | 8 ++++----
 utils_test.go          | 4 ++--
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go
index 5caa03b6..ddeee106 100644
--- a/cmd/stress/typeinfo.go
+++ b/cmd/stress/typeinfo.go
@@ -53,7 +53,7 @@ func (i arrayTypeInfo) IsComposite() bool {
 	return false
 }
 
-func (i arrayTypeInfo) ID() string {
+func (i arrayTypeInfo) Identifier() string {
 	return fmt.Sprintf("array(%d)", i)
 }
 
@@ -88,7 +88,7 @@ func (i mapTypeInfo) IsComposite() bool {
 	return false
 }
 
-func (i mapTypeInfo) ID() string {
+func (i mapTypeInfo) Identifier() string {
 	return fmt.Sprintf("map(%d)", i)
 }
 
@@ -153,7 +153,7 @@ func (i compositeTypeInfo) IsComposite() bool {
 	return true
 }
 
-func (i compositeTypeInfo) ID() 
string { +func (i compositeTypeInfo) Identifier() string { return fmt.Sprintf("composite(%d_%d)", i.fieldStartIndex, i.fieldEndIndex) } diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index 42283c2d..e5ffba91 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -540,5 +540,5 @@ func (v mapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Stor } var typeInfoComparator = func(a atree.TypeInfo, b atree.TypeInfo) bool { - return a.ID() == b.ID() + return a.Identifier() == b.Identifier() } diff --git a/typeinfo.go b/typeinfo.go index cabb1469..9d5911f9 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -29,7 +29,7 @@ import ( type TypeInfo interface { Encode(*cbor.StreamEncoder) error IsComposite() bool - ID() string + Identifier() string Copy() TypeInfo } @@ -312,7 +312,7 @@ func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { ied.arrayTypes = make(map[string]int) } - id := data.TypeInfo.ID() + id := data.TypeInfo.Identifier() index, exist := ied.arrayTypes[id] if exist { return index @@ -376,14 +376,14 @@ func makeCompactMapTypeID(t TypeInfo, names []ComparableStorable) string { const separator = "," if len(names) == 1 { - return t.ID() + separator + names[0].ID() + return t.Identifier() + separator + names[0].ID() } sorter := newFieldNameSorter(names) sort.Sort(sorter) - return t.ID() + separator + sorter.join(separator) + return t.Identifier() + separator + sorter.join(separator) } // fieldNameSorter sorts names by index (not in place sort). diff --git a/utils_test.go b/utils_test.go index 5762b3d3..0648885b 100644 --- a/utils_test.go +++ b/utils_test.go @@ -100,7 +100,7 @@ func (i testTypeInfo) IsComposite() bool { return false } -func (i testTypeInfo) ID() string { +func (i testTypeInfo) Identifier() string { return fmt.Sprintf("uint64(%d)", i) } @@ -129,7 +129,7 @@ func (i testCompositeTypeInfo) IsComposite() bool { return true } -func (i testCompositeTypeInfo) ID() string { +func (i testCompositeTypeInfo) Identifier() string { return fmt.Sprintf("composite(%d)", i) } From f56c2e7bc43ac2b0f3b8f7ed2c6f05c3d77d1c75 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:20:41 -0500 Subject: [PATCH 069/126] Change array encoding error type Some errors that should be returned as external error are being returned as fatal error. This caused a problem that was detected during Cadence integration. This commit resolves the problem by returning these fatal errors as external errors. --- array.go | 7 ++++--- encode.go | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/array.go b/array.go index d4e6fca5..eff16e72 100644 --- a/array.go +++ b/array.go @@ -754,7 +754,8 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedEx // element 2: array elements err = a.encodeElements(enc, inlinedTypeInfo) if err != nil { - return NewEncodingError(err) + // err is already categorized by ArrayDataSlab.encodeElements(). + return err } err = enc.CBOR.Flush() @@ -908,8 +909,8 @@ func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExt for _, e := range a.elements { err = encodeStorableAsElement(enc, e, inlinedTypeInfo) if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element") + // err is already categorized by encodeStorableAsElement(). 
+ return err } } diff --git a/encode.go b/encode.go index 5f46505c..e422778f 100644 --- a/encode.go +++ b/encode.go @@ -58,7 +58,7 @@ func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *i err := storable.Encode(enc) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. - return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable as element") } } From c7ae14743ede7f301e9324b75032f57475c60767 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:35:01 -0500 Subject: [PATCH 070/126] Change map encoding error type Some errors that should be returned as external error are being returned as fatal error. This caused a problem that was detected during Cadence integration. This commit resolves the problem by returning these fatal errors as external errors. --- map.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/map.go b/map.go index 13654279..bddd420e 100644 --- a/map.go +++ b/map.go @@ -2923,7 +2923,8 @@ func encodeAsInlinedCompactMap( // element 2: compact map values in the order of cachedKeys err = encodeCompactMapValues(enc, cachedKeys, keys, values, inlinedTypeInfo) if err != nil { - return NewEncodingError(err) + // err is already categorized by encodeCompactMapValues(). + return err } err = enc.CBOR.Flush() @@ -2968,7 +2969,7 @@ func encodeCompactMapValues( err = encodeStorableAsElement(enc, values[index], inlinedTypeInfo) if err != nil { - // Don't need to wrap error as external error because err is already categorized by encodeStorable(). + // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). return err } From 945826b673152c078131ad6e6db3ff705f24a233 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:50:40 -0500 Subject: [PATCH 071/126] Add InlinedExtraData interface Cadence integration requires InlinedExtraData interface. --- array.go | 4 ++-- encode.go | 2 +- map.go | 24 ++++++++++++------------ typeinfo.go | 14 ++++++++++++++ 4 files changed, 29 insertions(+), 15 deletions(-) diff --git a/array.go b/array.go index eff16e72..c0a75dd3 100644 --- a/array.go +++ b/array.go @@ -704,7 +704,7 @@ func DecodeInlinedArrayStorable( // +------------------+----------------+----------+ // | extra data index | value ID index | elements | // +------------------+----------------+----------+ -func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { +func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { if a.extraData == nil { return NewEncodingError( fmt.Errorf("failed to encode non-root array data slab as inlined")) @@ -886,7 +886,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { return nil } -func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { +func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { // Encode CBOR array size manually for fix-sized encoding enc.Scratch[0] = 0x80 | 25 diff --git a/encode.go b/encode.go index e422778f..f9f5e029 100644 --- a/encode.go +++ b/encode.go @@ -44,7 +44,7 @@ func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder { // encodeStorableAsElement encodes storable as Array or OrderedMap element. 
// Storable is encoded as an inlined ArrayDataSlab or MapDataSlab if it is ArrayDataSlab or MapDataSlab.
-func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *inlinedExtraData) error {
+func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo InlinedExtraData) error {
 
 	switch storable := storable.(type) {
 
diff --git a/map.go b/map.go
index bddd420e..61e98df9 100644
--- a/map.go
+++ b/map.go
@@ -148,7 +148,7 @@ type element interface {
 		key Value,
 	) (MapKey, MapValue, element, error)
 
-	Encode(*Encoder, *inlinedExtraData) error
+	Encode(*Encoder, InlinedExtraData) error
 
 	hasPointer() bool
 
@@ -215,7 +215,7 @@ type elements interface {
 
 	Element(int) (element, error)
 
-	Encode(*Encoder, *inlinedExtraData) error
+	Encode(*Encoder, InlinedExtraData) error
 
 	hasPointer() bool
 
@@ -586,7 +586,7 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab
 // Encode encodes singleElement to the given encoder.
 //
 // CBOR encoded array of 2 elements (key, value).
-func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
 
 	// Encode CBOR array head for 2 elements
 	err := enc.CBOR.EncodeRawBytes([]byte{0x82})
@@ -763,7 +763,7 @@ func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable
 // Encode encodes inlineCollisionGroup to the given encoder.
 //
 // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements)
-func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
 
 	err := enc.CBOR.EncodeRawBytes([]byte{
 		// tag number CBORTagInlineCollisionGroup
@@ -953,7 +953,7 @@ func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorab
 // Encode encodes externalCollisionGroup to the given encoder.
// // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) -func (e *externalCollisionGroup) Encode(enc *Encoder, _ *inlinedExtraData) error { +func (e *externalCollisionGroup) Encode(enc *Encoder, _ InlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagExternalCollisionGroup 0xd8, CBORTagExternalCollisionGroup, @@ -1259,7 +1259,7 @@ func newHkeyElementsWithElement(level uint, hkey Digest, elem element) *hkeyElem // 1: hkeys (byte string) // 2: elements (array) // ] -func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { +func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1921,7 +1921,7 @@ func newSingleElementsWithElement(level uint, elem *singleElement) *singleElemen // 1: hkeys (0 length byte string) // 2: elements (array) // ] -func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { +func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -2777,7 +2777,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return nil } -func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *inlinedExtraData) error { +func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes InlinedExtraData) error { err := m.elements.Encode(enc, inlinedTypes) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). @@ -2799,7 +2799,7 @@ func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *inlinedExtraDat // +------------------+----------------+----------+ // | extra data index | value ID index | elements | // +------------------+----------------+----------+ -func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { +func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { if m.extraData == nil { return NewEncodingError( fmt.Errorf("failed to encode non-root map data slab as inlined")) @@ -2817,7 +2817,7 @@ func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtr return m.encodeAsInlinedMap(enc, inlinedTypeInfo) } -func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { +func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData) @@ -2877,7 +2877,7 @@ func encodeAsInlinedCompactMap( hkeys []Digest, keys []ComparableStorable, values []Storable, - inlinedTypeInfo *inlinedExtraData, + inlinedTypeInfo InlinedExtraData, ) error { extraDataIndex, cachedKeys := inlinedTypeInfo.addCompactMapExtraData(extraData, hkeys, keys) @@ -2941,7 +2941,7 @@ func encodeCompactMapValues( cachedKeys []ComparableStorable, keys []ComparableStorable, values []Storable, - inlinedTypeInfo *inlinedExtraData, + inlinedTypeInfo InlinedExtraData, ) error { var err error diff --git a/typeinfo.go b/typeinfo.go index 9d5911f9..4082a3b5 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -199,12 +199,26 @@ type compactMapTypeInfo struct { keys []ComparableStorable } +type InlinedExtraData interface { + Encode(*Encoder) error + + addArrayExtraData(data *ArrayExtraData) int + 
addMapExtraData(data *MapExtraData) int
+
+	addCompactMapExtraData(
+		data *MapExtraData,
+		digests []Digest,
+		keys []ComparableStorable,
+	) (int, []ComparableStorable)
+}
+
 type inlinedExtraData struct {
 	extraData []ExtraData
 
 	compactMapTypes map[string]compactMapTypeInfo
 	arrayTypes      map[string]int
 }
 
+var _ InlinedExtraData = &inlinedExtraData{}
+
 func newInlinedExtraData() *inlinedExtraData {
 	return &inlinedExtraData{}
 }
 
From 2e86400276c723a7786474516c0732dffc3691e3 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 18 Oct 2023 15:13:55 -0500
Subject: [PATCH 072/126] Add ContainerStorable interface

The ContainerStorable interface supports encoding a container storable
(a storable containing other storables) as an element. This is needed
for integration with Cadence.
---
 array.go    |  5 +++--
 encode.go   | 11 ++++++-----
 map.go      |  6 +++---
 storable.go |  7 +++++++
 4 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/array.go b/array.go
index c0a75dd3..1fc8f0fc 100644
--- a/array.go
+++ b/array.go
@@ -132,6 +132,7 @@ func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) {
 }
 
 var _ ArraySlab = &ArrayDataSlab{}
+var _ ContainerStorable = &ArrayDataSlab{}
 
 // ArrayMetaDataSlab is internal node, implementing ArraySlab.
 type ArrayMetaDataSlab struct {
@@ -697,14 +698,14 @@ func DecodeInlinedArrayStorable(
 	}, nil
 }
 
-// encodeAsInlined encodes inlined array data slab. Encoding is
+// EncodeAsElement encodes inlined array data slab. Encoding is
 // version 1 with CBOR tag having tag number CBORTagInlinedArray,
 // and tag content as 3-element array:
 //
 // +------------------+----------------+----------+
 // | extra data index | value ID index | elements |
 // +------------------+----------------+----------+
-func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
+func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
 	if a.extraData == nil {
 		return NewEncodingError(
 			fmt.Errorf("failed to encode non-root array data slab as inlined"))
diff --git a/encode.go b/encode.go
index f9f5e029..0d01e758 100644
--- a/encode.go
+++ b/encode.go
@@ -48,11 +48,12 @@ func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo In
 
 	switch storable := storable.(type) {
 
-	case *ArrayDataSlab:
-		return storable.encodeAsInlined(enc, inlinedTypeInfo)
-
-	case *MapDataSlab:
-		return storable.encodeAsInlined(enc, inlinedTypeInfo)
+	case ContainerStorable:
+		err := storable.EncodeAsElement(enc, inlinedTypeInfo)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by ContainerStorable interface.
+			return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode container storable as element")
+		}
 
 	default:
 		err := storable.Encode(enc)
diff --git a/map.go b/map.go
index 61e98df9..b732815b 100644
--- a/map.go
+++ b/map.go
@@ -300,7 +300,7 @@ type MapDataSlab struct {
 }
 
 var _ MapSlab = &MapDataSlab{}
-var _ Storable = &MapDataSlab{}
+var _ ContainerStorable = &MapDataSlab{}
 
 // MapMetaDataSlab is internal node, implementing MapSlab.
 type MapMetaDataSlab struct {
@@ -2792,14 +2792,14 @@ func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes InlinedExtraData
 	return nil
 }
 
-// encodeAsInlined encodes inlined map data slab. Encoding is
+// EncodeAsElement encodes inlined map data slab. 
Encoding is
 // version 1 with CBOR tag having tag number CBORTagInlinedMap,
 // and tag content as 3-element array:
 //
 // +------------------+----------------+----------+
 // | extra data index | value ID index | elements |
 // +------------------+----------------+----------+
-func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
+func (m *MapDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
 	if m.extraData == nil {
 		return NewEncodingError(
 			fmt.Errorf("failed to encode non-root map data slab as inlined"))
diff --git a/storable.go b/storable.go
index 02888130..7e252748 100644
--- a/storable.go
+++ b/storable.go
@@ -54,6 +54,13 @@ type ComparableStorable interface {
 	Copy() Storable
 }
 
+// ContainerStorable is an interface that supports Storable containing other storables.
+type ContainerStorable interface {
+	Storable
+
+	EncodeAsElement(*Encoder, InlinedExtraData) error
+}
+
 type containerStorable interface {
 	Storable
 	hasPointer() bool
 
From 2d5e4a493acd2c8386cc756db2a101b771a36944 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 18 Oct 2023 15:27:54 -0500
Subject: [PATCH 073/126] Rename Encode to EncodeSlab
---
 array_debug.go  | 4 ++--
 map_debug.go    | 4 ++--
 storable.go     | 5 ++---
 storage.go      | 6 +++---
 storage_test.go | 4 ++--
 5 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/array_debug.go b/array_debug.go
index 18b88556..ce4188a2 100644
--- a/array_debug.go
+++ b/array_debug.go
@@ -550,7 +550,7 @@ func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error {
 	id := slab.SlabID()
 
 	// Encode slab
-	data, err := Encode(slab, v.cborEncMode)
+	data, err := EncodeSlab(slab, v.cborEncMode)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Encode().
 		return err
@@ -564,7 +564,7 @@ func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error {
 	}
 
 	// Re-encode decoded slab
-	dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode)
+	dataFromDecodedSlab, err := EncodeSlab(decodedSlab, v.cborEncMode)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Encode().
 		return err
diff --git a/map_debug.go b/map_debug.go
index 81edfce5..36fc950d 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -958,7 +958,7 @@ func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error {
 	id := slab.SlabID()
 
 	// Encode slab
-	data, err := Encode(slab, v.cborEncMode)
+	data, err := EncodeSlab(slab, v.cborEncMode)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Encode().
 		return err
@@ -972,7 +972,7 @@ func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error {
 	}
 
 	// Re-encode decoded slab
-	dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode)
+	dataFromDecodedSlab, err := EncodeSlab(decodedSlab, v.cborEncMode)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Encode().
return err
diff --git a/storable.go b/storable.go
index 7e252748..dc46cfc0 100644
--- a/storable.go
+++ b/storable.go
@@ -164,12 +164,11 @@ func (v SlabIDStorable) String() string {
 	return fmt.Sprintf("SlabIDStorable(%d)", v)
 }
 
-// Encode is a wrapper for Storable.Encode()
-func Encode(storable Storable, encMode cbor.EncMode) ([]byte, error) {
+func EncodeSlab(slab Slab, encMode cbor.EncMode) ([]byte, error) {
 	var buf bytes.Buffer
 	enc := NewEncoder(&buf, encMode)
 
-	err := storable.Encode(enc)
+	err := slab.Encode(enc)
 	if err != nil {
 		// Wrap err as external error (if needed) because err is returned by Storable interface.
 		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable")
diff --git a/storage.go b/storage.go
index 42ca7490..0b324381 100644
--- a/storage.go
+++ b/storage.go
@@ -390,7 +390,7 @@ func (s *BasicSlabStorage) SlabIDs() []SlabID {
 func (s *BasicSlabStorage) Encode() (map[SlabID][]byte, error) {
 	m := make(map[SlabID][]byte)
 	for id, slab := range s.Slabs {
-		b, err := Encode(slab, s.cborEncMode)
+		b, err := EncodeSlab(slab, s.cborEncMode)
 		if err != nil {
 			// err is already categorized by Encode().
 			return nil, err
@@ -800,7 +800,7 @@ func (s *PersistentSlabStorage) Commit() error {
 			}
 
 			// serialize
-			data, err := Encode(slab, s.cborEncMode)
+			data, err := EncodeSlab(slab, s.cborEncMode)
 			if err != nil {
 				// err is categorized already by Encode()
 				return err
@@ -879,7 +879,7 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error {
 				continue
 			}
 			// serialize
-			data, err := Encode(slab, s.cborEncMode)
+			data, err := EncodeSlab(slab, s.cborEncMode)
 			results <- &encodedSlabs{
 				slabID: id,
 				data:   data,
diff --git a/storage_test.go b/storage_test.go
index 2cd2a929..2b90bb11 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -749,7 +749,7 @@ func TestPersistentStorage(t *testing.T) {
 			require.NoError(t, err)
 
 			// capture data for accuracy testing
-			simpleMap[slabID], err = Encode(slab, encMode)
+			simpleMap[slabID], err = EncodeSlab(slab, encMode)
 			require.NoError(t, err)
 		}
 	}
@@ -1000,7 +1000,7 @@ func TestPersistentStorageSlabIterator(t *testing.T) {
 			break
 		}
 
-		encodedSlab, err := Encode(slab, storage.cborEncMode)
+		encodedSlab, err := EncodeSlab(slab, storage.cborEncMode)
 		require.NoError(t, err)
 
 		require.Equal(t, encodedSlab, data[id])
 
From 485b80014c1a8619c128fe889c817caacaceca66 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 18 Oct 2023 15:54:16 -0500
Subject: [PATCH 074/126] Encode inlined map as map key

This is needed for integration with Cadence because map keys can be
Cadence enums.
---
 map.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/map.go b/map.go
index b732815b..8e1b31c0 100644
--- a/map.go
+++ b/map.go
@@ -595,17 +595,17 @@ func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) e
 	}
 
 	// Encode key
-	err = e.key.Encode(enc)
+	err = encodeStorableAsElement(enc, e.key, inlinedTypeInfo)
 	if err != nil {
-		// Wrap err as external error (if needed) because err is returned by Storable interface.
-		return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map key")
+		// Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement().
+		return err
 	}
 
 	// Encode value
 	err = encodeStorableAsElement(enc, e.value, inlinedTypeInfo)
 	if err != nil {
-		// Wrap err as external error (if needed) because err is returned by Storable interface.
- return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") + // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). + return err } err = enc.CBOR.Flush() From 5eb10f14f2f68172ae5f9944bff1b1079add4fb9 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 18 Oct 2023 17:03:17 -0500 Subject: [PATCH 075/126] Add test verification for compact map serialization --- array_debug.go | 25 ++++++++---- map_debug.go | 108 +++++++++++++++++++++++++++++++------------------ 2 files changed, 86 insertions(+), 47 deletions(-) diff --git a/array_debug.go b/array_debug.go index ce4188a2..4436ee4d 100644 --- a/array_debug.go +++ b/array_debug.go @@ -272,12 +272,8 @@ func (v *arrayVerifier) verifySlab( // Verify that inlined slab is not in storage if slab.Inlined() { - _, exist, err := v.storage.Retrieve(id) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storage interface. - return 0, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) - } - if exist { + slab := v.storage.RetrieveIfLoaded(id) + if slab != nil { return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) } } @@ -345,8 +341,16 @@ func (v *arrayVerifier) verifyDataSlab( } // Verify that only root data slab can be inlined - if level > 0 && dataSlab.Inlined() { - return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + if dataSlab.Inlined() { + if level > 0 { + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + if dataSlab.extraData == nil { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s doesn't have extra data", id)) + } + if dataSlab.next != SlabIDUndefined { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s has next slab ID", id)) + } } // Verify that aggregated element size + slab prefix is the same as header.size @@ -524,6 +528,11 @@ func VerifyArraySerialization( decodeTypeInfo TypeInfoDecoder, compare StorableComparator, ) error { + // Skip verification of inlined array serialization. + if a.Inlined() { + return nil + } + v := &serializationVerifier{ storage: a.Storage, cborDecMode: cborDecMode, diff --git a/map_debug.go b/map_debug.go index 36fc950d..afb69fb4 100644 --- a/map_debug.go +++ b/map_debug.go @@ -386,12 +386,8 @@ func (v *mapVerifier) verifySlab( // Verify that inlined slab is not in storage if slab.Inlined() { - _, exist, err := v.storage.Retrieve(id) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storage interface. 
- return 0, nil, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) - } - if exist { + slab := v.storage.RetrieveIfLoaded(id) + if slab != nil { return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) } } @@ -470,8 +466,16 @@ func (v *mapVerifier) verifyDataSlab( } // Verify that only root slab can be inlined - if level > 0 && dataSlab.Inlined() { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + if dataSlab.Inlined() { + if level > 0 { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + if dataSlab.extraData == nil { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s doesn't have extra data", id)) + } + if dataSlab.next != SlabIDUndefined { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s has next slab ID", id)) + } } // Verify that aggregated element size + slab prefix is the same as header.size @@ -846,12 +850,6 @@ func (v *mapVerifier) verifySingleElement( return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } - switch e.key.(type) { - case *ArrayDataSlab, *MapDataSlab: - // Verify key can't be inlined array or map - return 0, 0, NewFatalError(fmt.Errorf("element %s key shouldn't be inlined array or map", e)) - } - err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by verifyValue(). @@ -942,6 +940,11 @@ func VerifyMapSerialization( decodeTypeInfo TypeInfoDecoder, compare StorableComparator, ) error { + // Skip verification of inlined map serialization. + if m.Inlined() { + return nil + } + v := &serializationVerifier{ storage: m.Storage, cborDecMode: cborDecMode, @@ -1065,8 +1068,10 @@ func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error { func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) error { + _, _, _, actualDecodedFromCompactMap := expected.canBeEncodedAsCompactMap() + // Compare extra data - err := mapExtraDataEqual(expected.extraData, actual.extraData) + err := mapExtraDataEqual(expected.extraData, actual.extraData, actualDecodedFromCompactMap) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapExtraDataEqual(). return err @@ -1093,12 +1098,21 @@ func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) } // Compare header - if !reflect.DeepEqual(expected.header, actual.header) { - return NewFatalError(fmt.Errorf("header %+v is wrong, want %+v", actual.header, expected.header)) + if actualDecodedFromCompactMap { + if expected.header.slabID != actual.header.slabID { + return NewFatalError(fmt.Errorf("header.slabID %s is wrong, want %s", actual.header.slabID, expected.header.slabID)) + } + if expected.header.size != actual.header.size { + return NewFatalError(fmt.Errorf("header.size %d is wrong, want %d", actual.header.size, expected.header.size)) + } + } else { + if !reflect.DeepEqual(expected.header, actual.header) { + return NewFatalError(fmt.Errorf("header %+v is wrong, want %+v", actual.header, expected.header)) + } } // Compare elements - err = v.mapElementsEqual(expected.elements, actual.elements) + err = v.mapElementsEqual(expected.elements, actual.elements, actualDecodedFromCompactMap) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapElementsEqual(). 
return err @@ -1107,7 +1121,7 @@ func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) return nil } -func (v *serializationVerifier) mapElementsEqual(expected, actual elements) error { +func (v *serializationVerifier) mapElementsEqual(expected, actual elements, actualDecodedFromCompactMap bool) error { switch expectedElems := expected.(type) { case *hkeyElements: @@ -1115,7 +1129,7 @@ func (v *serializationVerifier) mapElementsEqual(expected, actual elements) erro if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return v.mapHkeyElementsEqual(expectedElems, actualElems) + return v.mapHkeyElementsEqual(expectedElems, actualElems, actualDecodedFromCompactMap) case *singleElements: actualElems, ok := actual.(*singleElements) @@ -1129,7 +1143,7 @@ func (v *serializationVerifier) mapElementsEqual(expected, actual elements) erro return nil } -func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyElements) error { +func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyElements, actualDecodedFromCompactMap bool) error { if expected.level != actual.level { return NewFatalError(fmt.Errorf("hkeyElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1139,12 +1153,12 @@ func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyEleme return NewFatalError(fmt.Errorf("hkeyElements size %d is wrong, want %d", actual.size, expected.size)) } - if len(expected.hkeys) == 0 { - if len(actual.hkeys) != 0 { - return NewFatalError(fmt.Errorf("hkeyElements hkeys %v is wrong, want %v", actual.hkeys, expected.hkeys)) - } - } else { - if !reflect.DeepEqual(expected.hkeys, actual.hkeys) { + if len(expected.hkeys) != len(actual.hkeys) { + return NewFatalError(fmt.Errorf("hkeyElements hkeys len %d is wrong, want %d", len(actual.hkeys), len(expected.hkeys))) + } + + if !actualDecodedFromCompactMap { + if len(expected.hkeys) > 0 && !reflect.DeepEqual(expected.hkeys, actual.hkeys) { return NewFatalError(fmt.Errorf("hkeyElements hkeys %v is wrong, want %v", actual.hkeys, expected.hkeys)) } } @@ -1153,14 +1167,30 @@ func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyEleme return NewFatalError(fmt.Errorf("hkeyElements elems len %d is wrong, want %d", len(actual.elems), len(expected.elems))) } - for i := 0; i < len(expected.elems); i++ { - expectedEle := expected.elems[i] - actualEle := actual.elems[i] + if actualDecodedFromCompactMap { + for _, expectedEle := range expected.elems { + found := false + for _, actualEle := range actual.elems { + err := v.mapElementEqual(expectedEle, actualEle, actualDecodedFromCompactMap) + if err == nil { + found = true + break + } + } + if !found { + return NewFatalError(fmt.Errorf("hkeyElements elem %v is not found", expectedEle)) + } + } + } else { + for i := 0; i < len(expected.elems); i++ { + expectedEle := expected.elems[i] + actualEle := actual.elems[i] - err := v.mapElementEqual(expectedEle, actualEle) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by mapElementEqual(). - return err + err := v.mapElementEqual(expectedEle, actualEle, actualDecodedFromCompactMap) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by mapElementEqual(). 
+				return err
+			}
 		}
 	}
 
@@ -1195,7 +1225,7 @@ func (v *serializationVerifier) mapSingleElementsEqual(expected, actual *singleE
 	return nil
 }
 
-func (v *serializationVerifier) mapElementEqual(expected, actual element) error {
+func (v *serializationVerifier) mapElementEqual(expected, actual element, actualDecodedFromCompactMap bool) error {
 	switch expectedElem := expected.(type) {
 
 	case *singleElement:
@@ -1210,7 +1240,7 @@ func (v *serializationVerifier) mapElementEqual(expected, actual element) error
 		if !ok {
 			return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected))
 		}
-		return v.mapElementsEqual(expectedElem.elements, actualElem.elements)
+		return v.mapElementsEqual(expectedElem.elements, actualElem.elements, actualDecodedFromCompactMap)
 
 	case *externalCollisionGroup:
 		actualElem, ok := actual.(*externalCollisionGroup)
@@ -1322,7 +1352,7 @@ func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleEl
 
 func (v *serializationVerifier) mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error {
 
 	// Compare extra data
-	err := mapExtraDataEqual(expected.extraData, actual.extraData)
+	err := mapExtraDataEqual(expected.extraData, actual.extraData, false)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by mapExtraDataEqual().
 		return err
@@ -1341,7 +1371,7 @@ func (v *serializationVerifier) mapMetaDataSlabEqual(expected, actual *MapMetaDa
 	return nil
 }
 
-func mapExtraDataEqual(expected, actual *MapExtraData) error {
+func mapExtraDataEqual(expected, actual *MapExtraData, actualDecodedFromCompactMap bool) error {
 
 	if (expected == nil) && (actual == nil) {
 		return nil
@@ -1359,7 +1389,7 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error {
 		return NewFatalError(fmt.Errorf("map extra data count %d is wrong, want %d", actual.Count, expected.Count))
 	}
 
-	if !expected.TypeInfo.IsComposite() {
+	if !actualDecodedFromCompactMap {
 		if expected.Seed != actual.Seed {
 			return NewFatalError(fmt.Errorf("map extra data seed %d is wrong, want %d", actual.Seed, expected.Seed))
 		}
 
From 5be1f94f6fbec756bbcfa348560b90bdd93a8cf5 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 18 Oct 2023 17:12:35 -0500
Subject: [PATCH 076/126] Add more tests for decoded array/map
---
 array_test.go | 24 ++++++++++++++++++++++++
 map_test.go   | 25 +++++++++++++++++++++++++
 utils_test.go |  9 +++++++++
 3 files changed, 58 insertions(+)

diff --git a/array_test.go b/array_test.go
index 588f475e..1a30d718 100644
--- a/array_test.go
+++ b/array_test.go
@@ -142,6 +142,30 @@ func _testArray(
 	require.Equal(t, 1, len(rootIDs))
 	require.Equal(t, array.SlabID(), rootIDs[0])
 
+	// Encode all non-nil slabs
+	encodedSlabs := make(map[SlabID][]byte)
+	for id, slab := range storage.deltas {
+		if slab != nil {
+			b, err := EncodeSlab(slab, storage.cborEncMode)
+			require.NoError(t, err)
+			encodedSlabs[id] = b
+		}
+	}
+
+	// Test decoded array from new storage to force slab decoding
+	decodedArray, err := NewArrayWithRootID(
+		newTestPersistentStorageWithBaseStorageAndDeltas(t, storage.baseStorage, encodedSlabs),
+		array.SlabID())
+	require.NoError(t, err)
+
+	// Verify decoded array elements
+	for i, expected := range expectedValues {
+		actual, err := decodedArray.Get(uint64(i))
+		require.NoError(t, err)
+
+		valueEqual(t, expected, actual)
+	}
+
 	if !hasNestedArrayMapElement {
 		// Need to call Commit before calling storage.Count() for PersistentSlabStorage.
err = storage.Commit()
diff --git a/map_test.go b/map_test.go
index 30d25ccb..dccfd4bc 100644
--- a/map_test.go
+++ b/map_test.go
@@ -215,6 +215,31 @@ func _testMap(
 	require.Equal(t, 1, len(rootIDs))
 	require.Equal(t, m.SlabID(), rootIDs[0])
 
+	// Encode all non-nil slabs
+	encodedSlabs := make(map[SlabID][]byte)
+	for id, slab := range storage.deltas {
+		if slab != nil {
+			b, err := EncodeSlab(slab, storage.cborEncMode)
+			require.NoError(t, err)
+			encodedSlabs[id] = b
+		}
+	}
+
+	// Test decoded map from new storage to force slab decoding
+	decodedMap, err := NewMapWithRootID(
+		newTestPersistentStorageWithBaseStorageAndDeltas(t, storage.baseStorage, encodedSlabs),
+		m.SlabID(),
+		m.digesterBuilder)
+	require.NoError(t, err)
+
+	// Verify decoded map elements
+	for k, expected := range expectedKeyValues {
+		actual, err := decodedMap.Get(compare, hashInputProvider, k)
+		require.NoError(t, err)
+
+		valueEqual(t, expected, actual)
+	}
+
 	if !hasNestedArrayMapElement {
 		// Need to call Commit before calling storage.Count() for PersistentSlabStorage.
 		err = storage.Commit()
diff --git a/utils_test.go b/utils_test.go
index 0648885b..84aba2c6 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -200,6 +200,15 @@ func newTestPersistentStorageWithBaseStorage(t testing.TB, baseStorage BaseStora
 	)
 }
 
+func newTestPersistentStorageWithBaseStorageAndDeltas(t testing.TB, baseStorage BaseStorage, data map[SlabID][]byte) *PersistentSlabStorage {
+	storage := newTestPersistentStorageWithBaseStorage(t, baseStorage)
+	for id, b := range data {
+		err := storage.baseStorage.Store(id, b)
+		require.NoError(t, err)
+	}
+	return storage
+}
+
 func newTestBasicStorage(t testing.TB) *BasicSlabStorage {
 	encMode, err := cbor.EncOptions{}.EncMode()
 	require.NoError(t, err)
 
From 800a44a4c769ebbd5b2110ea5cc4dfedb681b5a7 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 18 Oct 2023 17:29:42 -0500
Subject: [PATCH 077/126] Export EncodeStorableAsElement()
---
 array.go  | 2 +-
 encode.go | 4 ++--
 map.go    | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/array.go b/array.go
index 1fc8f0fc..4211128c 100644
--- a/array.go
+++ b/array.go
@@ -908,7 +908,7 @@ func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo InlinedExtr
 
 	// Encode data slab content (array of elements)
 	for _, e := range a.elements {
-		err = encodeStorableAsElement(enc, e, inlinedTypeInfo)
+		err = EncodeStorableAsElement(enc, e, inlinedTypeInfo)
 		if err != nil {
 			// err is already categorized by encodeStorableAsElement().
 			return err
diff --git a/encode.go b/encode.go
index 0d01e758..1d876d50 100644
--- a/encode.go
+++ b/encode.go
@@ -42,9 +42,9 @@ func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder {
 	}
 }
 
-// encodeStorableAsElement encodes storable as Array or OrderedMap element.
+// EncodeStorableAsElement encodes storable as Array or OrderedMap element.
// Storable is encoded as an inlined ArrayDataSlab or MapDataSlab if it is ArrayDataSlab or MapDataSlab.
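// Illustrative usage (a sketch; encMode, storable, and inlinedTypeInfo are
// assumed to be in scope here and are not defined by this patch):
//
//	var buf bytes.Buffer
//	enc := NewEncoder(&buf, encMode)
//	// An *ArrayDataSlab or *MapDataSlab storable is encoded in its inlined form.
//	if err := EncodeStorableAsElement(enc, storable, inlinedTypeInfo); err != nil {
//		return err // err is already categorized by EncodeStorableAsElement().
//	}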
-func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo InlinedExtraData) error { +func EncodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo InlinedExtraData) error { switch storable := storable.(type) { diff --git a/map.go b/map.go index 8e1b31c0..f053da44 100644 --- a/map.go +++ b/map.go @@ -595,14 +595,14 @@ func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) e } // Encode key - err = encodeStorableAsElement(enc, e.key, inlinedTypeInfo) + err = EncodeStorableAsElement(enc, e.key, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). return err } // Encode value - err = encodeStorableAsElement(enc, e.value, inlinedTypeInfo) + err = EncodeStorableAsElement(enc, e.value, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). return err @@ -2967,7 +2967,7 @@ func encodeCompactMapValues( found = true keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i] - err = encodeStorableAsElement(enc, values[index], inlinedTypeInfo) + err = EncodeStorableAsElement(enc, values[index], inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). return err From eecb91a42d071e44f670d91b750261a4391bef12 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 18 Oct 2023 17:58:24 -0500 Subject: [PATCH 078/126] Add ContainerStorable.HasPointer() --- array.go | 4 ++-- map.go | 4 ++-- storable.go | 19 +++++++++---------- storable_test.go | 19 +++++++++++++++---- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/array.go b/array.go index 4211128c..0170c7d9 100644 --- a/array.go +++ b/array.go @@ -819,7 +819,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { return NewEncodingError(err) } - if a.hasPointer() { + if a.HasPointer() { h.setHasPointers() } @@ -1002,7 +1002,7 @@ func (a *ArrayDataSlab) Uninline(storage SlabStorage) error { return nil } -func (a *ArrayDataSlab) hasPointer() bool { +func (a *ArrayDataSlab) HasPointer() bool { for _, e := range a.elements { if hasPointer(e) { return true diff --git a/map.go b/map.go index f053da44..3fb56e88 100644 --- a/map.go +++ b/map.go @@ -2705,7 +2705,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return NewEncodingError(err) } - if m.hasPointer() { + if m.HasPointer() { h.setHasPointers() } @@ -3031,7 +3031,7 @@ func (m *MapDataSlab) canBeEncodedAsCompactMap() ([]Digest, []ComparableStorable return elements.hkeys, keys, values, true } -func (m *MapDataSlab) hasPointer() bool { +func (m *MapDataSlab) HasPointer() bool { return m.elements.hasPointer() } diff --git a/storable.go b/storable.go index dc46cfc0..c1c83620 100644 --- a/storable.go +++ b/storable.go @@ -59,16 +59,12 @@ type ContainerStorable interface { Storable EncodeAsElement(*Encoder, InlinedExtraData) error -} - -type containerStorable interface { - Storable - hasPointer() bool + HasPointer() bool } func hasPointer(storable Storable) bool { - if cs, ok := storable.(containerStorable); ok { - return cs.hasPointer() + if cs, ok := storable.(ContainerStorable); ok { + return cs.HasPointer() } return false } @@ -95,10 +91,9 @@ const ( type SlabIDStorable SlabID -var _ Storable = SlabIDStorable{} -var _ containerStorable = SlabIDStorable{} +var _ ContainerStorable = SlabIDStorable{} -func (v 
SlabIDStorable) hasPointer() bool { +func (v SlabIDStorable) HasPointer() bool { return true } @@ -155,6 +150,10 @@ func (v SlabIDStorable) Encode(enc *Encoder) error { return nil } +func (v SlabIDStorable) EncodeAsElement(enc *Encoder, _ InlinedExtraData) error { + return v.Encode(enc) +} + func (v SlabIDStorable) ByteSize() uint32 { // tag number (2 bytes) + byte string header (1 byte) + slab id (16 bytes) return 2 + 1 + slabIDSize diff --git a/storable_test.go b/storable_test.go index 12b732f2..539dd9e3 100644 --- a/storable_test.go +++ b/storable_test.go @@ -698,11 +698,11 @@ type SomeStorable struct { Storable Storable } -var _ Storable = SomeStorable{} +var _ ContainerStorable = SomeStorable{} -func (v SomeStorable) hasPointer() bool { - if ms, ok := v.Storable.(containerStorable); ok { - return ms.hasPointer() +func (v SomeStorable) HasPointer() bool { + if ms, ok := v.Storable.(ContainerStorable); ok { + return ms.HasPointer() } return false } @@ -723,6 +723,17 @@ func (v SomeStorable) Encode(enc *Encoder) error { return v.Storable.Encode(enc) } +func (v SomeStorable) EncodeAsElement(enc *Encoder, inlinedExtraData InlinedExtraData) error { + err := enc.CBOR.EncodeRawBytes([]byte{ + // tag number + 0xd8, cborTagSomeValue, + }) + if err != nil { + return err + } + return EncodeStorableAsElement(enc, v.Storable, inlinedExtraData) +} + func (v SomeStorable) ChildStorables() []Storable { return []Storable{v.Storable} } From eb9a50354126252aa2b4aa247a705a5e000328ff Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 18 Oct 2023 20:00:36 -0500 Subject: [PATCH 079/126] Fix lint warning --- map_debug.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/map_debug.go b/map_debug.go index afb69fb4..df1c2002 100644 --- a/map_debug.go +++ b/map_debug.go @@ -1105,10 +1105,8 @@ func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) if expected.header.size != actual.header.size { return NewFatalError(fmt.Errorf("header.size %d is wrong, want %d", actual.header.size, expected.header.size)) } - } else { - if !reflect.DeepEqual(expected.header, actual.header) { - return NewFatalError(fmt.Errorf("header %+v is wrong, want %+v", actual.header, expected.header)) - } + } else if !reflect.DeepEqual(expected.header, actual.header) { + return NewFatalError(fmt.Errorf("header %+v is wrong, want %+v", actual.header, expected.header)) } // Compare elements From c8f01db5b13b814e05b302d401cd80b448ad94e5 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 20 Oct 2023 09:59:03 -0500 Subject: [PATCH 080/126] Test inlined array/map not stored in storage --- array_debug.go | 8 ++++++-- map_debug.go | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/array_debug.go b/array_debug.go index 4436ee4d..eb0d2fe7 100644 --- a/array_debug.go +++ b/array_debug.go @@ -272,8 +272,12 @@ func (v *arrayVerifier) verifySlab( // Verify that inlined slab is not in storage if slab.Inlined() { - slab := v.storage.RetrieveIfLoaded(id) - if slab != nil { + _, exist, err := v.storage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storage interface. 
+			return 0, nil, nil, wrapErrorAsExternalErrorIfNeeded(err)
+		}
+		if exist {
 			return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id))
 		}
 	}
diff --git a/map_debug.go b/map_debug.go
index df1c2002..dbfd6834 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -386,8 +386,12 @@ func (v *mapVerifier) verifySlab(
 
 	// Verify that inlined slab is not in storage
 	if slab.Inlined() {
-		slab := v.storage.RetrieveIfLoaded(id)
-		if slab != nil {
+		_, exist, err := v.storage.Retrieve(id)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by Storage interface.
+			return 0, nil, nil, nil, wrapErrorAsExternalErrorIfNeeded(err)
+		}
+		if exist {
 			return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id))
 		}
 	}
 
From 25c4d5e0072db937470b2ae98e1d873f46ab064c Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 20 Oct 2023 10:12:10 -0500
Subject: [PATCH 081/126] Add more comments
---
 storable.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/storable.go b/storable.go
index c1c83620..b152e40c 100644
--- a/storable.go
+++ b/storable.go
@@ -58,7 +58,13 @@ type ComparableStorable interface {
 type ContainerStorable interface {
 	Storable
 
+	// EncodeAsElement encodes ContainerStorable and its child storables as an element
+	// of parent array/map. Since child storable can be inlined array or map,
+	// encoding inlined array or map requires extra parameter InlinedExtraData.
 	EncodeAsElement(*Encoder, InlinedExtraData) error
+
+	// HasPointer returns true if any of its child storables is SlabIDStorable
+	// (references to another slab). This function is used during encoding.
 	HasPointer() bool
 }
 
From 3688e8969973c35b03f5bb0a2607028fb12e791a Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 20 Oct 2023 10:32:45 -0500
Subject: [PATCH 082/126] Export InlinedExtraData struct to replace interface
---
 array.go         |  4 ++--
 encode.go        |  2 +-
 map.go           | 24 ++++++++++++------------
 storable.go      |  4 ++--
 storable_test.go |  2 +-
 typeinfo.go      | 30 ++++++++----------------------
 6 files changed, 26 insertions(+), 40 deletions(-)

diff --git a/array.go b/array.go
index 0170c7d9..62b2aedd 100644
--- a/array.go
+++ b/array.go
@@ -705,7 +705,7 @@ func DecodeInlinedArrayStorable(
 // +------------------+----------------+----------+
 // | extra data index | value ID index | elements |
 // +------------------+----------------+----------+
-func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
+func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error {
 	if a.extraData == nil {
 		return NewEncodingError(
 			fmt.Errorf("failed to encode non-root array data slab as inlined"))
@@ -887,7 +887,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 	return nil
 }
 
-func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo InlinedExtraData) error {
+func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error {
diff --git a/encode.go b/encode.go
index 1d876d50..7d8eb3c4 100644
--- a/encode.go
+++ b/encode.go
@@ -44,7 +44,7 @@ func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder {
 
 // EncodeStorableAsElement encodes storable as Array or OrderedMap element.
// Storable is encoded as an inlined ArrayDataSlab or MapDataSlab if it is ArrayDataSlab or MapDataSlab.
-func EncodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo InlinedExtraData) error { +func EncodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *InlinedExtraData) error { switch storable := storable.(type) { diff --git a/map.go b/map.go index 3fb56e88..a4b52e81 100644 --- a/map.go +++ b/map.go @@ -148,7 +148,7 @@ type element interface { key Value, ) (MapKey, MapValue, element, error) - Encode(*Encoder, InlinedExtraData) error + Encode(*Encoder, *InlinedExtraData) error hasPointer() bool @@ -215,7 +215,7 @@ type elements interface { Element(int) (element, error) - Encode(*Encoder, InlinedExtraData) error + Encode(*Encoder, *InlinedExtraData) error hasPointer() bool @@ -586,7 +586,7 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab // Encode encodes singleElement to the given encoder. // // CBOR encoded array of 2 elements (key, value). -func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { +func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { // Encode CBOR array head for 2 elements err := enc.CBOR.EncodeRawBytes([]byte{0x82}) @@ -763,7 +763,7 @@ func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable // Encode encodes inlineCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements) -func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { +func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagInlineCollisionGroup @@ -953,7 +953,7 @@ func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorab // Encode encodes externalCollisionGroup to the given encoder. 
// // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) -func (e *externalCollisionGroup) Encode(enc *Encoder, _ InlinedExtraData) error { +func (e *externalCollisionGroup) Encode(enc *Encoder, _ *InlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagExternalCollisionGroup 0xd8, CBORTagExternalCollisionGroup, @@ -1259,7 +1259,7 @@ func newHkeyElementsWithElement(level uint, hkey Digest, elem element) *hkeyElem // 1: hkeys (byte string) // 2: elements (array) // ] -func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { +func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1921,7 +1921,7 @@ func newSingleElementsWithElement(level uint, elem *singleElement) *singleElemen // 1: hkeys (0 length byte string) // 2: elements (array) // ] -func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { +func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -2777,7 +2777,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return nil } -func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes InlinedExtraData) error { +func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *InlinedExtraData) error { err := m.elements.Encode(enc, inlinedTypes) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). @@ -2799,7 +2799,7 @@ func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes InlinedExtraData // +------------------+----------------+----------+ // | extra data index | value ID index | elements | // +------------------+----------------+----------+ -func (m *MapDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { +func (m *MapDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { if m.extraData == nil { return NewEncodingError( fmt.Errorf("failed to encode non-root map data slab as inlined")) @@ -2817,7 +2817,7 @@ func (m *MapDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo InlinedExtra return m.encodeAsInlinedMap(enc, inlinedTypeInfo) } -func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo InlinedExtraData) error { +func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData) @@ -2877,7 +2877,7 @@ func encodeAsInlinedCompactMap( hkeys []Digest, keys []ComparableStorable, values []Storable, - inlinedTypeInfo InlinedExtraData, + inlinedTypeInfo *InlinedExtraData, ) error { extraDataIndex, cachedKeys := inlinedTypeInfo.addCompactMapExtraData(extraData, hkeys, keys) @@ -2941,7 +2941,7 @@ func encodeCompactMapValues( cachedKeys []ComparableStorable, keys []ComparableStorable, values []Storable, - inlinedTypeInfo InlinedExtraData, + inlinedTypeInfo *InlinedExtraData, ) error { var err error diff --git a/storable.go b/storable.go index b152e40c..34f83648 100644 --- a/storable.go +++ b/storable.go @@ -61,7 +61,7 @@ type ContainerStorable interface { // EncodeAsElement encodes ContainerStorable and its child storables as an element // of parent array/map. 
Since child storable can be inlined array or map, // encoding inlined array or map requires extra parameter InlinedExtraData. - EncodeAsElement(*Encoder, InlinedExtraData) error + EncodeAsElement(*Encoder, *InlinedExtraData) error // HasPointer returns true if any of its child storables is SlabIDStorable // (references to another slab). This function is used during encoding. @@ -156,7 +156,7 @@ func (v SlabIDStorable) Encode(enc *Encoder) error { return nil } -func (v SlabIDStorable) EncodeAsElement(enc *Encoder, _ InlinedExtraData) error { +func (v SlabIDStorable) EncodeAsElement(enc *Encoder, _ *InlinedExtraData) error { return v.Encode(enc) } diff --git a/storable_test.go b/storable_test.go index 539dd9e3..1e747120 100644 --- a/storable_test.go +++ b/storable_test.go @@ -723,7 +723,7 @@ func (v SomeStorable) Encode(enc *Encoder) error { return v.Storable.Encode(enc) } -func (v SomeStorable) EncodeAsElement(enc *Encoder, inlinedExtraData InlinedExtraData) error { +func (v SomeStorable) EncodeAsElement(enc *Encoder, inlinedExtraData *InlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number 0xd8, cborTagSomeValue, diff --git a/typeinfo.go b/typeinfo.go index 4082a3b5..b472ee6d 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -199,32 +199,18 @@ type compactMapTypeInfo struct { keys []ComparableStorable } -type InlinedExtraData interface { - Encode(*Encoder) error - - addArrayExtraData(data *ArrayExtraData) int - addMapExtraData(data *MapExtraData) int - addCompactMapExtraData( - data *MapExtraData, - digests []Digest, - keys []ComparableStorable, - ) (int, []ComparableStorable) -} - -type inlinedExtraData struct { +type InlinedExtraData struct { extraData []ExtraData compactMapTypes map[string]compactMapTypeInfo arrayTypes map[string]int } -var _ InlinedExtraData = &inlinedExtraData{} - -func newInlinedExtraData() *inlinedExtraData { - return &inlinedExtraData{} +func newInlinedExtraData() *InlinedExtraData { + return &InlinedExtraData{} } // Encode encodes inlined extra data as CBOR array. -func (ied *inlinedExtraData) Encode(enc *Encoder) error { +func (ied *InlinedExtraData) Encode(enc *Encoder) error { err := enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData))) if err != nil { return NewEncodingError(err) @@ -321,7 +307,7 @@ func newInlinedExtraDataFromData( // addArrayExtraData returns index of deduplicated array extra data. // Array extra data is deduplicated by array type info ID because array // extra data only contains type info. -func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { +func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { if ied.arrayTypes == nil { ied.arrayTypes = make(map[string]int) } @@ -340,7 +326,7 @@ func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { // addMapExtraData returns index of map extra data. // Map extra data is not deduplicated because it also contains count and seed. -func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int { +func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) int { index := len(ied.extraData) ied.extraData = append(ied.extraData, data) return index @@ -348,7 +334,7 @@ func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int { // addCompactMapExtraData returns index of deduplicated compact map extra data. // Compact map extra data is deduplicated by TypeInfo.ID() with sorted field names. 
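//
// For example (an illustrative case; the type and field names are hypothetical):
// a compact map whose type Identifier() is "composite(1_2)" and whose fields are
// "b" and "a" deduplicates under roughly the key "composite(1_2),a,b", i.e. the
// type identifier followed by the comma-joined, sorted field names
// (see makeCompactMapTypeID).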
-func (ied *inlinedExtraData) addCompactMapExtraData(
+func (ied *InlinedExtraData) addCompactMapExtraData(
 	data *MapExtraData,
 	digests []Digest,
 	keys []ComparableStorable,
@@ -381,7 +367,7 @@ func (ied *inlinedExtraData) addCompactMapExtraData(
 	return index, keys
 }
 
-func (ied *inlinedExtraData) empty() bool {
+func (ied *InlinedExtraData) empty() bool {
 	return len(ied.extraData) == 0
 }
 
From 87e6b6b3299678b710c3b26de57d7e00406026c4 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 20 Oct 2023 10:47:22 -0500
Subject: [PATCH 083/126] Optimize compact map field names concatenation
---
 typeinfo.go | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/typeinfo.go b/typeinfo.go
index b472ee6d..86c9fe67 100644
--- a/typeinfo.go
+++ b/typeinfo.go
@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"sort"
+	"strings"
 
 	"github.com/fxamacker/cbor/v2"
 )
@@ -418,9 +419,12 @@ func (fn *fieldNameSorter) Swap(i, j int) {
 }
 
 func (fn *fieldNameSorter) join(sep string) string {
-	var s string
-	for _, i := range fn.index {
-		s += sep + fn.names[i].ID()
+	var sb strings.Builder
+	for i, index := range fn.index {
+		if i > 0 {
+			sb.WriteString(sep)
+		}
+		sb.WriteString(fn.names[index].ID())
 	}
-	return s
+	return sb.String()
 }
 
From d718306fb5fd57bd8caa94c797984f44748d21c6 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Sun, 22 Oct 2023 17:51:53 -0500
Subject: [PATCH 084/126] Remove ContainerStorable.EncodeAsElement

Currently, EncodeAsElement is used to encode inlined array and map,
while Encode is used to encode standalone array and map.

This commit simplifies the encoding API by using Encode() to encode
both inlined and standalone arrays/maps.
---
 array.go         | 29 +++++++++----------
 encode.go        | 38 ++++++------------------
 map.go           | 75 ++++++++++++++++++++++--------------------------
 storable.go      |  9 ------
 storable_test.go | 11 -------
 5 files changed, 56 insertions(+), 106 deletions(-)

diff --git a/array.go b/array.go
index 62b2aedd..b9c5a3d4 100644
--- a/array.go
+++ b/array.go
@@ -698,14 +698,14 @@ func DecodeInlinedArrayStorable(
 	}, nil
 }
 
-// EncodeAsElement encodes inlined array data slab. Encoding is
+// encodeAsInlined encodes inlined array data slab. Encoding is
 // version 1 with CBOR tag having tag number CBORTagInlinedArray,
 // and tag content as 3-element array:
 //
 // +------------------+----------------+----------+
 // | extra data index | value ID index | elements |
 // +------------------+----------------+----------+
-func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error {
+func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder) error {
 	if a.extraData == nil {
 		return NewEncodingError(
 			fmt.Errorf("failed to encode non-root array data slab as inlined"))
@@ -716,7 +716,7 @@ func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedEx
 			fmt.Errorf("failed to encode standalone array data slab as inlined"))
 	}
 
-	extraDataIndex := inlinedTypeInfo.addArrayExtraData(a.extraData)
+	extraDataIndex := enc.inlinedExtraData.addArrayExtraData(a.extraData)
 
 	if extraDataIndex > maxInlinedExtraDataIndex {
 		return NewEncodingError(
@@ -753,7 +753,7 @@ func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedEx
 	}
 
 	// element 2: array elements
-	err = a.encodeElements(enc, inlinedTypeInfo)
+	err = a.encodeElements(enc)
 	if err != nil {
 		// err is already categorized by ArrayDataSlab.encodeElements().
return err
 	}
 
 	err = enc.CBOR.Flush()
@@ -784,8 +784,7 @@ func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedEx
 
 func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 
 	if a.inlined {
-		return NewEncodingError(
-			fmt.Errorf("failed to encode inlined array data slab as standalone slab"))
+		return a.encodeAsInlined(enc)
 	}
 
 	// Encoding is done in two steps:
 	//
 	// 1. Encode array elements using a new buffer while collecting inlined extra data from inlined elements.
 	// 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer.
 
-	inlinedTypes := newInlinedExtraData()
-
 	// Get a buffer from a pool to encode elements.
 	elementBuf := getBuffer()
 	defer putBuffer(elementBuf)
 
 	elementEnc := NewEncoder(elementBuf, enc.encMode)
 
-	err := a.encodeElements(elementEnc, inlinedTypes)
+	err := a.encodeElements(elementEnc)
 	if err != nil {
 		// err is already categorized by Array.encodeElements().
 		return err
@@ -831,7 +828,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 		h.setRoot()
 	}
 
-	if !inlinedTypes.empty() {
+	if !elementEnc.inlinedExtraData.empty() {
 		h.setHasInlinedSlabs()
 	}
 
@@ -851,8 +848,8 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 	}
 
 	// Encode inlined extra data
-	if !inlinedTypes.empty() {
-		err = inlinedTypes.Encode(enc)
+	if !elementEnc.inlinedExtraData.empty() {
+		err = elementEnc.inlinedExtraData.Encode(enc)
 		if err != nil {
 			// err is already categorized by inlinedExtraData.Encode().
 			return err
@@ -887,7 +884,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 	return nil
 }
 
-func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error {
+func (a *ArrayDataSlab) encodeElements(enc *Encoder) error {
 
 	// Encode CBOR array size manually for fixed-size encoding
 	enc.Scratch[0] = 0x80 | 25
@@ -908,10 +905,10 @@ func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *InlinedExt
 
 	// Encode data slab content (array of elements)
 	for _, e := range a.elements {
-		err = EncodeStorableAsElement(enc, e, inlinedTypeInfo)
+		err = e.Encode(enc)
 		if err != nil {
-			// err is already categorized by encodeStorableAsElement().
-			return err
+			// Wrap err as external error (if needed) because err is returned by Storable interface.
+			return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element")
 		}
 	}
 
diff --git a/encode.go b/encode.go
index 7d8eb3c4..213e7316 100644
--- a/encode.go
+++ b/encode.go
@@ -28,44 +28,22 @@ import (
 
 // Encoder writes atree slabs to io.Writer.
 type Encoder struct {
 	io.Writer
-	CBOR    *cbor.StreamEncoder
-	Scratch [64]byte
-	encMode cbor.EncMode
+	CBOR             *cbor.StreamEncoder
+	Scratch          [64]byte
+	encMode          cbor.EncMode
+	inlinedExtraData *InlinedExtraData
 }
 
 func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder {
 	streamEncoder := encMode.NewStreamEncoder(w)
 	return &Encoder{
-		Writer:  w,
-		CBOR:    streamEncoder,
-		encMode: encMode,
+		Writer:           w,
+		CBOR:             streamEncoder,
+		encMode:          encMode,
+		inlinedExtraData: newInlinedExtraData(),
 	}
 }
 
-// EncodeStorableAsElement encodes storable as Array or OrderedMap element.
-// Storable is encoded as an inlined ArrayDataSlab or MapDataSlab if it is ArrayDataSlab or MapDataSlab.
-func EncodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *InlinedExtraData) error { - - switch storable := storable.(type) { - - case ContainerStorable: - err := storable.EncodeAsElement(enc, inlinedTypeInfo) - if err != nil { - // Wrap err as external error (if needed) because err is returned by ContainerStorable interface. - return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode container storable as element") - } - - default: - err := storable.Encode(enc) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable as element") - } - } - - return nil -} - type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, diff --git a/map.go b/map.go index a4b52e81..3d23752b 100644 --- a/map.go +++ b/map.go @@ -148,7 +148,7 @@ type element interface { key Value, ) (MapKey, MapValue, element, error) - Encode(*Encoder, *InlinedExtraData) error + Encode(*Encoder) error hasPointer() bool @@ -215,7 +215,7 @@ type elements interface { Element(int) (element, error) - Encode(*Encoder, *InlinedExtraData) error + Encode(*Encoder) error hasPointer() bool @@ -586,7 +586,7 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab // Encode encodes singleElement to the given encoder. // // CBOR encoded array of 2 elements (key, value). -func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { +func (e *singleElement) Encode(enc *Encoder) error { // Encode CBOR array head for 2 elements err := enc.CBOR.EncodeRawBytes([]byte{0x82}) @@ -595,17 +595,17 @@ func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) } // Encode key - err = EncodeStorableAsElement(enc, e.key, inlinedTypeInfo) + err = e.key.Encode(enc) if err != nil { - // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). - return err + // Wrap err as external error (if needed) because err is returned by Storable interface. + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map key storable") } // Encode value - err = EncodeStorableAsElement(enc, e.value, inlinedTypeInfo) + err = e.value.Encode(enc) if err != nil { - // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). - return err + // Wrap err as external error (if needed) because err is returned by Storable interface. + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value storable") } err = enc.CBOR.Flush() @@ -763,7 +763,7 @@ func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable // Encode encodes inlineCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements) -func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { +func (e *inlineCollisionGroup) Encode(enc *Encoder) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagInlineCollisionGroup @@ -773,7 +773,7 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtr return NewEncodingError(err) } - err = e.elements.Encode(enc, inlinedTypeInfo) + err = e.elements.Encode(enc) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). 
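singleElement.Encode above writes the head byte 0x82 and then the key and value back to back, i.e. each map entry travels as a plain 2-element CBOR array. The same wire shape produced with cbor.Marshal, with a string key and an unsigned value standing in for atree storables:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	entry := []any{"balance", uint64(100)} // [key, value]
	b, err := cbor.Marshal(entry)
	if err != nil {
		panic(err)
	}
	// 82 67 62 61 6c 61 6e 63 65 18 64
	// The leading 0x82 is the 2-element array head that Encode emits raw.
	fmt.Printf("% x\n", b)
}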
return err @@ -953,7 +953,7 @@ func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorab // Encode encodes externalCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) -func (e *externalCollisionGroup) Encode(enc *Encoder, _ *InlinedExtraData) error { +func (e *externalCollisionGroup) Encode(enc *Encoder) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagExternalCollisionGroup 0xd8, CBORTagExternalCollisionGroup, @@ -1259,7 +1259,7 @@ func newHkeyElementsWithElement(level uint, hkey Digest, elem element) *hkeyElem // 1: hkeys (byte string) // 2: elements (array) // ] -func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { +func (e *hkeyElements) Encode(enc *Encoder) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1313,7 +1313,7 @@ func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) e // Encode each element for _, e := range e.elems { - err = e.Encode(enc, inlinedTypeInfo) + err = e.Encode(enc) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Encode(). return err @@ -1921,7 +1921,7 @@ func newSingleElementsWithElement(level uint, elem *singleElement) *singleElemen // 1: hkeys (0 length byte string) // 2: elements (array) // ] -func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { +func (e *singleElements) Encode(enc *Encoder) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1952,7 +1952,7 @@ func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *InlinedExtraData) // Encode each element for _, e := range e.elems { - err = e.Encode(enc, inlinedTypeInfo) + err = e.Encode(enc) if err != nil { // Don't need to wrap error as external error because err is already categorized by singleElement.Encode(). return err @@ -2671,8 +2671,7 @@ func DecodeInlinedMapStorable( func (m *MapDataSlab) Encode(enc *Encoder) error { if m.inlined { - return NewEncodingError( - fmt.Errorf("failed to encode inlined map data slab as standalone slab")) + return m.encodeAsInlined(enc) } // Encoding is done in two steps: @@ -2680,15 +2679,13 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { // 1. Encode map elements using a new buffer while collecting inlined extra data from inlined elements. // 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer. - inlinedTypes := newInlinedExtraData() - // Get a buffer from a pool to encode elements. 
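The collision-group encoders above can emit their tag heads with EncodeRawBytes because of a CBOR detail: for tag numbers in [24, 255], the head is exactly the two bytes 0xd8 followed by the tag number. A small check of that encoding using fxamacker/cbor directly; tag number 98 is an arbitrary stand-in, not the library's actual CBORTagInlineCollisionGroup or CBORTagExternalCollisionGroup value:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// tag(98) wrapping the 2-element array [1, 2]
	raw := cbor.RawTag{Number: 98, Content: cbor.RawMessage{0x82, 0x01, 0x02}}
	b, err := cbor.Marshal(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // d8 62 82 01 02: 0xd8 head byte, then tag number 0x62 (98)
}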
elementBuf := getBuffer() defer putBuffer(elementBuf) elemEnc := NewEncoder(elementBuf, enc.encMode) - err := m.encodeElements(elemEnc, inlinedTypes) + err := m.encodeElements(elemEnc) if err != nil { return err } @@ -2721,7 +2718,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { h.setRoot() } - if !inlinedTypes.empty() { + if !elemEnc.inlinedExtraData.empty() { h.setHasInlinedSlabs() } @@ -2741,8 +2738,8 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode inlined types - if !inlinedTypes.empty() { - err = inlinedTypes.Encode(enc) + if !elemEnc.inlinedExtraData.empty() { + err = elemEnc.inlinedExtraData.Encode(enc) if err != nil { return NewEncodingError(err) } @@ -2777,8 +2774,8 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return nil } -func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *InlinedExtraData) error { - err := m.elements.Encode(enc, inlinedTypes) +func (m *MapDataSlab) encodeElements(enc *Encoder) error { + err := m.elements.Encode(enc) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). return err @@ -2792,14 +2789,14 @@ func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *InlinedExtraDat return nil } -// EncodeAsElement encodes inlined map data slab. Encoding is +// encodeAsInlined encodes inlined map data slab. Encoding is // version 1 with CBOR tag having tag number CBORTagInlinedMap, // and tag contant as 3-element array: // // +------------------+----------------+----------+ // | extra data index | value ID index | elements | // +------------------+----------------+----------+ -func (m *MapDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { +func (m *MapDataSlab) encodeAsInlined(enc *Encoder) error { if m.extraData == nil { return NewEncodingError( fmt.Errorf("failed to encode non-root map data slab as inlined")) @@ -2811,15 +2808,15 @@ func (m *MapDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedExtr } if hkeys, keys, values, ok := m.canBeEncodedAsCompactMap(); ok { - return encodeAsInlinedCompactMap(enc, m.header.slabID, m.extraData, hkeys, keys, values, inlinedTypeInfo) + return encodeAsInlinedCompactMap(enc, m.header.slabID, m.extraData, hkeys, keys, values) } - return m.encodeAsInlinedMap(enc, inlinedTypeInfo) + return m.encodeAsInlinedMap(enc) } -func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error { +func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder) error { - extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData) + extraDataIndex := enc.inlinedExtraData.addMapExtraData(m.extraData) if extraDataIndex > maxInlinedExtraDataIndex { return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) @@ -2855,7 +2852,7 @@ func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *InlinedE } // element 2: map elements - err = m.elements.Encode(enc, inlinedTypeInfo) + err = m.elements.Encode(enc) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). 
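encodeAsInlinedMap above leans on addMapExtraData handing back a small index rather than re-encoding the type info for every inlined map, which is what makes repeated inlined types cheap. A minimal sketch of that deduplication; keying on a string type ID is an assumption for illustration, not the library's actual lookup key:

package main

import "fmt"

type inlinedExtraData struct {
	extraData []string
	index     map[string]int
}

// add returns the existing index for a previously seen type, or appends
// the type and assigns it the next index.
func (ied *inlinedExtraData) add(typeID string) int {
	if i, ok := ied.index[typeID]; ok {
		return i
	}
	i := len(ied.extraData)
	ied.extraData = append(ied.extraData, typeID)
	if ied.index == nil {
		ied.index = make(map[string]int)
	}
	ied.index[typeID] = i
	return i
}

func main() {
	var ied inlinedExtraData
	fmt.Println(ied.add("Token"), ied.add("NFT"), ied.add("Token")) // 0 1 0
}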
return err @@ -2877,10 +2874,9 @@ func encodeAsInlinedCompactMap( hkeys []Digest, keys []ComparableStorable, values []Storable, - inlinedTypeInfo *InlinedExtraData, ) error { - extraDataIndex, cachedKeys := inlinedTypeInfo.addCompactMapExtraData(extraData, hkeys, keys) + extraDataIndex, cachedKeys := enc.inlinedExtraData.addCompactMapExtraData(extraData, hkeys, keys) if len(keys) != len(cachedKeys) { return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached compact map type %d", len(keys), len(cachedKeys))) @@ -2921,7 +2917,7 @@ func encodeAsInlinedCompactMap( } // element 2: compact map values in the order of cachedKeys - err = encodeCompactMapValues(enc, cachedKeys, keys, values, inlinedTypeInfo) + err = encodeCompactMapValues(enc, cachedKeys, keys, values) if err != nil { // err is already categorized by encodeCompactMapValues(). return err @@ -2941,7 +2937,6 @@ func encodeCompactMapValues( cachedKeys []ComparableStorable, keys []ComparableStorable, values []Storable, - inlinedTypeInfo *InlinedExtraData, ) error { var err error @@ -2967,10 +2962,10 @@ func encodeCompactMapValues( found = true keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i] - err = EncodeStorableAsElement(enc, values[index], inlinedTypeInfo) + err = values[index].Encode(enc) if err != nil { - // Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement(). - return err + // Wrap err as external error (if needed) because err is returned by Storable interface. + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value storable") } break diff --git a/storable.go b/storable.go index 34f83648..a59529c8 100644 --- a/storable.go +++ b/storable.go @@ -58,11 +58,6 @@ type ComparableStorable interface { type ContainerStorable interface { Storable - // EncodeAsElement encodes ContainerStorable and its child storables as an element - // of parent array/map. Since child storable can be inlined array or map, - // encoding inlined array or map requires extra parameter InlinedExtraData. - EncodeAsElement(*Encoder, *InlinedExtraData) error - // HasPointer returns true if any of its child storables is SlabIDStorable // (references to another slab). This function is used during encoding. 
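encodeCompactMapValues above emits values in the canonical key order recorded by the cached compact-map type, locating each cached key among this particular map's keys; the patch's version additionally swaps found indexes forward to shrink each inner scan. The idea reduced to a plain double loop over strings and ints, which is presumably acceptable for the small field sets compact maps cover:

package main

import "fmt"

// valuesInCanonicalOrder returns values reordered to follow cachedKeys.
func valuesInCanonicalOrder(cachedKeys, keys []string, values []int) []int {
	out := make([]int, 0, len(values))
	for _, ck := range cachedKeys {
		for i, k := range keys {
			if k == ck {
				out = append(out, values[i])
				break
			}
		}
	}
	return out
}

func main() {
	fmt.Println(valuesInCanonicalOrder(
		[]string{"a", "b", "c"},
		[]string{"b", "c", "a"},
		[]int{2, 3, 1},
	)) // [1 2 3]
}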
HasPointer() bool @@ -156,10 +151,6 @@ func (v SlabIDStorable) Encode(enc *Encoder) error { return nil } -func (v SlabIDStorable) EncodeAsElement(enc *Encoder, _ *InlinedExtraData) error { - return v.Encode(enc) -} - func (v SlabIDStorable) ByteSize() uint32 { // tag number (2 bytes) + byte string header (1 byte) + slab id (16 bytes) return 2 + 1 + slabIDSize diff --git a/storable_test.go b/storable_test.go index 1e747120..4f705ab6 100644 --- a/storable_test.go +++ b/storable_test.go @@ -723,17 +723,6 @@ func (v SomeStorable) Encode(enc *Encoder) error { return v.Storable.Encode(enc) } -func (v SomeStorable) EncodeAsElement(enc *Encoder, inlinedExtraData *InlinedExtraData) error { - err := enc.CBOR.EncodeRawBytes([]byte{ - // tag number - 0xd8, cborTagSomeValue, - }) - if err != nil { - return err - } - return EncodeStorableAsElement(enc, v.Storable, inlinedExtraData) -} - func (v SomeStorable) ChildStorables() []Storable { return []Storable{v.Storable} } From d88bd127864796d7197b710c227cc5fbbf7ba1e4 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 24 Oct 2023 15:01:05 -0500 Subject: [PATCH 085/126] Create Encoder.inlinedExtraData lazily --- array.go | 8 ++++---- encode.go | 22 ++++++++++++++-------- map.go | 10 +++++----- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/array.go b/array.go index b9c5a3d4..715d639d 100644 --- a/array.go +++ b/array.go @@ -716,7 +716,7 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder) error { fmt.Errorf("failed to encode standalone array data slab as inlined")) } - extraDataIndex := enc.inlinedExtraData.addArrayExtraData(a.extraData) + extraDataIndex := enc.inlinedExtraData().addArrayExtraData(a.extraData) if extraDataIndex > maxInlinedExtraDataIndex { return NewEncodingError( @@ -828,7 +828,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { h.setRoot() } - if !elementEnc.inlinedExtraData.empty() { + if !elementEnc.inlinedExtraData().empty() { h.setHasInlinedSlabs() } @@ -848,8 +848,8 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } // Encode inlined extra data - if !elementEnc.inlinedExtraData.empty() { - err = elementEnc.inlinedExtraData.Encode(enc) + if !elementEnc.inlinedExtraData().empty() { + err = elementEnc.inlinedExtraData().Encode(enc) if err != nil { // err is already categorized by inlinedExtraData.Encode(). return err diff --git a/encode.go b/encode.go index 213e7316..00b7e08f 100644 --- a/encode.go +++ b/encode.go @@ -28,22 +28,28 @@ import ( // Encoder writes atree slabs to io.Writer. 
type Encoder struct { io.Writer - CBOR *cbor.StreamEncoder - Scratch [64]byte - encMode cbor.EncMode - inlinedExtraData *InlinedExtraData + CBOR *cbor.StreamEncoder + Scratch [64]byte + encMode cbor.EncMode + _inlinedExtraData *InlinedExtraData } func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder { streamEncoder := encMode.NewStreamEncoder(w) return &Encoder{ - Writer: w, - CBOR: streamEncoder, - encMode: encMode, - inlinedExtraData: newInlinedExtraData(), + Writer: w, + CBOR: streamEncoder, + encMode: encMode, } } +func (enc *Encoder) inlinedExtraData() *InlinedExtraData { + if enc._inlinedExtraData == nil { + enc._inlinedExtraData = newInlinedExtraData() + } + return enc._inlinedExtraData +} + type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, diff --git a/map.go b/map.go index 3d23752b..fff1c671 100644 --- a/map.go +++ b/map.go @@ -2718,7 +2718,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { h.setRoot() } - if !elemEnc.inlinedExtraData.empty() { + if !elemEnc.inlinedExtraData().empty() { h.setHasInlinedSlabs() } @@ -2738,8 +2738,8 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode inlined types - if !elemEnc.inlinedExtraData.empty() { - err = elemEnc.inlinedExtraData.Encode(enc) + if !elemEnc.inlinedExtraData().empty() { + err = elemEnc.inlinedExtraData().Encode(enc) if err != nil { return NewEncodingError(err) } @@ -2816,7 +2816,7 @@ func (m *MapDataSlab) encodeAsInlined(enc *Encoder) error { func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder) error { - extraDataIndex := enc.inlinedExtraData.addMapExtraData(m.extraData) + extraDataIndex := enc.inlinedExtraData().addMapExtraData(m.extraData) if extraDataIndex > maxInlinedExtraDataIndex { return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) @@ -2876,7 +2876,7 @@ func encodeAsInlinedCompactMap( values []Storable, ) error { - extraDataIndex, cachedKeys := enc.inlinedExtraData.addCompactMapExtraData(extraData, hkeys, keys) + extraDataIndex, cachedKeys := enc.inlinedExtraData().addCompactMapExtraData(extraData, hkeys, keys) if len(keys) != len(cachedKeys) { return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached compact map type %d", len(keys), len(cachedKeys))) From acdb685d6dcd585f6e697cae1fb3f30623b63e54 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 24 Oct 2023 15:19:11 -0500 Subject: [PATCH 086/126] Fail StorableSlab.Encode() with inlined array/map This commit makes StorableSlab.Encode() return error if it contains inlined array or inlined map. --- array.go | 4 ++-- encode.go | 7 +++++++ map.go | 4 ++-- storable_slab.go | 4 ++++ 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/array.go b/array.go index 715d639d..cf3217c8 100644 --- a/array.go +++ b/array.go @@ -828,7 +828,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { h.setRoot() } - if !elementEnc.inlinedExtraData().empty() { + if elementEnc.hasInlinedExtraData() { h.setHasInlinedSlabs() } @@ -848,7 +848,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } // Encode inlined extra data - if !elementEnc.inlinedExtraData().empty() { + if elementEnc.hasInlinedExtraData() { err = elementEnc.inlinedExtraData().Encode(enc) if err != nil { // err is already categorized by inlinedExtraData.Encode(). 
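Patch 085 above makes the Encoder allocate its inlined-extra-data table only on first use, presumably so encoders that never meet an inlined container skip the allocation entirely, and patch 086 pairs it with hasInlinedExtraData, which must answer "no" without allocating. The pattern in isolation, with a plain map standing in for InlinedExtraData:

package main

import "fmt"

type encoder struct {
	_inlined map[string]int
}

// inlined lazily allocates the table on first use.
func (e *encoder) inlined() map[string]int {
	if e._inlined == nil {
		e._inlined = make(map[string]int)
	}
	return e._inlined
}

// hasInlined distinguishes "never used" from "used" without allocating.
func (e *encoder) hasInlined() bool {
	return e._inlined != nil && len(e._inlined) > 0
}

func main() {
	var e encoder
	fmt.Println(e.hasInlined()) // false, and no allocation happened
	e.inlined()["Token"] = 0
	fmt.Println(e.hasInlined()) // true
}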
diff --git a/encode.go b/encode.go index 00b7e08f..d82d56aa 100644 --- a/encode.go +++ b/encode.go @@ -50,6 +50,13 @@ func (enc *Encoder) inlinedExtraData() *InlinedExtraData { return enc._inlinedExtraData } +func (enc *Encoder) hasInlinedExtraData() bool { + if enc._inlinedExtraData == nil { + return false + } + return !enc._inlinedExtraData.empty() +} + type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, diff --git a/map.go b/map.go index fff1c671..63db77ab 100644 --- a/map.go +++ b/map.go @@ -2718,7 +2718,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { h.setRoot() } - if !elemEnc.inlinedExtraData().empty() { + if elemEnc.hasInlinedExtraData() { h.setHasInlinedSlabs() } @@ -2738,7 +2738,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode inlined types - if !elemEnc.inlinedExtraData().empty() { + if elemEnc.hasInlinedExtraData() { err = elemEnc.inlinedExtraData().Encode(enc) if err != nil { return NewEncodingError(err) diff --git a/storable_slab.go b/storable_slab.go index 9cc6d7bd..07db18f4 100644 --- a/storable_slab.go +++ b/storable_slab.go @@ -93,6 +93,10 @@ func (s *StorableSlab) Encode(enc *Encoder) error { return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable") } + if enc.hasInlinedExtraData() { + return NewEncodingError(fmt.Errorf("failed to encode storable slab because storable contains inlined array/map")) + } + return nil } From 9171a724589d892a0f9257ceae7d719d62c615ed Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 30 Nov 2023 11:10:41 -0600 Subject: [PATCH 087/126] Support mutable iterator for array and map Mutable iterator for array and map supports: - indirect element mutation, such as modifying nested container - direct element mutation, such as overwriting existing element with new element Mutable iterator for array and map doesn't support: - inserting new elements into the array/map - removing existing elements from the array/map NOTE: use readonly iterator if mutation is not needed for better performance. This commit: - adds new interfaces ArrayIterator and MapIterator - decouples implementation of mutable and readonly iterators - refactors related functions --- array.go | 229 ++++++++++++++-------- map.go | 583 ++++++++++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 647 insertions(+), 165 deletions(-) diff --git a/array.go b/array.go index cf3217c8..d8f39487 100644 --- a/array.go +++ b/array.go @@ -3346,103 +3346,184 @@ func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storab } } -var emptyArrayIterator = &ArrayIterator{} +type ArrayIterator interface { + CanMutate() bool + Next() (Value, error) +} + +type emptyArrayIterator struct { + readOnly bool +} + +var _ ArrayIterator = &emptyArrayIterator{} + +var emptyMutableArrayIterator = &emptyArrayIterator{readOnly: false} +var emptyReadOnlyArrayIterator = &emptyArrayIterator{readOnly: true} + +func (i *emptyArrayIterator) CanMutate() bool { + return !i.readOnly +} + +func (*emptyArrayIterator) Next() (Value, error) { + return nil, nil +} -type ArrayIterator struct { +type mutableArrayIterator struct { + array *Array + nextIndex uint64 + lastIndex uint64 // noninclusive index +} + +var _ ArrayIterator = &mutableArrayIterator{} + +func (i *mutableArrayIterator) CanMutate() bool { + return true +} + +func (i *mutableArrayIterator) Next() (Value, error) { + if i.nextIndex == i.lastIndex { + // No more elements. 
+ return nil, nil + } + + // Don't need to set up notification callback for v because + // Get() returns value with notification already. + v, err := i.array.Get(i.nextIndex) + if err != nil { + return nil, err + } + + i.nextIndex++ + + return v, nil +} + +type readOnlyArrayIterator struct { array *Array - id SlabID dataSlab *ArrayDataSlab - indexInArray int - indexInDataSlab int - remainingCount int - readOnly bool + indexInDataSlab uint64 + remainingCount uint64 // needed for range iteration } -func (i *ArrayIterator) CanMutate() bool { - return !i.readOnly +var _ ArrayIterator = &readOnlyArrayIterator{} + +func (i *readOnlyArrayIterator) CanMutate() bool { + return false } -func (i *ArrayIterator) Next() (Value, error) { +func (i *readOnlyArrayIterator) Next() (Value, error) { if i.remainingCount == 0 { return nil, nil } - if i.dataSlab == nil { - if i.id == SlabIDUndefined { + if i.indexInDataSlab >= uint64(len(i.dataSlab.elements)) { + // No more elements in current data slab. + + nextDataSlabID := i.dataSlab.next + + if nextDataSlabID == SlabIDUndefined { + // No more elements in array. return nil, nil } - slab, found, err := i.array.Storage.Retrieve(i.id) + // Load next data slab. + slab, found, err := i.array.Storage.Retrieve(nextDataSlabID) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", nextDataSlabID)) } if !found { - return nil, NewSlabNotFoundErrorf(i.id, "slab not found during array iteration") + return nil, NewSlabNotFoundErrorf(nextDataSlabID, "slab not found during array iteration") } i.dataSlab = slab.(*ArrayDataSlab) i.indexInDataSlab = 0 - } - var element Value - var err error - if i.indexInDataSlab < len(i.dataSlab.elements) { - element, err = i.dataSlab.elements[i.indexInDataSlab].StoredValue(i.array.Storage) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + // Check current data slab isn't empty because i.remainingCount > 0. + if len(i.dataSlab.elements) == 0 { + return nil, NewSlabDataErrorf("data slab contains 0 elements, expect more") } - - if i.CanMutate() { - // Set up notification callback in child value so - // when child value is modified parent a is notified. - i.array.setCallbackWithChild(uint64(i.indexInArray), element, maxInlineArrayElementSize) - } - - i.indexInDataSlab++ - i.indexInArray++ } - if i.indexInDataSlab >= len(i.dataSlab.elements) { - i.id = i.dataSlab.next - i.dataSlab = nil + // At this point: + // - There are elements to iterate in array (i.remainingCount > 0), and + // - There are elements to iterate in i.dataSlab (i.indexInDataSlab < len(i.dataSlab.elements)) + + element, err := i.dataSlab.elements[i.indexInDataSlab].StoredValue(i.array.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + i.indexInDataSlab++ i.remainingCount-- return element, nil } -func (a *Array) Iterator() (*ArrayIterator, error) { +// Iterator returns mutable iterator for array elements. 
+// Mutable iterator handles: +// - indirect element mutation, such as modifying nested container +// - direct element mutation, such as overwriting existing element with new element +// Mutable iterator doesn't handle: +// - inserting new elements into the array +// - removing existing elements from the array +// NOTE: Use readonly iterator if mutation is not needed for better performance. +func (a *Array) Iterator() (ArrayIterator, error) { + if a.Count() == 0 { + return emptyMutableArrayIterator, nil + } + + return &mutableArrayIterator{ + array: a, + lastIndex: a.Count(), + }, nil +} + +// ReadOnlyIterator returns readonly iterator for array elements. +// If elements are mutated, those changes are not guaranteed to persist. +// NOTE: Use readonly iterator if mutation is not needed for better performance. +func (a *Array) ReadOnlyIterator() (ArrayIterator, error) { + if a.Count() == 0 { + return emptyReadOnlyArrayIterator, nil + } + slab, err := firstArrayDataSlab(a.Storage, a.root) if err != nil { // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). return nil, err } - return &ArrayIterator{ + return &readOnlyArrayIterator{ array: a, - id: slab.SlabID(), dataSlab: slab, - remainingCount: int(a.Count()), + remainingCount: a.Count(), }, nil } -// ReadOnlyIterator returns readonly iterator for array elements. -// If elements of child containers are mutated, those changes -// are not guaranteed to persist. -func (a *Array) ReadOnlyIterator() (*ArrayIterator, error) { - iterator, err := a.Iterator() - if err != nil { - // Don't need to wrap error as external error because err is already categorized by Iterator(). - return nil, err +func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (ArrayIterator, error) { + count := a.Count() + + if startIndex > count || endIndex > count { + return nil, NewSliceOutOfBoundsError(startIndex, endIndex, 0, count) } - iterator.readOnly = true - return iterator, nil + + if startIndex > endIndex { + return nil, NewInvalidSliceIndexError(startIndex, endIndex) + } + + if endIndex == startIndex { + return emptyMutableArrayIterator, nil + } + + return &mutableArrayIterator{ + array: a, + nextIndex: startIndex, + lastIndex: endIndex, + }, nil } -func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { +func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (ArrayIterator, error) { count := a.Count() if startIndex > count || endIndex > count { @@ -3456,7 +3537,7 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato numberOfElements := endIndex - startIndex if numberOfElements == 0 { - return emptyArrayIterator, nil + return emptyReadOnlyArrayIterator, nil } var dataSlab *ArrayDataSlab @@ -3483,28 +3564,17 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato } } - return &ArrayIterator{ + return &readOnlyArrayIterator{ array: a, - id: dataSlab.SlabID(), dataSlab: dataSlab, - indexInArray: int(startIndex), - indexInDataSlab: int(index), - remainingCount: int(numberOfElements), + indexInDataSlab: index, + remainingCount: numberOfElements, }, nil } -func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { - iterator, err := a.RangeIterator(startIndex, endIndex) - if err != nil { - return nil, err - } - iterator.readOnly = true - return iterator, nil -} - type ArrayIterationFunc func(element Value) (resume bool, err error) -func iterateArray(iterator 
*ArrayIterator, fn ArrayIterationFunc) error { +func iterateArray(iterator ArrayIterator, fn ArrayIterationFunc) error { for { value, err := iterator.Next() if err != nil { @@ -3621,18 +3691,23 @@ func getArraySlab(storage SlabStorage, id SlabID) (ArraySlab, error) { } func firstArrayDataSlab(storage SlabStorage, slab ArraySlab) (*ArrayDataSlab, error) { - if slab.IsData() { - return slab.(*ArrayDataSlab), nil - } - meta := slab.(*ArrayMetaDataSlab) - firstChildID := meta.childrenHeaders[0].slabID - firstChild, err := getArraySlab(storage, firstChildID) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return nil, err + switch slab := slab.(type) { + case *ArrayDataSlab: + return slab, nil + + case *ArrayMetaDataSlab: + firstChildID := slab.childrenHeaders[0].slabID + firstChild, err := getArraySlab(storage, firstChildID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return nil, err + } + // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). + return firstArrayDataSlab(storage, firstChild) + + default: + return nil, NewUnreachableError() } - // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). - return firstArrayDataSlab(storage, firstChild) } // getArrayDataSlabWithIndex returns data slab containing element at specified index diff --git a/map.go b/map.go index 63db77ab..8983e8a0 100644 --- a/map.go +++ b/map.go @@ -173,6 +173,15 @@ type elementGroup interface { type elements interface { fmt.Stringer + getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, MapKey, error) + Get( storage SlabStorage, digester Digester, @@ -317,6 +326,15 @@ var _ MapSlab = &MapMetaDataSlab{} type MapSlab interface { Slab + getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, MapKey, error) + Get( storage SlabStorage, digester Digester, @@ -1329,10 +1347,10 @@ func (e *hkeyElements) Encode(enc *Encoder) error { return nil } -func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { +func (e *hkeyElements) get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, int, error) { if level >= digester.Levels() { - return nil, nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, nil, 0, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } // binary search by hkey @@ -1354,13 +1372,62 @@ func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, h // No matching hkey if equalIndex == -1 { - return nil, nil, NewKeyNotFoundError(key) + return nil, nil, 0, NewKeyNotFoundError(key) } elem := e.elems[equalIndex] - // Don't need to wrap error as external error because err is already categorized by element.Get(). - return elem.Get(storage, digester, level, hkey, comparator, key) + k, v, err := elem.Get(storage, digester, level, hkey, comparator, key) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by element.Get(). 
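firstArrayDataSlab above was reshaped into a type switch that recurses through metadata slabs down to the leftmost data slab. The descent in miniature, over a toy tree whose types are simplified stand-ins for atree's slabs:

package main

import "fmt"

type slab interface{ isSlab() }

type dataSlab struct{ elements []int }
type metaSlab struct{ children []slab }

func (dataSlab) isSlab() {}
func (metaSlab) isSlab() {}

// firstDataSlab always descends into the leftmost child until it
// reaches a leaf, mirroring the switch in firstArrayDataSlab.
func firstDataSlab(s slab) *dataSlab {
	switch s := s.(type) {
	case *dataSlab:
		return s
	case *metaSlab:
		return firstDataSlab(s.children[0])
	default:
		return nil // unreachable for well-formed trees
	}
}

func main() {
	tree := &metaSlab{children: []slab{
		&metaSlab{children: []slab{&dataSlab{elements: []int{1, 2}}}},
		&dataSlab{elements: []int{3}},
	}}
	fmt.Println(firstDataSlab(tree).elements) // [1 2]
}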
+ return nil, nil, 0, err + } + + return k, v, equalIndex, nil +} + +func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + k, v, _, err := e.get(storage, digester, level, hkey, comparator, key) + return k, v, err +} + +func (e *hkeyElements) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + k, v, index, err := e.get(storage, digester, level, hkey, comparator, key) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by hkeyElements.get(). + return nil, nil, nil, err + } + + nextIndex := index + 1 + + switch { + case nextIndex < len(e.elems): + // Next element is still in the same hkeyElements group. + nextElement := e.elems[nextIndex] + + nextKey, err := firstKeyInElement(storage, nextElement) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by firstKeyInElement(). + return nil, nil, nil, err + } + + return k, v, nextKey, nil + + case nextIndex == len(e.elems): + // Next element is outside this hkeyElements group, so nextKey is nil. + return k, v, nil, nil + + default: // nextIndex > len(e.elems) + // This should never happen. + return nil, nil, nil, NewUnreachableError() + } } func (e *hkeyElements) Set( @@ -1968,25 +2035,61 @@ func (e *singleElements) Encode(enc *Encoder) error { return nil } -func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { +func (e *singleElements) get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, int, error) { if level != digester.Levels() { - return nil, nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, 0, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search by key - for _, elem := range e.elems { + for i, elem := range e.elems { equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, 0, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return elem.key, elem.value, nil + return elem.key, elem.value, i, nil } } - return nil, nil, NewKeyNotFoundError(key) + return nil, nil, 0, NewKeyNotFoundError(key) +} + +func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + k, v, _, err := e.get(storage, digester, level, hkey, comparator, key) + return k, v, err +} + +func (e *singleElements) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + k, v, index, err := e.get(storage, digester, level, hkey, comparator, key) + if err != nil { + return nil, nil, nil, err + } + + nextIndex := index + 1 + + switch { + case nextIndex < len(e.elems): + // Next element is still in the same singleElements group. 
+ nextKey := e.elems[nextIndex].key + return k, v, nextKey, nil + + case nextIndex == len(e.elems): + // Next element is outside this singleElements group, so nextKey is nil. + return k, v, nil, nil + + default: // nextIndex > len(e.elems) + // This should never happen + return nil, nil, nil, NewUnreachableError() + } } func (e *singleElements) Set( @@ -3818,7 +3921,7 @@ func (m *MapMetaDataSlab) ChildStorables() []Storable { return childIDs } -func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { +func (m *MapMetaDataSlab) getChildSlabByDigest(storage SlabStorage, hkey Digest, key Value) (MapSlab, int, error) { ans := -1 i, j := 0, len(m.childrenHeaders) @@ -3833,7 +3936,7 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint } if ans == -1 { - return nil, nil, NewKeyNotFoundError(key) + return nil, 0, NewKeyNotFoundError(key) } childHeaderIndex := ans @@ -3842,7 +3945,15 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint child, err := getMapSlab(storage, childID) if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return nil, 0, err + } + + return child, childHeaderIndex, nil +} + +func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + child, _, err := m.getChildSlabByDigest(storage, hkey, key) + if err != nil { return nil, nil, err } @@ -3850,6 +3961,60 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint return child.Get(storage, digester, level, hkey, comparator, key) } +func (m *MapMetaDataSlab) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + child, index, err := m.getChildSlabByDigest(storage, hkey, key) + if err != nil { + return nil, nil, nil, err + } + + k, v, nextKey, err := child.getElementAndNextKey(storage, digester, level, hkey, comparator, key) + if err != nil { + return nil, nil, nil, err + } + + if nextKey != nil { + // Next element is still in the same child slab. + return k, v, nextKey, nil + } + + // Next element is in the next child slab. + + nextIndex := index + 1 + + switch { + case nextIndex < len(m.childrenHeaders): + // Next element is in the next child of this MapMetaDataSlab. + nextChildID := m.childrenHeaders[nextIndex].slabID + + nextChild, err := getMapSlab(storage, nextChildID) + if err != nil { + return nil, nil, nil, err + } + + nextKey, err = firstKeyInMapSlab(storage, nextChild) + if err != nil { + return nil, nil, nil, err + } + + return k, v, nextKey, nil + + case nextIndex == len(m.childrenHeaders): + // Next element is outside this MapMetaDataSlab, so nextKey is nil. 
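getChildSlabByDigest above keeps the classic metadata-slab lookup: a binary search for the rightmost child whose firstKey is less than or equal to the digest being sought, since that child's subtree is the only place the digest can live. The loop isolated, with plain uint64 digests standing in for the library's Digest:

package main

import "fmt"

// childIndex returns the rightmost index whose firstKey <= hkey,
// or -1 when hkey sorts before every child.
func childIndex(firstKeys []uint64, hkey uint64) int {
	ans := -1
	i, j := 0, len(firstKeys)
	for i < j {
		h := int(uint(i+j) >> 1)
		if firstKeys[h] > hkey {
			j = h
		} else {
			ans = h
			i = h + 1
		}
	}
	return ans
}

func main() {
	firstKeys := []uint64{0, 100, 200}
	fmt.Println(childIndex(firstKeys, 150)) // 1
	fmt.Println(childIndex(firstKeys, 200)) // 2
}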
+ return k, v, nil, nil + + default: // nextIndex > len(m.childrenHeaders) + // This should never happen + return nil, nil, nil, NewUnreachableError() + } +} + func (m *MapMetaDataSlab) Set( storage SlabStorage, b DigesterBuilder, @@ -4853,6 +5018,92 @@ func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key return m.root.Get(m.Storage, keyDigest, level, hkey, comparator, key) } +func (m *OrderedMap) getElementAndNextKey(comparator ValueComparator, hip HashInputProvider, key Value) (Value, Value, Value, error) { + + keyDigest, err := m.digesterBuilder.Digest(hip, key) + if err != nil { + // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester") + } + defer putDigester(keyDigest) + + level := uint(0) + + hkey, err := keyDigest.Digest(level) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Digesert interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) + } + + keyStorable, valueStorable, nextKeyStorable, err := m.root.getElementAndNextKey(m.Storage, keyDigest, level, hkey, comparator, key) + if err != nil { + return nil, nil, nil, err + } + + k, err := keyStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + + v, err := valueStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + + var nextKey Value + if nextKeyStorable != nil { + nextKey, err = nextKeyStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + } + + // As a parent, this map (m) sets up notification callback with child + // value (v) so this map can be notified when child value is modified. + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize) + + return k, v, nextKey, nil +} + +func (m *OrderedMap) getNextKey(comparator ValueComparator, hip HashInputProvider, key Value) (Value, error) { + + keyDigest, err := m.digesterBuilder.Digest(hip, key) + if err != nil { + // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester") + } + defer putDigester(keyDigest) + + level := uint(0) + + hkey, err := keyDigest.Digest(level) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Digesert interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) + } + + _, _, nextKeyStorable, err := m.root.getElementAndNextKey(m.Storage, keyDigest, level, hkey, comparator, key) + if err != nil { + return nil, err + } + + if nextKeyStorable == nil { + return nil, nil + } + + nextKey, err := nextKeyStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + + return nextKey, nil +} + func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) { storable, err := m.set(comparator, hip, key, value) if err != nil { @@ -5252,19 +5503,24 @@ func getMapSlab(storage SlabStorage, id SlabID) (MapSlab, error) { return mapSlab, nil } -func firstMapDataSlab(storage SlabStorage, slab MapSlab) (MapSlab, error) { - if slab.IsData() { +func firstMapDataSlab(storage SlabStorage, slab MapSlab) (*MapDataSlab, error) { + switch slab := slab.(type) { + case *MapDataSlab: return slab, nil + + case *MapMetaDataSlab: + firstChildID := slab.childrenHeaders[0].slabID + firstChild, err := getMapSlab(storage, firstChildID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return nil, err + } + // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). + return firstMapDataSlab(storage, firstChild) + + default: + return nil, NewUnreachableError() } - meta := slab.(*MapMetaDataSlab) - firstChildID := meta.childrenHeaders[0].slabID - firstChild, err := getMapSlab(storage, firstChildID) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err - } - // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). 
- return firstMapDataSlab(storage, firstChild) } func (m *MapExtraData) incrementCount() { @@ -5335,17 +5591,116 @@ func (i *mapElementIterator) next() (key MapKey, value MapValue, err error) { type MapEntryIterationFunc func(Value, Value) (resume bool, err error) type MapElementIterationFunc func(Value) (resume bool, err error) -type MapIterator struct { - m *OrderedMap - comparator ValueComparator - hip HashInputProvider - id SlabID - elemIterator *mapElementIterator +type MapIterator interface { + CanMutate() bool + Next() (Value, Value, error) + NextKey() (Value, error) + NextValue() (Value, error) +} + +type emptyMapIterator struct { + readOnly bool +} + +var _ MapIterator = &emptyMapIterator{} + +var emptyMutableMapIterator = &emptyMapIterator{readOnly: false} +var emptyReadOnlyMapIterator = &emptyMapIterator{readOnly: true} + +func (i *emptyMapIterator) CanMutate() bool { + return !i.readOnly +} + +func (*emptyMapIterator) Next() (Value, Value, error) { + return nil, nil, nil +} + +func (*emptyMapIterator) NextKey() (Value, error) { + return nil, nil +} + +func (*emptyMapIterator) NextValue() (Value, error) { + return nil, nil +} + +type mutableMapIterator struct { + m *OrderedMap + comparator ValueComparator + hip HashInputProvider + nextKey Value +} + +var _ MapIterator = &mutableMapIterator{} + +func (i *mutableMapIterator) CanMutate() bool { + return true +} + +func (i *mutableMapIterator) Next() (Value, Value, error) { + if i.nextKey == nil { + // No more elements + return nil, nil, nil + } + + // Don't need to set up notification callback for v because + // getElementAndNextKey() returns value with notification already. + k, v, nk, err := i.m.getElementAndNextKey(i.comparator, i.hip, i.nextKey) + if err != nil { + return nil, nil, err + } + + i.nextKey = nk + + return k, v, nil +} + +func (i *mutableMapIterator) NextKey() (Value, error) { + if i.nextKey == nil { + // No more elements + return nil, nil + } + + key := i.nextKey + + nk, err := i.m.getNextKey(i.comparator, i.hip, key) + if err != nil { + return nil, err + } + + i.nextKey = nk + + return key, nil +} + +func (i *mutableMapIterator) NextValue() (Value, error) { + if i.nextKey == nil { + // No more elements. + return nil, nil + } + + // Don't need to set up notification callback for v because + // getElementAndNextKey() returns value with notification already. 
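mutableMapIterator above never pins a position inside a slab; it remembers only the next key and re-resolves it through the map on every step, which is what lets elements be overwritten mid-iteration without invalidating the cursor. The cursor style in a toy ordered map, with sorted string keys standing in for digest order; re-sorting per step keeps the sketch short, whereas atree resolves the key through its tree:

package main

import (
	"fmt"
	"sort"
)

type orderedMap struct{ m map[string]int }

// nextKeyAfter returns the smallest key greater than k, or "" when
// no key is left, playing the role of getNextKey.
func (om orderedMap) nextKeyAfter(k string) string {
	keys := make([]string, 0, len(om.m))
	for key := range om.m {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		if key > k {
			return key
		}
	}
	return ""
}

func main() {
	om := orderedMap{m: map[string]int{"a": 1, "b": 2, "c": 3}}
	for k := om.nextKeyAfter(""); k != ""; k = om.nextKeyAfter(k) {
		om.m[k] *= 10 // direct mutation; the cursor survives because only the key is held
		fmt.Println(k, om.m[k])
	}
}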
+ _, v, nk, err := i.m.getElementAndNextKey(i.comparator, i.hip, i.nextKey) + if err != nil { + return nil, err + } + + i.nextKey = nk + + return v, nil +} + +type readOnlyMapIterator struct { + m *OrderedMap + nextDataSlabID SlabID + elemIterator *mapElementIterator } -func (i *MapIterator) Next() (key Value, value Value, err error) { +var _ MapIterator = &readOnlyMapIterator{} + +func (i *readOnlyMapIterator) Next() (key Value, value Value, err error) { if i.elemIterator == nil { - if i.id == SlabIDUndefined { + if i.nextDataSlabID == SlabIDUndefined { return nil, nil, nil } @@ -5375,11 +5730,6 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } - if i.CanMutate() { - maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize())) - i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize) - } - return key, value, nil } @@ -5389,9 +5739,9 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { return i.Next() } -func (i *MapIterator) NextKey() (key Value, err error) { +func (i *readOnlyMapIterator) NextKey() (key Value, err error) { if i.elemIterator == nil { - if i.id == SlabIDUndefined { + if i.nextDataSlabID == SlabIDUndefined { return nil, nil } @@ -5424,9 +5774,9 @@ func (i *MapIterator) NextKey() (key Value, err error) { return i.NextKey() } -func (i *MapIterator) NextValue() (value Value, err error) { +func (i *readOnlyMapIterator) NextValue() (value Value, err error) { if i.elemIterator == nil { - if i.id == SlabIDUndefined { + if i.nextDataSlabID == SlabIDUndefined { return nil, nil } @@ -5437,8 +5787,8 @@ func (i *MapIterator) NextValue() (value Value, err error) { } } - var ks, vs Storable - ks, vs, err = i.elemIterator.next() + var vs Storable + _, vs, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, err @@ -5450,17 +5800,6 @@ func (i *MapIterator) NextValue() (value Value, err error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } - if i.CanMutate() { - key, err := ks.StoredValue(i.m.Storage) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") - } - - maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize())) - i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize) - } - return value, nil } @@ -5470,22 +5809,22 @@ func (i *MapIterator) NextValue() (value Value, err error) { return i.NextValue() } -func (i *MapIterator) advance() error { - slab, found, err := i.m.Storage.Retrieve(i.id) +func (i *readOnlyMapIterator) advance() error { + slab, found, err := i.m.Storage.Retrieve(i.nextDataSlabID) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.nextDataSlabID)) } if !found { - return NewSlabNotFoundErrorf(i.id, "slab not found during map iteration") + return NewSlabNotFoundErrorf(i.nextDataSlabID, "slab not found during map iteration") } dataSlab, ok := slab.(*MapDataSlab) if !ok { - return NewSlabDataErrorf("slab %s isn't MapDataSlab", i.id) + return NewSlabDataErrorf("slab %s isn't MapDataSlab", i.nextDataSlabID) } - i.id = dataSlab.next + i.nextDataSlabID = dataSlab.next i.elemIterator = &mapElementIterator{ storage: i.m.Storage, @@ -5495,50 +5834,72 @@ func (i *MapIterator) advance() error { return nil } -func (m *OrderedMap) iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { - slab, err := firstMapDataSlab(m.Storage, m.root) +func (i *readOnlyMapIterator) CanMutate() bool { + return false +} + +// Iterator returns mutable iterator for map elements. +// Mutable iterator handles: +// - indirect element mutation, such as modifying nested container +// - direct element mutation, such as overwriting existing element with new element +// Mutable iterator doesn't handle: +// - inserting new elements into the map +// - removing existing elements from the map +// NOTE: Use readonly iterator if mutation is not needed for better performance. +func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (MapIterator, error) { + if m.Count() == 0 { + return emptyMutableMapIterator, nil + } + + keyStorable, err := firstKeyInMapSlab(m.Storage, m.root) if err != nil { - // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). + // Don't need to wrap error as external error because err is already categorized by firstKeyInMapSlab(). return nil, err } - dataSlab := slab.(*MapDataSlab) + if keyStorable == nil { + // This should never happen because m.Count() > 0. + return nil, NewSlabDataErrorf("failed to find first key in map while map count > 0") + } - return &MapIterator{ + key, err := keyStorable.StoredValue(m.Storage) + if err != nil { + return nil, err + } + + return &mutableMapIterator{ m: m, comparator: comparator, hip: hip, - id: dataSlab.next, - elemIterator: &mapElementIterator{ - storage: m.Storage, - elements: dataSlab.elements, - }, + nextKey: key, }, nil } -func (i *MapIterator) CanMutate() bool { - return i.comparator != nil && i.hip != nil -} +// ReadOnlyIterator returns readonly iterator for map elements. +// If elements are mutated, those changes are not guaranteed to persist. +// NOTE: Use readonly iterator if mutation is not needed for better performance. +func (m *OrderedMap) ReadOnlyIterator() (MapIterator, error) { + if m.Count() == 0 { + return emptyReadOnlyMapIterator, nil + } -func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) { - iterator, err := m.iterator(comparator, hip) + dataSlab, err := firstMapDataSlab(m.Storage, m.root) if err != nil { + // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). return nil, err } - if !iterator.CanMutate() { - return nil, NewUserError(fmt.Errorf("failed to create MapIterator: ValueComparator or HashInputProvider is nil")) - } - return iterator, nil -} -// ReadOnlyIterator returns readonly iterator for map elements. 
-// If elements of child containers are mutated, those changes -// are not guaranteed to persist. -func (m *OrderedMap) ReadOnlyIterator() (*MapIterator, error) { - return m.iterator(nil, nil) + return &readOnlyMapIterator{ + m: m, + nextDataSlabID: dataSlab.next, + elemIterator: &mapElementIterator{ + storage: m.Storage, + elements: dataSlab.elements, + }, + }, nil } -func iterateMap(iterator *MapIterator, fn MapEntryIterationFunc) error { +func iterateMap(iterator MapIterator, fn MapEntryIterationFunc) error { var err error var key, value Value for { @@ -5608,7 +5969,7 @@ func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { } } -func iterateMapValues(iterator *MapIterator, fn MapElementIterationFunc) error { +func iterateMapValues(iterator MapIterator, fn MapElementIterationFunc) error { var err error var value Value for { @@ -6313,3 +6674,49 @@ func (m *OrderedMap) IterateReadOnlyLoadedValues(fn MapEntryIterationFunc) error } } } + +func firstKeyInMapSlab(storage SlabStorage, slab MapSlab) (MapKey, error) { + dataSlab, err := firstMapDataSlab(storage, slab) + if err != nil { + return nil, err + } + return firstKeyInElements(storage, dataSlab.elements) +} + +func firstKeyInElements(storage SlabStorage, elems elements) (MapKey, error) { + switch elements := elems.(type) { + case *hkeyElements: + if len(elements.elems) == 0 { + return nil, nil + } + firstElem := elements.elems[0] + return firstKeyInElement(storage, firstElem) + + case *singleElements: + if len(elements.elems) == 0 { + return nil, nil + } + firstElem := elements.elems[0] + return firstElem.key, nil + + default: + return nil, NewUnreachableError() + } +} + +func firstKeyInElement(storage SlabStorage, elem element) (MapKey, error) { + switch elem := elem.(type) { + case *singleElement: + return elem.key, nil + + case elementGroup: + group, err := elem.Elements(storage) + if err != nil { + return nil, err + } + return firstKeyInElements(storage, group) + + default: + return nil, NewUnreachableError() + } +} From 1624f6d709c9228e72a92e70fe0e676f81a4f05a Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 30 Nov 2023 18:02:12 -0600 Subject: [PATCH 088/126] Add more tests for mutable array iterator --- array_test.go | 1056 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 1036 insertions(+), 20 deletions(-) diff --git a/array_test.go b/array_test.go index 1a30d718..fa4e3070 100644 --- a/array_test.go +++ b/array_test.go @@ -692,7 +692,7 @@ func TestArrayRemove(t *testing.T) { }) } -func TestArrayIterate(t *testing.T) { +func TestArrayIterateReadOnly(t *testing.T) { t.Run("empty", func(t *testing.T) { typeInfo := testTypeInfo{42} @@ -906,12 +906,788 @@ func TestArrayIterate(t *testing.T) { require.Equal(t, count/2, i) }) +} + +func TestMutableArrayIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + i := uint64(0) + err = array.Iterate(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 
7, 8}
+
+ array, err := NewArray(storage, address, typeInfo)
+ require.NoError(t, err)
+
+ expectedValues := make([]Value, arraySize)
+ for i := uint64(0); i < arraySize; i++ {
+ v := Uint64Value(i)
+ err = array.Append(v)
+ require.NoError(t, err)
+
+ expectedValues[i] = v
+ }
+ require.True(t, array.root.IsData())
+
+ i := 0
+ err = array.Iterate(func(v Value) (bool, error) {
+ require.Equal(t, Uint64Value(i), v)
+
+ // Mutate primitive array elements by overwriting existing elements of similar byte size.
+ newValue := Uint64Value(i * 2)
+ existingStorable, err := array.Set(uint64(i), newValue)
+ require.NoError(t, err)
+
+ existingValue, err := existingStorable.StoredValue(storage)
+ require.NoError(t, err)
+ require.Equal(t, Uint64Value(i), existingValue)
+
+ expectedValues[i] = newValue
+
+ i++
+
+ return true, nil
+ })
+ require.NoError(t, err)
+ require.Equal(t, arraySize, i)
+ require.True(t, array.root.IsData())
+
+ testArray(t, storage, typeInfo, address, array, expectedValues, false)
+ })
+
+ t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ const arraySize = 1024
+
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+ array, err := NewArray(storage, address, typeInfo)
+ require.NoError(t, err)
+
+ expectedValues := make([]Value, arraySize)
+ for i := uint64(0); i < arraySize; i++ {
+ v := Uint64Value(i)
+ err = array.Append(v)
+ require.NoError(t, err)
+
+ expectedValues[i] = v
+ }
+ require.False(t, array.root.IsData())
+
+ i := 0
+ err = array.Iterate(func(v Value) (bool, error) {
+ require.Equal(t, Uint64Value(i), v)
+
+ // Mutate primitive array elements by overwriting existing elements with elements of similar size.
+ newValue := Uint64Value(i * 2)
+ existingStorable, err := array.Set(uint64(i), newValue)
+ require.NoError(t, err)
+
+ existingValue, err := existingStorable.StoredValue(storage)
+ require.NoError(t, err)
+ require.Equal(t, Uint64Value(i), existingValue)
+
+ expectedValues[i] = newValue
+
+ i++
+
+ return true, nil
+ })
+ require.NoError(t, err)
+ require.Equal(t, arraySize, i)
+ require.False(t, array.root.IsData())
+
+ testArray(t, storage, typeInfo, address, array, expectedValues, false)
+ })
+
+ t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ const arraySize = 15
+
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+ array, err := NewArray(storage, address, typeInfo)
+ require.NoError(t, err)
+
+ expectedValues := make([]Value, arraySize)
+ r := rune('a')
+ for i := uint64(0); i < arraySize; i++ {
+ v := NewStringValue(string(r))
+ err = array.Append(v)
+ require.NoError(t, err)
+
+ expectedValues[i] = v
+ r++
+ }
+ require.True(t, array.root.IsData())
+
+ i := 0
+ r = rune('a')
+ err = array.Iterate(func(v Value) (bool, error) {
+ require.Equal(t, NewStringValue(string(r)), v)
+
+ // Mutate primitive array elements by overwriting existing elements with larger elements.
+ // Larger elements cause slabs to split.
+			newValue := NewStringValue(strings.Repeat(string(r), 25))
+			existingStorable, err := array.Set(uint64(i), newValue)
+			require.NoError(t, err)
+
+			existingValue, err := existingStorable.StoredValue(storage)
+			require.NoError(t, err)
+			require.Equal(t, NewStringValue(string(r)), existingValue)
+
+			expectedValues[i] = newValue
+
+			i++
+			r++
+
+			return true, nil
+		})
+		require.NoError(t, err)
+		require.Equal(t, arraySize, i)
+		require.False(t, array.root.IsData())
+
+		testArray(t, storage, typeInfo, address, array, expectedValues, false)
+	})
+
+	t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		const arraySize = 200
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		expectedValues := make([]Value, arraySize)
+		r := rune('a')
+		for i := uint64(0); i < arraySize; i++ {
+			v := NewStringValue(string(r))
+			err = array.Append(v)
+			require.NoError(t, err)
+
+			expectedValues[i] = v
+			r++
+		}
+		require.False(t, array.root.IsData())
+
+		i := 0
+		r = rune('a')
+		err = array.Iterate(func(v Value) (bool, error) {
+			require.Equal(t, NewStringValue(string(r)), v)
+
+			// Mutate primitive array elements by overwriting existing elements with larger elements.
+			// Larger elements cause slabs to split.
+			newValue := NewStringValue(strings.Repeat(string(r), 25))
+			existingStorable, err := array.Set(uint64(i), newValue)
+			require.NoError(t, err)
+
+			existingValue, err := existingStorable.StoredValue(storage)
+			require.NoError(t, err)
+			require.Equal(t, NewStringValue(string(r)), existingValue)
+
+			expectedValues[i] = newValue
+
+			i++
+			r++
+
+			return true, nil
+		})
+		require.NoError(t, err)
+		require.Equal(t, arraySize, i)
+		require.False(t, array.root.IsData())
+
+		testArray(t, storage, typeInfo, address, array, expectedValues, false)
+	})
+
+	t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		const arraySize = 80
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		expectedValues := make([]Value, arraySize)
+		r := rune('a')
+		for i := uint64(0); i < arraySize; i++ {
+			v := NewStringValue(strings.Repeat(string(r), 25))
+			err = array.Append(v)
+			require.NoError(t, err)
+
+			expectedValues[i] = v
+			r++
+		}
+		require.False(t, array.root.IsData())
+
+		i := 0
+		r = rune('a')
+		err = array.Iterate(func(v Value) (bool, error) {
+			require.Equal(t, NewStringValue(strings.Repeat(string(r), 25)), v)
+
+			// Mutate primitive array elements by overwriting existing elements with smaller elements.
+			// Smaller elements cause slabs to merge.
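+			// Note: shrinking every element from a 25-byte string back to a single byte
+			// drops slab sizes below the minimum slab size (typically half the threshold),
+			// so leaf slabs merge until the array collapses back into a single root data
+			// slab (asserted below).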
+ newValue := NewStringValue(string(r)) + existingStorable, err := array.Set(uint64(i), newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat(string(r), 25)), existingValue) + + expectedValues[i] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + require.True(t, array.root.IsData()) + + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + // Mutate array elements by inserting more elements to child arrays. + newElement := Uint64Value(0) + err := childArray.Append(newElement) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + require.False(t, array.root.IsData()) + + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + // Mutate array elements by inserting more elements to child arrays. 
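+			// Note: the appended element is small enough that each child array still fits
+			// within the max inline size enforced by the parent, so the children stay
+			// inlined and no parent slab is split or merged.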
+ newElement := Uint64Value(0) + err := childArray.Append(newElement) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const ( + arraySize = 15 + childArraySize = 1 + mutatedChildArraySize = 4 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValue arrayValue + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedValue = append(expectedValue, v) + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = expectedValue + } + require.True(t, array.root.IsData()) + + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + // Mutate array elements by inserting more elements to child arrays. 
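+			// Note: growing each inlined child from 1 to 4 elements enlarges the parent
+			// root data slab past the threshold, so the root is expected to split into a
+			// metadata slab while the children themselves remain inlined (asserted below).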
+ for j := i; j < i+mutatedChildArraySize-childArraySize; j++ { + newElement := Uint64Value(j) + + err := childArray.Append(newElement) + require.NoError(t, err) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + } + + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const ( + arraySize = 25 + childArraySize = 1 + mutatedChildArraySize = 4 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValue arrayValue + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedValue = append(expectedValue, v) + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = expectedValue + } + require.False(t, array.root.IsData()) + + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + // Mutate array elements by inserting more elements to child arrays. 
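+			// Note: same idea as the data-slab case above, but the root is already a
+			// metadata slab, so growing the inlined children is expected to split its
+			// leaf data slabs rather than the root itself.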
+			for j := i; j < i+mutatedChildArraySize-childArraySize; j++ {
+				newElement := Uint64Value(j)
+
+				err := childArray.Append(newElement)
+				require.NoError(t, err)
+
+				expectedChildArrayValues = append(expectedChildArrayValues, newElement)
+			}
+
+			require.Equal(t, uint64(mutatedChildArraySize), childArray.Count())
+			require.True(t, childArray.Inlined())
+
+			expectedValues[i] = expectedChildArrayValues
+
+			i++
+
+			return true, nil
+		})
+		require.NoError(t, err)
+		require.Equal(t, arraySize, i)
+		require.False(t, array.root.IsData())
+
+		testArray(t, storage, typeInfo, address, array, expectedValues, false)
+	})
+
+	t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		const (
+			arraySize             = 10
+			childArraySize        = 10
+			mutatedChildArraySize = 1
+		)
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		expectedValues := make([]Value, arraySize)
+		for i := uint64(0); i < arraySize; i++ {
+			childArray, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)
+
+			var expectedValue arrayValue
+			for j := i; j < i+childArraySize; j++ {
+				v := Uint64Value(j)
+				err = childArray.Append(v)
+				require.NoError(t, err)
+
+				expectedValue = append(expectedValue, v)
+			}
+
+			err = array.Append(childArray)
+			require.NoError(t, err)
+
+			expectedValues[i] = expectedValue
+		}
+
+		require.False(t, array.root.IsData())
+
+		i := 0
+		err = array.Iterate(func(v Value) (bool, error) {
+			childArray, ok := v.(*Array)
+			require.True(t, ok)
+			require.Equal(t, uint64(childArraySize), childArray.Count())
+			require.True(t, childArray.Inlined())
+
+			expectedChildArrayValues, ok := expectedValues[i].(arrayValue)
+			require.True(t, ok)
+
+			for j := childArraySize - 1; j > mutatedChildArraySize-1; j-- {
+				existingStorable, err := childArray.Remove(uint64(j))
+				require.NoError(t, err)
+
+				existingValue, err := existingStorable.StoredValue(storage)
+				require.NoError(t, err)
+				require.Equal(t, Uint64Value(i+j), existingValue)
+			}
+
+			require.Equal(t, uint64(mutatedChildArraySize), childArray.Count())
+			require.True(t, childArray.Inlined())
+
+			expectedValues[i] = expectedChildArrayValues[:mutatedChildArraySize]
+
+			i++
+
+			return true, nil
+		})
+		require.NoError(t, err)
+		require.Equal(t, arraySize, i)
+		require.True(t, array.root.IsData())
+
+		testArray(t, storage, typeInfo, address, array, expectedValues, false)
+	})
+
+	t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		const (
+			arraySize             = 2
+			childArraySize        = 1
+			mutatedChildArraySize = 50
+		)
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		expectedValues := make([]Value, arraySize)
+		for i := uint64(0); i < arraySize; i++ {
+			childArray, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)
+
+			var expectedValue arrayValue
+			for j := i; j < i+childArraySize; j++ {
+				v := Uint64Value(j)
+				err = childArray.Append(v)
+				require.NoError(t, err)
+
+				expectedValue = append(expectedValue, v)
+			}
+
+			err = array.Append(childArray)
+			require.NoError(t, err)
+
+			expectedValues[i] = expectedValue
+		}
+
+		require.True(t, array.root.IsData())
+
+		i := 0
+		err = array.Iterate(func(v Value) 
(bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + for j := childArraySize; j < mutatedChildArraySize; j++ { + v := Uint64Value(i + j) + + err := childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues = append(expectedChildArrayValues, v) + } + + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) + + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const ( + arraySize = 10 + childArraySize = 10 + mutatedChildArraySize = 50 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValue arrayValue + + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedValue = append(expectedValue, v) + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = expectedValue + } + + require.False(t, array.root.IsData()) + + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + for j := childArraySize; j < mutatedChildArraySize; j++ { + v := Uint64Value(i + j) + + err := childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues = append(expectedChildArrayValues, v) + } + + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) + + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) - t.Run("mutation", func(t *testing.T) { + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - const arraySize = 15 + const ( + arraySize = 2 + childArraySize = 50 + mutatedChildArraySize = 1 + ) typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) @@ -925,48 +1701,196 @@ func TestArrayIterate(t *testing.T) { childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - v := Uint64Value(i) - err = childArray.Append(v) - require.NoError(t, err) + var expectedValue arrayValue + + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedValue = append(expectedValue, v) + } err = array.Append(childArray) require.NoError(t, err) - expectedValues[i] = 
arrayValue{v} + expectedValues[i] = expectedValue } - require.True(t, array.root.IsData()) - sizeBeforeMutation := array.root.Header().size + require.True(t, array.root.IsData()) i := 0 - newElement := Uint64Value(0) err = array.Iterate(func(v Value) (bool, error) { childArray, ok := v.(*Array) require.True(t, ok) - require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + for j := childArraySize - 1; j > mutatedChildArraySize-1; j-- { + existingStorable, err := childArray.Remove(uint64(j)) + require.NoError(t, err) + + value, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, Uint64Value(i+j), value) + } + + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) require.True(t, childArray.Inlined()) - err := childArray.Append(newElement) + expectedValues[i] = expectedChildArrayValues[:1] + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const ( + arraySize = 4 + childArraySize = 50 + mutatedChildArraySize = 25 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var expectedValue arrayValue + + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedValue = append(expectedValue, v) + } + + err = array.Append(childArray) require.NoError(t, err) + expectedValues[i] = expectedValue + } + + require.True(t, array.root.IsData()) + + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) require.True(t, ok) - expectedChildArrayValues = append(expectedChildArrayValues, newElement) - expectedValues[i] = expectedChildArrayValues + for j := childArraySize - 1; j >= mutatedChildArraySize; j-- { + existingStorable, err := childArray.Remove(uint64(j)) + require.NoError(t, err) - i++ + value, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, Uint64Value(i+j), value) + } - require.Equal(t, array.root.Header().size, sizeBeforeMutation+uint32(i)*newElement.ByteSize()) + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedValues[i] = expectedChildArrayValues[:mutatedChildArraySize] + + i++ return true, nil }) require.NoError(t, err) require.Equal(t, arraySize, i) - require.True(t, array.root.IsData()) + + require.False(t, array.root.IsData()) testArray(t, storage, typeInfo, address, array, expectedValues, false) }) + + t.Run("stop", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 
2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const count = 10 + for i := uint64(0); i < count; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + i := 0 + err = array.Iterate(func(_ Value) (bool, error) { + if i == count/2 { + return false, nil + } + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, count/2, i) + }) + + t.Run("error", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const count = 10 + for i := uint64(0); i < count; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + testErr := errors.New("test") + + i := 0 + err = array.Iterate(func(_ Value) (bool, error) { + if i == count/2 { + return false, testErr + } + i++ + return true, nil + }) + // err is testErr wrapped in ExternalError. + require.Equal(t, 1, errorCategorizationCount(err)) + var externalError *ExternalError + require.ErrorAs(t, err, &externalError) + require.Equal(t, testErr, externalError.Unwrap()) + + require.Equal(t, count/2, i) + }) } func testArrayIterateRange(t *testing.T, array *Array, values []Value) { @@ -1029,7 +1953,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { } } -func TestArrayIterateRange(t *testing.T) { +func TestReadOnlyArrayIterateRange(t *testing.T) { typeInfo := testTypeInfo{42} address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1143,8 +2067,29 @@ func TestArrayIterateRange(t *testing.T) { require.Equal(t, testErr, externalError.Unwrap()) require.Equal(t, count/2, i) }) +} + +func TestMutableArrayIterateRange(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + i := 0 + err = array.IterateRange(0, 0, func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) - t.Run("mutation", func(t *testing.T) { + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) @@ -1207,6 +2152,67 @@ func TestArrayIterateRange(t *testing.T) { testArray(t, storage, typeInfo, address, array, expectedValues, false) }) + + t.Run("stop", func(t *testing.T) { + const arraySize = 10 + + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + i := uint64(0) + startIndex := uint64(1) + endIndex := uint64(5) + count := endIndex - startIndex + err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + if i == count/2 { + return false, nil + } + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, count/2, i) + }) + + t.Run("error", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 10 + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + testErr := errors.New("test") + + i := uint64(0) + startIndex := uint64(1) + endIndex := uint64(5) + count := endIndex - startIndex + 
err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + if i == count/2 { + return false, testErr + } + i++ + return true, nil + }) + // err is testErr wrapped in ExternalError. + require.Equal(t, 1, errorCategorizationCount(err)) + var externalError *ExternalError + require.ErrorAs(t, err, &externalError) + require.Equal(t, testErr, externalError.Unwrap()) + require.Equal(t, count/2, i) + }) } func TestArrayRootSlabID(t *testing.T) { @@ -3206,7 +4212,7 @@ func TestEmptyArray(t *testing.T) { require.Nil(t, s) }) - t.Run("iterate", func(t *testing.T) { + t.Run("readonly iterate", func(t *testing.T) { i := uint64(0) err := array.IterateReadOnly(func(v Value) (bool, error) { i++ @@ -3216,6 +4222,16 @@ func TestEmptyArray(t *testing.T) { require.Equal(t, uint64(0), i) }) + t.Run("iterate", func(t *testing.T) { + i := uint64(0) + err := array.Iterate(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + t.Run("count", func(t *testing.T) { count := array.Count() require.Equal(t, uint64(0), count) From 20d7796e3e8b890ccca2b63392658bd78409ec76 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 30 Nov 2023 18:29:11 -0600 Subject: [PATCH 089/126] Support mutation of values during map key iteration --- map.go | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/map.go b/map.go index 8983e8a0..e0473fd6 100644 --- a/map.go +++ b/map.go @@ -5940,14 +5940,8 @@ func (m *OrderedMap) IterateReadOnly(fn MapEntryIterationFunc) error { return iterateMap(iterator, fn) } -func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { - - iterator, err := m.ReadOnlyIterator() - if err != nil { - // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). - return err - } - +func iterateMapKeys(iterator MapIterator, fn MapElementIterationFunc) error { + var err error var key Value for { key, err = iterator.NextKey() @@ -5969,6 +5963,24 @@ func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { } } +func (m *OrderedMap) IterateKeys(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error { + iterator, err := m.Iterator(comparator, hip) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). + return err + } + return iterateMapKeys(iterator, fn) +} + +func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { + iterator, err := m.ReadOnlyIterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). 
+		return err
+	}
+	return iterateMapKeys(iterator, fn)
+}
+
 func iterateMapValues(iterator MapIterator, fn MapElementIterationFunc) error {
 	var err error
 	var value Value

From bcae77e59ec32019f078e8fa23233eb5ecc12b03 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 1 Dec 2023 16:58:30 -0600
Subject: [PATCH 090/126] Handle collision group in mutable map iterator

---
 map.go | 112 +++++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 98 insertions(+), 14 deletions(-)

diff --git a/map.go b/map.go
index e0473fd6..823f2dc4 100644
--- a/map.go
+++ b/map.go
@@ -114,6 +114,15 @@ type MapValue Storable
 type element interface {
 	fmt.Stringer
 
+	getElementAndNextKey(
+		storage SlabStorage,
+		digester Digester,
+		level uint,
+		hkey Digest,
+		comparator ValueComparator,
+		key Value,
+	) (MapKey, MapValue, MapKey, error)
+
 	Get(
 		storage SlabStorage,
 		digester Digester,
@@ -634,6 +643,20 @@ func (e *singleElement) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (e *singleElement) getElementAndNextKey(
+	storage SlabStorage,
+	digester Digester,
+	level uint,
+	hkey Digest,
+	comparator ValueComparator,
+	key Value,
+) (MapKey, MapValue, MapKey, error) {
+	k, v, err := e.Get(storage, digester, level, hkey, comparator, key)
+
+	nextKey := MapKey(nil)
+	return k, v, nextKey, err
+}
+
 func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) {
 	equal, err := comparator(storage, key, e.key)
 	if err != nil {
@@ -806,6 +829,27 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (e *inlineCollisionGroup) getElementAndNextKey(
+	storage SlabStorage,
+	digester Digester,
+	level uint,
+	_ Digest,
+	comparator ValueComparator,
+	key Value,
+) (MapKey, MapValue, MapKey, error) {
+
+	// Adjust level and hkey for collision group
+	level++
+	if level > digester.Levels() {
+		return nil, nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels())
+	}
+	hkey, _ := digester.Digest(level)
+
+	// Search key in collision group with adjusted hkeyPrefix and hkey
+	// Don't need to wrap error as external error because err is already categorized by elements.getElementAndNextKey().
+	return e.elements.getElementAndNextKey(storage, digester, level, hkey, comparator, key)
+}
+
 func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) {
 
 	// Adjust level and hkey for collision group
@@ -995,6 +1039,32 @@ func (e *externalCollisionGroup) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (e *externalCollisionGroup) getElementAndNextKey(
+	storage SlabStorage,
+	digester Digester,
+	level uint,
+	_ Digest,
+	comparator ValueComparator,
+	key Value,
+) (MapKey, MapValue, MapKey, error) {
+	slab, err := getMapSlab(storage, e.slabID)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by getMapSlab().
+		return nil, nil, nil, err
+	}
+
+	// Adjust level and hkey for collision group
+	level++
+	if level > digester.Levels() {
+		return nil, nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels())
+	}
+	hkey, _ := digester.Digest(level)
+
+	// Search key in collision group with adjusted hkeyPrefix and hkey
+	// Don't need to wrap error as external error because err is already categorized by MapSlab.getElementAndNextKey().
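+	// Note: unlike an inline collision group, the colliding elements live in their
+	// own slab, so the element and next-key lookup recurses into that slab.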
+ return slab.getElementAndNextKey(storage, digester, level, hkey, comparator, key) +} + func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { @@ -1347,10 +1417,15 @@ func (e *hkeyElements) Encode(enc *Encoder) error { return nil } -func (e *hkeyElements) get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, int, error) { +func (e *hkeyElements) getElement( + digester Digester, + level uint, + hkey Digest, + key Value, +) (element, int, error) { if level >= digester.Levels() { - return nil, nil, 0, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, 0, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } // binary search by hkey @@ -1372,23 +1447,21 @@ func (e *hkeyElements) get(storage SlabStorage, digester Digester, level uint, h // No matching hkey if equalIndex == -1 { - return nil, nil, 0, NewKeyNotFoundError(key) + return nil, 0, NewKeyNotFoundError(key) } - elem := e.elems[equalIndex] + return e.elems[equalIndex], equalIndex, nil +} - k, v, err := elem.Get(storage, digester, level, hkey, comparator, key) +func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + elem, _, err := e.getElement(digester, level, hkey, key) if err != nil { - // Don't need to wrap error as external error because err is already categorized by element.Get(). - return nil, nil, 0, err + // Don't need to wrap error as external error because err is already categorized by hkeyElements.getElement(). + return nil, nil, err } - return k, v, equalIndex, nil -} - -func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { - k, v, _, err := e.get(storage, digester, level, hkey, comparator, key) - return k, v, err + // Don't need to wrap error as external error because err is already categorized by element.Get(). + return elem.Get(storage, digester, level, hkey, comparator, key) } func (e *hkeyElements) getElementAndNextKey( @@ -1399,12 +1472,23 @@ func (e *hkeyElements) getElementAndNextKey( comparator ValueComparator, key Value, ) (MapKey, MapValue, MapKey, error) { - k, v, index, err := e.get(storage, digester, level, hkey, comparator, key) + elem, index, err := e.getElement(digester, level, hkey, key) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by hkeyElements.getElement(). + return nil, nil, nil, err + } + + k, v, nk, err := elem.getElementAndNextKey(storage, digester, level, hkey, comparator, key) if err != nil { // Don't need to wrap error as external error because err is already categorized by hkeyElements.get(). 
return nil, nil, nil, err } + if nk != nil { + // Found next key in element group + return k, v, nk, nil + } + nextIndex := index + 1 switch { From a0e7907039cce4d0ba68b85568cb96c1315ebc00 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 1 Dec 2023 17:40:50 -0600 Subject: [PATCH 091/126] Add more tests for mutable map iterator --- array_test.go | 2 +- map_test.go | 1570 +++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 1527 insertions(+), 45 deletions(-) diff --git a/array_test.go b/array_test.go index fa4e3070..7c64d61c 100644 --- a/array_test.go +++ b/array_test.go @@ -692,7 +692,7 @@ func TestArrayRemove(t *testing.T) { }) } -func TestArrayIterateReadOnly(t *testing.T) { +func TestReadOnlyArrayIterate(t *testing.T) { t.Run("empty", func(t *testing.T) { typeInfo := testTypeInfo{42} diff --git a/map_test.go b/map_test.go index dccfd4bc..d8e58579 100644 --- a/map_test.go +++ b/map_test.go @@ -1107,7 +1107,7 @@ func TestMapRemove(t *testing.T) { }) } -func TestMapIterate(t *testing.T) { +func TestReadOnlyMapIterate(t *testing.T) { t.Run("empty", func(t *testing.T) { @@ -1303,8 +1303,1109 @@ func TestMapIterate(t *testing.T) { testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) +} + +func TestMutableMapIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // Iterate key value pairs + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate keys + i = 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate values + i = 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := 
existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + sortedKeys[i] = k + keyValues[k] = v + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + 
t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + r := 'a' + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision primitive values, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, 
err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision primitive values, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := 
Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, 
childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + const ( + mapSize = 35 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := 
newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 10 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, 
childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j >= mutatedChildMapSize; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision inlined container, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = 
childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision inlined container, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) - t.Run("mutation", func(t *testing.T) { + t.Run("mutate inlined container", func(t *testing.T) { const ( mapSize = 15 valueStringSize = 16 @@ -1312,7 +2413,125 @@ func TestMapIterate(t *testing.T) { r := newRand(t) - elementSize := digestSize + singleElementPrefixSize + Uint64Value(0).ByteSize() + NewStringValue(randStr(r, valueStringSize)).ByteSize() + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) 
+ require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys[i] = k + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + delete(expectedChildMapValues, ck) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 35 + ) typeInfo := testTypeInfo{42} address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1323,31 +2542,36 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, 0, mapSize) - i := uint64(0) + sortedKeys := make([]Value, mapSize) for i := 0; i < mapSize; i++ { - ck := Uint64Value(0) - cv := NewStringValue(randStr(r, valueStringSize)) childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) - require.NoError(t, err) - require.Nil(t, existingStorable) + childMapValues := make(mapValue) + for j := 0; j < 
childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } k := Uint64Value(i) - sortedKeys = append(sortedKeys, k) - existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) require.Nil(t, existingStorable) - - require.Equal(t, uint64(1), childMap.Count()) + require.Equal(t, uint64(childMapSize), childMap.Count()) require.True(t, childMap.Inlined()) - keyValues[k] = mapValue{ck: cv} + keyValues[k] = childMapValues + sortedKeys[i] = k } + require.Equal(t, uint64(mapSize), m.Count()) require.True(t, m.root.IsData()) @@ -1356,76 +2580,324 @@ func TestMapIterate(t *testing.T) { // Sort keys by digest sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - sizeBeforeMutation := m.root.Header().size - // Iterate and mutate child map (inserting elements) - i = uint64(0) + i := uint64(0) err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) childMap, ok := v.(*OrderedMap) require.True(t, ok) - require.Equal(t, uint64(1), childMap.Count()) + require.Equal(t, uint64(childMapSize), childMap.Count()) require.True(t, childMap.Inlined()) - newChildMapKey := Uint64Value(1) // Previous key is 0 - newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) - existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 5 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + 
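+			// Note: each child map starts small enough to be stored inline in its parent slab; the mutation below grows it until it no longer fits inline (asserted later in this test).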
require.True(t, childMap.Inlined()) - expectedChildMapValues, ok := keyValues[k].(mapValue) - require.True(t, ok) + keyValues[k] = childMapValues + sortedKeys[i] = k + } - expectedChildMapValues[newChildMapKey] = newChildMapValue + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { valueEqual(t, sortedKeys[i], k) valueEqual(t, keyValues[k], v) - i++ - require.Equal(t, m.root.Header().size, sizeBeforeMutation+uint32(i)*elementSize) + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ return true, nil }) require.NoError(t, err) require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 1 + ) - sizeAfterInsertionMutation := m.root.Header().size + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) - // Iterate and mutate child map (removing elements) - i = uint64(0) - err = m.IterateValues(compare, 
hashInputProvider, func(v Value) (resume bool, err error) { childMap, ok := v.(*OrderedMap) require.True(t, ok) - require.Equal(t, uint64(2), childMap.Count()) - require.True(t, childMap.Inlined()) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) - // Remove key 0 - ck := Uint64Value(0) + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) - existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) - require.NoError(t, err) - require.NotNil(t, existingKeyStorable) - require.NotNil(t, existingValueStorable) + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) i++ - require.Equal(t, m.root.Header().size, sizeAfterInsertionMutation-uint32(i)*elementSize) return true, nil }) require.NoError(t, err) require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 10 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) - for k := range keyValues { expectedChildMapValues, ok := keyValues[k].(mapValue) require.True(t, ok) - delete(expectedChildMapValues, Uint64Value(0)) - } + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, 
existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) @@ -8075,7 +9547,7 @@ func TestEmptyMap(t *testing.T) { require.Nil(t, existingMapValueStorable) }) - t.Run("iterate", func(t *testing.T) { + t.Run("readonly iterate", func(t *testing.T) { i := 0 err := m.IterateReadOnly(func(k Value, v Value) (bool, error) { i++ @@ -8085,6 +9557,16 @@ func TestEmptyMap(t *testing.T) { require.Equal(t, 0, i) }) + t.Run("iterate", func(t *testing.T) { + i := 0 + err := m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + t.Run("count", func(t *testing.T) { count := m.Count() require.Equal(t, uint64(0), count) From bbee1cce9857cea3be94465430535b95398d4b0c Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 1 Dec 2023 18:16:22 -0600 Subject: [PATCH 092/126] Add more tests for mutable map key iterator --- map_test.go | 1620 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 1600 insertions(+), 20 deletions(-) diff --git a/map_test.go b/map_test.go index d8e58579..dca0ca77 100644 --- a/map_test.go +++ b/map_test.go @@ -1326,26 +1326,6 @@ func TestMutableMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, i) - // Iterate keys - i = 0 - err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { - i++ - return true, nil - }) - - require.NoError(t, err) - require.Equal(t, 0, i) - - // Iterate values - i = 0 - err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { - i++ - return true, nil - }) - - require.NoError(t, err) - require.Equal(t, 0, i) - testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) }) @@ -2903,6 +2883,1606 @@ func TestMutableMapIterate(t *testing.T) { }) } +func TestMutableMapIterateKeys(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, 
hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + sortedKeys[i] = k + keyValues[k] = v + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, 
sortedKeys[i], k) + + v := keyValues[k] + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + r := 'a' + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) 
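+			// Note: replacing each 25-byte string value with a small integer shrinks the map enough for its slabs to merge back into a single root data slab (asserted below).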
+ + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision primitive values, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision primitive values, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + 
mapSize = 15 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, 
keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, 
uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + const ( + mapSize = 35 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 10 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, 
newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j >= mutatedChildMapSize; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision inlined container, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + 
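+			// IterateKeys yields only the key, so the inlined child map is fetched with m.Get below before it is mutated.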
valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision inlined container, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := 
NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys[i] = k + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + delete(expectedChildMapValues, ck) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := 
newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 5 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + 
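+			// Note: each 5-entry child map fits inline for now; it is uninlined later in this test when it grows to 35 entries.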
+ keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (removing elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := 
Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 10 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (removing elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} + func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { const ( From 4b0ec40c7d65a1919a2d723051e402117c9b9400 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 1 Dec 2023 
18:28:43 -0600 Subject: [PATCH 093/126] Add more tests for mutable map value iterator --- map_test.go | 1598 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1598 insertions(+) diff --git a/map_test.go b/map_test.go index dca0ca77..ba8ca3a0 100644 --- a/map_test.go +++ b/map_test.go @@ -4483,6 +4483,1604 @@ func TestMutableMapIterateKeys(t *testing.T) { }) } +func TestMutableMapIterateValues(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + sortedKeys[i] = k + keyValues[k] = v + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k 
:= sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, 
existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + r := 'a' + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision primitive values, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision primitive values, 4 levels", func(t *testing.T) { + 
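+ // Same as the 1-level collision test above, but with 4 digest levels so colliding keys are stored in nested collision groups.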
const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, 
uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + 
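+ // Set should return nil for existingStorable because every child key is new here.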
existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + const ( + mapSize = 35 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + 
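+ // The iterator yields the child map itself (not a copy), so mutations through it must be reflected in the parent map.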
childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 10 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (removing elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j >= mutatedChildMapSize; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision inlined 
container, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision inlined container, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + 
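+ // (iteration visits entries in digest order, so sortedKeys must be sorted the same way for the callback's index-based checks)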
sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys[i] = k + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = 
m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + delete(expectedChildMapValues, ck) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined 
container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 5 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + 
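+ // With 35 entries, each child map exceeds the max inline size, so Set stores it in its own slab (require.False(t, childMap.Inlined()) below).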
+ existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (removing elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 10 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (removing elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + 
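+ // Removing entries below the max inline size should re-inline the child map into the parent slab.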
expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} + func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { const ( From b3e41ec4c022d986aa5453c2396bf486e257dabd Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Sat, 2 Dec 2023 14:45:38 -0600 Subject: [PATCH 094/126] Update CI to increase timeout While at it, also reduce matrix of OS and Go versions given the duration of tests. --- .github/workflows/ci.yml | 6 +++--- .github/workflows/coverage.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index db59c4ee..fcab7c8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,8 +30,8 @@ jobs: strategy: matrix: - os: [macos-latest, ubuntu-latest] - go-version: [1.17, 1.18, 1.19] + os: [ubuntu-latest] + go-version: ['1.20', 1.21] steps: - name: Install Go @@ -54,4 +54,4 @@ jobs: - name: Run tests run: | go version - go test -timeout 60m -race -v ./... + go test -timeout 180m -race -v ./... diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 1fe10561..88b4907a 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -32,7 +32,7 @@ jobs: run: go build ./... - name: Generate coverage report - run: go test -timeout 60m -race -coverprofile=coverage.txt -covermode=atomic + run: go test -timeout 180m -race -coverprofile=coverage.txt -covermode=atomic - name: Upload coverage report to Codecov uses: codecov/codecov-action@v3.1.4 From 5e67357edc8b912f8cf26088ce5e1685bcc262f2 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 4 Dec 2023 09:25:46 -0600 Subject: [PATCH 095/126] Update some comments --- map.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/map.go b/map.go index 823f2dc4..57891d7a 100644 --- a/map.go +++ b/map.go @@ -838,14 +838,14 @@ func (e *inlineCollisionGroup) getElementAndNextKey( key Value, ) (MapKey, MapValue, MapKey, error) { - // Adjust level and hkey for collision group + // Adjust level and hkey for collision group. level++ if level > digester.Levels() { return nil, nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - // Search key in collision group with adjusted hkeyPrefix and hkey + // Search key in collision group with adjusted hkeyPrefix and hkey. // Don't need to wrap error as external error because err is already categorized by elements.Get(). 
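// hkey was adjusted above, so the lookup inside the collision group uses the digest one level deeper.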
return e.elements.getElementAndNextKey(storage, digester, level, hkey, comparator, key) } @@ -1053,14 +1053,14 @@ func (e *externalCollisionGroup) getElementAndNextKey( return nil, nil, nil, err } - // Adjust level and hkey for collision group + // Adjust level and hkey for collision group. level++ if level > digester.Levels() { return nil, nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - // Search key in collision group with adjusted hkeyPrefix and hkey + // Search key in collision group with adjusted hkeyPrefix and hkey. // Don't need to wrap error as external error because err is already categorized by MapSlab.getElementAndNextKey(). return slab.getElementAndNextKey(storage, digester, level, hkey, comparator, key) } @@ -1485,7 +1485,7 @@ func (e *hkeyElements) getElementAndNextKey( } if nk != nil { - // Found next key in element group + // Found next key in element group. return k, v, nk, nil } @@ -2171,7 +2171,7 @@ func (e *singleElements) getElementAndNextKey( return k, v, nil, nil default: // nextIndex > len(e.elems) - // This should never happen + // This should never happen. return nil, nil, nil, NewUnreachableError() } } @@ -4094,7 +4094,7 @@ func (m *MapMetaDataSlab) getElementAndNextKey( return k, v, nil, nil default: // nextIndex > len(m.childrenHeaders) - // This should never happen + // This should never happen. return nil, nil, nil, NewUnreachableError() } } @@ -5722,7 +5722,7 @@ func (i *mutableMapIterator) CanMutate() bool { func (i *mutableMapIterator) Next() (Value, Value, error) { if i.nextKey == nil { - // No more elements + // No more elements. return nil, nil, nil } @@ -5740,7 +5740,7 @@ func (i *mutableMapIterator) NextKey() (Value, error) { if i.nextKey == nil { - // No more elements + // No more elements. return nil, nil } From 83d8870b7e7b9c5cbf3900c471b0b7b45e33c6cd Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 28 Feb 2024 14:17:11 -0600 Subject: [PATCH 096/126] Deduplicate inlined dict type info to reduce RAM This change deduplicates Cadence dictionary type info and composite type info, reducing both memory usage and persistent storage. More specifically, it encodes the inlined atree slab extra data section as a two-element array: - array of deduplicated type info - array of deduplicated extra data with type info index --- array.go | 10 +- array_debug.go | 17 +++- array_test.go | 86 ++++++++++++++--- map.go | 10 +- map_test.go | 255 +++++++++++++++++++++++++++++++++++++------------ storable.go | 2 + typeinfo.go | 195 +++++++++++++++++++++++++++++++------ 7 files changed, 464 insertions(+), 111 deletions(-) diff --git a/array.go b/array.go index d8f39487..d5e4fc70 100644 --- a/array.go +++ b/array.go @@ -301,13 +301,13 @@ func (a *ArrayExtraData) isExtraData() bool { // Encode encodes extra data as CBOR array: // // [type info] -func (a *ArrayExtraData) Encode(enc *Encoder) error { +func (a *ArrayExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { err := enc.CBOR.EncodeArrayHead(arrayExtraDataLength) if err != nil { return NewEncodingError(err) } - err = a.TypeInfo.Encode(enc.CBOR) + err = encodeTypeInfo(enc, a.TypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by TypeInfo interface. 
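// Root slabs pass defaultEncodeTypeInfo to write TypeInfo as is; for inlined slabs, callers can instead pass an encoder that deduplicates type info, so the inlined extra data section is encoded as [[type info...], [extra data with type info index...]] and repeated type info is written only once (the 0xd8, 0xf6 type info ref tags in the tests below).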
return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") @@ -840,7 +840,8 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { // Encode extra data if a.extraData != nil { - err = a.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = a.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // err is already categorized by ArrayExtraData.Encode(). return err @@ -1738,7 +1739,8 @@ func (a *ArrayMetaDataSlab) Encode(enc *Encoder) error { // Encode extra data if present if a.extraData != nil { - err = a.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = a.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // Don't need to wrap because err is already categorized by ArrayExtraData.Encode(). return err diff --git a/array_debug.go b/array_debug.go index eb0d2fe7..7ffa335d 100644 --- a/array_debug.go +++ b/array_debug.go @@ -861,12 +861,27 @@ func hasInlinedComposite(data []byte) (bool, error) { // Parse inlined extra data to find compact map extra data. dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + count, err := dec.DecodeArrayHead() if err != nil { return false, NewDecodingError(err) } + if count != inlinedExtraDataArrayCount { + return false, NewDecodingError(fmt.Errorf("failed to decode inlined extra data, expect %d elements, got %d elements", inlinedExtraDataArrayCount, count)) + } - for i := uint64(0); i < count; i++ { + // Skip element 0 (inlined type info) + err = dec.Skip() + if err != nil { + return false, NewDecodingError(err) + } + + // Decoding element 1 (inlined extra data) + extraDataCount, err := dec.DecodeArrayHead() + if err != nil { + return false, NewDecodingError(err) + } + for i := uint64(0); i < extraDataCount; i++ { tagNum, err := dec.DecodeTagNumber() if err != nil { return false, NewDecodingError(err) diff --git a/array_test.go b/array_test.go index 7c64d61c..58a0c9cf 100644 --- a/array_test.go +++ b/array_test.go @@ -3184,11 +3184,19 @@ func TestArrayEncodeDecode(t *testing.T) { 0x18, 0x2a, // inlined extra data + 0x82, + // element 0: array of type info 0x81, - // inlined array extra data + // type info + 0x18, 0x2b, + // element 1: array of extra data + 0x81, + // array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + // array type info ref + 0xd8, 0xf6, + 0x00, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3266,14 +3274,22 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, + // element 0: array of inlined type info + 0x82, + 0x18, 0x2c, + 0x18, 0x2b, + // element 1: array of inlined extra data + 0x82, // inlined array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, // inlined array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + 0xd8, 0xf6, + 0x01, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3355,13 +3371,21 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, + // element 0: array of inlined type info + 0x82, + 0x18, 0x2c, + 0x18, 0x2b, + // element 1: array of inlined extra data + 0x82, // inlined array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + 0xd8, 0xf6, + 0x01, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3454,23 +3478,35 @@ func TestArrayEncodeDecode(t *testing.T) { 0x18, 0x2a, // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x84, + 0x18, 0x2c, + 0x18, 0x2b, + 0x18, 0x2e, + 0x18, 0x2d, + // element 1: array of inlined 
extra data 0x84, // typeInfo3 0xd8, 0xf7, 0x81, - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, // typeInfo2 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + 0xd8, 0xf6, + 0x01, // typeInfo5 0xd8, 0xf7, 0x81, - 0x18, 0x2e, + 0xd8, 0xf6, + 0x02, // typeInfo4 0xd8, 0xf7, 0x81, - 0x18, 0x2d, + 0xd8, 0xf6, + 0x03, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3595,11 +3631,17 @@ func TestArrayEncodeDecode(t *testing.T) { // array data slab flag 0x00, // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // inlined array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x0b, // CBOR encoded array elements @@ -3744,13 +3786,21 @@ func TestArrayEncodeDecode(t *testing.T) { 0x00, // inlined extra data 0x82, + // element 0: array of inlined extra data + 0x82, + 0x18, 0x2c, + 0x18, 0x2b, + // element 1: array of inlined extra data + 0x82, // inlined array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + 0xd8, 0xf6, + 0x01, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x0b, // CBOR encoded array elements @@ -4064,12 +4114,18 @@ func TestArrayEncodeDecode(t *testing.T) { // array data slab flag (has pointer) 0x40, - // inlined array of extra data + // inlined extra data + 0x82, + // element 0: array of type info + 0x81, + 0x18, 0x2c, + // element 1: array of extra data 0x81, // type info 0xd8, 0xf7, 0x81, - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x0b, diff --git a/map.go b/map.go index 57891d7a..bf142744 100644 --- a/map.go +++ b/map.go @@ -497,14 +497,14 @@ func (m *MapExtraData) isExtraData() bool { // Encode encodes extra data as CBOR array: // // [type info, count, seed] -func (m *MapExtraData) Encode(enc *Encoder) error { +func (m *MapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { err := enc.CBOR.EncodeArrayHead(mapExtraDataLength) if err != nil { return NewEncodingError(err) } - err = m.TypeInfo.Encode(enc.CBOR) + err = encodeTypeInfo(enc, m.TypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by TypeInfo interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") @@ -2917,7 +2917,8 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { // Encode extra data if m.extraData != nil { - err = m.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = m.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode(). return err @@ -3918,7 +3919,8 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error { // Encode extra data if present if m.extraData != nil { - err = m.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = m.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode(). 
return err diff --git a/map_test.go b/map_test.go index ba8ca3a0..f80c8dcb 100644 --- a/map_test.go +++ b/map_test.go @@ -7660,12 +7660,18 @@ func TestMapEncodeDecode(t *testing.T) { // flag: has inlined slab + map data 0x08, - // inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // inlined array extra data 0xd8, 0xf7, 0x81, - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // the following encoded data is valid CBOR @@ -7723,7 +7729,7 @@ func TestMapEncodeDecode(t *testing.T) { require.Equal(t, 2, len(meta.childrenHeaders)) require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) - const inlinedExtraDataSize = 6 + const inlinedExtraDataSize = 11 require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size) // Decode data to new storage @@ -7803,14 +7809,20 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 2 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x82, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -7820,7 +7832,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -7992,14 +8005,21 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 2 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x82, + 0x18, 0x2c, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x82, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8009,7 +8029,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x01, // count: 1 0x01, // seed @@ -8183,14 +8204,20 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 4 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8200,7 +8227,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8210,7 +8238,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8220,7 +8249,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8453,14 +8483,23 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 4 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of type info + 0x84, + 0x18, 0x2c, + 0x18, 0x2e, + 0x18, 0x2b, + 0x18, 0x2d, + // element 1: array of extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info: 44 - 0x18, 0x2c, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8471,7 +8510,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 
0xf8, 0x83, // type info: 46 - 0x18, 0x2e, + 0xd8, 0xf6, + 0x01, // count: 1 0x01, // seed @@ -8482,7 +8522,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info: 43 - 0x18, 0x2b, + 0xd8, 0xf6, + 0x02, // count: 1 0x01, // seed @@ -8493,7 +8534,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info: 45 - 0x18, 0x2d, + 0xd8, 0xf6, + 0x03, // count: 1 0x01, // seed @@ -8720,14 +8762,20 @@ func TestMapEncodeDecode(t *testing.T) { // flag: map data 0x08, - // 4 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8737,7 +8785,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8747,7 +8796,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8756,7 +8806,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8903,14 +8954,20 @@ func TestMapEncodeDecode(t *testing.T) { // flag: map data 0x08, - // 4 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8920,7 +8977,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8930,7 +8988,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -8939,7 +8998,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -9201,14 +9261,23 @@ func TestMapEncodeDecode(t *testing.T) { // flag: map data 0x08, - // 4 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x84, + 0x18, 0x2b, + 0x18, 0x2c, + 0x18, 0x2d, + 0x18, 0x2e, + // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -9218,7 +9287,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2c, + 0xd8, 0xf6, + 0x01, // count: 1 0x01, // seed @@ -9228,7 +9298,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2d, + 0xd8, 0xf6, + 0x02, // count: 1 0x01, // seed @@ -9237,7 +9308,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2e, + 0xd8, 0xf6, + 0x03, // count: 1 0x01, // seed @@ -9384,14 +9456,23 @@ func TestMapEncodeDecode(t *testing.T) { // flag: map data 0x08, - // 4 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x84, + 0x18, 0x2b, + 0x18, 0x2c, + 0x18, 0x2d, + 0x18, 0x2e, + // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -9401,7 +9482,8 @@ func TestMapEncodeDecode(t 
*testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2c, + 0xd8, 0xf6, + 0x01, // count: 1 0x01, // seed @@ -9411,7 +9493,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2d, + 0xd8, 0xf6, + 0x02, // count: 1 0x01, // seed @@ -9420,7 +9503,8 @@ func TestMapEncodeDecode(t *testing.T) { 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2e, + 0xd8, 0xf6, + 0x03, // count: 1 0x01, // seed @@ -10510,14 +10594,20 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // array of inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -10985,14 +11075,20 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // array of inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // element 0 // inlined array extra data 0xd8, 0xf7, 0x81, // type info - 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // the following encoded data is valid CBOR @@ -11280,7 +11376,12 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 1 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // element 0 // inlined composite extra data @@ -11289,7 +11390,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count 0x01, // seed @@ -11448,7 +11550,12 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 1 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // element 0 // inlined composite extra data @@ -11457,7 +11564,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -11623,7 +11731,12 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 1 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data 0x81, // element 0 // inlined composite extra data @@ -11632,7 +11745,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -11813,7 +11927,12 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 3 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data 0x83, // element 0 // inlined composite extra data @@ -11822,7 +11941,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -11841,7 +11961,8 @@ func TestMapEncodeDecode(t *testing.T) { // map 
extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -11860,7 +11981,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -12049,7 +12171,12 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 2 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data 0x82, // element 0 // inlined map extra data @@ -12058,7 +12185,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -12076,7 +12204,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 1 0x01, // seed @@ -12247,7 +12376,13 @@ func TestMapEncodeDecode(t *testing.T) { // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // 2 inlined slab extra data + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x82, + 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, 0x18, 0x2c, + // element 1: array of inlined extra data 0x82, // element 0 // inlined composite extra data @@ -12256,7 +12391,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2b, + 0xd8, 0xf6, + 0x00, // count: 2 0x02, // seed @@ -12274,7 +12410,8 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, 0x18, 0x2c, + 0xd8, 0xf6, + 0x01, // count: 2 0x02, // seed diff --git a/storable.go b/storable.go index a59529c8..634c4572 100644 --- a/storable.go +++ b/storable.go @@ -76,6 +76,8 @@ const ( // As of Oct. 2, 2023, Cadence uses tag numbers from 128 to 224. // See runtime/interpreter/encode.go at github.com/onflow/cadence. + CBORTagTypeInfoRef = 246 + CBORTagInlinedArrayExtraData = 247 CBORTagInlinedMapExtraData = 248 CBORTagInlinedCompactMapExtraData = 249 diff --git a/typeinfo.go b/typeinfo.go index 86c9fe67..a2eacddb 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -41,9 +41,19 @@ type TypeInfoDecoder func( error, ) +// encodeTypeInfo encodes TypeInfo either: +// - as is (for TypeInfo in root slab extra data section), or +// - as index of inlined TypeInfos (for TypeInfo in inlined slab extra data section) +type encodeTypeInfo func(*Encoder, TypeInfo) error + +// defaultEncodeTypeInfo encodes TypeInfo as is. +func defaultEncodeTypeInfo(enc *Encoder, typeInfo TypeInfo) error { + return typeInfo.Encode(enc.CBOR) +} + type ExtraData interface { isExtraData() bool - Encode(enc *Encoder) error + Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error } // compactMapExtraData is used for inlining compact values. 
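With the interface change above, every ExtraData implementation now receives the type info encoder from its caller instead of encoding TypeInfo itself. As a hedged illustration of the pattern (toyExtraData is hypothetical and exists only for this sketch; Encoder, TypeInfo, and encodeTypeInfo are the repo's own types):

```go
// toyExtraData is a hypothetical ExtraData holding only a type info.
type toyExtraData struct {
	typeInfo TypeInfo
}

var _ ExtraData = &toyExtraData{}

func (d *toyExtraData) isExtraData() bool { return true }

// Encode defers the type info encoding decision to the caller:
// defaultEncodeTypeInfo for root slabs, or the index-based encoder
// that InlinedExtraData.Encode supplies for inlined slabs.
func (d *toyExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error {
	return encodeTypeInfo(enc, d.typeInfo)
}
```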
@@ -64,14 +74,14 @@ func (c *compactMapExtraData) isExtraData() bool { return true } -func (c *compactMapExtraData) Encode(enc *Encoder) error { +func (c *compactMapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { err := enc.CBOR.EncodeArrayHead(compactMapExtraDataLength) if err != nil { return NewEncodingError(err) } // element 0: map extra data - err = c.mapExtraData.Encode(enc) + err = c.mapExtraData.Encode(enc, encodeTypeInfo) if err != nil { return err } @@ -200,27 +210,64 @@ type compactMapTypeInfo struct { keys []ComparableStorable } +type extraDataInfo struct { + data ExtraData + typeInfoIndex int +} + type InlinedExtraData struct { - extraData []ExtraData - compactMapTypes map[string]compactMapTypeInfo - arrayTypes map[string]int + extraData []extraDataInfo // Used to encode deduplicated ExtraData in order + typeInfo []TypeInfo // Used to encode deduplicated TypeInfo in order + compactMapTypeSet map[string]compactMapTypeInfo // Used to deduplicate compactMapExtraData by TypeInfo.Identifier() + sorted field names + arrayExtraDataSet map[string]int // Used to deduplicate arrayExtraData by TypeInfo.Identifier() + typeInfoSet map[string]int // Used to deduplicate TypeInfo by TypeInfo.Identifier() } func newInlinedExtraData() *InlinedExtraData { + // Maps used for deduplication are initialized lazily. return &InlinedExtraData{} } -// Encode encodes inlined extra data as CBOR array. +const inlinedExtraDataArrayCount = 2 + +// Encode encodes inlined extra data as 2-element array: +// +// +-----------------------+------------------------+ +// | [+ inlined type info] | [+ inlined extra data] | +// +-----------------------+------------------------+ func (ied *InlinedExtraData) Encode(enc *Encoder) error { - err := enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData))) + var err error + + err = enc.CBOR.EncodeArrayHead(inlinedExtraDataArrayCount) if err != nil { return NewEncodingError(err) } - var tagNum uint64 + // element 0: deduplicated array of type info + err = enc.CBOR.EncodeArrayHead(uint64(len(ied.typeInfo))) + if err != nil { + return NewEncodingError(err) + } + + // Encode inlined type info + for _, typeInfo := range ied.typeInfo { + err = typeInfo.Encode(enc.CBOR) + if err != nil { + return NewEncodingError(err) + } + } + + // element 1: deduplicated array of extra data + err = enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData))) + if err != nil { + return NewEncodingError(err) + } + // Encode inlined extra data for _, extraData := range ied.extraData { - switch extraData.(type) { + var tagNum uint64 + + switch extraData.data.(type) { case *ArrayExtraData: tagNum = CBORTagInlinedArrayExtraData @@ -239,7 +286,25 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return NewEncodingError(err) } - err = extraData.Encode(enc) + err = extraData.data.Encode(enc, func(enc *Encoder, typeInfo TypeInfo) error { + id := typeInfo.Identifier() + index, exist := ied.typeInfoSet[id] + if !exist { + return NewEncodingError(fmt.Errorf("failed to encode type info ref %s (%T)", id, typeInfo)) + } + + err := enc.CBOR.EncodeTagHead(CBORTagTypeInfoRef) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.EncodeUint64(uint64(index)) + if err != nil { + return NewEncodingError(err) + } + + return nil + }) if err != nil { return err } @@ -267,12 +332,60 @@ func newInlinedExtraDataFromData( return nil, nil, NewDecodingError(err) } - if count == 0 { + if count != inlinedExtraDataArrayCount { + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode 
inlined extra data: expect %d elements, got %d elements", inlinedExtraDataArrayCount, count)) + } + + // element 0: array of deduplicated type info + typeInfoCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + if typeInfoCount == 0 { + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined type info")) + } + + inlinedTypeInfo := make([]TypeInfo, typeInfoCount) + for i := uint64(0); i < typeInfoCount; i++ { + inlinedTypeInfo[i], err = decodeTypeInfo(dec) + if err != nil { + return nil, nil, err + } + } + + typeInfoRefDecoder := func(decoder *cbor.StreamDecoder) (TypeInfo, error) { + tagNum, err := decoder.DecodeTagNumber() + if err != nil { + return nil, err + } + if tagNum != CBORTagTypeInfoRef { + return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect tag number %d, got %d", CBORTagTypeInfoRef, tagNum)) + } + + index, err := decoder.DecodeUint64() + if err != nil { + return nil, NewDecodingError(err) + } + if index >= uint64(len(inlinedTypeInfo)) { + return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect index < %d, got %d", len(inlinedTypeInfo), index)) + } + + return inlinedTypeInfo[int(index)], nil + } + + // element 1: array of deduplicated extra data info + extraDataCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + if extraDataCount == 0 { return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined extra data")) } - inlinedExtraData := make([]ExtraData, count) - for i := uint64(0); i < count; i++ { + inlinedExtraData := make([]ExtraData, extraDataCount) + for i := uint64(0); i < extraDataCount; i++ { tagNum, err := dec.DecodeTagNumber() if err != nil { return nil, nil, NewDecodingError(err) @@ -280,19 +393,19 @@ func newInlinedExtraDataFromData( switch tagNum { case CBORTagInlinedArrayExtraData: - inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo) + inlinedExtraData[i], err = newArrayExtraData(dec, typeInfoRefDecoder) if err != nil { return nil, nil, err } case CBORTagInlinedMapExtraData: - inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo) + inlinedExtraData[i], err = newMapExtraData(dec, typeInfoRefDecoder) if err != nil { return nil, nil, err } case CBORTagInlinedCompactMapExtraData: - inlinedExtraData[i], err = newCompactMapExtraData(dec, decodeTypeInfo, decodeStorable) + inlinedExtraData[i], err = newCompactMapExtraData(dec, typeInfoRefDecoder, decodeStorable) if err != nil { return nil, nil, err } @@ -305,31 +418,55 @@ func newInlinedExtraDataFromData( return inlinedExtraData, data[dec.NumBytesDecoded():], nil } +// addTypeInfo returns index of deduplicated type info. +func (ied *InlinedExtraData) addTypeInfo(typeInfo TypeInfo) int { + if ied.typeInfoSet == nil { + ied.typeInfoSet = make(map[string]int) + } + + id := typeInfo.Identifier() + index, exist := ied.typeInfoSet[id] + if exist { + return index + } + + index = len(ied.typeInfo) + ied.typeInfo = append(ied.typeInfo, typeInfo) + ied.typeInfoSet[id] = index + + return index +} + // addArrayExtraData returns index of deduplicated array extra data. // Array extra data is deduplicated by array type info ID because array // extra data only contains type info. 
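Because addTypeInfo deduplicates by TypeInfo.Identifier(), repeated additions of the same type collapse into a single entry of the encoded type info array. A usage sketch, under the assumption that testTypeInfo (the helper type used in this repo's tests) implements TypeInfo:

```go
ied := newInlinedExtraData()

i0 := ied.addTypeInfo(testTypeInfo{43})
i1 := ied.addTypeInfo(testTypeInfo{43}) // same Identifier(): deduplicated
i2 := ied.addTypeInfo(testTypeInfo{44})

// i0 == 0, i1 == 0, i2 == 1, and len(ied.typeInfo) == 2,
// so element 0 of the encoded section carries only two entries.
_ = []int{i0, i1, i2}
```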
func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { - if ied.arrayTypes == nil { - ied.arrayTypes = make(map[string]int) + if ied.arrayExtraDataSet == nil { + ied.arrayExtraDataSet = make(map[string]int) } id := data.TypeInfo.Identifier() - index, exist := ied.arrayTypes[id] + index, exist := ied.arrayExtraDataSet[id] if exist { return index } + typeInfoIndex := ied.addTypeInfo(data.TypeInfo) + index = len(ied.extraData) - ied.extraData = append(ied.extraData, data) - ied.arrayTypes[id] = index + ied.extraData = append(ied.extraData, extraDataInfo{data, typeInfoIndex}) + ied.arrayExtraDataSet[id] = index + return index } // addMapExtraData returns index of map extra data. // Map extra data is not deduplicated because it also contains count and seed. func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) int { + typeInfoIndex := ied.addTypeInfo(data.TypeInfo) + index := len(ied.extraData) - ied.extraData = append(ied.extraData, data) + ied.extraData = append(ied.extraData, extraDataInfo{data, typeInfoIndex}) return index } @@ -341,12 +478,12 @@ func (ied *InlinedExtraData) addCompactMapExtraData( keys []ComparableStorable, ) (int, []ComparableStorable) { - if ied.compactMapTypes == nil { - ied.compactMapTypes = make(map[string]compactMapTypeInfo) + if ied.compactMapTypeSet == nil { + ied.compactMapTypeSet = make(map[string]compactMapTypeInfo) } id := makeCompactMapTypeID(data.TypeInfo, keys) - info, exist := ied.compactMapTypes[id] + info, exist := ied.compactMapTypeSet[id] if exist { return info.index, info.keys } @@ -357,10 +494,12 @@ func (ied *InlinedExtraData) addCompactMapExtraData( keys: keys, } + typeInfoIndex := ied.addTypeInfo(data.TypeInfo) + index := len(ied.extraData) - ied.extraData = append(ied.extraData, compactMapData) + ied.extraData = append(ied.extraData, extraDataInfo{compactMapData, typeInfoIndex}) - ied.compactMapTypes[id] = compactMapTypeInfo{ + ied.compactMapTypeSet[id] = compactMapTypeInfo{ keys: keys, index: index, } From 68259507247d0ecea6c2b59310849324375d3abe Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 28 Feb 2024 15:11:07 -0600 Subject: [PATCH 097/126] Preallocate map in FastCommit --- storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage.go b/storage.go index 0b324381..934678ec 100644 --- a/storage.go +++ b/storage.go @@ -910,7 +910,7 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { // process the results while encoders are working // we need to capture them inside a map // again so we can apply them in order of keys - encSlabByID := make(map[SlabID][]byte) + encSlabByID := make(map[SlabID][]byte, len(keysWithOwners)) for i := 0; i < len(keysWithOwners); i++ { result := <-results // if any error return From e029e4368874935cf8ea8a712446994b2f53ef7d Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 5 Mar 2024 17:05:51 -0600 Subject: [PATCH 098/126] Add Array.SetType() to allow updating TypeInfo --- array.go | 34 +++++++ array_test.go | 249 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 283 insertions(+) diff --git a/array.go b/array.go index d8f39487..fa764751 100644 --- a/array.go +++ b/array.go @@ -3653,6 +3653,40 @@ func (a *Array) Type() TypeInfo { return nil } +func (a *Array) SetType(typeInfo TypeInfo) error { + extraData := a.root.ExtraData() + extraData.TypeInfo = typeInfo + + a.root.SetExtraData(extraData) + + if a.Inlined() { + // Array is 
inlined. + // Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined array. + found, err := a.parentUpdater() + if err != nil { + return err + } + if !found { + a.parentUpdater = nil + } + + return nil + } + + // Array is standalone. + + slabID := a.SlabID() + + // Store modified root slab in storage since typeInfo is part of extraData stored in root slab. + err := a.Storage.Store(slabID, a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", slabID)) + } + + return nil +} + func (a *Array) String() string { iterator, err := a.ReadOnlyIterator() if err != nil { diff --git a/array_test.go b/array_test.go index 7c64d61c..09304fb2 100644 --- a/array_test.go +++ b/array_test.go @@ -23,6 +23,7 @@ import ( "math" "math/rand" "reflect" + "runtime" "strings" "testing" @@ -8480,3 +8481,251 @@ func TestArrayWithOutdatedCallback(t *testing.T) { valueEqual(t, expectedValues, parentArray) }) } + +func TestArraySetType(t *testing.T) { + typeInfo := testTypeInfo{42} + newTypeInfo := testTypeInfo{43} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create a new array in memory + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), array.Count()) + require.Equal(t, typeInfo, array.Type()) + require.True(t, array.root.IsData()) + + // Modify type info of new array + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, array.Type()) + + // Commit new array to storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingArraySetType(t, array.SlabID(), storage.baseStorage, newTypeInfo, array.Count()) + }) + + t.Run("data slab root", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + arraySize := 10 + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, typeInfo, array.Type()) + require.True(t, array.root.IsData()) + + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, array.Type()) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingArraySetType(t, array.SlabID(), storage.baseStorage, newTypeInfo, array.Count()) + }) + + t.Run("metadata slab root", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + arraySize := 10_000 + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, typeInfo, array.Type()) + require.False(t, array.root.IsData()) + + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, array.Type()) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingArraySetType(t, array.SlabID(), storage.baseStorage, newTypeInfo, array.Count()) + }) + + t.Run("inlined in parent container root data slab", func(t *testing.T) { + storage := 
newTestPersistentStorage(t) + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + require.Equal(t, uint64(1), parentArray.Count()) + require.Equal(t, typeInfo, parentArray.Type()) + require.True(t, parentArray.root.IsData()) + require.False(t, parentArray.Inlined()) + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, typeInfo, childArray.Type()) + require.True(t, childArray.root.IsData()) + require.True(t, childArray.Inlined()) + + err = childArray.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, childArray.Type()) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingInlinedArraySetType(t, parentArray.SlabID(), 0, storage.baseStorage, newTypeInfo, childArray.Count()) + }) + + t.Run("inlined in parent container non-root data slab", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + arraySize := 10_000 + for i := 0; i < arraySize-1; i++ { + v := Uint64Value(i) + err := parentArray.Append(v) + require.NoError(t, err) + } + + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, typeInfo, parentArray.Type()) + require.False(t, parentArray.root.IsData()) + require.False(t, parentArray.Inlined()) + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, typeInfo, childArray.Type()) + require.True(t, childArray.root.IsData()) + require.True(t, childArray.Inlined()) + + err = childArray.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, childArray.Type()) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingInlinedArraySetType(t, parentArray.SlabID(), arraySize-1, storage.baseStorage, newTypeInfo, childArray.Count()) + }) +} + +func testExistingArraySetType( + t *testing.T, + id SlabID, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} + + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) + + // Load existing array by ID + array, err := NewArrayWithRootID(storage, id) + require.NoError(t, err) + require.Equal(t, expectedCount, array.Count()) + require.Equal(t, expectedTypeInfo, array.Type()) + + // Modify type info of existing array + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, array.Count()) + require.Equal(t, newTypeInfo, array.Type()) + + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Load existing array again from storage + array2, err := NewArrayWithRootID(storage2, id) + require.NoError(t, err) + require.Equal(t, expectedCount, array2.Count()) + require.Equal(t, newTypeInfo, array2.Type()) +} + +func testExistingInlinedArraySetType( + t *testing.T, + parentID SlabID, + inlinedChildIndex int, + baseStorage BaseStorage, + 
expectedTypeInfo testTypeInfo, + expectedCount uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} + + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) + + // Load existing array by ID + parentArray, err := NewArrayWithRootID(storage, parentID) + require.NoError(t, err) + + element, err := parentArray.Get(uint64(inlinedChildIndex)) + require.NoError(t, err) + + childArray, ok := element.(*Array) + require.True(t, ok) + + require.Equal(t, expectedCount, childArray.Count()) + require.Equal(t, expectedTypeInfo, childArray.Type()) + + // Modify type info of existing array + err = childArray.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, childArray.Count()) + require.Equal(t, newTypeInfo, childArray.Type()) + + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Load existing array again from storage + parentArray2, err := NewArrayWithRootID(storage2, parentID) + require.NoError(t, err) + + element2, err := parentArray2.Get(uint64(inlinedChildIndex)) + require.NoError(t, err) + + childArray2, ok := element2.(*Array) + require.True(t, ok) + + require.Equal(t, expectedCount, childArray2.Count()) + require.Equal(t, newTypeInfo, childArray2.Type()) +} From 9be1712075a7901c6e2ac720da702abfd0bb0dca Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 5 Mar 2024 17:59:40 -0600 Subject: [PATCH 099/126] Add OrderedMap.SetType() to allow updating TypeInfo --- map.go | 34 ++++++ map_test.go | 296 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 330 insertions(+) diff --git a/map.go b/map.go index 57891d7a..da3847d9 100644 --- a/map.go +++ b/map.go @@ -5550,6 +5550,40 @@ func (m *OrderedMap) Type() TypeInfo { return nil } +func (m *OrderedMap) SetType(typeInfo TypeInfo) error { + extraData := m.root.ExtraData() + extraData.TypeInfo = typeInfo + + m.root.SetExtraData(extraData) + + if m.Inlined() { + // Map is inlined. + // Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined map. + found, err := m.parentUpdater() + if err != nil { + return err + } + if !found { + m.parentUpdater = nil + } + + return nil + } + + // Map is standalone. + + slabID := m.SlabID() + + // Store modified root slab in storage since typeInfo is part of extraData stored in root slab. + err := m.Storage.Store(slabID, m.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface.
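Together with Array.SetType() from the previous commit, both container types can now retarget their TypeInfo in place. A hedged usage sketch, assuming an existing storage and address as in the tests above (error handling elided):

```go
// Standalone array: SetType re-stores the root slab, since TypeInfo
// lives in the root slab's extra data section.
array, _ := NewArray(storage, address, testTypeInfo{42})
_ = array.SetType(testTypeInfo{43})

// Map: count and seed in the extra data are preserved; only the
// TypeInfo changes. For an inlined child, SetType instead notifies
// the parent via parentUpdater so the parent slab is re-stored.
m, _ := NewMap(storage, address, newBasicDigesterBuilder(), testTypeInfo{42})
_ = m.SetType(testTypeInfo{43})
```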
+ return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", slabID)) + } + + return nil +} + func (m *OrderedMap) String() string { iterator, err := m.ReadOnlyIterator() if err != nil { diff --git a/map_test.go b/map_test.go index ba8ca3a0..d951aa9b 100644 --- a/map_test.go +++ b/map_test.go @@ -24,6 +24,7 @@ import ( "math" "math/rand" "reflect" + "runtime" "sort" "strings" "testing" @@ -18337,3 +18338,298 @@ func TestMapWithOutdatedCallback(t *testing.T) { valueEqual(t, expectedKeyValues, parentMap) }) } + +func TestMapSetType(t *testing.T) { + typeInfo := testTypeInfo{42} + newTypeInfo := testTypeInfo{43} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + require.Equal(t, typeInfo, m.Type()) + require.True(t, m.root.IsData()) + + seed := m.root.ExtraData().Seed + + err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, seed, m.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingMapSetType(t, m.SlabID(), storage.baseStorage, newTypeInfo, m.Count(), seed) + }) + + t.Run("data slab root", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + mapSize := 10 + for i := 0; i < mapSize; i++ { + v := Uint64Value(i) + existingStorable, err := m.Set(compare, hashInputProvider, v, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, typeInfo, m.Type()) + require.True(t, m.root.IsData()) + + seed := m.root.ExtraData().Seed + + err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, seed, m.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingMapSetType(t, m.SlabID(), storage.baseStorage, newTypeInfo, m.Count(), seed) + }) + + t.Run("metadata slab root", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + mapSize := 10_000 + for i := 0; i < mapSize; i++ { + v := Uint64Value(i) + existingStorable, err := m.Set(compare, hashInputProvider, v, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, typeInfo, m.Type()) + require.False(t, m.root.IsData()) + + seed := m.root.ExtraData().Seed + + err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, seed, m.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingMapSetType(t, m.SlabID(), storage.baseStorage, newTypeInfo, m.Count(), seed) + }) + + t.Run("inlined in parent container root data slab", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), 
typeInfo) + require.NoError(t, err) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapSeed := childMap.root.ExtraData().Seed + + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), parentMap.Count()) + require.Equal(t, typeInfo, parentMap.Type()) + require.True(t, parentMap.root.IsData()) + require.False(t, parentMap.Inlined()) + + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, typeInfo, childMap.Type()) + require.True(t, childMap.root.IsData()) + require.True(t, childMap.Inlined()) + + err = childMap.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, childMap.Type()) + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, childMapSeed, childMap.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingInlinedMapSetType( + t, + parentMap.SlabID(), + Uint64Value(0), + storage.baseStorage, + newTypeInfo, + childMap.Count(), + childMapSeed, + ) + }) + + t.Run("inlined in parent container non-root data slab", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapSeed := childMap.root.ExtraData().Seed + + mapSize := 10_000 + for i := 0; i < mapSize-1; i++ { + v := Uint64Value(i) + existingStorable, err := parentMap.Set(compare, hashInputProvider, v, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(mapSize-1), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.Equal(t, typeInfo, parentMap.Type()) + require.False(t, parentMap.root.IsData()) + require.False(t, parentMap.Inlined()) + + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, typeInfo, childMap.Type()) + require.True(t, childMap.root.IsData()) + require.True(t, childMap.Inlined()) + + err = childMap.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, childMap.Type()) + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, childMapSeed, childMap.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingInlinedMapSetType( + t, + parentMap.SlabID(), + Uint64Value(mapSize-1), + storage.baseStorage, + newTypeInfo, + childMap.Count(), + childMapSeed, + ) + }) +} + +func testExistingMapSetType( + t *testing.T, + id SlabID, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, + expectedSeed uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} + + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) + + // Load existing map by ID + m, err := NewMapWithRootID(storage, id, newBasicDigesterBuilder()) + require.NoError(t, err) + require.Equal(t, expectedCount, m.Count()) + require.Equal(t, expectedTypeInfo, m.Type()) + require.Equal(t, expectedSeed, m.root.ExtraData().Seed) + + // Modify type info of existing map + err = 
m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, m.Count()) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, expectedSeed, m.root.ExtraData().Seed) + + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Load existing map again from storage + m2, err := NewMapWithRootID(storage2, id, newBasicDigesterBuilder()) + require.NoError(t, err) + require.Equal(t, expectedCount, m2.Count()) + require.Equal(t, newTypeInfo, m2.Type()) + require.Equal(t, expectedSeed, m2.root.ExtraData().Seed) +} + +func testExistingInlinedMapSetType( + t *testing.T, + parentID SlabID, + inlinedChildKey Value, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, + expectedSeed uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} + + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) + + // Load existing map by ID + parentMap, err := NewMapWithRootID(storage, parentID, newBasicDigesterBuilder()) + require.NoError(t, err) + + element, err := parentMap.Get(compare, hashInputProvider, inlinedChildKey) + require.NoError(t, err) + + childMap, ok := element.(*OrderedMap) + require.True(t, ok) + + require.Equal(t, expectedCount, childMap.Count()) + require.Equal(t, expectedTypeInfo, childMap.Type()) + require.Equal(t, expectedSeed, childMap.root.ExtraData().Seed) + + // Modify type info of existing map + err = childMap.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, childMap.Count()) + require.Equal(t, newTypeInfo, childMap.Type()) + require.Equal(t, expectedSeed, childMap.root.ExtraData().Seed) + + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Load existing map again from storage + parentMap2, err := NewMapWithRootID(storage2, parentID, newBasicDigesterBuilder()) + require.NoError(t, err) + + element2, err := parentMap2.Get(compare, hashInputProvider, inlinedChildKey) + require.NoError(t, err) + + childMap2, ok := element2.(*OrderedMap) + require.True(t, ok) + + require.Equal(t, expectedCount, childMap2.Count()) + require.Equal(t, newTypeInfo, childMap2.Type()) + require.Equal(t, expectedSeed, childMap2.root.ExtraData().Seed) +} From a05f97ef880b9df4a27c56f1ff01d1291bb97538 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 8 Mar 2024 12:52:05 -0600 Subject: [PATCH 100/126] Extract functionality of storeSlab for reuse --- array.go | 230 ++++++++++++++++------------------------------- basicarray.go | 21 +---- map.go | 208 +++++++++++++++--------------------------- storable_slab.go | 5 +- storage.go | 10 +++ 5 files changed, 168 insertions(+), 306 deletions(-) diff --git a/array.go b/array.go index fa764751..09a72817 100644 --- a/array.go +++ b/array.go @@ -990,13 +990,7 @@ func (a *ArrayDataSlab) Uninline(storage SlabStorage) error { a.inlined = false // Store slab in storage - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface.
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *ArrayDataSlab) HasPointer() bool { @@ -1057,10 +1051,9 @@ func (a *ArrayDataSlab) Set(storage SlabStorage, address Address, index uint64, a.header.size = size if !a.inlined { - err := storage.Store(a.header.slabID, a) + err := storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } } @@ -1090,10 +1083,9 @@ func (a *ArrayDataSlab) Insert(storage SlabStorage, address Address, index uint6 a.header.size += storable.ByteSize() if !a.inlined { - err := storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } } @@ -1122,10 +1114,9 @@ func (a *ArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, err a.header.size -= v.ByteSize() if !a.inlined { - err := storage.Store(a.header.slabID, a) + err := storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } } @@ -1909,11 +1900,11 @@ func (a *ArrayMetaDataSlab) Set(storage SlabStorage, address Address, index uint return existingElem, nil } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } + return existingElem, nil } @@ -1974,13 +1965,7 @@ func (a *ArrayMetaDataSlab) Insert(storage SlabStorage, address Address, index u // Insertion always increases the size, // so there is no need to check underflow - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *ArrayMetaDataSlab) Remove(storage SlabStorage, index uint64) (Storable, error) { @@ -2030,10 +2015,9 @@ func (a *ArrayMetaDataSlab) Remove(storage SlabStorage, index uint64) (Storable, // Removal always decreases the size, // so there is no need to check isFull - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } return v, nil @@ -2066,25 +2050,17 @@ func (a *ArrayMetaDataSlab) SplitChildSlab(storage SlabStorage, child ArraySlab, a.header.size += arraySlabHeaderSize // Store modified slabs - err = storage.Store(left.SlabID(), left) + err = storeSlab(storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - - err = storage.Store(right.SlabID(), right) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + return storeSlab(storage, a) } // MergeOrRebalanceChildSlab merges or rebalances child slab. @@ -2152,22 +2128,15 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex] = baseCountSum + child.Header().count // Store modified slabs - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) - } - err = storage.Store(rightSib.SlabID(), rightSib) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + return storeSlab(storage, a) } // Rebalance with left sib @@ -2187,22 +2156,15 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex-1] = baseCountSum + leftSib.Header().count // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) - } - err = storage.Store(child.SlabID(), child) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + return storeSlab(storage, a) } // Rebalance with bigger sib @@ -2222,22 +2184,18 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex-1] = baseCountSum + leftSib.Header().count // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) - } - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + + return storeSlab(storage, a) + } else { // leftSib.ByteSize() <= rightSib.ByteSize @@ -2256,22 +2214,17 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex] = baseCountSum + child.Header().count // Store modified slabs - err = storage.Store(child.SlabID(), child) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) - } - err = storage.Store(rightSib.SlabID(), rightSib) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + + return storeSlab(storage, a) } } @@ -2299,16 +2252,14 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove right sib from storage @@ -2343,16 +2294,14 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove child from storage @@ -2386,15 +2335,14 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove child from storage @@ -2427,15 +2375,13 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove rightSib from storage @@ -2741,10 +2687,9 @@ func NewArray(storage SlabStorage, address Address, typeInfo TypeInfo) (*Array, extraData: extraData, } - err = storage.Store(root.header.slabID, root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.header.slabID)) + return nil, err } return &Array{ @@ -3234,23 +3179,17 @@ func (a *Array) splitRoot() error { a.root = newRoot - err = a.Storage.Store(left.SlabID(), left) + err = storeSlab(a.Storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - err = a.Storage.Store(right.SlabID(), right) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = a.Storage.Store(a.root.SlabID(), a.root) + + err = storeSlab(a.Storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + return err } - return nil + return storeSlab(a.Storage, a.root) } func (a *Array) promoteChildAsNewRoot(childID SlabID) error { @@ -3277,11 +3216,11 @@ func (a *Array) promoteChildAsNewRoot(childID SlabID) error { a.root.SetExtraData(extraData) - err = a.Storage.Store(rootID, a.root) + err = storeSlab(a.Storage, a.root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rootID)) + return err } + err = a.Storage.Remove(childID) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. @@ -3675,16 +3614,8 @@ func (a *Array) SetType(typeInfo TypeInfo) error { // Array is standalone. - slabID := a.SlabID() - // Store modified root slab in storage since typeInfo is part of extraData stored in root slab. - err := a.Storage.Store(slabID, a.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", slabID)) - } - - return nil + return storeSlab(a.Storage, a.root) } func (a *Array) String() string { @@ -3806,10 +3737,9 @@ func (a *Array) PopIterate(fn ArrayPopIterationFunc) error { // Save root slab if !a.Inlined() { - err = a.Storage.Store(a.root.SlabID(), a.root) + err = storeSlab(a.Storage, a.root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + return err } } @@ -3934,12 +3864,9 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn // Store all slabs for _, slab := range slabs { - err = storage.Store(slab.SlabID(), slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded( - err, - fmt.Sprintf("failed to store slab %s", slab.SlabID())) + return nil, err } } @@ -3966,10 +3893,9 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn root.SetExtraData(extraData) // Store root - err = storage.Store(root.SlabID(), root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.SlabID())) + return nil, err } return &Array{ diff --git a/basicarray.go b/basicarray.go index 143bec35..58b77e47 100644 --- a/basicarray.go +++ b/basicarray.go @@ -167,13 +167,7 @@ func (a *BasicArrayDataSlab) Set(storage SlabStorage, index uint64, v Storable) oldElem.ByteSize() + v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *BasicArrayDataSlab) Insert(storage SlabStorage, index uint64, v Storable) error { @@ -192,13 +186,7 @@ func (a *BasicArrayDataSlab) Insert(storage SlabStorage, index uint64, v Storabl a.header.count++ a.header.size += v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *BasicArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, error) { @@ -221,10 +209,9 @@ func (a *BasicArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable a.header.count-- a.header.size -= v.ByteSize() - err := storage.Store(a.header.slabID, a) + err := storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } return v, nil diff --git a/map.go b/map.go index 57891d7a..f32ec112 100644 --- a/map.go +++ b/map.go @@ -915,10 +915,9 @@ func (e *inlineCollisionGroup) Set( collisionGroup: true, } - err = storage.Store(id, slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + return nil, nil, nil, err } // Create and return externalCollisionGroup (wrapper of newly created MapDataSlab) @@ -3287,13 +3286,7 @@ func (m *MapDataSlab) Uninline(storage SlabStorage) error { m.inlined = false // Store slab in storage - err := storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - - return nil + return storeSlab(storage, m) } func elementsStorables(elems elements, childStorables []Storable) []Storable { @@ -3374,10 +3367,9 @@ func (m *MapDataSlab) Set( // Store modified slab if !m.inlined { - err := storage.Store(m.header.slabID, m) + err := storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, err } } @@ -3400,10 +3392,9 @@ func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, // Store modified slab if !m.inlined { - err := storage.Store(m.header.slabID, m) + err := storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, err } } @@ -4164,10 +4155,9 @@ func (m *MapMetaDataSlab) Set( return keyStorable, existingMapValueStorable, nil } - err = storage.Store(m.header.slabID, m) + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, err } return keyStorable, existingMapValueStorable, nil } @@ -4231,10 +4221,9 @@ func (m *MapMetaDataSlab) Remove(storage SlabStorage, digester Digester, level u return k, v, nil } - err = storage.Store(m.header.slabID, m) + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, err } return k, v, nil } @@ -4261,25 +4250,17 @@ func (m *MapMetaDataSlab) SplitChildSlab(storage SlabStorage, child MapSlab, chi m.header.size += mapSlabHeaderSize // Store modified slabs - err = storage.Store(left.SlabID(), left) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - - err = storage.Store(right.SlabID(), right) + err = storeSlab(storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + err = storeSlab(storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } - return nil + return storeSlab(storage, m) } // MergeOrRebalanceChildSlab merges or rebalances child slab. @@ -4352,24 +4333,17 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(rightSib.SlabID(), rightSib) + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) } // Rebalance with left sib @@ -4385,24 +4359,17 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.childrenHeaders[childHeaderIndex] = child.Header() // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) } // Rebalance with bigger sib @@ -4418,24 +4385,18 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.childrenHeaders[childHeaderIndex] = child.Header() // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) + } else { // leftSib.ByteSize() <= rightSib.ByteSize @@ -4454,24 +4415,17 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(rightSib.SlabID(), rightSib) + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) } } @@ -4500,15 +4454,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove right sib from storage @@ -4538,15 +4491,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.header.size -= mapSlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove child from storage @@ -4575,15 +4527,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.header.size -= mapSlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove child from storage @@ -4616,15 +4567,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove rightSib from storage @@ -4874,10 +4824,9 @@ func NewMap(storage SlabStorage, address Address, digestBuilder DigesterBuilder, extraData: extraData, } - err = storage.Store(root.header.slabID, root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.header.slabID)) + return nil, err } return &OrderedMap{ @@ -5421,22 +5370,17 @@ func (m *OrderedMap) splitRoot() error { m.root = newRoot - err = m.Storage.Store(left.SlabID(), left) + err = storeSlab(m.Storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - err = m.Storage.Store(right.SlabID(), right) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = m.Storage.Store(m.root.SlabID(), m.root) + + err = storeSlab(m.Storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + return err } - return nil + + return storeSlab(m.Storage, m.root) } func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error { @@ -5463,10 +5407,9 @@ func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error { m.root.SetExtraData(extraData) - err = m.Storage.Store(rootID, m.root) + err = storeSlab(m.Storage, m.root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rootID)) + return err } err = m.Storage.Remove(childID) @@ -6144,10 +6087,9 @@ func (m *OrderedMap) PopIterate(fn MapPopIterationFunc) error { if !m.Inlined() { // Save root slab - err = m.Storage.Store(m.root.SlabID(), m.root) + err = storeSlab(m.Storage, m.root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + return err } } @@ -6376,10 +6318,9 @@ func NewMapFromBatchData( // Store all slabs for _, slab := range slabs { - err = storage.Store(slab.SlabID(), slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", slab.SlabID())) + return nil, err } } @@ -6406,10 +6347,9 @@ func NewMapFromBatchData( root.SetExtraData(extraData) // Store root - err = storage.Store(root.SlabID(), root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.SlabID())) + return nil, err } return &OrderedMap{ diff --git a/storable_slab.go b/storable_slab.go index 07db18f4..62c80308 100644 --- a/storable_slab.go +++ b/storable_slab.go @@ -50,10 +50,9 @@ func NewStorableSlab(storage SlabStorage, address Address, storable Storable) (S storable: storable, } - err = storage.Store(id, slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + return nil, err } return SlabIDStorable(id), nil diff --git a/storage.go b/storage.go index 0b324381..9e4eb7d7 100644 --- a/storage.go +++ b/storage.go @@ -1078,3 +1078,13 @@ func (s *PersistentSlabStorage) DeltasSizeWithoutTempAddresses() uint64 { } return size } + +func storeSlab(storage SlabStorage, slab Slab) error { + id := slab.SlabID() + err := storage.Store(id, slab) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+		return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id))
+	}
+	return nil
+}

From 8c7c31e0965f314b395baf70b085ecbcbda0d1a7 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 8 Mar 2024 13:08:09 -0600
Subject: [PATCH 101/126] Reuse notifyParentIfNeeded in Array.SetType()

---
 array.go | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/array.go b/array.go
index 09a72817..c0a2be00 100644
--- a/array.go
+++ b/array.go
@@ -3600,16 +3600,9 @@ func (a *Array) SetType(typeInfo TypeInfo) error {
 	if a.Inlined() {
 		// Array is inlined.
 
-		// Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined array.
-		found, err := a.parentUpdater()
-		if err != nil {
-			return err
-		}
-		if !found {
-			a.parentUpdater = nil
-		}
-		return nil
+		// Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined array.
+		return a.notifyParentIfNeeded()
 	}
 
 	// Array is standalone.

From 95dad1cea18dde0aa31d5bb5d1d6c9de0c4f9814 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 8 Mar 2024 14:35:08 -0600
Subject: [PATCH 102/126] Refactor to use storeSlab in OrderedMap.SetType()

---
 map.go | 21 +++------------------
 1 file changed, 3 insertions(+), 18 deletions(-)

diff --git a/map.go b/map.go
index 58bc36cb..d74e9575 100644
--- a/map.go
+++ b/map.go
@@ -5501,30 +5501,15 @@ func (m *OrderedMap) SetType(typeInfo TypeInfo) error {
 	if m.Inlined() {
 		// Map is inlined.
 
-		// Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined array.
-		found, err := m.parentUpdater()
-		if err != nil {
-			return err
-		}
-		if !found {
-			m.parentUpdater = nil
-		}
-		return nil
+		// Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined map.
+		return m.notifyParentIfNeeded()
 	}
 
 	// Map is standalone.
 
-	slabID := m.SlabID()
-
 	// Store modified root slab in storage since typeInfo is part of extraData stored in root slab.
-	err := m.Storage.Store(slabID, m.root)
-	if err != nil {
-		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", slabID)) - } - - return nil + return storeSlab(m.Storage, m.root) } func (m *OrderedMap) String() string { From 35fdb7e4e109b999d627cafe9ff89df86e21dd24 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 8 Mar 2024 17:08:14 -0600 Subject: [PATCH 103/126] Deduplicate inlined type info if repeated --- array.go | 4 + array_test.go | 67 +++++----------- map.go | 4 + map_test.go | 129 ++++++++----------------------- typeinfo.go | 209 +++++++++++++++++++++++++++++++------------------- 5 files changed, 194 insertions(+), 219 deletions(-) diff --git a/array.go b/array.go index d5e4fc70..69affd85 100644 --- a/array.go +++ b/array.go @@ -298,6 +298,10 @@ func (a *ArrayExtraData) isExtraData() bool { return true } +func (a *ArrayExtraData) Type() TypeInfo { + return a.TypeInfo +} + // Encode encodes extra data as CBOR array: // // [type info] diff --git a/array_test.go b/array_test.go index 58a0c9cf..8f42abac 100644 --- a/array_test.go +++ b/array_test.go @@ -3186,17 +3186,14 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of type info - 0x81, - // type info - 0x18, 0x2b, + 0x80, // element 1: array of extra data 0x81, // array extra data 0xd8, 0xf7, 0x81, // array type info ref - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3275,21 +3272,17 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x82, - 0x18, 0x2c, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x82, // inlined array extra data 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, // inlined array extra data 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x01, + 0x18, 0x2b, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3372,20 +3365,16 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x82, - 0x18, 0x2c, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x82, // inlined array extra data 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x01, + 0x18, 0x2b, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3480,33 +3469,25 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x84, - 0x18, 0x2c, - 0x18, 0x2b, - 0x18, 0x2e, - 0x18, 0x2d, + 0x80, // element 1: array of inlined extra data 0x84, // typeInfo3 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, // typeInfo2 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x01, + 0x18, 0x2b, // typeInfo5 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x02, + 0x18, 0x2e, // typeInfo4 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x03, + 0x18, 0x2d, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x02, @@ -3633,15 +3614,13 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // inlined array extra data 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x0b, // CBOR encoded array elements @@ -3787,20 +3766,16 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined extra data - 0x82, - 0x18, 0x2c, - 0x18, 0x2b, + 0x80, // element 1: array of inlined 
extra data 0x82, // inlined array extra data 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x01, + 0x18, 0x2b, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x0b, // CBOR encoded array elements @@ -4117,15 +4092,13 @@ func TestArrayEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of type info - 0x81, - 0x18, 0x2c, + 0x80, // element 1: array of extra data 0x81, // type info 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, // CBOR encoded array head (fixed size 3 byte) 0x99, 0x00, 0x0b, diff --git a/map.go b/map.go index bf142744..d34af66c 100644 --- a/map.go +++ b/map.go @@ -494,6 +494,10 @@ func (m *MapExtraData) isExtraData() bool { return true } +func (m *MapExtraData) Type() TypeInfo { + return m.TypeInfo +} + // Encode encodes extra data as CBOR array: // // [type info, count, seed] diff --git a/map_test.go b/map_test.go index f80c8dcb..397bdce1 100644 --- a/map_test.go +++ b/map_test.go @@ -7663,15 +7663,13 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // inlined array extra data 0xd8, 0xf7, 0x81, - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // the following encoded data is valid CBOR @@ -7729,7 +7727,7 @@ func TestMapEncodeDecode(t *testing.T) { require.Equal(t, 2, len(meta.childrenHeaders)) require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) - const inlinedExtraDataSize = 11 + const inlinedExtraDataSize = 8 require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size) // Decode data to new storage @@ -7827,7 +7825,6 @@ func TestMapEncodeDecode(t *testing.T) { 0x01, // seed 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, - // element 1 // inlined map extra data 0xd8, 0xf8, 0x83, @@ -8008,18 +8005,14 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x82, - 0x18, 0x2c, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x82, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, // count: 1 0x01, // seed @@ -8028,9 +8021,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x01, + 0x18, 0x2b, // count: 1 0x01, // seed @@ -8486,20 +8477,14 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of type info - 0x84, - 0x18, 0x2c, - 0x18, 0x2e, - 0x18, 0x2b, - 0x18, 0x2d, + 0x80, // element 1: array of extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, - // type info: 44 - 0xd8, 0xf6, - 0x00, + 0x18, 0x2c, // count: 1 0x01, // seed @@ -8509,9 +8494,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info: 46 - 0xd8, 0xf6, - 0x01, + 0x18, 0x2e, // count: 1 0x01, // seed @@ -8521,9 +8504,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info: 43 - 0xd8, 0xf6, - 0x02, + 0x18, 0x2b, // count: 1 0x01, // seed @@ -8533,9 +8514,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info: 45 - 0xd8, 0xf6, - 0x03, + 0x18, 0x2d, // count: 1 0x01, // seed @@ -9264,20 +9243,14 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x84, - 0x18, 0x2b, - 0x18, 
0x2c, - 0x18, 0x2d, - 0x18, 0x2e, + 0x80, // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // count: 1 0x01, // seed @@ -9286,9 +9259,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x01, + 0x18, 0x2c, // count: 1 0x01, // seed @@ -9297,9 +9268,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x02, + 0x18, 0x2d, // count: 1 0x01, // seed @@ -9307,9 +9276,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x03, + 0x18, 0x2e, // count: 1 0x01, // seed @@ -9459,20 +9426,14 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x84, - 0x18, 0x2b, - 0x18, 0x2c, - 0x18, 0x2d, - 0x18, 0x2e, + 0x80, // element 1: array of inlined extra data 0x84, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // count: 1 0x01, // seed @@ -9481,9 +9442,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x01, + 0x18, 0x2c, // count: 1 0x01, // seed @@ -9492,9 +9451,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x02, + 0x18, 0x2d, // count: 1 0x01, // seed @@ -9502,9 +9459,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x03, + 0x18, 0x2e, // count: 1 0x01, // seed @@ -10597,17 +10552,14 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // element 0 // inlined map extra data 0xd8, 0xf8, 0x83, - // type info - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // count: 1 0x01, // seed @@ -11078,17 +11030,14 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // element 0 // inlined array extra data 0xd8, 0xf7, 0x81, - // type info - 0xd8, 0xf6, - 0x00, + 0x18, 0x2b, // the following encoded data is valid CBOR @@ -11379,8 +11328,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0xd8, 0xf6, 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // element 0 @@ -11390,8 +11338,7 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, - 0x00, + 0xd8, 0xf6, 0x18, 0x2b, // count 0x01, // seed @@ -11553,8 +11500,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0xd8, 0xf6, 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // element 0 @@ -11564,8 +11510,7 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, - 0x00, + 0xd8, 0xf6, 0x18, 0x2b, // count: 2 0x02, // seed @@ -11734,8 +11679,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x81, - 0xd8, 0xf6, 0x18, 0x2b, + 0x80, // element 1: array of inlined extra data 0x81, // element 0 @@ -11745,8 +11689,7 @@ func TestMapEncodeDecode(t *testing.T) 
{ // map extra data 0x83, // type info - 0xd8, 0xf6, - 0x00, + 0xd8, 0xf6, 0x18, 0x2b, // count: 2 0x02, // seed @@ -12379,9 +12322,7 @@ func TestMapEncodeDecode(t *testing.T) { // inlined extra data 0x82, // element 0: array of inlined type info - 0x82, - 0xd8, 0xf6, 0x18, 0x2b, - 0xd8, 0xf6, 0x18, 0x2c, + 0x80, // element 1: array of inlined extra data 0x82, // element 0 @@ -12391,8 +12332,7 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, - 0x00, + 0xd8, 0xf6, 0x18, 0x2b, // count: 2 0x02, // seed @@ -12410,8 +12350,7 @@ func TestMapEncodeDecode(t *testing.T) { // map extra data 0x83, // type info - 0xd8, 0xf6, - 0x01, + 0xd8, 0xf6, 0x18, 0x2c, // count: 2 0x02, // seed diff --git a/typeinfo.go b/typeinfo.go index a2eacddb..61cdcc49 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -19,6 +19,7 @@ package atree import ( + "bytes" "encoding/binary" "fmt" "sort" @@ -51,8 +52,49 @@ func defaultEncodeTypeInfo(enc *Encoder, typeInfo TypeInfo) error { return typeInfo.Encode(enc.CBOR) } +func decodeTypeInfoRefIfNeeded(inlinedTypeInfo []TypeInfo, defaultTypeInfoDecoder TypeInfoDecoder) TypeInfoDecoder { + if len(inlinedTypeInfo) == 0 { + return defaultTypeInfoDecoder + } + + return func(decoder *cbor.StreamDecoder) (TypeInfo, error) { + rawTypeInfo, err := decoder.DecodeRawBytes() + if err != nil { + return nil, NewDecodingError(fmt.Errorf("failed to decode raw type info: %w", err)) + } + + if len(rawTypeInfo) > len(typeInfoRefTagHeadAndTagNumber) && + bytes.Equal( + rawTypeInfo[:len(typeInfoRefTagHeadAndTagNumber)], + typeInfoRefTagHeadAndTagNumber) { + + // Type info is encoded as type info ref. + + var index uint64 + + err = cbor.Unmarshal(rawTypeInfo[len(typeInfoRefTagHeadAndTagNumber):], &index) + if err != nil { + return nil, NewDecodingError(err) + } + + if index >= uint64(len(inlinedTypeInfo)) { + return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect index < %d, got %d", len(inlinedTypeInfo), index)) + } + + return inlinedTypeInfo[int(index)], nil + } + + // Decode type info as is. 
+ + dec := cbor.NewByteStreamDecoder(rawTypeInfo) + + return defaultTypeInfoDecoder(dec) + } +} + type ExtraData interface { isExtraData() bool + Type() TypeInfo Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error } @@ -74,6 +116,10 @@ func (c *compactMapExtraData) isExtraData() bool { return true } +func (c *compactMapExtraData) Type() TypeInfo { + return c.mapExtraData.TypeInfo +} + func (c *compactMapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { err := enc.CBOR.EncodeArrayHead(compactMapExtraDataLength) if err != nil { @@ -210,17 +256,10 @@ type compactMapTypeInfo struct { keys []ComparableStorable } -type extraDataInfo struct { - data ExtraData - typeInfoIndex int -} - type InlinedExtraData struct { - extraData []extraDataInfo // Used to encode deduplicated ExtraData in order - typeInfo []TypeInfo // Used to encode deduplicated TypeInfo in order + extraData []ExtraData // Used to encode deduplicated ExtraData in order compactMapTypeSet map[string]compactMapTypeInfo // Used to deduplicate compactMapExtraData by TypeInfo.Identifier() + sorted field names arrayExtraDataSet map[string]int // Used to deduplicate arrayExtraData by TypeInfo.Identifier() - typeInfoSet map[string]int // Used to deduplicate TypeInfo by TypeInfo.Identifier() } func newInlinedExtraData() *InlinedExtraData { @@ -230,12 +269,17 @@ func newInlinedExtraData() *InlinedExtraData { const inlinedExtraDataArrayCount = 2 +var typeInfoRefTagHeadAndTagNumber = []byte{0xd8, CBORTagTypeInfoRef} + // Encode encodes inlined extra data as 2-element array: // // +-----------------------+------------------------+ // | [+ inlined type info] | [+ inlined extra data] | // +-----------------------+------------------------+ func (ied *InlinedExtraData) Encode(enc *Encoder) error { + + typeInfos, typeInfoIndexes := findDuplicateTypeInfo(ied.extraData) + var err error err = enc.CBOR.EncodeArrayHead(inlinedExtraDataArrayCount) @@ -243,14 +287,14 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return NewEncodingError(err) } - // element 0: deduplicated array of type info - err = enc.CBOR.EncodeArrayHead(uint64(len(ied.typeInfo))) + // element 0: array of duplicate type info + err = enc.CBOR.EncodeArrayHead(uint64(len(typeInfos))) if err != nil { return NewEncodingError(err) } - // Encode inlined type info - for _, typeInfo := range ied.typeInfo { + // Encode type info + for _, typeInfo := range typeInfos { err = typeInfo.Encode(enc.CBOR) if err != nil { return NewEncodingError(err) @@ -267,7 +311,7 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { for _, extraData := range ied.extraData { var tagNum uint64 - switch extraData.data.(type) { + switch extraData.(type) { case *ArrayExtraData: tagNum = CBORTagInlinedArrayExtraData @@ -286,14 +330,18 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return NewEncodingError(err) } - err = extraData.data.Encode(enc, func(enc *Encoder, typeInfo TypeInfo) error { - id := typeInfo.Identifier() - index, exist := ied.typeInfoSet[id] + err = extraData.Encode(enc, func(enc *Encoder, typeInfo TypeInfo) error { + index, exist := typeInfoIndexes[typeInfo.Identifier()] if !exist { - return NewEncodingError(fmt.Errorf("failed to encode type info ref %s (%T)", id, typeInfo)) + // typeInfo is not encoded separately, so encode typeInfo as is here. 
+ err = typeInfo.Encode(enc.CBOR) + if err != nil { + return NewEncodingError(err) + } + return nil } - err := enc.CBOR.EncodeTagHead(CBORTagTypeInfoRef) + err := enc.CBOR.EncodeRawBytes(typeInfoRefTagHeadAndTagNumber) if err != nil { return NewEncodingError(err) } @@ -318,11 +366,65 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return nil } +func findDuplicateTypeInfo(extraData []ExtraData) ([]TypeInfo, map[string]int) { + if len(extraData) < 2 { + // No duplicate type info + return nil, nil + } + + // typeInfoSet is used to deduplicate TypeInfo. + // typeInfoSet key: TypeInfo.Identifier() + // typeInfoSet value: indexes of extra data containing this type info + typeInfoSet := make(map[string][]int, len(extraData)) + + for i, data := range extraData { + typeID := data.Type().Identifier() + + indexes := typeInfoSet[typeID] + typeInfoSet[typeID] = append(indexes, i) + } + + if len(extraData) == len(typeInfoSet) { + // No duplicate type info + return nil, nil + } + + firstExtraDataIndexContainingDuplicateTypeInfo := make([]int, 0, len(typeInfoSet)) + for _, v := range typeInfoSet { + if len(v) > 1 { + firstExtraDataIndexContainingDuplicateTypeInfo = append(firstExtraDataIndexContainingDuplicateTypeInfo, v[0]) + } + } + + switch len(firstExtraDataIndexContainingDuplicateTypeInfo) { + case 1: + extraDataIndex := firstExtraDataIndexContainingDuplicateTypeInfo[0] + typeInfo := extraData[extraDataIndex].Type() + return []TypeInfo{typeInfo}, map[string]int{typeInfo.Identifier(): 0} + + default: + sort.Ints(firstExtraDataIndexContainingDuplicateTypeInfo) + + typeInfos := make([]TypeInfo, 0, len(firstExtraDataIndexContainingDuplicateTypeInfo)) + typeInfoIndexes := make(map[string]int) + + for _, extraDataIndex := range firstExtraDataIndexContainingDuplicateTypeInfo { + index := len(typeInfos) + + typeInfo := extraData[extraDataIndex].Type() + typeInfos = append(typeInfos, typeInfo) + typeInfoIndexes[typeInfo.Identifier()] = index + } + + return typeInfos, typeInfoIndexes + } +} + func newInlinedExtraDataFromData( data []byte, decMode cbor.DecMode, decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, + defaultDecodeTypeInfo TypeInfoDecoder, ) ([]ExtraData, []byte, error) { dec := decMode.NewByteStreamDecoder(data) @@ -336,43 +438,21 @@ func newInlinedExtraDataFromData( return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect %d elements, got %d elements", inlinedExtraDataArrayCount, count)) } - // element 0: array of deduplicated type info + // element 0: array of duplicate type info typeInfoCount, err := dec.DecodeArrayHead() if err != nil { return nil, nil, NewDecodingError(err) } - if typeInfoCount == 0 { - return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined type info")) - } - - inlinedTypeInfo := make([]TypeInfo, typeInfoCount) + inlinedTypeInfo := make([]TypeInfo, int(typeInfoCount)) for i := uint64(0); i < typeInfoCount; i++ { - inlinedTypeInfo[i], err = decodeTypeInfo(dec) + inlinedTypeInfo[i], err = defaultDecodeTypeInfo(dec) if err != nil { - return nil, nil, err + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode typeInfo") } } - typeInfoRefDecoder := func(decoder *cbor.StreamDecoder) (TypeInfo, error) { - tagNum, err := decoder.DecodeTagNumber() - if err != nil { - return nil, err - } - if tagNum != CBORTagTypeInfoRef { - return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect tag number %d, got %d", CBORTagTypeInfoRef, 
tagNum)) - } - - index, err := decoder.DecodeUint64() - if err != nil { - return nil, NewDecodingError(err) - } - if index >= uint64(len(inlinedTypeInfo)) { - return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect index < %d, got %d", len(inlinedTypeInfo), index)) - } - - return inlinedTypeInfo[int(index)], nil - } + decodeTypeInfo := decodeTypeInfoRefIfNeeded(inlinedTypeInfo, defaultDecodeTypeInfo) // element 1: array of deduplicated extra data info extraDataCount, err := dec.DecodeArrayHead() @@ -393,19 +473,19 @@ func newInlinedExtraDataFromData( switch tagNum { case CBORTagInlinedArrayExtraData: - inlinedExtraData[i], err = newArrayExtraData(dec, typeInfoRefDecoder) + inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo) if err != nil { return nil, nil, err } case CBORTagInlinedMapExtraData: - inlinedExtraData[i], err = newMapExtraData(dec, typeInfoRefDecoder) + inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo) if err != nil { return nil, nil, err } case CBORTagInlinedCompactMapExtraData: - inlinedExtraData[i], err = newCompactMapExtraData(dec, typeInfoRefDecoder, decodeStorable) + inlinedExtraData[i], err = newCompactMapExtraData(dec, decodeTypeInfo, decodeStorable) if err != nil { return nil, nil, err } @@ -418,25 +498,6 @@ func newInlinedExtraDataFromData( return inlinedExtraData, data[dec.NumBytesDecoded():], nil } -// addTypeInfo returns index of deduplicated type info. -func (ied *InlinedExtraData) addTypeInfo(typeInfo TypeInfo) int { - if ied.typeInfoSet == nil { - ied.typeInfoSet = make(map[string]int) - } - - id := typeInfo.Identifier() - index, exist := ied.typeInfoSet[id] - if exist { - return index - } - - index = len(ied.typeInfo) - ied.typeInfo = append(ied.typeInfo, typeInfo) - ied.typeInfoSet[id] = index - - return index -} - // addArrayExtraData returns index of deduplicated array extra data. // Array extra data is deduplicated by array type info ID because array // extra data only contains type info. @@ -451,10 +512,8 @@ func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { return index } - typeInfoIndex := ied.addTypeInfo(data.TypeInfo) - index = len(ied.extraData) - ied.extraData = append(ied.extraData, extraDataInfo{data, typeInfoIndex}) + ied.extraData = append(ied.extraData, data) ied.arrayExtraDataSet[id] = index return index @@ -463,10 +522,8 @@ func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { // addMapExtraData returns index of map extra data. // Map extra data is not deduplicated because it also contains count and seed. 
 func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) int {
-	typeInfoIndex := ied.addTypeInfo(data.TypeInfo)
-
 	index := len(ied.extraData)
-	ied.extraData = append(ied.extraData, extraDataInfo{data, typeInfoIndex})
+	ied.extraData = append(ied.extraData, data)
 	return index
 }
 
@@ -494,10 +551,8 @@ func (ied *InlinedExtraData) addCompactMapExtraData(
 		keys: keys,
 	}
 
-	typeInfoIndex := ied.addTypeInfo(data.TypeInfo)
-
 	index := len(ied.extraData)
-	ied.extraData = append(ied.extraData, extraDataInfo{compactMapData, typeInfoIndex})
+	ied.extraData = append(ied.extraData, compactMapData)
 
 	ied.compactMapTypeSet[id] = compactMapTypeInfo{
 		keys:  keys,

From 83c99b3dfa4236b10ff580840fd30e27fb45dd81 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Tue, 2 Apr 2024 17:17:24 -0500
Subject: [PATCH 104/126] Use encoded type info to deduplicate extra data

Currently, we use TypeInfo.Identifier() to deduplicate extra data
and type info.  However, TypeInfo.Identifier() is implemented in
another package and we can't enforce its uniqueness for different
types.

If TypeInfo.Identifier() returns the same ID for different types,
different types are wrongly deduplicated.

This commit uses encoded type info via TypeInfo.Encode() to
deduplicate extra data.  This prevents differently encoded type
info from being deduplicated by mistake.

This commit also uses sync.Pool to reuse a buffer for type info
encoding.
---
 array.go            |   7 +-
 cmd/stress/utils.go |  41 +++++++++-
 map.go              |  14 ++--
 typeinfo.go         | 187 +++++++++++++++++++++++++++-----------------
 4 files changed, 169 insertions(+), 80 deletions(-)

diff --git a/array.go b/array.go
index fc385f4e..15381b5d 100644
--- a/array.go
+++ b/array.go
@@ -720,15 +720,16 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder) error {
 			fmt.Errorf("failed to encode standalone array data slab as inlined"))
 	}
 
-	extraDataIndex := enc.inlinedExtraData().addArrayExtraData(a.extraData)
+	extraDataIndex, err := enc.inlinedExtraData().addArrayExtraData(a.extraData)
+	if err != nil {
+		return NewEncodingError(err)
+	}
 
 	if extraDataIndex > maxInlinedExtraDataIndex {
 		return NewEncodingError(
 			fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex))
 	}
 
-	var err error
-
 	// Encode tag number and array head of 3 elements
 	err = enc.CBOR.EncodeRawBytes([]byte{
 		// tag number
diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go
index e5ffba91..ba3653ca 100644
--- a/cmd/stress/utils.go
+++ b/cmd/stress/utils.go
@@ -19,12 +19,16 @@
 package main
 
 import (
+	"bytes"
 	"fmt"
 	"math"
 	"math/rand"
 	"reflect"
+	"sync"
 	"time"
 
+	"github.com/fxamacker/cbor/v2"
+
 	"github.com/onflow/atree"
 )
 
@@ -540,5 +544,40 @@ func (v mapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Stor
 }
 
 var typeInfoComparator = func(a atree.TypeInfo, b atree.TypeInfo) bool {
-	return a.Identifier() == b.Identifier()
+	aID, _ := getEncodedTypeInfo(a)
+	bID, _ := getEncodedTypeInfo(b)
+	return aID == bID
+}
+
+func getEncodedTypeInfo(ti atree.TypeInfo) (string, error) {
+	b := getTypeIDBuffer()
+	defer putTypeIDBuffer(b)
+
+	enc := cbor.NewStreamEncoder(b)
+	err := ti.Encode(enc)
+	if err != nil {
+		return "", err
+	}
+	enc.Flush()
+
+	return b.String(), nil
+}
+
+const defaultTypeIDBufferSize = 256
+
+var typeIDBufferPool = sync.Pool{
+	New: func() interface{} {
+		e := new(bytes.Buffer)
+		e.Grow(defaultTypeIDBufferSize)
+		return e
+	},
+}
+
+func getTypeIDBuffer() *bytes.Buffer {
+	return 
typeIDBufferPool.Get().(*bytes.Buffer) +} + +func putTypeIDBuffer(e *bytes.Buffer) { + e.Reset() + typeIDBufferPool.Put(e) } diff --git a/map.go b/map.go index 9b086faf..5c3d37a2 100644 --- a/map.go +++ b/map.go @@ -3007,14 +3007,15 @@ func (m *MapDataSlab) encodeAsInlined(enc *Encoder) error { func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder) error { - extraDataIndex := enc.inlinedExtraData().addMapExtraData(m.extraData) + extraDataIndex, err := enc.inlinedExtraData().addMapExtraData(m.extraData) + if err != nil { + return NewEncodingError(err) + } if extraDataIndex > maxInlinedExtraDataIndex { return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) } - var err error - // Encode tag number and array head of 3 elements err = enc.CBOR.EncodeRawBytes([]byte{ // tag number @@ -3067,7 +3068,10 @@ func encodeAsInlinedCompactMap( values []Storable, ) error { - extraDataIndex, cachedKeys := enc.inlinedExtraData().addCompactMapExtraData(extraData, hkeys, keys) + extraDataIndex, cachedKeys, err := enc.inlinedExtraData().addCompactMapExtraData(extraData, hkeys, keys) + if err != nil { + return NewEncodingError(err) + } if len(keys) != len(cachedKeys) { return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached compact map type %d", len(keys), len(cachedKeys))) @@ -3078,8 +3082,6 @@ func encodeAsInlinedCompactMap( return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) } - var err error - // Encode tag number and array head of 3 elements err = enc.CBOR.EncodeRawBytes([]byte{ // tag number diff --git a/typeinfo.go b/typeinfo.go index 61cdcc49..4f62fd9d 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -24,6 +24,7 @@ import ( "fmt" "sort" "strings" + "sync" "github.com/fxamacker/cbor/v2" ) @@ -31,7 +32,6 @@ import ( type TypeInfo interface { Encode(*cbor.StreamEncoder) error IsComposite() bool - Identifier() string Copy() TypeInfo } @@ -256,10 +256,15 @@ type compactMapTypeInfo struct { keys []ComparableStorable } +type extraDataAndEncodedTypeInfo struct { + extraData ExtraData + encodedTypeInfo string // cached encoded type info +} + type InlinedExtraData struct { - extraData []ExtraData // Used to encode deduplicated ExtraData in order - compactMapTypeSet map[string]compactMapTypeInfo // Used to deduplicate compactMapExtraData by TypeInfo.Identifier() + sorted field names - arrayExtraDataSet map[string]int // Used to deduplicate arrayExtraData by TypeInfo.Identifier() + extraData []extraDataAndEncodedTypeInfo // Used to encode deduplicated ExtraData in order + compactMapTypeSet map[string]compactMapTypeInfo // Used to deduplicate compactMapExtraData by encoded TypeInfo + sorted field names + arrayExtraDataSet map[string]int // Used to deduplicate arrayExtraData by encoded TypeInfo } func newInlinedExtraData() *InlinedExtraData { @@ -278,7 +283,7 @@ var typeInfoRefTagHeadAndTagNumber = []byte{0xd8, CBORTagTypeInfoRef} // +-----------------------+------------------------+ func (ied *InlinedExtraData) Encode(enc *Encoder) error { - typeInfos, typeInfoIndexes := findDuplicateTypeInfo(ied.extraData) + typeInfos, typeInfoIndexes := ied.findDuplicateTypeInfo() var err error @@ -295,7 +300,8 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { // Encode type info for _, typeInfo := range typeInfos { - err = typeInfo.Encode(enc.CBOR) + // Encode cached type info as is. 
+ err = enc.CBOR.EncodeRawBytes([]byte(typeInfo)) if err != nil { return NewEncodingError(err) } @@ -308,10 +314,10 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { } // Encode inlined extra data - for _, extraData := range ied.extraData { + for _, extraDataInfo := range ied.extraData { var tagNum uint64 - switch extraData.(type) { + switch extraDataInfo.extraData.(type) { case *ArrayExtraData: tagNum = CBORTagInlinedArrayExtraData @@ -322,7 +328,7 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { tagNum = CBORTagInlinedCompactMapExtraData default: - return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraData)) + return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraDataInfo.extraData)) } err = enc.CBOR.EncodeTagHead(tagNum) @@ -330,18 +336,20 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return NewEncodingError(err) } - err = extraData.Encode(enc, func(enc *Encoder, typeInfo TypeInfo) error { - index, exist := typeInfoIndexes[typeInfo.Identifier()] + err = extraDataInfo.extraData.Encode(enc, func(enc *Encoder, _ TypeInfo) error { + encodedTypeInfo := extraDataInfo.encodedTypeInfo + + index, exist := typeInfoIndexes[encodedTypeInfo] if !exist { // typeInfo is not encoded separately, so encode typeInfo as is here. - err = typeInfo.Encode(enc.CBOR) + err = enc.CBOR.EncodeRawBytes([]byte(encodedTypeInfo)) if err != nil { return NewEncodingError(err) } return nil } - err := enc.CBOR.EncodeRawBytes(typeInfoRefTagHeadAndTagNumber) + err = enc.CBOR.EncodeRawBytes(typeInfoRefTagHeadAndTagNumber) if err != nil { return NewEncodingError(err) } @@ -366,58 +374,50 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return nil } -func findDuplicateTypeInfo(extraData []ExtraData) ([]TypeInfo, map[string]int) { - if len(extraData) < 2 { +func (ied *InlinedExtraData) findDuplicateTypeInfo() ([]string, map[string]int) { + if len(ied.extraData) < 2 { // No duplicate type info return nil, nil } - // typeInfoSet is used to deduplicate TypeInfo. 
- // typeInfoSet key: TypeInfo.Identifier() - // typeInfoSet value: indexes of extra data containing this type info - typeInfoSet := make(map[string][]int, len(extraData)) - - for i, data := range extraData { - typeID := data.Type().Identifier() - - indexes := typeInfoSet[typeID] - typeInfoSet[typeID] = append(indexes, i) + // Make a copy of encoded type info to sort + encodedTypeInfo := make([]string, len(ied.extraData)) + for i, info := range ied.extraData { + encodedTypeInfo[i] = info.encodedTypeInfo } - if len(extraData) == len(typeInfoSet) { - // No duplicate type info - return nil, nil - } + sort.Strings(encodedTypeInfo) - firstExtraDataIndexContainingDuplicateTypeInfo := make([]int, 0, len(typeInfoSet)) - for _, v := range typeInfoSet { - if len(v) > 1 { - firstExtraDataIndexContainingDuplicateTypeInfo = append(firstExtraDataIndexContainingDuplicateTypeInfo, v[0]) - } - } + // Find duplicate type info + var duplicateTypeInfo []string + var duplicateTypeInfoIndexes map[string]int - switch len(firstExtraDataIndexContainingDuplicateTypeInfo) { - case 1: - extraDataIndex := firstExtraDataIndexContainingDuplicateTypeInfo[0] - typeInfo := extraData[extraDataIndex].Type() - return []TypeInfo{typeInfo}, map[string]int{typeInfo.Identifier(): 0} + for currentIndex := 1; currentIndex < len(encodedTypeInfo); { - default: - sort.Ints(firstExtraDataIndexContainingDuplicateTypeInfo) + if encodedTypeInfo[currentIndex-1] != encodedTypeInfo[currentIndex] { + currentIndex++ + continue + } - typeInfos := make([]TypeInfo, 0, len(firstExtraDataIndexContainingDuplicateTypeInfo)) - typeInfoIndexes := make(map[string]int) + // Found duplicate type info at currentIndex + duplicate := encodedTypeInfo[currentIndex] - for _, extraDataIndex := range firstExtraDataIndexContainingDuplicateTypeInfo { - index := len(typeInfos) + // Insert duplicate into duplicate type info list and map + duplicateTypeInfo = append(duplicateTypeInfo, duplicate) - typeInfo := extraData[extraDataIndex].Type() - typeInfos = append(typeInfos, typeInfo) - typeInfoIndexes[typeInfo.Identifier()] = index + if duplicateTypeInfoIndexes == nil { + duplicateTypeInfoIndexes = make(map[string]int) } + duplicateTypeInfoIndexes[duplicate] = len(duplicateTypeInfo) - 1 - return typeInfos, typeInfoIndexes + // Skip same duplicate from sorted list + currentIndex++ + for currentIndex < len(encodedTypeInfo) && encodedTypeInfo[currentIndex] == duplicate { + currentIndex++ + } } + + return duplicateTypeInfo, duplicateTypeInfoIndexes } func newInlinedExtraDataFromData( @@ -501,30 +501,39 @@ func newInlinedExtraDataFromData( // addArrayExtraData returns index of deduplicated array extra data. // Array extra data is deduplicated by array type info ID because array // extra data only contains type info. 
-func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { +func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) (int, error) { + encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) + if err != nil { + return 0, err + } + if ied.arrayExtraDataSet == nil { ied.arrayExtraDataSet = make(map[string]int) } - id := data.TypeInfo.Identifier() - index, exist := ied.arrayExtraDataSet[id] + index, exist := ied.arrayExtraDataSet[encodedTypeInfo] if exist { - return index + return index, nil } index = len(ied.extraData) - ied.extraData = append(ied.extraData, data) - ied.arrayExtraDataSet[id] = index + ied.extraData = append(ied.extraData, extraDataAndEncodedTypeInfo{data, encodedTypeInfo}) + ied.arrayExtraDataSet[encodedTypeInfo] = index - return index + return index, nil } // addMapExtraData returns index of map extra data. // Map extra data is not deduplicated because it also contains count and seed. -func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) int { +func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) (int, error) { + encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) + if err != nil { + return 0, err + } + index := len(ied.extraData) - ied.extraData = append(ied.extraData, data) - return index + ied.extraData = append(ied.extraData, extraDataAndEncodedTypeInfo{data, encodedTypeInfo}) + return index, nil } // addCompactMapExtraData returns index of deduplicated compact map extra data. @@ -533,16 +542,21 @@ func (ied *InlinedExtraData) addCompactMapExtraData( data *MapExtraData, digests []Digest, keys []ComparableStorable, -) (int, []ComparableStorable) { +) (int, []ComparableStorable, error) { + + encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) + if err != nil { + return 0, nil, err + } if ied.compactMapTypeSet == nil { ied.compactMapTypeSet = make(map[string]compactMapTypeInfo) } - id := makeCompactMapTypeID(data.TypeInfo, keys) - info, exist := ied.compactMapTypeSet[id] + compactMapTypeID := makeCompactMapTypeID(encodedTypeInfo, keys) + info, exist := ied.compactMapTypeSet[compactMapTypeID] if exist { - return info.index, info.keys + return info.index, info.keys, nil } compactMapData := &compactMapExtraData{ @@ -552,14 +566,14 @@ func (ied *InlinedExtraData) addCompactMapExtraData( } index := len(ied.extraData) - ied.extraData = append(ied.extraData, compactMapData) + ied.extraData = append(ied.extraData, extraDataAndEncodedTypeInfo{compactMapData, encodedTypeInfo}) - ied.compactMapTypeSet[id] = compactMapTypeInfo{ + ied.compactMapTypeSet[compactMapTypeID] = compactMapTypeInfo{ keys: keys, index: index, } - return index, keys + return index, keys, nil } func (ied *InlinedExtraData) empty() bool { @@ -567,18 +581,18 @@ func (ied *InlinedExtraData) empty() bool { } // makeCompactMapTypeID returns id of concatenated t.ID() with sorted names with "," as separator. -func makeCompactMapTypeID(t TypeInfo, names []ComparableStorable) string { +func makeCompactMapTypeID(encodedTypeInfo string, names []ComparableStorable) string { const separator = "," if len(names) == 1 { - return t.Identifier() + separator + names[0].ID() + return encodedTypeInfo + separator + names[0].ID() } sorter := newFieldNameSorter(names) sort.Sort(sorter) - return t.Identifier() + separator + sorter.join(separator) + return encodedTypeInfo + separator + sorter.join(separator) } // fieldNameSorter sorts names by index (not in place sort). 
@@ -622,3 +636,36 @@ func (fn *fieldNameSorter) join(sep string) string { } return sb.String() } + +func getEncodedTypeInfo(ti TypeInfo) (string, error) { + b := getTypeIDBuffer() + defer putTypeIDBuffer(b) + + enc := cbor.NewStreamEncoder(b) + err := ti.Encode(enc) + if err != nil { + return "", err + } + enc.Flush() + + return b.String(), nil +} + +const defaultTypeIDBufferSize = 256 + +var typeIDBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(defaultTypeIDBufferSize) + return e + }, +} + +func getTypeIDBuffer() *bytes.Buffer { + return typeIDBufferPool.Get().(*bytes.Buffer) +} + +func putTypeIDBuffer(e *bytes.Buffer) { + e.Reset() + typeIDBufferPool.Put(e) +} From 661c07f63f6d3cd114725f84999a1f63c3c8895c Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 5 Apr 2024 10:54:08 -0500 Subject: [PATCH 105/126] Fix error type for external errors during serialization The wrong error type was returned when an external error was encountered. This commit returns the correct error type. --- array.go | 3 ++- map.go | 6 ++++-- typeinfo.go | 15 +++++++++++++-- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/array.go b/array.go index 15381b5d..ab2f6514 100644 --- a/array.go +++ b/array.go @@ -722,7 +722,8 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder) error { extraDataIndex, err := enc.inlinedExtraData().addArrayExtraData(a.extraData) if err != nil { - return NewEncodingError(err) + // err is already categorized by InlinedExtraData.addArrayExtraData(). + return err } if extraDataIndex > maxInlinedExtraDataIndex { diff --git a/map.go b/map.go index 5c3d37a2..b89474cd 100644 --- a/map.go +++ b/map.go @@ -3009,7 +3009,8 @@ func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder) error { extraDataIndex, err := enc.inlinedExtraData().addMapExtraData(m.extraData) if err != nil { - return NewEncodingError(err) + // err is already categorized by InlinedExtraData.addMapExtraData(). + return err } if extraDataIndex > maxInlinedExtraDataIndex { @@ -3070,7 +3071,8 @@ func encodeAsInlinedCompactMap( extraDataIndex, cachedKeys, err := enc.inlinedExtraData().addCompactMapExtraData(extraData, hkeys, keys) if err != nil { - return NewEncodingError(err) + // err is already categorized by InlinedExtraData.addCompactMapExtraData(). + return err } if len(keys) != len(cachedKeys) { diff --git a/typeinfo.go b/typeinfo.go index 4f62fd9d..a5dadeaa 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -129,6 +129,7 @@ func (c *compactMapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo // element 0: map extra data err = c.mapExtraData.Encode(enc, encodeTypeInfo) if err != nil { + // err is already categorized by MapExtraData.Encode(). return err } @@ -160,7 +161,8 @@ func (c *compactMapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo for _, key := range c.keys { err = key.Encode(enc) if err != nil { - return NewEncodingError(err) + // Wrap err as external error (if needed) because err is returned by ComparableStorable.Encode(). + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode key's storable") } } @@ -195,6 +197,7 @@ func newCompactMapExtraData( // element 0: map extra data mapExtraData, err := newMapExtraData(dec, decodeTypeInfo) if err != nil { + // err is already categorized by newMapExtraData(). return nil, err } @@ -362,6 +365,7 @@ func (ied *InlinedExtraData) Encode(enc *Encoder) error { return nil }) if err != nil { + // err is already categorized by ExtraData.Encode(). 
return err } } @@ -475,18 +479,21 @@ func newInlinedExtraDataFromData( case CBORTagInlinedArrayExtraData: inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo) if err != nil { + // err is already categorized by newArrayExtraData(). return nil, nil, err } case CBORTagInlinedMapExtraData: inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo) if err != nil { + // err is already categorized by newMapExtraData(). return nil, nil, err } case CBORTagInlinedCompactMapExtraData: inlinedExtraData[i], err = newCompactMapExtraData(dec, decodeTypeInfo, decodeStorable) if err != nil { + // err is already categorized by newCompactMapExtraData(). return nil, nil, err } @@ -504,6 +511,7 @@ func newInlinedExtraDataFromData( func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) (int, error) { encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) if err != nil { + // err is already categorized by getEncodedTypeInfo(). return 0, err } @@ -528,6 +536,7 @@ func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) (int, error func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) (int, error) { encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) if err != nil { + // err is already categorized by getEncodedTypeInfo(). return 0, err } @@ -546,6 +555,7 @@ func (ied *InlinedExtraData) addCompactMapExtraData( encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) if err != nil { + // err is already categorized by getEncodedTypeInfo(). return 0, nil, err } @@ -644,7 +654,8 @@ func getEncodedTypeInfo(ti TypeInfo) (string, error) { enc := cbor.NewStreamEncoder(b) err := ti.Encode(enc) if err != nil { - return "", err + // Wrap err as external error (if needed) because err is returned by TypeInfo.Encode(). + return "", wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") } enc.Flush() From 30dad00da2bfa82e14bcde1cbd677aa3c6861cc6 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:02:45 -0500 Subject: [PATCH 106/126] Add feature to fix refs to non-existent registers In testnet, broken references seem to have resulted from a bug that was fixed 2 years ago by https://github.com/onflow/cadence/pull/1565. A broken reference is a `StorageID` referencing a non-existent register. So far, only 10 registers in testnet (none on mainnet) were found to contain broken references. This commit adds a feature to enable migration programs in onflow/flow-go to fix broken references in maps. `FixLoadedBrokenReferences()` traverses loaded slabs and replaces broken map (if any) with empty map having the same StorageID and also removes all slabs in the old map. Limitations: - only fix broken references in map (this is intentional) - only traverse loaded slabs in deltas and cache IMPORTANT: This should not be used to silently fix unknown problems. It should only be used by migration programs to fix known problems which were determined to be appropriate to fix in this manner. --- storage.go | 233 +++++++++ storage_test.go | 1274 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1507 insertions(+) diff --git a/storage.go b/storage.go index 3f79d40e..9b4f5a5d 100644 --- a/storage.go +++ b/storage.go @@ -21,6 +21,7 @@ package atree import ( "bytes" "encoding/binary" + "errors" "fmt" "sort" "strings" @@ -1088,3 +1089,235 @@ func storeSlab(storage SlabStorage, slab Slab) error { } return nil } + +// FixLoadedBrokenReferences traverses loaded slabs and fixes broken references in maps. 
+// A broken reference is a SlabID referencing a non-existent slab. +// To fix a map containing broken references, this function replaces broken map with +// empty map having the same SlabID and also removes all slabs in the old map. +// Limitations: +// - only fix broken references in map +// - only traverse loaded slabs in deltas and cache +// NOTE: The intended use case is to enable migration programs in onflow/flow-go to +// fix broken references. As of April 2024, only 10 registers in testnet (not mainnet) +// were found to have broken references and they seem to have resulted from a bug +// that was fixed 2 years ago by https://github.com/onflow/cadence/pull/1565. +func (s *PersistentSlabStorage) FixLoadedBrokenReferences() ([]SlabID, error) { + + // parentOf is used to find root slab from non-root slab. + // Broken reference can be in non-root slab, and we need SlabID of root slab + // to replace broken map by creating an empty new map with same SlabID. + parentOf := make(map[SlabID]SlabID) + + getRootSlabID := func(id SlabID) SlabID { + for { + parentID, ok := parentOf[id] + if ok { + id = parentID + } else { + return id + } + } + } + + hasBrokenReferenceInSlab := func(id SlabID, slab Slab) bool { + if slab == nil { + return false + } + + var isMetaDataSlab bool + + switch slab.(type) { + case *ArrayMetaDataSlab, *MapMetaDataSlab: + isMetaDataSlab = true + } + + var foundBrokenRef bool + for _, childStorable := range slab.ChildStorables() { + + slabIDStorable, ok := childStorable.(SlabIDStorable) + if !ok { + continue + } + + childID := SlabID(slabIDStorable) + + // Track parent-child relationship of root slabs and non-root slabs. + if isMetaDataSlab { + parentOf[childID] = id + } + + if s.existIfLoaded(childID) { + continue + } + + foundBrokenRef = true + + if !isMetaDataSlab { + return true + } + } + + return foundBrokenRef + } + + var brokenStorageIDs []SlabID + + // Iterate delta slabs. + for id, slab := range s.deltas { + if hasBrokenReferenceInSlab(id, slab) { + brokenStorageIDs = append(brokenStorageIDs, id) + } + } + + // Iterate cache slabs. + for id, slab := range s.cache { + if _, ok := s.deltas[id]; ok { + continue + } + if hasBrokenReferenceInSlab(id, slab) { + brokenStorageIDs = append(brokenStorageIDs, id) + } + } + + if len(brokenStorageIDs) == 0 { + return nil, nil + } + + rootSlabStorageIDsWithBrokenData := make(map[SlabID]struct{}) + var errs []error + + // Find StorageIDs of root slab for slabs containing broken references. + for _, id := range brokenStorageIDs { + rootID := getRootSlabID(id) + if rootID == SlabIDUndefined { + errs = append(errs, fmt.Errorf("failed to get root slab id for slab %s", id)) + continue + } + rootSlabStorageIDsWithBrokenData[rootID] = struct{}{} + } + + for rootSlabID := range rootSlabStorageIDsWithBrokenData { + rootSlab := s.RetrieveIfLoaded(rootSlabID) + if rootSlab == nil { + errs = append(errs, fmt.Errorf("failed to retrieve loaded root slab %s", rootSlabID)) + continue + } + + switch rootSlab := rootSlab.(type) { + case MapSlab: + if rootSlab.ExtraData() == nil { + errs = append(errs, fmt.Errorf("failed to fix broken references because slab %s isn't root slab", rootSlab.SlabID())) + continue + } + + err := s.fixBrokenReferencesInMap(rootSlab) + if err != nil { + errs = append(errs, err) + } + + default: + // IMPORTANT: Only handle map slabs for now. DO NOT silently fix currently unknown problems. 
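+			// Fixing is intentionally limited to maps; any other
+			// slab type is reported as an error below instead of
+			// being silently modified.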
+			errs = append(errs, fmt.Errorf("failed to fix broken references in non-map slab %s (%T)", rootSlab.SlabID(), rootSlab))
+		}
+	}
+
+	return brokenStorageIDs, errors.Join(errs...)
+}
+
+// fixBrokenReferencesInMap replaces the broken map with an empty map
+// having the same SlabID and also removes all slabs in the old map.
+func (s *PersistentSlabStorage) fixBrokenReferencesInMap(old MapSlab) error {
+	id := old.SlabID()
+
+	// Create an empty map with the same SlabID, type, and seed as the old map.
+	new := &MapDataSlab{
+		header: MapSlabHeader{
+			slabID: id,
+			size:   mapRootDataSlabPrefixSize + hkeyElementsPrefixSize,
+		},
+		extraData: &MapExtraData{
+			TypeInfo: old.ExtraData().TypeInfo,
+			Seed:     old.ExtraData().Seed,
+		},
+		elements: newHkeyElements(0),
+	}
+
+	// Store new empty map with the same SlabID.
+	err := s.Store(id, new)
+	if err != nil {
+		return err
+	}
+
+	// Remove all slabs and references in old map.
+	references, _, err := s.getAllChildReferences(old)
+	if err != nil {
+		return err
+	}
+
+	for _, childID := range references {
+		err = s.Remove(childID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *PersistentSlabStorage) existIfLoaded(id SlabID) bool {
+	// Check deltas.
+	if slab, ok := s.deltas[id]; ok {
+		return slab != nil
+	}
+
+	// Check read cache.
+	if slab, ok := s.cache[id]; ok {
+		return slab != nil
+	}
+
+	return false
+}
+
+// getAllChildReferences returns child references of given slab (all levels).
+func (s *PersistentSlabStorage) getAllChildReferences(slab Slab) (
+	references []SlabID,
+	brokenReferences []SlabID,
+	err error,
+) {
+	childStorables := slab.ChildStorables()
+
+	for len(childStorables) > 0 {
+
+		var nextChildStorables []Storable
+
+		for _, childStorable := range childStorables {
+
+			slabIDStorable, ok := childStorable.(SlabIDStorable)
+			if !ok {
+				continue
+			}
+
+			childID := SlabID(slabIDStorable)
+
+			childSlab, ok, err := s.Retrieve(childID)
+			if err != nil {
+				return nil, nil, err
+			}
+			if !ok {
+				brokenReferences = append(brokenReferences, childID)
+				continue
+			}
+
+			references = append(references, childID)
+
+			nextChildStorables = append(
+				nextChildStorables,
+				childSlab.ChildStorables()...,
+			)
+		}
+
+		childStorables = nextChildStorables
+	}
+
+	return references, brokenReferences, nil
+}
diff --git a/storage_test.go b/storage_test.go
index 2b90bb11..9852cd64 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -1240,3 +1240,1277 @@ func (s slowStorable) Encode(encoder *Encoder) error {
 	runtime.KeepAlive(n)
 	return s.Uint8Value.Encode(encoder)
 }
+
+func TestFixLoadedBrokenReferences(t *testing.T) {
+	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+	t.Run("healthy", func(t *testing.T) {
+
+		// Create a healthy storage with arrays and maps
+		mapMetaDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		mapDataNonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+		mapDataNonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}
+		nestedArrayID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}}
+
+		emptyMapDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 5}}
+
+		mapDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}}
+
+		emptyArrayDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 7}}
+
+		arrayDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 8}}
+
+		arrayMetaDataRootID := SlabID{address: address, index: 
SlabIndex{0, 0, 0, 0, 0, 0, 0, 9}} + arrayDataNonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} + arrayDataNonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + nestedArrayID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 12}} + + rootIDs := []SlabID{ + mapMetaDataRootID, + emptyMapDataRootID, + mapDataRootID, + emptyArrayDataRootID, + arrayDataRootID, + arrayMetaDataRootID, + } + + data := map[SlabID][]byte{ + // root map metadata slab + // metadata slab + mapMetaDataRootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + mapDataNonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 
0x64, 0x64, 0x64, 0x64, + }, + + // data slab + mapDataNonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // array data slab + nestedArrayID: { + // extra data + // version + 0x00, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2b, + + // version + 0x00, + // flag: root + array data + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + + // empty map + emptyMapDataRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + + // root map data slab + mapDataRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded 
array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + }, + + // empty array + emptyArrayDataRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x00, + }, + + // root array data slab + arrayDataRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + + // root array metadata slab + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + arrayMetaDataRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + arrayDataNonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + arrayDataNonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, + }, + + // (data slab) next: 0, data: [0] + nestedArrayID2: { + // 
extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + rootIDSet, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, len(rootIDs), len(rootIDSet)) + + for _, rootID := range rootIDs { + _, found := rootIDSet[rootID] + require.True(t, found) + } + + // Fix broken reference + fixedIDs, err := storage.FixLoadedBrokenReferences() + require.NoError(t, err) + require.Equal(t, 0, len(fixedIDs)) + + // No data is modified during fixing broken reference + require.Equal(t, 0, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDSet, err = CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, len(rootIDs), len(rootIDSet)) + + }) + + t.Run("broken root map data slab", func(t *testing.T) { + + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + brokenRefSlabID := rootID + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [SlabID(0x0.1):uint64(0)] + 0x82, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0xd8, 0xa4, 0x00, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + // Fix broken reference + fixedIDs, err := storage.FixLoadedBrokenReferences() + require.NoError(t, err) + require.Equal(t, 1, len(fixedIDs)) + require.Equal(t, 
brokenRefSlabID, fixedIDs[0]) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("broken non-root map data slab", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + brokenRefSlabID := nonRootDataID2 + + // Expected serialized slab data with storage id + data := map[SlabID][]byte{ + + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: 
[cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + 
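+		// After the fix, only the empty root slab should remain;
+		// the map's data slabs (including the one holding the broken
+		// reference) are removed.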
storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + // Fix broken reference + fixedIDs, err := storage.FixLoadedBrokenReferences() + require.NoError(t, err) + require.Equal(t, 1, len(fixedIDs)) + require.Equal(t, brokenRefSlabID, fixedIDs[0]) + require.Equal(t, 3, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("multiple data slabs with broken reference in the same map", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + brokenRefSlabID := []SlabID{nonRootDataID1, nonRootDataID2} + + data := map[SlabID][]byte{ + + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 
0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map 
data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab not found during slab iteration") + + // Fix broken reference + fixedIDs, err := storage.FixLoadedBrokenReferences() + require.NoError(t, err) + require.Equal(t, len(brokenRefSlabID), len(fixedIDs)) + require.Equal(t, 3, len(storage.deltas)) + + for _, expected := range brokenRefSlabID { + var found bool + for _, actual := range fixedIDs { + if actual == expected { + found = true + break + } + } + require.True(t, found) + } + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("broken reference in nested container", func(t *testing.T) { + parentContainerRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + nestedContainerRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + brokenRefSlabID := []SlabID{nestedContainerRootID} + + data := map[SlabID][]byte{ + + // metadata slab + parentContainerRootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) 
+ 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: 
[gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // map data slab + nestedContainerRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + // map data slab + nestedContainerRootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + // Fix broken reference + fixedIDs, err := storage.FixLoadedBrokenReferences() + require.NoError(t, err) + require.Equal(t, len(brokenRefSlabID), len(fixedIDs)) + require.Equal(t, brokenRefSlabID[0], fixedIDs[0]) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[parentContainerRootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 4, len(baseStorage.segments)) + + savedData, found, err := 
baseStorage.Retrieve(nestedContainerRootID)
+		require.NoError(t, err)
+		require.True(t, found)
+		require.Equal(t, fixedData[nestedContainerRootID], savedData)
+	})
+}

From 2a71edc4f15c73c07b73f2340281ab45d2402f9c Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Thu, 11 Apr 2024 15:45:40 -0500
Subject: [PATCH 107/126] Bump Go to 1.20 in coverage.yml

---
 .github/workflows/coverage.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 88b4907a..0ec246bc 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -22,7 +22,7 @@ jobs:
 
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.19'
+          go-version: '1.20'
           check-latest: true
 
       - name: Get dependencies

From 66477156165abfe3c46ac869272f4aeaf020d229 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Mon, 15 Apr 2024 09:23:01 -0500
Subject: [PATCH 108/126] Update storage.go
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Bastian Müller

---
 storage.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/storage.go b/storage.go
index 9b4f5a5d..fb0f206c 100644
--- a/storage.go
+++ b/storage.go
@@ -1229,6 +1229,8 @@ func (s *PersistentSlabStorage) FixLoadedBrokenReferences() ([]SlabID, error) {
 func (s *PersistentSlabStorage) fixBrokenReferencesInMap(old MapSlab) error {
 	id := old.SlabID()
 
+	oldExtraData := old.ExtraData()
+
 	// Create an empty map with the same StorageID, type, and seed as the old map.
 	new := &MapDataSlab{
 		header: MapSlabHeader{
@@ -1236,8 +1238,8 @@ func (s *PersistentSlabStorage) fixBrokenReferencesInMap(old MapSlab) error {
 			size: mapRootDataSlabPrefixSize + hkeyElementsPrefixSize,
 		},
 		extraData: &MapExtraData{
-			TypeInfo: old.ExtraData().TypeInfo,
-			Seed:     old.ExtraData().Seed,
+			TypeInfo: oldExtraData.TypeInfo,
+			Seed:     oldExtraData.Seed,
 		},
 		elements: newHkeyElements(0),
 	}

From 3efd6bed83271ff396879fc0fb60e43c4c3522c3 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Tue, 16 Apr 2024 14:25:22 -0500
Subject: [PATCH 109/126] Allow callers to skip fixing a broken reference

Currently, migration programs can limit calls to FixLoadedBrokenReferences
by specifying the 9 testnet accounts affected by the 10 registers with
broken references on testnet.

This commit allows callers to implement additional restrictions, so
calls to FixLoadedBrokenReferences for the 9 affected testnet accounts
can be limited even further at the caller's discretion.

In practice, this change is not expected to produce different migration
results because full migration tests before this change correctly fixed
the 10 known registers in the 9 testnet accounts.

This commit adds the predicate func(old Value) bool to
PersistentSlabStorage.FixLoadedBrokenReferences() to control whether to
fix an atree.Value containing broken references.

It also modifies PersistentSlabStorage.FixLoadedBrokenReferences() to
return fixed slab IDs and skipped slab IDs. Both returned values are of
type map[SlabID][]SlabID, with the root slab ID as key and all slab IDs
containing broken references connected to that root as value.

Also added more tests and improved existing tests.
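For illustration, a migration program might wire up the new predicate
like this (a minimal sketch; fixableIDs is a hypothetical allowlist,
and asserting *OrderedMap mirrors the tests, since only maps are fixed
by this function):

    // Hypothetical allowlist of root slab IDs that may be fixed.
    fixableIDs := map[SlabID]struct{}{
        // known root slab IDs in the 9 affected accounts
    }

    fixedRootIDs, skippedRootIDs, err := storage.FixLoadedBrokenReferences(
        func(old Value) bool {
            // Only fix maps whose root slab ID is allowlisted;
            // everything else is skipped and reported.
            m, ok := old.(*OrderedMap)
            if !ok {
                return false
            }
            _, ok = fixableIDs[m.SlabID()]
            return ok
        },
    )
    if err != nil {
        // err may aggregate multiple failures via errors.Join.
    }
    // fixedRootIDs and skippedRootIDs are both map[SlabID][]SlabID,
    // keyed by root slab ID.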
--- storage.go | 51 +++-- storage_test.go | 514 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 520 insertions(+), 45 deletions(-) diff --git a/storage.go b/storage.go index fb0f206c..e8cd23ff 100644 --- a/storage.go +++ b/storage.go @@ -1101,7 +1101,11 @@ func storeSlab(storage SlabStorage, slab Slab) error { // fix broken references. As of April 2024, only 10 registers in testnet (not mainnet) // were found to have broken references and they seem to have resulted from a bug // that was fixed 2 years ago by https://github.com/onflow/cadence/pull/1565. -func (s *PersistentSlabStorage) FixLoadedBrokenReferences() ([]SlabID, error) { +func (s *PersistentSlabStorage) FixLoadedBrokenReferences(needToFix func(old Value) bool) ( + fixedSlabIDs map[SlabID][]SlabID, // key: root slab ID, value: slab IDs containing broken refs + skippedSlabIDs map[SlabID][]SlabID, // key: root slab ID, value: slab IDs containing broken refs + err error, +) { // parentOf is used to find root slab from non-root slab. // Broken reference can be in non-root slab, and we need SlabID of root slab @@ -1160,12 +1164,12 @@ func (s *PersistentSlabStorage) FixLoadedBrokenReferences() ([]SlabID, error) { return foundBrokenRef } - var brokenStorageIDs []SlabID + var brokenSlabIDs []SlabID // Iterate delta slabs. for id, slab := range s.deltas { if hasBrokenReferenceInSlab(id, slab) { - brokenStorageIDs = append(brokenStorageIDs, id) + brokenSlabIDs = append(brokenSlabIDs, id) } } @@ -1175,28 +1179,28 @@ func (s *PersistentSlabStorage) FixLoadedBrokenReferences() ([]SlabID, error) { continue } if hasBrokenReferenceInSlab(id, slab) { - brokenStorageIDs = append(brokenStorageIDs, id) + brokenSlabIDs = append(brokenSlabIDs, id) } } - if len(brokenStorageIDs) == 0 { - return nil, nil + if len(brokenSlabIDs) == 0 { + return nil, nil, nil } - rootSlabStorageIDsWithBrokenData := make(map[SlabID]struct{}) + rootSlabIDsWithBrokenData := make(map[SlabID][]SlabID) var errs []error - // Find StorageIDs of root slab for slabs containing broken references. - for _, id := range brokenStorageIDs { + // Find SlabIDs of root slab for slabs containing broken references. 
+	for _, id := range brokenSlabIDs {
 		rootID := getRootSlabID(id)
 		if rootID == SlabIDUndefined {
 			errs = append(errs, fmt.Errorf("failed to get root slab id for slab %s", id))
 			continue
 		}
-		rootSlabStorageIDsWithBrokenData[rootID] = struct{}{}
+		rootSlabIDsWithBrokenData[rootID] = append(rootSlabIDsWithBrokenData[rootID], id)
 	}
 
-	for rootSlabID := range rootSlabStorageIDsWithBrokenData {
+	for rootSlabID, brokenSlabIDs := range rootSlabIDsWithBrokenData {
 		rootSlab := s.RetrieveIfLoaded(rootSlabID)
 		if rootSlab == nil {
 			errs = append(errs, fmt.Errorf("failed to retrieve loaded root slab %s", rootSlabID))
@@ -1205,14 +1209,23 @@
 
 		switch rootSlab := rootSlab.(type) {
 		case MapSlab:
-			if rootSlab.ExtraData() == nil {
-				errs = append(errs, fmt.Errorf("failed to fix broken references because slab %s isn't root slab", rootSlab.SlabID()))
+			value, err := rootSlab.StoredValue(s)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("failed to convert slab %s into value", rootSlab.SlabID()))
 				continue
 			}
-			err := s.fixBrokenReferencesInMap(rootSlab)
-			if err != nil {
-				errs = append(errs, err)
+			if needToFix(value) {
+				err := s.fixBrokenReferencesInMap(rootSlab)
+				if err != nil {
+					errs = append(errs, err)
+					continue
+				}
+			} else {
+				if skippedSlabIDs == nil {
+					skippedSlabIDs = make(map[SlabID][]SlabID)
+				}
+				skippedSlabIDs[rootSlabID] = brokenSlabIDs
 			}
 
 		default:
@@ -1221,7 +1234,11 @@
 		}
 	}
 
-	return brokenStorageIDs, errors.Join(errs...)
+	for id := range skippedSlabIDs {
+		delete(rootSlabIDsWithBrokenData, id)
+	}
+
+	return rootSlabIDsWithBrokenData, skippedSlabIDs, errors.Join(errs...)
 }
 
 // fixBrokenReferencesInMap replaces broken map with empty map
diff --git a/storage_test.go b/storage_test.go
index 9852cd64..7dadeb04 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -1665,10 +1665,27 @@ func TestFixLoadedBrokenReferences(t *testing.T) {
 			require.True(t, found)
 		}
 
+		var fixedRootIDs map[SlabID][]SlabID
+		var skippedRootIDs map[SlabID][]SlabID
+
+		// Don't fix any broken references
+		fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool {
+			return false
+		})
+		require.NoError(t, err)
+		require.Equal(t, 0, len(fixedRootIDs))
+		require.Equal(t, 0, len(skippedRootIDs))
+
+		// No data is modified because no fix happened
+		require.Equal(t, 0, len(storage.deltas))
+
 		// Fix broken reference
-		fixedIDs, err := storage.FixLoadedBrokenReferences()
+		fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool {
+			return true
+		})
 		require.NoError(t, err)
-		require.Equal(t, 0, len(fixedIDs))
+		require.Equal(t, 0, len(fixedRootIDs))
+		require.Equal(t, 0, len(skippedRootIDs))
 
 		// No data is modified during fixing broken reference
 		require.Equal(t, 0, len(storage.deltas))
@@ -1684,7 +1701,9 @@ func TestFixLoadedBrokenReferences(t *testing.T) {
 
 		rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
 
-		brokenRefSlabID := rootID
+		brokenRefs := map[SlabID][]SlabID{
+			rootID: {rootID},
+		}
 
 		data := map[SlabID][]byte{
 			rootID: {
@@ -1777,11 +1796,36 @@ func TestFixLoadedBrokenReferences(t *testing.T) {
 		_, err := CheckStorageHealth(storage, -1)
 		require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration")
 
-		// Fix broken reference
-		fixedIDs, err := storage.FixLoadedBrokenReferences()
+		var fixedRootIDs map[SlabID][]SlabID
+		var skippedRootIDs
map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) require.NoError(t, err) - require.Equal(t, 1, len(fixedIDs)) - require.Equal(t, brokenRefSlabID, fixedIDs[0]) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) require.Equal(t, 1, len(storage.deltas)) // Check health after fixing broken reference @@ -1812,7 +1856,9 @@ func TestFixLoadedBrokenReferences(t *testing.T) { nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - brokenRefSlabID := nonRootDataID2 + brokenRefs := map[SlabID][]SlabID{ + rootID: {nonRootDataID2}, + } // Expected serialized slab data with storage id data := map[SlabID][]byte{ @@ -1995,11 +2041,36 @@ func TestFixLoadedBrokenReferences(t *testing.T) { _, err := CheckStorageHealth(storage, -1) require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + // Fix broken reference - fixedIDs, err := storage.FixLoadedBrokenReferences() + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) require.NoError(t, err) - require.Equal(t, 1, len(fixedIDs)) - require.Equal(t, brokenRefSlabID, fixedIDs[0]) + require.Equal(t, 1, len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) require.Equal(t, 3, len(storage.deltas)) // Check health after fixing broken reference @@ -2030,7 +2101,9 @@ func TestFixLoadedBrokenReferences(t *testing.T) { nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - brokenRefSlabID := []SlabID{nonRootDataID1, nonRootDataID2} + brokenRefs := map[SlabID][]SlabID{ + rootID: {nonRootDataID1, nonRootDataID2}, + } data := map[SlabID][]byte{ @@ -2212,23 +2285,38 @@ func TestFixLoadedBrokenReferences(t *testing.T) { _, err := CheckStorageHealth(storage, -1) require.ErrorContains(t, err, "slab not found during slab iteration") + var fixedRootIDs 
map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + // Fix broken reference - fixedIDs, err := storage.FixLoadedBrokenReferences() + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) require.NoError(t, err) - require.Equal(t, len(brokenRefSlabID), len(fixedIDs)) - require.Equal(t, 3, len(storage.deltas)) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) - for _, expected := range brokenRefSlabID { - var found bool - for _, actual := range fixedIDs { - if actual == expected { - found = true - break - } - } - require.True(t, found) + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) } + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 3, len(storage.deltas)) + // Check health after fixing broken reference rootIDs, err := CheckStorageHealth(storage, -1) require.NoError(t, err) @@ -2258,7 +2346,9 @@ func TestFixLoadedBrokenReferences(t *testing.T) { nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} nestedContainerRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} - brokenRefSlabID := []SlabID{nestedContainerRootID} + brokenRefs := map[SlabID][]SlabID{ + nestedContainerRootID: {nestedContainerRootID}, + } data := map[SlabID][]byte{ @@ -2484,11 +2574,36 @@ func TestFixLoadedBrokenReferences(t *testing.T) { _, err := CheckStorageHealth(storage, -1) require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + // Fix broken reference - fixedIDs, err := storage.FixLoadedBrokenReferences() + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) require.NoError(t, err) - require.Equal(t, len(brokenRefSlabID), len(fixedIDs)) - require.Equal(t, brokenRefSlabID[0], fixedIDs[0]) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) require.Equal(t, 1, len(storage.deltas)) // Check health after fixing broken reference @@ -2513,4 +2628,347 @@ func TestFixLoadedBrokenReferences(t *testing.T) { require.True(t, found) require.Equal(t, fixedData[nestedContainerRootID], savedData) }) + + t.Run("selectively fix maps", func(t *testing.T) { + rootID1 := 
SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} // containing broken ref + + rootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // containing broken ref + + rootIDs := []SlabID{rootID1, rootID2} + + brokenRefs := map[SlabID][]SlabID{ + rootID1: {nonRootDataID2}, + rootID2: {rootID2}, + } + + data := map[SlabID][]byte{ + // metadata slab + rootID1: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 
0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + + // map data slab + rootID2: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x4a, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + rootID1: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 
0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + + rootID2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x4a, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Only fix one map with broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(v Value) bool { + m, ok := v.(*OrderedMap) + require.True(t, ok) + return rootID1 == m.SlabID() + }) + require.NoError(t, err) + require.Equal(t, 1, len(fixedRootIDs)) + require.Equal(t, brokenRefs[rootID1], fixedRootIDs[rootID1]) + require.Equal(t, 1, len(skippedRootIDs)) + require.Equal(t, brokenRefs[rootID2], skippedRootIDs[rootID2]) + require.Equal(t, 3, len(storage.deltas)) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check health after only fixing one map with broken reference + _, err = CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab not found during slab iteration") + + // Fix remaining map with broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(v Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, 1, len(fixedRootIDs)) + require.Equal(t, brokenRefs[rootID2], fixedRootIDs[rootID2]) + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing remaining maps with broken reference + returnedRootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, len(rootIDs), len(returnedRootIDs)) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 2, len(baseStorage.segments)) + + 
savedData, found, err := baseStorage.Retrieve(rootID1)
+		require.NoError(t, err)
+		require.True(t, found)
+		require.Equal(t, fixedData[rootID1], savedData)
+
+		savedData, found, err = baseStorage.Retrieve(rootID2)
+		require.NoError(t, err)
+		require.True(t, found)
+		require.Equal(t, fixedData[rootID2], savedData)
+	})
 }

From b362f829eaad900199a708c47bee2ec2a16f674d Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Tue, 16 Apr 2024 17:19:27 -0500
Subject: [PATCH 110/126] Add PersistentSlabStorage.GetAllChildReferences()

This commit adds GetAllChildReferences() which essentially wraps an
existing internal function.

---
 storage.go      |   17 +
 storage_test.go | 1488 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1505 insertions(+)

diff --git a/storage.go b/storage.go
index e8cd23ff..e8843fac 100644
--- a/storage.go
+++ b/storage.go
@@ -1297,6 +1297,23 @@ func (s *PersistentSlabStorage) existIfLoaded(id SlabID) bool {
 	return false
 }
 
+// GetAllChildReferences returns child references of given slab (all levels),
+// including nested containers and their child references.
+func (s *PersistentSlabStorage) GetAllChildReferences(id StorageID) (
+	references []StorageID,
+	brokenReferences []StorageID,
+	err error,
+) {
+	slab, found, err := s.Retrieve(id)
+	if err != nil {
+		return nil, nil, err
+	}
+	if !found {
+		return nil, nil, NewSlabNotFoundErrorf(id, fmt.Sprintf("failed to get root slab by id %s", id))
+	}
+	return s.getAllChildReferences(slab)
+}
+
 // getAllChildReferences returns child references of given slab (all levels).
 func (s *PersistentSlabStorage) getAllChildReferences(slab Slab) (
 	references []SlabID,
diff --git a/storage_test.go b/storage_test.go
index 7dadeb04..74ff3a25 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -2972,3 +2972,1491 @@ func TestFixLoadedBrokenReferences(t *testing.T) {
 		require.Equal(t, fixedData[rootID2], savedData)
 	})
 }
+
+func TestGetAllChildReferencesFromArray(t *testing.T) {
+	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+	t.Run("empty", func(t *testing.T) {
+		rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+		expectedRefIDs := []StorageID{}
+		expectedBrokenRefIDs := []StorageID{}
+
+		data := map[StorageID][]byte{
+			rootID: {
+				// extra data
+				// version
+				0x00,
+				// extra data flag
+				0x80,
+				// array of extra data
+				0x81,
+				// type info
+				0x18, 0x2a,
+
+				// version
+				0x00,
+				// array data slab flag
+				0x80,
+				// CBOR encoded array head (fixed size 3 byte)
+				0x99, 0x00, 0x00,
+			},
+		}
+
+		testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs)
+	})
+
+	t.Run("root data slab without refs", func(t *testing.T) {
+		rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+		expectedRefIDs := []StorageID{}
+		expectedBrokenRefIDs := []StorageID{}
+
+		data := map[StorageID][]byte{
+			rootID: {
+				// extra data
+				// version
+				0x00,
+				// extra data flag
+				0x80,
+				// array of extra data
+				0x81,
+				// type info
+				0x18, 0x2a,
+
+				// version
+				0x00,
+				// array data slab flag
+				0x80,
+				// CBOR encoded array head (fixed size 3 byte)
+				0x99, 0x00, 0x01,
+				// CBOR encoded array elements
+				0xd8, 0xa4, 0x00,
+			},
+		}
+
+		testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs)
+	})
+
+	t.Run("root data slab with ref to nested element", func(t *testing.T) {
+		parentRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		childRootID := StorageID{Address: address, Index:
StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []StorageID{childRootID} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with broken ref", func(t *testing.T) { + parentRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []StorageID{} + expectedBrokenRefIDs := []StorageID{childRootID} + + data := map[StorageID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []StorageID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + nonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + } + + testGetAllChildReferences(t, data, rootID, 
expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to first data slab", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []StorageID{nonRootID2} + expectedBrokenRefIDs := []StorageID{nonRootID1} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with ref", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 
0, 4}} + + expectedRefIDs := []StorageID{nonRootID1, nonRootID2, childRootID} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + nonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
StorageID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to nested element", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []StorageID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []StorageID{childRootID} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + nonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, 
expectedBrokenRefIDs) + }) + + t.Run("3-level of nested containers", func(t *testing.T) { + parentRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + gchildRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []StorageID{childRootID, gchildRootID} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + gchildRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) +} + +func TestGetAllChildReferencesFromMap(t *testing.T) { + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expectedRefIDs := []StorageID{} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab without refs", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expectedRefIDs := []StorageID{} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // 
elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with ref", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []StorageID{childRootID} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with broken ref", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []StorageID{} + expectedBrokenRefIDs := []StorageID{childRootID} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []StorageID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 
0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to first data slab", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []StorageID{nonRootID2} + expectedBrokenRefIDs := []StorageID{nonRootID1} + + data := map[StorageID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with ref", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []StorageID{nonRootID1, nonRootID2, childRootID} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 
+ 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // 
each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + childRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to nested element", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []StorageID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []StorageID{childRootID} + + data := map[StorageID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 
0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("3-level containers", func(t *testing.T) { + rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + gchildRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []StorageID{childRootID, gchildRootID} + expectedBrokenRefIDs := []StorageID{} + + data := map[StorageID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + gchildRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) +} + +func testGetAllChildReferences( + t *testing.T, + data map[StorageID][]byte, + rootID StorageID, + expectedRefIDs []StorageID, + 
expectedBrokenRefIDs []StorageID, +) { + storage := newTestPersistentStorageWithData(t, data) + + refs, brokenRefs, err := storage.GetAllChildReferences(rootID) + require.NoError(t, err) + + require.Equal(t, len(expectedRefIDs), len(refs)) + require.ElementsMatch(t, expectedRefIDs, refs) + + require.Equal(t, len(expectedBrokenRefIDs), len(brokenRefs)) + require.ElementsMatch(t, expectedBrokenRefIDs, brokenRefs) +} From 8febf26a11258c0dd0e0dd3adba27f40b6740390 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 17 Apr 2024 09:25:35 -0500 Subject: [PATCH 111/126] Fix tests --- storage.go | 6 +- storage_test.go | 216 ++++++++++++++++++++++++------------------------ 2 files changed, 111 insertions(+), 111 deletions(-) diff --git a/storage.go b/storage.go index e8843fac..394ecf23 100644 --- a/storage.go +++ b/storage.go @@ -1299,9 +1299,9 @@ func (s *PersistentSlabStorage) existIfLoaded(id SlabID) bool { // GetAllChildReferences returns child references of given slab (all levels), // including nested container and theirs child references. -func (s *PersistentSlabStorage) GetAllChildReferences(id StorageID) ( - references []StorageID, - brokenReferences []StorageID, +func (s *PersistentSlabStorage) GetAllChildReferences(id SlabID) ( + references []SlabID, + brokenReferences []SlabID, err error, ) { slab, found, err := s.Retrieve(id) diff --git a/storage_test.go b/storage_test.go index 74ff3a25..1b407fb4 100644 --- a/storage_test.go +++ b/storage_test.go @@ -2977,12 +2977,12 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { address := Address{1, 2, 3, 4, 5, 6, 7, 8} t.Run("empty", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - expectedRefIDs := []StorageID{} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3007,12 +3007,12 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root data slab without refs", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - expectedRefIDs := []StorageID{} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3039,13 +3039,13 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root data slab with ref to nested element", func(t *testing.T) { - parentRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - expectedRefIDs := []StorageID{childRootID} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ parentRootID: { // extra data // version @@ -3093,13 +3093,13 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root data slab with broken ref", func(t *testing.T) { - parentRootID 
:= StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - expectedRefIDs := []StorageID{} - expectedBrokenRefIDs := []StorageID{childRootID} + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{childRootID} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ parentRootID: { // extra data // version @@ -3126,14 +3126,14 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root metadata slab", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - expectedRefIDs := []StorageID{nonRootID1, nonRootID2} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3183,7 +3183,7 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] nonRootID2: { // version 0x00, @@ -3212,14 +3212,14 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root metadata slab with broken ref to first data slab", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - expectedRefIDs := []StorageID{nonRootID2} - expectedBrokenRefIDs := []StorageID{nonRootID1} + expectedRefIDs := []SlabID{nonRootID2} + expectedBrokenRefIDs := []SlabID{nonRootID1} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3247,7 +3247,7 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { 0x00, 0x00, 0x01, 0x0e, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] nonRootID2: { // version 0x00, @@ -3276,16 +3276,16 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root metadata slab with ref", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} - expectedRefIDs := []StorageID{nonRootID1, nonRootID2, childRootID} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{nonRootID1, nonRootID2, childRootID} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3335,7 +3335,7 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] nonRootID2: { // version 0x00, @@ -3385,16 +3385,16 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("root metadata slab with broken ref to nested element", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} - expectedRefIDs := []StorageID{nonRootID1, nonRootID2} - expectedBrokenRefIDs := []StorageID{childRootID} + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{childRootID} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3444,7 +3444,7 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... StorageID(...)] + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] nonRootID2: { // version 0x00, @@ -3473,14 +3473,14 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { }) t.Run("3-level of nested containers", func(t *testing.T) { - parentRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - gchildRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + gchildRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - expectedRefIDs := []StorageID{childRootID, gchildRootID} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{childRootID, gchildRootID} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ parentRootID: { // extra data // version @@ -3553,12 +3553,12 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { address := Address{1, 2, 3, 4, 5, 6, 7, 8} t.Run("empty", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - expectedRefIDs := []StorageID{} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3600,12 +3600,12 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root data slab without refs", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - expectedRefIDs := []StorageID{} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3651,13 +3651,13 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root data slab with ref", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - expectedRefIDs := []StorageID{childRootID} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { // extra data // version @@ -3726,13 +3726,13 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root data slab with broken ref", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - expectedRefIDs := []StorageID{} - expectedBrokenRefIDs := []StorageID{childRootID} + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{childRootID} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ rootID: { 
// extra data // version @@ -3780,14 +3780,14 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root metadata slab", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - expectedRefIDs := []StorageID{nonRootID1, nonRootID2} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ // metadata slab rootID: { // extra data @@ -3924,14 +3924,14 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root metadata slab with broken ref to first data slab", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - expectedRefIDs := []StorageID{nonRootID2} - expectedBrokenRefIDs := []StorageID{nonRootID1} + expectedRefIDs := []SlabID{nonRootID2} + expectedBrokenRefIDs := []SlabID{nonRootID1} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ // metadata slab rootID: { // extra data @@ -4019,16 +4019,16 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root metadata slab with ref", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} - expectedRefIDs := []StorageID{nonRootID1, nonRootID2, childRootID} - expectedBrokenRefIDs := []StorageID{} + expectedRefIDs := []SlabID{nonRootID1, nonRootID2, childRootID} + expectedBrokenRefIDs := []SlabID{} - data := map[StorageID][]byte{ + data := map[SlabID][]byte{ // metadata slab rootID: { // extra data @@ -4200,16 +4200,16 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { }) t.Run("root metadata slab with broken ref to nested element", func(t *testing.T) { - rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}} - nonRootID1 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}} - nonRootID2 := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}} + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + 
nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+		nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}

-		childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 4}}
+		childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}}

-		expectedRefIDs := []StorageID{nonRootID1, nonRootID2}
-		expectedBrokenRefIDs := []StorageID{childRootID}
+		expectedRefIDs := []SlabID{nonRootID1, nonRootID2}
+		expectedBrokenRefIDs := []SlabID{childRootID}

-		data := map[StorageID][]byte{
+		data := map[SlabID][]byte{
 			// metadata slab
 			rootID: {
 				// extra data
@@ -4345,14 +4345,14 @@ func TestGetAllChildReferencesFromMap(t *testing.T) {
 	})

 	t.Run("3-level containers", func(t *testing.T) {
-		rootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 1}}
-		childRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 2}}
-		gchildRootID := StorageID{Address: address, Index: StorageIndex{0, 0, 0, 0, 0, 0, 0, 3}}
+		rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+		gchildRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}

-		expectedRefIDs := []StorageID{childRootID, gchildRootID}
-		expectedBrokenRefIDs := []StorageID{}
+		expectedRefIDs := []SlabID{childRootID, gchildRootID}
+		expectedBrokenRefIDs := []SlabID{}

-		data := map[StorageID][]byte{
+		data := map[SlabID][]byte{
 			rootID: {
 				// extra data
 				// version
@@ -4444,10 +4444,10 @@ func TestGetAllChildReferencesFromMap(t *testing.T) {

 func testGetAllChildReferences(
 	t *testing.T,
-	data map[StorageID][]byte,
-	rootID StorageID,
-	expectedRefIDs []StorageID,
-	expectedBrokenRefIDs []StorageID,
+	data map[SlabID][]byte,
+	rootID SlabID,
+	expectedRefIDs []SlabID,
+	expectedBrokenRefIDs []SlabID,
 ) {
 	storage := newTestPersistentStorageWithData(t, data)

From a6b615bc173087a9175933d07819564b679c7b2b Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Fri, 26 Apr 2024 08:56:02 -0500
Subject: [PATCH 112/126] Fix GetAllChildReferences used by migration filter

Migration programs in onflow/flow-go added a flag to filter old
unreferenced slabs, and onflow/atree added some functions to support
that. However, some of the old unreferenced slabs were not filtered
during migration.

This commit fixes the migration filter by handling nested storage IDs
inside elements such as Cadence SomeValue.
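The gist of the fix: instead of inspecting only a slab's immediate child storables, the traversal must also descend into storables that wrap other storables. The following is a minimal, self-contained sketch of that breadth-first walk; the types here are simplified stand-ins for atree's Storable, SlabID, and Cadence's SomeValue, not the real definitions (the actual change lives in getAllChildReferences and FixLoadedBrokenReferences in storage.go):

package main

import "fmt"

// Simplified stand-ins for atree's types, for illustration only.
type SlabID string

// Storable is the minimal interface needed for the traversal.
type Storable interface {
	ChildStorables() []Storable
}

// SlabIDStorable is a reference to another slab.
type SlabIDStorable SlabID

func (s SlabIDStorable) ChildStorables() []Storable { return nil }

// someValue mimics a wrapper storable such as Cadence's SomeValue.
type someValue struct{ inner Storable }

func (s someValue) ChildStorables() []Storable { return []Storable{s.inner} }

// collectSlabIDs walks storables breadth-first and returns every slab
// reference found, including references nested inside wrapper storables.
func collectSlabIDs(storables []Storable) []SlabID {
	var ids []SlabID
	for len(storables) > 0 {
		var next []Storable
		for _, st := range storables {
			if ref, ok := st.(SlabIDStorable); ok {
				ids = append(ids, SlabID(ref))
				continue
			}
			// Descend into non-reference storables so nested references
			// (e.g. SomeValue wrapping a SlabIDStorable) are not missed.
			next = append(next, st.ChildStorables()...)
		}
		storables = next
	}
	return ids
}

func main() {
	root := []Storable{someValue{inner: SlabIDStorable("0x1.2")}}
	fmt.Println(collectSlabIDs(root)) // prints [0x1.2]
}

The iterative queue (rather than recursion) mirrors the nextChildStorables loop added in this patch, so arbitrarily deep wrapper nesting is handled without growing the call stack.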
--- storage.go | 71 ++++++++---- storage_test.go | 283 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 331 insertions(+), 23 deletions(-) diff --git a/storage.go b/storage.go index 394ecf23..fe354ed7 100644 --- a/storage.go +++ b/storage.go @@ -1128,40 +1128,60 @@ func (s *PersistentSlabStorage) FixLoadedBrokenReferences(needToFix func(old Val return false } - var isMetaDataSlab bool - switch slab.(type) { - case *ArrayMetaDataSlab, *MapMetaDataSlab: - isMetaDataSlab = true - } + case *ArrayMetaDataSlab, *MapMetaDataSlab: // metadata slabs + var foundBrokenRef bool - var foundBrokenRef bool - for _, childStorable := range slab.ChildStorables() { + for _, childStorable := range slab.ChildStorables() { - slabIDStorable, ok := childStorable.(SlabIDStorable) - if !ok { - continue - } + if slabIDStorable, ok := childStorable.(SlabIDStorable); ok { - childID := SlabID(slabIDStorable) + childID := SlabID(slabIDStorable) - // Track parent-child relationship of root slabs and non-root slabs. - if isMetaDataSlab { - parentOf[childID] = id - } + // Track parent-child relationship of root slabs and non-root slabs. + parentOf[childID] = id - if s.existIfLoaded(childID) { - continue + if !s.existIfLoaded(childID) { + foundBrokenRef = true + } + + // Continue with remaining child storables to track parent-child relationship. + } } - foundBrokenRef = true + return foundBrokenRef + + default: // data slabs + childStorables := slab.ChildStorables() + + for len(childStorables) > 0 { + + var nextChildStorables []Storable + + for _, childStorable := range childStorables { + + if slabIDStorable, ok := childStorable.(SlabIDStorable); ok { - if !isMetaDataSlab { - return true + if !s.existIfLoaded(SlabID(slabIDStorable)) { + return true + } + + continue + } + + // Append child storables of this childStorable to + // handle nested SlabIDStorable, such as Cadence SomeValue. 
+ nextChildStorables = append( + nextChildStorables, + childStorable.ChildStorables()..., + ) + } + + childStorables = nextChildStorables } - } - return foundBrokenRef + return false + } } var brokenSlabIDs []SlabID @@ -1330,6 +1350,11 @@ func (s *PersistentSlabStorage) getAllChildReferences(slab Slab) ( slabIDStorable, ok := childStorable.(SlabIDStorable) if !ok { + nextChildStorables = append( + nextChildStorables, + childStorable.ChildStorables()..., + ) + continue } diff --git a/storage_test.go b/storage_test.go index 1b407fb4..25c9c837 100644 --- a/storage_test.go +++ b/storage_test.go @@ -1851,6 +1851,160 @@ func TestFixLoadedBrokenReferences(t *testing.T) { require.Equal(t, fixedData[rootID], savedData) }) + t.Run("broken nested storable in root map data slab", func(t *testing.T) { + + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + brokenRefs := map[SlabID][]SlabID{ + rootID: {rootID}, + } + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):SomeValue(SlabID(0x0.1))] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, cborTagSomeValue, 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + 
// Fix broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + t.Run("broken non-root map data slab", func(t *testing.T) { rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} @@ -3092,6 +3246,60 @@ func TestGetAllChildReferencesFromArray(t *testing.T) { testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) }) + t.Run("root data slab with ref in nested storable", func(t *testing.T) { + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, cborTagSomeValue, 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) + t.Run("root data slab with broken ref", func(t *testing.T) { parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} @@ -3725,6 +3933,81 @@ func TestGetAllChildReferencesFromMap(t *testing.T) { testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) }) + t.Run("root data slab with ref in nested storable", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra 
data (CBOR encoded array of 3 elements)
+				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// version
+				0x00,
+				// flag: root + map data
+				0x88,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 1)
+				0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+				// elements (array of 1 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// element: [uint64(0):SomeValue(SlabID(...))]
+				0x82,
+				0xd8, 0xa4, 0x00,
+				0xd8, cborTagSomeValue, 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+			},
+
+			childRootID: {
+				// extra data
+				// version
+				0x00,
+				// extra data flag
+				0x80,
+				// array of extra data
+				0x81,
+				// type info
+				0x18, 0x2a,
+
+				// version
+				0x00,
+				// array data slab flag
+				0x80,
+				// CBOR encoded array head (fixed size 3 byte)
+				0x99, 0x00, 0x01,
+				// CBOR encoded array elements
+				0xd8, 0xa4, 0x00,
+			},
+		}
+
+		testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs)
+	})
+
 	t.Run("root data slab with broken ref", func(t *testing.T) {
 		rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
 		childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}

From 7162eabcbf3bbe25fd2816bc4bace147bc6fa7d3 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Wed, 8 May 2024 14:48:59 -0500
Subject: [PATCH 113/126] Add NonderterministicFastCommit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

NonderterministicFastCommit commits changes in nondeterministic order.
It can be used by migration programs when ordering isn't required.
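For migrations that don't depend on commit ordering, a call site can simply swap FastCommit for NonderterministicFastCommit. A minimal sketch, assuming the github.com/onflow/atree import path and a hypothetical commitMigrated helper around an already-populated PersistentSlabStorage:

package migration

import (
	"fmt"
	"runtime"

	"github.com/onflow/atree"
)

// commitMigrated flushes pending slab changes without deterministic
// ordering, using one encoding worker per CPU.
func commitMigrated(storage *atree.PersistentSlabStorage) error {
	if err := storage.NonderterministicFastCommit(runtime.NumCPU()); err != nil {
		return fmt.Errorf("failed to commit migrated slabs: %w", err)
	}
	return nil
}

The benchstat output below, taken from the commit message, quantifies the improvement: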
│ before.txt │ after.txt │ │ sec/op │ sec/op vs base │ StorageFastCommit/10-12 89.72µ ± 4% 57.50µ ± 3% -35.92% (p=0.000 n=10) StorageFastCommit/100-12 118.9µ ± 1% 116.0µ ± 4% ~ (p=0.436 n=10) StorageFastCommit/1000-12 4.086m ± 5% 2.397m ± 25% -41.35% (p=0.000 n=10) StorageFastCommit/10000-12 12.629m ± 4% 9.857m ± 3% -21.95% (p=0.000 n=10) StorageFastCommit/100000-12 102.73m ± 0% 72.26m ± 1% -29.66% (p=0.000 n=10) StorageFastCommit/1000000-12 1.544 ± 2% 1.141 ± 2% -26.09% (p=0.000 n=10) geomean 6.661m 4.848m -27.21% │ before.txt │ after.txt │ │ B/op │ B/op vs base │ StorageFastCommit/10-12 28.92Ki ± 0% 28.05Ki ± 0% -3.00% (p=0.000 n=10) StorageFastCommit/100-12 286.4Ki ± 0% 278.6Ki ± 0% -2.71% (p=0.000 n=10) StorageFastCommit/1000-12 3.009Mi ± 0% 2.901Mi ± 0% -3.58% (p=0.000 n=10) StorageFastCommit/10000-12 28.65Mi ± 0% 27.79Mi ± 0% -2.98% (p=0.000 n=10) StorageFastCommit/100000-12 278.8Mi ± 0% 271.1Mi ± 0% -2.75% (p=0.000 n=10) StorageFastCommit/1000000-12 2.923Gi ± 0% 2.821Gi ± 0% -3.49% (p=0.000 n=10) geomean 9.101Mi 8.820Mi -3.09% │ before.txt │ after.txt │ │ allocs/op │ allocs/op vs base │ StorageFastCommit/10-12 219.0 ± 0% 205.0 ± 0% -6.39% (p=0.000 n=10) StorageFastCommit/100-12 1.980k ± 0% 1.875k ± 0% -5.30% (p=0.000 n=10) StorageFastCommit/1000-12 19.23k ± 0% 18.23k ± 0% -5.22% (p=0.000 n=10) StorageFastCommit/10000-12 191.1k ± 0% 181.1k ± 0% -5.24% (p=0.000 n=10) StorageFastCommit/100000-12 1.918M ± 0% 1.816M ± 0% -5.30% (p=0.000 n=10) StorageFastCommit/1000000-12 19.15M ± 0% 18.15M ± 0% -5.22% (p=0.000 n=10) geomean 62.31k 58.91k -5.45% --- storage.go | 207 +++++++++++++++++++++++++++++++++++++++++- storage_bench_test.go | 134 +++++++++++++++++++++++++++ storage_test.go | 150 +++++++++++++++++++++++++++--- 3 files changed, 474 insertions(+), 17 deletions(-) create mode 100644 storage_bench_test.go diff --git a/storage.go b/storage.go index 394ecf23..19d0c9c0 100644 --- a/storage.go +++ b/storage.go @@ -777,12 +777,17 @@ func (s *PersistentSlabStorage) sortedOwnedDeltaKeys() []SlabID { } func (s *PersistentSlabStorage) Commit() error { - var err error // this part ensures the keys are sorted so commit operation is deterministic keysWithOwners := s.sortedOwnedDeltaKeys() - for _, id := range keysWithOwners { + return s.commit(keysWithOwners) +} + +func (s *PersistentSlabStorage) commit(keys []SlabID) error { + var err error + + for _, id := range keys { slab := s.deltas[id] // deleted slabs @@ -964,6 +969,204 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { return nil } +// NonderterministicFastCommit commits changes in nondeterministic order. +// This is used by migration program when ordering isn't required. +func (s *PersistentSlabStorage) NonderterministicFastCommit(numWorkers int) error { + // No changes + if len(s.deltas) == 0 { + return nil + } + + type slabToBeEncoded struct { + slabID SlabID + slab Slab + } + + type encodedSlab struct { + slabID SlabID + data []byte + err error + } + + // Define encoder (worker) to encode slabs in parallel + encoder := func( + wg *sync.WaitGroup, + done <-chan struct{}, + jobs <-chan slabToBeEncoded, + results chan<- encodedSlab, + ) { + defer wg.Done() + + for job := range jobs { + // Check if goroutine is signaled to stop before proceeding. 
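+			// Receiving from a closed done channel succeeds immediately, so
+			// once done is closed each worker returns here instead of
+			// taking on another job.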
+ select { + case <-done: + return + default: + } + + id := job.slabID + slab := job.slab + + if slab == nil { + results <- encodedSlab{ + slabID: id, + data: nil, + err: nil, + } + continue + } + + // Serialize + data, err := EncodeSlab(slab, s.cborEncMode) + results <- encodedSlab{ + slabID: id, + data: data, + err: err, + } + } + } + + // Modified slabs need to be encoded (in parallel) and stored in underlying storage. + modifiedSlabCount := 0 + // Deleted slabs need to be removed from underlying storage. + deletedSlabCount := 0 + for k, v := range s.deltas { + // Ignore slabs not owned by accounts + if k.address == AddressUndefined { + continue + } + if v == nil { + deletedSlabCount++ + } else { + modifiedSlabCount++ + } + } + + if modifiedSlabCount == 0 && deletedSlabCount == 0 { + return nil + } + + if modifiedSlabCount < 2 { + // Avoid goroutine overhead + ids := make([]SlabID, 0, modifiedSlabCount+deletedSlabCount) + for k := range s.deltas { + // Ignore slabs not owned by accounts + if k.address == AddressUndefined { + continue + } + ids = append(ids, k) + } + + return s.commit(ids) + } + + if numWorkers > modifiedSlabCount { + numWorkers = modifiedSlabCount + } + + var wg sync.WaitGroup + + // Create done signal channel + done := make(chan struct{}) + + // Create job queue + jobs := make(chan slabToBeEncoded, modifiedSlabCount) + + // Create result queue + results := make(chan encodedSlab, modifiedSlabCount) + + defer func() { + // This ensures that all goroutines are stopped before output channel is closed. + + // Wait for all goroutines to finish + wg.Wait() + + // Close output channel + close(results) + }() + + // Launch workers to encode slabs + wg.Add(numWorkers) + for i := 0; i < numWorkers; i++ { + go encoder(&wg, done, jobs, results) + } + + // Send jobs + deletedSlabIDs := make([]SlabID, 0, deletedSlabCount) + for k, v := range s.deltas { + // ignore the ones that are not owned by accounts + if k.address == AddressUndefined { + continue + } + if v == nil { + deletedSlabIDs = append(deletedSlabIDs, k) + } else { + jobs <- slabToBeEncoded{k, v} + } + } + close(jobs) + + // Remove deleted slabs from underlying storage. + for _, id := range deletedSlabIDs { + + err := s.baseStorage.Remove(id) + if err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Deleted slabs are removed from deltas and added to read cache so that: + // 1. next read is from in-memory read cache + // 2. deleted slabs are not re-committed in next commit + s.cache[id] = nil + delete(s.deltas, id) + } + + // Process encoded slabs + for i := 0; i < modifiedSlabCount; i++ { + result := <-results + + if result.err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // result.err is already categorized by Encode(). + return result.err + } + + id := result.slabID + data := result.data + + if data == nil { + // Closing done channel signals goroutines to stop. + close(done) + // This is unexpected because deleted slabs are processed separately. + return NewEncodingErrorf("unexpectd encoded empty data") + } + + // Store + err := s.baseStorage.Store(id, data) + if err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. 
+ return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + } + + s.cache[id] = s.deltas[id] + // It's safe to remove slab from deltas because + // iteration is on non-temp slabs and temp slabs + // are still in deltas. + delete(s.deltas, id) + } + + // Do NOT reset deltas because slabs with empty address are not saved. + + return nil +} + func (s *PersistentSlabStorage) DropDeltas() { s.deltas = make(map[SlabID]Slab) } diff --git a/storage_bench_test.go b/storage_bench_test.go new file mode 100644 index 00000000..ae76f260 --- /dev/null +++ b/storage_bench_test.go @@ -0,0 +1,134 @@ +/* + * Atree - Scalable Arrays and Ordered Maps + * + * Copyright 2024 Dapper Labs, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package atree + +import ( + "encoding/binary" + "math/rand" + "runtime" + "strconv" + "testing" + + "github.com/fxamacker/cbor/v2" + "github.com/stretchr/testify/require" +) + +func benchmarkFastCommit(b *testing.B, seed int64, numberOfSlabs int) { + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + slabs := make([]Slab, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slabs[i] = generateLargeSlab(id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + + for _, slab := range slabs { + err = storage.Store(slab.SlabID(), slab) + require.NoError(b, err) + } + + b.StartTimer() + + err := storage.FastCommit(runtime.NumCPU()) + require.NoError(b, err) + } + }) +} + +func benchmarkNondeterministicFastCommit(b *testing.B, seed int64, numberOfSlabs int) { + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + slabs := make([]Slab, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slabs[i] = generateLargeSlab(id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + + for _, slab := range slabs { + err = storage.Store(slab.SlabID(), slab) + require.NoError(b, err) + } + + b.StartTimer() + + err := storage.NonderterministicFastCommit(runtime.NumCPU()) + require.NoError(b, err) + } + }) +} + +func BenchmarkStorageFastCommit(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. 
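+	// One fixed seed for every size keeps the generated slabs identical
+	// across runs, so before/after numbers are directly comparable.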
+ + benchmarkFastCommit(b, fixedSeed, 10) + benchmarkFastCommit(b, fixedSeed, 100) + benchmarkFastCommit(b, fixedSeed, 1_000) + benchmarkFastCommit(b, fixedSeed, 10_000) + benchmarkFastCommit(b, fixedSeed, 100_000) + benchmarkFastCommit(b, fixedSeed, 1_000_000) +} + +func BenchmarkStorageNondeterministicFastCommit(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. + + benchmarkNondeterministicFastCommit(b, fixedSeed, 10) + benchmarkNondeterministicFastCommit(b, fixedSeed, 100) + benchmarkNondeterministicFastCommit(b, fixedSeed, 1_000) + benchmarkNondeterministicFastCommit(b, fixedSeed, 10_000) + benchmarkNondeterministicFastCommit(b, fixedSeed, 100_000) + benchmarkNondeterministicFastCommit(b, fixedSeed, 1_000_000) +} diff --git a/storage_test.go b/storage_test.go index 1b407fb4..00756c25 100644 --- a/storage_test.go +++ b/storage_test.go @@ -412,8 +412,8 @@ func TestBasicSlabStorageStore(t *testing.T) { r := newRand(t) address := Address{1} slabs := map[SlabID]Slab{ - {address, SlabIndex{1}}: generateRandomSlab(address, r), - {address, SlabIndex{2}}: generateRandomSlab(address, r), + {address, SlabIndex{1}}: generateRandomSlab(SlabID{address, SlabIndex{1}}, r), + {address, SlabIndex{2}}: generateRandomSlab(SlabID{address, SlabIndex{2}}, r), } // Store values @@ -424,7 +424,7 @@ func TestBasicSlabStorageStore(t *testing.T) { // Overwrite stored values for id := range slabs { - slab := generateRandomSlab(id.address, r) + slab := generateRandomSlab(id, r) slabs[id] = slab err := storage.Store(id, slab) require.NoError(t, err) @@ -446,7 +446,7 @@ func TestBasicSlabStorageRetrieve(t *testing.T) { r := newRand(t) id := SlabID{Address{1}, SlabIndex{1}} - slab := generateRandomSlab(id.address, r) + slab := generateRandomSlab(id, r) // Retrieve value from empty storage retrievedSlab, found, err := storage.Retrieve(id) @@ -476,7 +476,7 @@ func TestBasicSlabStorageRemove(t *testing.T) { r := newRand(t) id := SlabID{Address{1}, SlabIndex{1}} - slab := generateRandomSlab(id.address, r) + slab := generateRandomSlab(id, r) // Remove value from empty storage err := storage.Remove(id) @@ -546,7 +546,7 @@ func TestBasicSlabStorageSlabIDs(t *testing.T) { // Store values for id := range wantIDs { - err := storage.Store(id, generateRandomSlab(id.address, r)) + err := storage.Store(id, generateRandomSlab(id, r)) require.NoError(t, err) } @@ -569,9 +569,9 @@ func TestBasicSlabStorageSlabIterat(t *testing.T) { id3 := SlabID{address: address, index: index.Next()} want := map[SlabID]Slab{ - id1: generateRandomSlab(id1.address, r), - id2: generateRandomSlab(id2.address, r), - id3: generateRandomSlab(id3.address, r), + id1: generateRandomSlab(id1, r), + id2: generateRandomSlab(id2, r), + id3: generateRandomSlab(id3, r), } storage := NewBasicSlabStorage(nil, nil, nil, nil) @@ -642,8 +642,8 @@ func TestPersistentStorage(t *testing.T) { permSlabID, err := NewSlabIDFromRawBytes([]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) require.NoError(t, err) - slab1 := generateRandomSlab(tempSlabID.address, r) - slab2 := generateRandomSlab(permSlabID.address, r) + slab1 := generateRandomSlab(tempSlabID, r) + slab2 := generateRandomSlab(permSlabID, r) // no temp ids should be in the base storage err = storage.Store(tempSlabID, slab1) @@ -724,8 +724,10 @@ func TestPersistentStorage(t *testing.T) { numberOfSlabsPerAccount := 10 r := newRand(t) + baseStorage := newAccessOrderTrackerBaseStorage() storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + 
baseStorage2 := newAccessOrderTrackerBaseStorage() storageWithFastCommit := NewPersistentSlabStorage(baseStorage2, encMode, decMode, nil, nil) @@ -735,16 +737,19 @@ func TestPersistentStorage(t *testing.T) { for i := 0; i < numberOfAccounts; i++ { for j := 0; j < numberOfSlabsPerAccount; j++ { addr := generateRandomAddress(r) - slab := generateRandomSlab(addr, r) - slabSize += uint64(slab.ByteSize()) slabID, err := storage.GenerateSlabID(addr) require.NoError(t, err) + + slab := generateRandomSlab(slabID, r) + slabSize += uint64(slab.ByteSize()) + err = storage.Store(slabID, slab) require.NoError(t, err) slabID2, err := storageWithFastCommit.GenerateSlabID(addr) require.NoError(t, err) + err = storageWithFastCommit.Store(slabID2, slab) require.NoError(t, err) @@ -1042,12 +1047,12 @@ func TestPersistentStorageGenerateSlabID(t *testing.T) { }) } -func generateRandomSlab(address Address, r *rand.Rand) Slab { +func generateRandomSlab(id SlabID, r *rand.Rand) Slab { storable := Uint64Value(r.Uint64()) return &ArrayDataSlab{ header: ArraySlabHeader{ - slabID: NewSlabID(address, SlabIndex{1}), + slabID: id, size: arrayRootDataSlabPrefixSize + storable.ByteSize(), count: 1, }, @@ -1055,6 +1060,28 @@ func generateRandomSlab(address Address, r *rand.Rand) Slab { } } +func generateLargeSlab(id SlabID) Slab { + + const elementCount = 100 + + storables := make([]Storable, elementCount) + size := uint32(0) + for i := 0; i < elementCount; i++ { + storable := Uint64Value(uint64(i)) + size += storable.ByteSize() + storables[i] = storable + } + + return &ArrayDataSlab{ + header: ArraySlabHeader{ + slabID: id, + size: arrayRootDataSlabPrefixSize + size, + count: elementCount, + }, + elements: storables, + } +} + func generateRandomAddress(r *rand.Rand) Address { address := Address{} r.Read(address[:]) @@ -4460,3 +4487,96 @@ func testGetAllChildReferences( require.Equal(t, len(expectedBrokenRefIDs), len(brokenRefs)) require.ElementsMatch(t, expectedBrokenRefIDs, brokenRefs) } + +func TestStorageNondeterministicFastCommit(t *testing.T) { + numberOfAccounts := 10 + + t.Run("small", func(t *testing.T) { + numberOfSlabsPerAccount := 10 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("large", func(t *testing.T) { + numberOfSlabsPerAccount := 1_000 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) +} + +func testStorageNondeterministicFastCommit(t *testing.T, numberOfAccounts int, numberOfSlabsPerAccount int) { + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(t, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(t, err) + + r := newRand(t) + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + + encodedSlabs := make(map[SlabID][]byte) + slabSize := uint64(0) + + // Storage slabs + for i := 0; i < numberOfAccounts; i++ { + + addr := generateRandomAddress(r) + + for j := 0; j < numberOfSlabsPerAccount; j++ { + + slabID, err := storage.GenerateSlabID(addr) + require.NoError(t, err) + + slab := generateRandomSlab(slabID, r) + slabSize += uint64(slab.ByteSize()) + + err = storage.Store(slabID, slab) + require.NoError(t, err) + + // capture data for accuracy testing + encodedSlabs[slabID], err = EncodeSlab(slab, encMode) + require.NoError(t, err) + } + } + + require.Equal(t, uint(len(encodedSlabs)), storage.DeltasWithoutTempAddresses()) + require.Equal(t, slabSize, storage.DeltasSizeWithoutTempAddresses()) + + // Commit deltas + 
err = storage.NonderterministicFastCommit(10) + require.NoError(t, err) + + require.Equal(t, uint(0), storage.DeltasWithoutTempAddresses()) + require.Equal(t, uint64(0), storage.DeltasSizeWithoutTempAddresses()) + require.Equal(t, len(encodedSlabs), storage.Count()) + + // Compare encoded data + for sid, value := range encodedSlabs { + storedValue, found, err := baseStorage.Retrieve(sid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, value, storedValue) + } + + // Remove all slabs from storage + for sid := range encodedSlabs { + err = storage.Remove(sid) + require.NoError(t, err) + require.Equal(t, uint64(0), storage.DeltasSizeWithoutTempAddresses()) + } + + // Commit deltas + err = storage.NonderterministicFastCommit(10) + require.NoError(t, err) + + require.Equal(t, 0, storage.Count()) + require.Equal(t, uint64(0), storage.DeltasSizeWithoutTempAddresses()) + + // Check remove functionality + for sid := range encodedSlabs { + storedValue, found, err := storage.Retrieve(sid) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, storedValue) + } +} From e739c6e4bc4c3bccb279618261daf212fc2a61f8 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 9 May 2024 10:01:55 -0500 Subject: [PATCH 114/126] Add more tests --- storage_test.go | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/storage_test.go b/storage_test.go index 00756c25..12ebdf44 100644 --- a/storage_test.go +++ b/storage_test.go @@ -4489,14 +4489,32 @@ func testGetAllChildReferences( } func TestStorageNondeterministicFastCommit(t *testing.T) { - numberOfAccounts := 10 + t.Run("0 slabs", func(t *testing.T) { + numberOfAccounts := 0 + numberOfSlabsPerAccount := 0 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("1 slabs", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 1 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10 slabs", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 10 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) - t.Run("small", func(t *testing.T) { + t.Run("100 slabs", func(t *testing.T) { + numberOfAccounts := 10 numberOfSlabsPerAccount := 10 testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) }) - t.Run("large", func(t *testing.T) { + t.Run("10_000 slabs", func(t *testing.T) { + numberOfAccounts := 10 numberOfSlabsPerAccount := 1_000 testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) }) From a459d967c7005323020d0df9045ba09661abdf92 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 9 May 2024 08:28:39 -0500 Subject: [PATCH 115/126] Add BatchPreload to decode slabs in parallel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The intended use for BatchPreload is to speedup migrations. BatchPreload decodes slabs in parallel and stores decoded slabs in cache for later retrieval. This is useful for migration program when most or all slabs are expected to be migrated. 
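
For example, a migration pass could warm the cache for a known set of slab IDs up front (a minimal sketch, assuming the github.com/onflow/atree import path; the package and helper names are hypothetical):

    package migration

    import (
        "runtime"

        "github.com/onflow/atree"
    )

    // warmCache decodes the given slabs in parallel and stores them in the
    // read cache, so the migration's later Retrieve calls are cache hits.
    func warmCache(storage *atree.PersistentSlabStorage, ids []atree.SlabID) error {
        return storage.BatchPreload(ids, runtime.NumCPU())
    }

Benchmark results: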
│ before.txt │ after.txt │ │ sec/op │ sec/op vs base │ StorageRetrieve/10-12 36.23µ ± 3% 35.33µ ± 4% ~ (p=0.075 n=10) StorageRetrieve/100-12 469.6µ ± 8% 124.3µ ± 0% -73.52% (p=0.000 n=10) StorageRetrieve/1000-12 6.678m ± 7% 2.303m ± 20% -65.51% (p=0.000 n=10) StorageRetrieve/10000-12 29.81m ± 2% 12.26m ± 5% -58.86% (p=0.000 n=10) StorageRetrieve/100000-12 303.33m ± 1% 88.40m ± 1% -70.86% (p=0.000 n=10) StorageRetrieve/1000000-12 3.442 ± 1% 1.137 ± 3% -66.96% (p=0.000 n=10) geomean 12.34m 4.816m -60.98% │ before.txt │ after.txt │ │ B/op │ B/op vs base │ StorageRetrieve/10-12 21.59Ki ± 0% 21.59Ki ± 0% ~ (p=1.000 n=10) StorageRetrieve/100-12 219.8Ki ± 0% 224.7Ki ± 0% +2.24% (p=0.000 n=10) StorageRetrieve/1000-12 2.266Mi ± 0% 2.272Mi ± 0% +0.27% (p=0.000 n=10) StorageRetrieve/10000-12 21.94Mi ± 0% 22.14Mi ± 0% +0.91% (p=0.000 n=10) StorageRetrieve/100000-12 215.3Mi ± 0% 218.5Mi ± 0% +1.50% (p=0.000 n=10) StorageRetrieve/1000000-12 2.211Gi ± 0% 2.212Gi ± 0% +0.05% (p=0.000 n=10) geomean 6.919Mi 6.976Mi +0.82% │ before.txt │ after.txt │ │ allocs/op │ allocs/op vs base │ StorageRetrieve/10-12 76.00 ± 0% 76.00 ± 0% ~ (p=1.000 n=10) ¹ StorageRetrieve/100-12 745.0 ± 0% 759.0 ± 0% +1.88% (p=0.000 n=10) StorageRetrieve/1000-12 7.161k ± 0% 7.153k ± 0% -0.11% (p=0.000 n=10) StorageRetrieve/10000-12 70.77k ± 0% 70.58k ± 0% -0.27% (p=0.000 n=10) StorageRetrieve/100000-12 711.9k ± 0% 709.7k ± 0% -0.31% (p=0.000 n=10) StorageRetrieve/1000000-12 7.115M ± 0% 7.077M ± 0% -0.54% (p=0.000 n=10) geomean 22.93k 22.95k +0.11% --- storage.go | 148 ++++++++++++++++++++++++++++++++++++ storage_bench_test.go | 119 +++++++++++++++++++++++++++++ storage_test.go | 170 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 437 insertions(+) diff --git a/storage.go b/storage.go index 19d0c9c0..34b37607 100644 --- a/storage.go +++ b/storage.go @@ -1560,3 +1560,151 @@ func (s *PersistentSlabStorage) getAllChildReferences(slab Slab) ( return references, brokenReferences, nil } + +func (s *PersistentSlabStorage) BatchPreload(ids []SlabID, numWorkers int) error { + if len(ids) == 0 { + return nil + } + + minCountForBatchPreload := 11 + if len(ids) < minCountForBatchPreload { + + for _, id := range ids { + // fetch from base storage last + data, ok, err := s.baseStorage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id)) + } + if !ok { + continue + } + + slab, err := DecodeSlab(id, data, s.cborDecMode, s.DecodeStorable, s.DecodeTypeInfo) + if err != nil { + // err is already categorized by DecodeSlab(). + return err + } + + // save decoded slab to cache + s.cache[id] = slab + } + + return nil + } + + type slabToBeDecoded struct { + slabID SlabID + data []byte + } + + type decodedSlab struct { + slabID SlabID + slab Slab + err error + } + + // Define decoder (worker) to decode slabs in parallel + decoder := func(wg *sync.WaitGroup, done <-chan struct{}, jobs <-chan slabToBeDecoded, results chan<- decodedSlab) { + defer wg.Done() + + for slabData := range jobs { + // Check if goroutine is signaled to stop before proceeding. + select { + case <-done: + return + default: + } + + id := slabData.slabID + data := slabData.data + + slab, err := DecodeSlab(id, data, s.cborDecMode, s.DecodeStorable, s.DecodeTypeInfo) + // err is already categorized by DecodeSlab(). 
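+			// Both successes and failures are sent to the consumer, which
+			// aborts the whole batch on the first error it sees.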
+ results <- decodedSlab{ + slabID: id, + slab: slab, + err: err, + } + } + } + + if numWorkers > len(ids) { + numWorkers = len(ids) + } + + var wg sync.WaitGroup + + // Construct done signal channel + done := make(chan struct{}) + + // Construct job queue + jobs := make(chan slabToBeDecoded, len(ids)) + + // Construct result queue + results := make(chan decodedSlab, len(ids)) + + defer func() { + // This ensures that all goroutines are stopped before output channel is closed. + + // Wait for all goroutines to finish + wg.Wait() + + // Close output channel + close(results) + }() + + // Preallocate cache map if empty + if len(s.cache) == 0 { + s.cache = make(map[SlabID]Slab, len(ids)) + } + + // Launch workers + wg.Add(numWorkers) + for i := 0; i < numWorkers; i++ { + go decoder(&wg, done, jobs, results) + } + + // Send jobs + jobCount := 0 + { + // Need to close input channel (jobs) here because + // if there isn't any job in jobs channel, + // done is never processed inside loop "for slabData := range jobs". + defer close(jobs) + + for _, id := range ids { + // fetch from base storage last + data, ok, err := s.baseStorage.Retrieve(id) + if err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id)) + } + if !ok { + continue + } + + jobs <- slabToBeDecoded{id, data} + jobCount++ + } + } + + // Process results + for i := 0; i < jobCount; i++ { + result := <-results + + if result.err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // result.err is already categorized by DecodeSlab(). + return result.err + } + + // save decoded slab to cache + s.cache[result.slabID] = result.slab + } + + return nil +} diff --git a/storage_bench_test.go b/storage_bench_test.go index ae76f260..5fb80f1b 100644 --- a/storage_bench_test.go +++ b/storage_bench_test.go @@ -132,3 +132,122 @@ func BenchmarkStorageNondeterministicFastCommit(b *testing.B) { benchmarkNondeterministicFastCommit(b, fixedSeed, 100_000) benchmarkNondeterministicFastCommit(b, fixedSeed, 1_000_000) } + +func benchmarkRetrieve(b *testing.B, seed int64, numberOfSlabs int) { + + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + encodedSlabs := make(map[SlabID][]byte) + ids := make([]SlabID, 0, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slab := generateLargeSlab(id) + + data, err := EncodeSlab(slab, encMode) + require.NoError(b, err) + + encodedSlabs[id] = data + ids = append(ids, id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + b.StartTimer() + + for _, id := range ids { + _, found, err := storage.Retrieve(id) + require.True(b, found) + require.NoError(b, err) + } + } + }) +} + +func benchmarkBatchPreload(b *testing.B, seed int64, numberOfSlabs int) { + + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := 
cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + encodedSlabs := make(map[SlabID][]byte) + ids := make([]SlabID, 0, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slab := generateLargeSlab(id) + + data, err := EncodeSlab(slab, encMode) + require.NoError(b, err) + + encodedSlabs[id] = data + ids = append(ids, id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + b.StartTimer() + + err = storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(b, err) + + for _, id := range ids { + _, found, err := storage.Retrieve(id) + require.True(b, found) + require.NoError(b, err) + } + } + }) +} + +func BenchmarkStorageRetrieve(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. + + benchmarkRetrieve(b, fixedSeed, 10) + benchmarkRetrieve(b, fixedSeed, 100) + benchmarkRetrieve(b, fixedSeed, 1_000) + benchmarkRetrieve(b, fixedSeed, 10_000) + benchmarkRetrieve(b, fixedSeed, 100_000) + benchmarkRetrieve(b, fixedSeed, 1_000_000) +} + +func BenchmarkStorageBatchPreload(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. + + benchmarkBatchPreload(b, fixedSeed, 10) + benchmarkBatchPreload(b, fixedSeed, 100) + benchmarkBatchPreload(b, fixedSeed, 1_000) + benchmarkBatchPreload(b, fixedSeed, 10_000) + benchmarkBatchPreload(b, fixedSeed, 100_000) + benchmarkBatchPreload(b, fixedSeed, 1_000_000) +} diff --git a/storage_test.go b/storage_test.go index 12ebdf44..a808e470 100644 --- a/storage_test.go +++ b/storage_test.go @@ -19,6 +19,7 @@ package atree import ( + "encoding/binary" "errors" "math/rand" "runtime" @@ -4598,3 +4599,172 @@ func testStorageNondeterministicFastCommit(t *testing.T, numberOfAccounts int, n require.Nil(t, storedValue) } } + +func TestStorageBatchPreload(t *testing.T) { + t.Run("0 slab", func(t *testing.T) { + numberOfAccounts := 0 + numberOfSlabsPerAccount := 0 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("1 slab", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 1 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10 slab", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 10 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("100 slabs", func(t *testing.T) { + numberOfAccounts := 10 + numberOfSlabsPerAccount := 10 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10_000 slabs", func(t *testing.T) { + numberOfAccounts := 10 + numberOfSlabsPerAccount := 1_000 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) +} + +func testStorageBatchPreload(t *testing.T, numberOfAccounts int, numberOfSlabsPerAccount int) { + + indexesByAddress := make(map[Address]uint64) + + generateSlabID := func(address Address) SlabID { + nextIndex := indexesByAddress[address] + 1 + + var idx SlabIndex + binary.BigEndian.PutUint64(idx[:], nextIndex) + + indexesByAddress[address] = nextIndex + + return NewSlabID(address, idx) + } + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(t, err) + + 
decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(t, err) + + r := newRand(t) + + encodedSlabs := make(map[SlabID][]byte) + + // Generate and encode slabs + for i := 0; i < numberOfAccounts; i++ { + + addr := generateRandomAddress(r) + + for j := 0; j < numberOfSlabsPerAccount; j++ { + + slabID := generateSlabID(addr) + + slab := generateRandomSlab(slabID, r) + + encodedSlabs[slabID], err = EncodeSlab(slab, encMode) + require.NoError(t, err) + } + } + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + ids := make([]SlabID, 0, len(encodedSlabs)) + for id := range encodedSlabs { + ids = append(ids, id) + } + + // Batch preload slabs from base storage + err = storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, len(encodedSlabs), len(storage.cache)) + require.Equal(t, 0, len(storage.deltas)) + + // Compare encoded data + for id, data := range encodedSlabs { + cachedData, err := EncodeSlab(storage.cache[id], encMode) + require.NoError(t, err) + + require.Equal(t, cachedData, data) + } +} + +func TestStorageBatchPreloadNotFoundSlabs(t *testing.T) { + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(t, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(t, err) + + r := newRand(t) + + t.Run("empty storage", func(t *testing.T) { + const numberOfSlabs = 10 + + ids := make([]SlabID, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + ids[i] = NewSlabID(generateRandomAddress(r), index) + } + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + err := storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, 0, len(storage.cache)) + require.Equal(t, 0, len(storage.deltas)) + }) + + t.Run("non-empty storage", func(t *testing.T) { + const numberOfSlabs = 10 + + ids := make([]SlabID, numberOfSlabs) + encodedSlabs := make(map[SlabID][]byte) + + for i := 0; i < numberOfSlabs; i++ { + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := NewSlabID(generateRandomAddress(r), index) + + slab := generateRandomSlab(id, r) + + encodedSlabs[id], err = EncodeSlab(slab, encMode) + require.NoError(t, err) + + ids[i] = id + } + + // Append a slab ID that doesn't exist in storage. + ids = append(ids, NewSlabID(generateRandomAddress(r), SlabIndex{numberOfSlabs})) + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + err := storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, len(encodedSlabs), len(storage.cache)) + require.Equal(t, 0, len(storage.deltas)) + + // Compare encoded data + for id, data := range encodedSlabs { + cachedData, err := EncodeSlab(storage.cache[id], encMode) + require.NoError(t, err) + + require.Equal(t, cachedData, data) + } + }) +} From a2a4f5c08636bd27e3c436814dff3c7db1da2db5 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 13 May 2024 14:18:29 -0500 Subject: [PATCH 116/126] Refactor to iterate deltas once in NonderterministicFastCommit This change reduces number of lines in the function but is not expected to yield significant speed improvements. 
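
The single pass fills one slice from both ends: modified slab IDs grow from the front and deleted slab IDs from the back, so a single allocation serves both groups. A standalone sketch of the idea (a string-keyed map stands in for s.deltas, with nil marking a deleted entry):

    // partition splits the keys of items into modified (value != nil) and
    // deleted (value == nil) groups backed by a single slice.
    func partition(items map[string][]byte) (modified, deleted []string) {
        ids := make([]string, len(items))
        modifiedCount, deletedCount := 0, 0
        for k, v := range items {
            if v == nil {
                // Deleted keys are written from the back of the slice.
                ids[len(ids)-1-deletedCount] = k
                deletedCount++
            } else {
                // Modified keys are written from the front.
                ids[modifiedCount] = k
                modifiedCount++
            }
        }
        return ids[:modifiedCount], ids[len(ids)-deletedCount:]
    }

The two regions can never collide because every key is written to exactly one of them.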
--- storage.go | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/storage.go b/storage.go index 19d0c9c0..a82fb03f 100644 --- a/storage.go +++ b/storage.go @@ -1027,6 +1027,12 @@ func (s *PersistentSlabStorage) NonderterministicFastCommit(numWorkers int) erro } } + // slabIDsWithOwner contains slab IDs with owner: + // - modified slab IDs are stored from front to back + // - deleted slab IDs are stored from back to front + // This is to avoid extra allocations. + slabIDsWithOwner := make([]SlabID, len(s.deltas)) + // Modified slabs need to be encoded (in parallel) and stored in underlying storage. modifiedSlabCount := 0 // Deleted slabs need to be removed from underlying storage. @@ -1037,27 +1043,26 @@ func (s *PersistentSlabStorage) NonderterministicFastCommit(numWorkers int) erro continue } if v == nil { + index := len(slabIDsWithOwner) - 1 - deletedSlabCount + slabIDsWithOwner[index] = k deletedSlabCount++ } else { + slabIDsWithOwner[modifiedSlabCount] = k modifiedSlabCount++ } } + modifiedSlabIDs := slabIDsWithOwner[:modifiedSlabCount] + + deletedSlabIDs := slabIDsWithOwner[len(slabIDsWithOwner)-deletedSlabCount:] + if modifiedSlabCount == 0 && deletedSlabCount == 0 { return nil } if modifiedSlabCount < 2 { // Avoid goroutine overhead - ids := make([]SlabID, 0, modifiedSlabCount+deletedSlabCount) - for k := range s.deltas { - // Ignore slabs not owned by accounts - if k.address == AddressUndefined { - continue - } - ids = append(ids, k) - } - + ids := append(modifiedSlabIDs, deletedSlabIDs...) return s.commit(ids) } @@ -1093,17 +1098,8 @@ func (s *PersistentSlabStorage) NonderterministicFastCommit(numWorkers int) erro } // Send jobs - deletedSlabIDs := make([]SlabID, 0, deletedSlabCount) - for k, v := range s.deltas { - // ignore the ones that are not owned by accounts - if k.address == AddressUndefined { - continue - } - if v == nil { - deletedSlabIDs = append(deletedSlabIDs, k) - } else { - jobs <- slabToBeEncoded{k, v} - } + for _, id := range modifiedSlabIDs { + jobs <- slabToBeEncoded{id, s.deltas[id]} } close(jobs) From aa1c121a7695e6efedb3426f1c822ff906f806b6 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 13 May 2024 15:14:19 -0500 Subject: [PATCH 117/126] Lint --- storage.go | 4 ++-- storage_bench_test.go | 2 +- storage_test.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/storage.go b/storage.go index a82fb03f..b4b76d76 100644 --- a/storage.go +++ b/storage.go @@ -969,9 +969,9 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { return nil } -// NonderterministicFastCommit commits changes in nondeterministic order. +// NondeterministicFastCommit commits changes in nondeterministic order. // This is used by migration program when ordering isn't required. 
-func (s *PersistentSlabStorage) NonderterministicFastCommit(numWorkers int) error { +func (s *PersistentSlabStorage) NondeterministicFastCommit(numWorkers int) error { // No changes if len(s.deltas) == 0 { return nil diff --git a/storage_bench_test.go b/storage_bench_test.go index ae76f260..f97a60e0 100644 --- a/storage_bench_test.go +++ b/storage_bench_test.go @@ -105,7 +105,7 @@ func benchmarkNondeterministicFastCommit(b *testing.B, seed int64, numberOfSlabs b.StartTimer() - err := storage.NonderterministicFastCommit(runtime.NumCPU()) + err := storage.NondeterministicFastCommit(runtime.NumCPU()) require.NoError(b, err) } }) diff --git a/storage_test.go b/storage_test.go index 12ebdf44..fcadcfb1 100644 --- a/storage_test.go +++ b/storage_test.go @@ -4561,7 +4561,7 @@ func testStorageNondeterministicFastCommit(t *testing.T, numberOfAccounts int, n require.Equal(t, slabSize, storage.DeltasSizeWithoutTempAddresses()) // Commit deltas - err = storage.NonderterministicFastCommit(10) + err = storage.NondeterministicFastCommit(10) require.NoError(t, err) require.Equal(t, uint(0), storage.DeltasWithoutTempAddresses()) @@ -4584,7 +4584,7 @@ func testStorageNondeterministicFastCommit(t *testing.T, numberOfAccounts int, n } // Commit deltas - err = storage.NonderterministicFastCommit(10) + err = storage.NondeterministicFastCommit(10) require.NoError(t, err) require.Equal(t, 0, storage.Count()) From e83159f855e51fdd7c2442aaa887f11a7e138119 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 13 May 2024 15:16:58 -0500 Subject: [PATCH 118/126] Bump golangci-lint from 1.52.2 to 1.53.3 --- .github/workflows/safer-golangci-lint.yml | 40 +++-------------------- 1 file changed, 4 insertions(+), 36 deletions(-) diff --git a/.github/workflows/safer-golangci-lint.yml b/.github/workflows/safer-golangci-lint.yml index 6722fa80..ec1ececa 100644 --- a/.github/workflows/safer-golangci-lint.yml +++ b/.github/workflows/safer-golangci-lint.yml @@ -4,38 +4,6 @@ # Safer GitHub Actions Workflow for golangci-lint. # https://github.com/x448/safer-golangci-lint # -# safer-golangci-lint.yml -# -# This workflow downloads, verifies, and runs golangci-lint in a -# deterministic, reviewable, and safe manner. -# -# To use: -# Step 1. Copy this file into [your_github_repo]/.github/workflows/ -# Step 2. There's no step 2 if you like the default settings. -# -# See golangci-lint docs for more info at -# https://github.com/golangci/golangci-lint -# -# 100% of the script for downloading, installing, and running golangci-lint -# is embedded in this file. The embedded SHA-256 digest is used to verify the -# downloaded golangci-lint tarball (golangci-lint-1.xx.x-linux-amd64.tar.gz). -# -# The embedded SHA-256 digest matches golangci-lint-1.xx.x-checksums.txt at -# https://github.com/golangci/golangci-lint/releases -# -# To use a newer version of golangci-lint, change these values: -# 1. GOLINTERS_VERSION -# 2. GOLINTERS_TGZ_DGST -# -# Release v1.52.2 (May 14, 2023) -# - Bump Go to 1.20 -# - Bump actions/setup-go to v4 -# - Bump golangci-lint to 1.52.2 -# - Hash of golangci-lint-1.52.2-linux-amd64.tar.gz -# - SHA-256: c9cf72d12058a131746edd409ed94ccd578fbd178899d1ed41ceae3ce5f54501 -# This SHA-256 digest matches golangci-lint-1.52.2-checksums.txt at -# https://github.com/golangci/golangci-lint/releases -# name: linters # Remove default permissions and grant only what is required in each job. 
@@ -49,9 +17,9 @@ on: env: GO_VERSION: '1.20' - GOLINTERS_VERSION: 1.52.2 + GOLINTERS_VERSION: 1.53.3 GOLINTERS_ARCH: linux-amd64 - GOLINTERS_TGZ_DGST: c9cf72d12058a131746edd409ed94ccd578fbd178899d1ed41ceae3ce5f54501 + GOLINTERS_TGZ_DGST: 4f62007ca96372ccba54760e2ed39c2446b40ec24d9a90c21aad9f2fdf6cf0da GOLINTERS_TIMEOUT: 15m OPENSSL_DGST_CMD: openssl dgst -sha256 -r CURL_CMD: curl --proto =https --tlsv1.2 --location --silent --show-error --fail @@ -64,12 +32,12 @@ jobs: contents: read steps: - name: Checkout source - uses: actions/checkout@v3 + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: fetch-depth: 1 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version: ${{ env.GO_VERSION }} check-latest: true From 5808810b6a39852840d1aaa5b55c697c4281e5f4 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 13 May 2024 15:33:12 -0500 Subject: [PATCH 119/126] Lint --- storage.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/storage.go b/storage.go index b4b76d76..909beb90 100644 --- a/storage.go +++ b/storage.go @@ -1061,8 +1061,10 @@ func (s *PersistentSlabStorage) NondeterministicFastCommit(numWorkers int) error } if modifiedSlabCount < 2 { - // Avoid goroutine overhead - ids := append(modifiedSlabIDs, deletedSlabIDs...) + // Avoid goroutine overhead. + // Return after committing modified and deleted slabs. + ids := modifiedSlabIDs + ids = append(ids, deletedSlabIDs...) return s.commit(ids) } From aa2ee903d629a0347d2cd418a857d14d09f8d667 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 13 May 2024 14:58:30 -0500 Subject: [PATCH 120/126] Add more comments in BatchPreload --- storage.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/storage.go b/storage.go index 34b37607..045e5683 100644 --- a/storage.go +++ b/storage.go @@ -1561,12 +1561,16 @@ func (s *PersistentSlabStorage) getAllChildReferences(slab Slab) ( return references, brokenReferences, nil } +// BatchPreload decodeds and caches slabs of given ids in parallel. +// This is useful for storage health or data validation in migration programs. func (s *PersistentSlabStorage) BatchPreload(ids []SlabID, numWorkers int) error { if len(ids) == 0 { return nil } - minCountForBatchPreload := 11 + // Use 11 for min slab count for parallel decoding because micro benchmarks showed + // performance regression for <= 10 slabs when decoding slabs in parallel. + const minCountForBatchPreload = 11 if len(ids) < minCountForBatchPreload { for _, id := range ids { From 81b6dcdd24950438cee0ddae0bdd768877edc13d Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 14 May 2024 07:12:39 -0500 Subject: [PATCH 121/126] Revert "Bump golangci-lint from 1.52.2 to 1.53.3" This reverts commit e83159f855e51fdd7c2442aaa887f11a7e138119. --- .github/workflows/safer-golangci-lint.yml | 40 ++++++++++++++++++++--- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/.github/workflows/safer-golangci-lint.yml b/.github/workflows/safer-golangci-lint.yml index ec1ececa..6722fa80 100644 --- a/.github/workflows/safer-golangci-lint.yml +++ b/.github/workflows/safer-golangci-lint.yml @@ -4,6 +4,38 @@ # Safer GitHub Actions Workflow for golangci-lint. 
# https://github.com/x448/safer-golangci-lint # +# safer-golangci-lint.yml +# +# This workflow downloads, verifies, and runs golangci-lint in a +# deterministic, reviewable, and safe manner. +# +# To use: +# Step 1. Copy this file into [your_github_repo]/.github/workflows/ +# Step 2. There's no step 2 if you like the default settings. +# +# See golangci-lint docs for more info at +# https://github.com/golangci/golangci-lint +# +# 100% of the script for downloading, installing, and running golangci-lint +# is embedded in this file. The embedded SHA-256 digest is used to verify the +# downloaded golangci-lint tarball (golangci-lint-1.xx.x-linux-amd64.tar.gz). +# +# The embedded SHA-256 digest matches golangci-lint-1.xx.x-checksums.txt at +# https://github.com/golangci/golangci-lint/releases +# +# To use a newer version of golangci-lint, change these values: +# 1. GOLINTERS_VERSION +# 2. GOLINTERS_TGZ_DGST +# +# Release v1.52.2 (May 14, 2023) +# - Bump Go to 1.20 +# - Bump actions/setup-go to v4 +# - Bump golangci-lint to 1.52.2 +# - Hash of golangci-lint-1.52.2-linux-amd64.tar.gz +# - SHA-256: c9cf72d12058a131746edd409ed94ccd578fbd178899d1ed41ceae3ce5f54501 +# This SHA-256 digest matches golangci-lint-1.52.2-checksums.txt at +# https://github.com/golangci/golangci-lint/releases +# name: linters # Remove default permissions and grant only what is required in each job. @@ -17,9 +49,9 @@ on: env: GO_VERSION: '1.20' - GOLINTERS_VERSION: 1.53.3 + GOLINTERS_VERSION: 1.52.2 GOLINTERS_ARCH: linux-amd64 - GOLINTERS_TGZ_DGST: 4f62007ca96372ccba54760e2ed39c2446b40ec24d9a90c21aad9f2fdf6cf0da + GOLINTERS_TGZ_DGST: c9cf72d12058a131746edd409ed94ccd578fbd178899d1ed41ceae3ce5f54501 GOLINTERS_TIMEOUT: 15m OPENSSL_DGST_CMD: openssl dgst -sha256 -r CURL_CMD: curl --proto =https --tlsv1.2 --location --silent --show-error --fail @@ -32,12 +64,12 @@ jobs: contents: read steps: - name: Checkout source - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@v3 with: fetch-depth: 1 - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} check-latest: true From 88fa22fd1a11905d697d8acd6733a9cdc71b5024 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 14 May 2024 07:20:30 -0500 Subject: [PATCH 122/126] Improve code readability by adding comments, etc. --- storage.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/storage.go b/storage.go index 909beb90..8c401610 100644 --- a/storage.go +++ b/storage.go @@ -1037,17 +1037,19 @@ func (s *PersistentSlabStorage) NondeterministicFastCommit(numWorkers int) error modifiedSlabCount := 0 // Deleted slabs need to be removed from underlying storage. deletedSlabCount := 0 - for k, v := range s.deltas { + for id, slab := range s.deltas { // Ignore slabs not owned by accounts - if k.address == AddressUndefined { + if id.address == AddressUndefined { continue } - if v == nil { + if slab == nil { + // Set deleted slab ID from the end of slabIDsWithOwner. index := len(slabIDsWithOwner) - 1 - deletedSlabCount - slabIDsWithOwner[index] = k + slabIDsWithOwner[index] = id deletedSlabCount++ } else { - slabIDsWithOwner[modifiedSlabCount] = k + // Set modified slab ID from the start of slabIDsWithOwner. 
+ slabIDsWithOwner[modifiedSlabCount] = id modifiedSlabCount++ } } From 0d63aaf0e9ea7a0b471e5a3e0be16b559fb5c42f Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 21 May 2024 10:38:38 -0500 Subject: [PATCH 123/126] Check mutation of elements from readonly map iterator This commit returns ReadOnlyIteratorElementMutationError when elements of readonly map iterator are mutated. Also, a callback can be provided by the caller to log or debug such mutations with more context. As always, mutation of elements from readonly iterators are not guaranteed to persist. Instead of relying on other projects not to mutate readonly elements, this commit returns ReadOnlyIteratorElementMutationError when elements from readonly iterators are mutated. This commit also adds readonly iterator functions that receive mutation callbacks. Callbacks are useful for logging, etc. with more context when mutation occurs. Mutation handling is the same with or without callbacks. If needed, other projects using atree can choose to panic in the callback when mutation is detected. If elements from readonly iterators are mutated: - those changes are not guaranteed to persist. - mutation functions of child containers return ReadOnlyIteratorElementMutationError. - ReadOnlyMapIteratorMutationCallback are called if provided --- errors.go | 18 + map.go | 165 ++++++++- map_test.go | 881 +++++++++++++++++++++++++++++++++++++++++++++++ storable_test.go | 58 ++++ 4 files changed, 1111 insertions(+), 11 deletions(-) diff --git a/errors.go b/errors.go index 80c51359..6ea2298a 100644 --- a/errors.go +++ b/errors.go @@ -456,6 +456,24 @@ func (e *MapElementCountError) Error() string { return e.msg } +// ReadOnlyIteratorElementMutationError is the error returned when readonly iterator element is mutated. +type ReadOnlyIteratorElementMutationError struct { + containerValueID ValueID + elementValueID ValueID +} + +// NewReadOnlyIteratorElementMutationError creates ReadOnlyIteratorElementMutationError. +func NewReadOnlyIteratorElementMutationError(containerValueID, elementValueID ValueID) error { + return NewFatalError(&ReadOnlyIteratorElementMutationError{ + containerValueID: containerValueID, + elementValueID: elementValueID, + }) +} + +func (e *ReadOnlyIteratorElementMutationError) Error() string { + return fmt.Sprintf("element (%s) cannot be mutated because it is from readonly iterator of container (%s)", e.elementValueID, e.containerValueID) +} + func wrapErrorAsExternalErrorIfNeeded(err error) error { return wrapErrorfAsExternalErrorIfNeeded(err, "") } diff --git a/map.go b/map.go index b89474cd..e4531e10 100644 --- a/map.go +++ b/map.go @@ -5746,14 +5746,37 @@ func (i *mutableMapIterator) NextValue() (Value, error) { return v, nil } +type ReadOnlyMapIteratorMutationCallback func(mutatedValue Value) + type readOnlyMapIterator struct { - m *OrderedMap - nextDataSlabID SlabID - elemIterator *mapElementIterator + m *OrderedMap + nextDataSlabID SlabID + elemIterator *mapElementIterator + keyMutationCallback ReadOnlyMapIteratorMutationCallback + valueMutationCallback ReadOnlyMapIteratorMutationCallback } +// defaultReadOnlyMapIteratorMutatinCallback is no-op. 
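+// It substitutes for nil callbacks so iterator code can invoke callbacks unconditionally.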
+var defaultReadOnlyMapIteratorMutatinCallback ReadOnlyMapIteratorMutationCallback = func(Value) {} + var _ MapIterator = &readOnlyMapIterator{} +func (i *readOnlyMapIterator) setMutationCallback(key, value Value) { + if k, ok := key.(mutableValueNotifier); ok { + k.setParentUpdater(func() (found bool, err error) { + i.keyMutationCallback(key) + return true, NewReadOnlyIteratorElementMutationError(i.m.ValueID(), k.ValueID()) + }) + } + + if v, ok := value.(mutableValueNotifier); ok { + v.setParentUpdater(func() (found bool, err error) { + i.valueMutationCallback(value) + return true, NewReadOnlyIteratorElementMutationError(i.m.ValueID(), v.ValueID()) + }) + } +} + func (i *readOnlyMapIterator) Next() (key Value, value Value, err error) { if i.elemIterator == nil { if i.nextDataSlabID == SlabIDUndefined { @@ -5786,6 +5809,8 @@ func (i *readOnlyMapIterator) Next() (key Value, value Value, err error) { return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } + i.setMutationCallback(key, value) + return key, value, nil } @@ -5821,6 +5846,8 @@ func (i *readOnlyMapIterator) NextKey() (key Value, err error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value") } + i.setMutationCallback(key, nil) + return key, nil } @@ -5856,6 +5883,8 @@ func (i *readOnlyMapIterator) NextValue() (value Value, err error) { return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } + i.setMutationCallback(nil, value) + return value, nil } @@ -5932,9 +5961,31 @@ func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) } // ReadOnlyIterator returns readonly iterator for map elements. -// If elements are mutated, those changes are not guaranteed to persist. -// NOTE: Use readonly iterator if mutation is not needed for better performance. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use ReadOnlyIteratorWithMutationCallback(). func (m *OrderedMap) ReadOnlyIterator() (MapIterator, error) { + return m.ReadOnlyIteratorWithMutationCallback(nil, nil) +} + +// ReadOnlyIteratorWithMutationCallback returns readonly iterator for map elements. +// keyMutatinCallback and valueMutationCallback are useful for logging, etc. with +// more context when mutation occurs. Mutation handling here is the same with or +// without these callbacks. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - keyMutatinCallback and valueMutationCallback are called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use ReadOnlyIterator(). 
+func (m *OrderedMap) ReadOnlyIteratorWithMutationCallback( + keyMutatinCallback ReadOnlyMapIteratorMutationCallback, + valueMutationCallback ReadOnlyMapIteratorMutationCallback, +) (MapIterator, error) { if m.Count() == 0 { return emptyReadOnlyMapIterator, nil } @@ -5945,6 +5996,14 @@ func (m *OrderedMap) ReadOnlyIterator() (MapIterator, error) { return nil, err } + if keyMutatinCallback == nil { + keyMutatinCallback = defaultReadOnlyMapIteratorMutatinCallback + } + + if valueMutationCallback == nil { + valueMutationCallback = defaultReadOnlyMapIteratorMutatinCallback + } + return &readOnlyMapIterator{ m: m, nextDataSlabID: dataSlab.next, @@ -5952,6 +6011,8 @@ func (m *OrderedMap) ReadOnlyIterator() (MapIterator, error) { storage: m.Storage, elements: dataSlab.elements, }, + keyMutationCallback: keyMutatinCallback, + valueMutationCallback: valueMutationCallback, }, nil } @@ -5987,8 +6048,36 @@ func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, return iterateMap(iterator, fn) } -func (m *OrderedMap) IterateReadOnly(fn MapEntryIterationFunc) error { - iterator, err := m.ReadOnlyIterator() +// IterateReadOnly iterates readonly map elements. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyWithMutationCallback(). +func (m *OrderedMap) IterateReadOnly( + fn MapEntryIterationFunc, +) error { + return m.IterateReadOnlyWithMutationCallback(fn, nil, nil) +} + +// IterateReadOnlyWithMutationCallback iterates readonly map elements. +// keyMutatinCallback and valueMutationCallback are useful for logging, etc. with +// more context when mutation occurs. Mutation handling here is the same with or +// without these callbacks. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - keyMutatinCallback/valueMutationCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use IterateReadOnly(). +func (m *OrderedMap) IterateReadOnlyWithMutationCallback( + fn MapEntryIterationFunc, + keyMutatinCallback ReadOnlyMapIteratorMutationCallback, + valueMutationCallback ReadOnlyMapIteratorMutationCallback, +) error { + iterator, err := m.ReadOnlyIteratorWithMutationCallback(keyMutatinCallback, valueMutationCallback) if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). return err @@ -6028,8 +6117,35 @@ func (m *OrderedMap) IterateKeys(comparator ValueComparator, hip HashInputProvid return iterateMapKeys(iterator, fn) } -func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error { - iterator, err := m.ReadOnlyIterator() +// IterateReadOnlyKeys iterates readonly map keys. +// If keys are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of key containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyKeysWithMutationCallback(). 
+func (m *OrderedMap) IterateReadOnlyKeys(
+	fn MapElementIterationFunc,
+) error {
+	return m.IterateReadOnlyKeysWithMutationCallback(fn, nil)
+}
+
+// IterateReadOnlyKeysWithMutationCallback iterates readonly map keys.
+// keyMutationCallback is useful for logging, etc. with more context
+// when mutation occurs. Mutation handling here is the same with or
+// without this callback.
+// If keys are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of key containers return ReadOnlyIteratorElementMutationError.
+// - keyMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use IterateReadOnlyKeys().
+func (m *OrderedMap) IterateReadOnlyKeysWithMutationCallback(
+	fn MapElementIterationFunc,
+	keyMutationCallback ReadOnlyMapIteratorMutationCallback,
+) error {
+	iterator, err := m.ReadOnlyIteratorWithMutationCallback(keyMutationCallback, nil)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator().
 		return err
@@ -6069,8 +6185,35 @@ func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProv
 	return iterateMapValues(iterator, fn)
 }
 
-func (m *OrderedMap) IterateReadOnlyValues(fn MapElementIterationFunc) error {
-	iterator, err := m.ReadOnlyIterator()
+// IterateReadOnlyValues iterates readonly map values.
+// If values are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyValuesWithMutationCallback().
+func (m *OrderedMap) IterateReadOnlyValues(
+	fn MapElementIterationFunc,
+) error {
+	return m.IterateReadOnlyValuesWithMutationCallback(fn, nil)
+}
+
+// IterateReadOnlyValuesWithMutationCallback iterates readonly map values.
+// valueMutationCallback is useful for logging, etc. with more context
+// when mutation occurs. Mutation handling here is the same with or
+// without this callback.
+// If values are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// - valueMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use IterateReadOnlyValues().
+func (m *OrderedMap) IterateReadOnlyValuesWithMutationCallback(
+	fn MapElementIterationFunc,
+	valueMutationCallback ReadOnlyMapIteratorMutationCallback,
+) error {
+	iterator, err := m.ReadOnlyIteratorWithMutationCallback(nil, valueMutationCallback)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator().
return err diff --git a/map_test.go b/map_test.go index 344ccdd2..f5361205 100644 --- a/map_test.go +++ b/map_test.go @@ -1306,6 +1306,887 @@ func TestReadOnlyMapIterate(t *testing.T) { }) } +func TestMutateElementFromReadOnlyMapIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + var mutationError *ReadOnlyIteratorElementMutationError + + t.Run("mutate inlined map key from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) + + // parent map {{}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMapKey.Inlined()) + + // Iterate elements and modify key + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), k.(mutableValueNotifier).ValueID()) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map value from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) + + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) + + // Iterate elements and modify value + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map key from IterateReadOnlyKeys", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map key {} + childMapKey, err := 
NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) + + // parent map {{}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMapKey.Inlined()) + + // Iterate and modify key + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) + + t.Run("mutate inlined map value from IterateReadOnlyValues", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) + + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) + + // Iterate and modify value + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(1), Uint64Value(1)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map key from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) + + // Inserting elements into childMapKey so it can't be inlined + const size = 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMapKey.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // parent map {{...}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.False(t, childMapKey.Inlined()) + + // Iterate elements and modify key + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, 
hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), k.(mutableValueNotifier).ValueID()) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map value from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) + + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) + + // Inserting elements into childMap until it is no longer inlined + for i := 0; childMap.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate elements and modify value + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map key from IterateReadOnlyKeys", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) + + // Inserting elements into childMap so it can't be inlined. 
+ const size = 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMapKey.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // parent map {{...}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.False(t, childMapKey.Inlined()) + + // Iterate and modify key + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) + + t.Run("mutate not inlined map value from IterateReadOnlyValues", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) + + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) + + // Inserting elements into childMap until it is no longer inlined + for i := 0; childMap.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate and modify value + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map key in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {{}:0, {}:1} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) + + digests := []Digest{Digest(0)} + 
digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate element and modify key + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + vid := k.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map value in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate elements and modify values + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(1), Uint64Value(1)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map key in collision from IterateReadOnlyKeys", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {{}: 0, {}: 1} 
with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate and modify keys + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) + + t.Run("mutate inlined map value in collision from IterateReadOnlyValues", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate and modify values + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) + + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map key in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + const size = 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMapKey1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + 
require.NoError(t, err) + + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMapKey2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate elements and modify keys + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + vid := k.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map value in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + for i := 0; childMap1.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMap1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + for i := 0; childMap2.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMap2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate elements and modify values + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, 
existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map key in collision from IterateReadOnlyKeys", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + size := 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMapKey1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMapKey2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. 
+ digesterBuilder.On("Digest", m).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate and modify keys + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) + + t.Run("mutate not inlined map value in collision from IterateReadOnlyValues", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + for i := 0; childMap1.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMap1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + for i := 0; childMap2.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMap2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Iterate and modify values + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) +} + func TestMutableMapIterate(t *testing.T) { t.Run("empty", func(t *testing.T) { diff --git a/storable_test.go b/storable_test.go index 4f705ab6..912207dc 100644 --- a/storable_test.go +++ b/storable_test.go @@ -34,6 +34,7 @@ const ( cborTagUInt32Value = 163 cborTagUInt64Value = 164 cborTagSomeValue = 165 + cborTagHashableMap = 166 ) type HashableValue interface { @@ -634,6 +635,19 @@ func compare(storage 
SlabStorage, value Value, storable Storable) (bool, error) } return compare(storage, v.Value, other.Storable) + + case *HashableMap: + other, err := storable.StoredValue(storage) + if err != nil { + return false, err + } + + otherMap, ok := other.(*OrderedMap) + if !ok { + return false, nil + } + + return v.m.ValueID() == otherMap.ValueID(), nil } return false, fmt.Errorf("value %T not supported for comparison", value) @@ -784,3 +798,47 @@ func (*mutableStorable) Encode(*Encoder) error { // no-op for testing return nil } + +type HashableMap struct { + m *OrderedMap +} + +var _ Value = &HashableMap{} +var _ HashableValue = &HashableMap{} + +func NewHashableMap(m *OrderedMap) *HashableMap { + return &HashableMap{m} +} + +func (v *HashableMap) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { + return v.m.Storable(storage, address, maxInlineSize) +} + +func (v *HashableMap) HashInput(scratch []byte) ([]byte, error) { + const ( + cborTypeByteString = 0x40 + + valueIDLength = len(ValueID{}) + cborTagNumSize = 2 + cborByteStringHeadSize = 1 + cborByteStringSize = valueIDLength + hashInputSize = cborTagNumSize + cborByteStringHeadSize + cborByteStringSize + ) + + var buf []byte + if len(scratch) >= hashInputSize { + buf = scratch[:hashInputSize] + } else { + buf = make([]byte, hashInputSize) + } + + // CBOR tag number + buf[0], buf[1] = 0xd8, cborTagHashableMap + + // CBOR byte string head + buf[2] = cborTypeByteString | byte(valueIDLength) + + vid := v.m.ValueID() + copy(buf[3:], vid[:]) + return buf, nil +} From 019775e608797948ca4281bb9876cd9bf9d25d4b Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 21 May 2024 14:48:08 -0500 Subject: [PATCH 124/126] Check mutation of elements from readonly array iterator This commit returns ReadOnlyIteratorElementMutationError when elements of readonly array iterator are mutated. Also, a callback can be provided by the caller to log or debug such mutations with more context. As always, mutation of elements from readonly iterators are not guaranteed to persist. Instead of relying on other projects not to mutate readonly elements, this commit returns ReadOnlyIteratorElementMutationError when elements from readonly iterators are mutated. This commit also adds readonly iterator functions that receive mutation callbacks. Callbacks are useful for logging, etc. with more context when mutation occurs. Mutation handling is the same with or without callbacks. If needed, other projects using atree can choose to panic in the callback when mutation is detected. If elements from readonly iterators are mutated: - those changes are not guaranteed to persist. - mutation functions of child containers return ReadOnlyIteratorElementMutationError. 
- ReadOnlyMapIteratorMutationCallback is called if provided
---
 array.go      | 168 +++++++++++++++++++++++++++++++++++++++++++-----
 array_test.go | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 323 insertions(+), 17 deletions(-)

diff --git a/array.go b/array.go
index ab2f6514..4ed70c97 100644
--- a/array.go
+++ b/array.go
@@ -3345,15 +3345,30 @@ func (i *mutableArrayIterator) Next() (Value, error) {
 	return v, nil
 }
 
+type ReadOnlyArrayIteratorMutationCallback func(mutatedValue Value)
+
 type readOnlyArrayIterator struct {
-	array           *Array
-	dataSlab        *ArrayDataSlab
-	indexInDataSlab uint64
-	remainingCount  uint64 // needed for range iteration
+	array                 *Array
+	dataSlab              *ArrayDataSlab
+	indexInDataSlab       uint64
+	remainingCount        uint64 // needed for range iteration
+	valueMutationCallback ReadOnlyArrayIteratorMutationCallback
 }
 
+// defaultReadOnlyArrayIteratorMutationCallback is no-op.
+var defaultReadOnlyArrayIteratorMutationCallback ReadOnlyArrayIteratorMutationCallback = func(Value) {}
+
 var _ ArrayIterator = &readOnlyArrayIterator{}
 
+func (i *readOnlyArrayIterator) setMutationCallback(value Value) {
+	if v, ok := value.(mutableValueNotifier); ok {
+		v.setParentUpdater(func() (found bool, err error) {
+			i.valueMutationCallback(value)
+			return true, NewReadOnlyIteratorElementMutationError(i.array.ValueID(), v.ValueID())
+		})
+	}
+}
+
 func (i *readOnlyArrayIterator) CanMutate() bool {
 	return false
 }
@@ -3405,6 +3420,8 @@ func (i *readOnlyArrayIterator) Next() (Value, error) {
 	i.indexInDataSlab++
 	i.remainingCount--
 
+	i.setMutationCallback(element)
+
 	return element, nil
 }
 
@@ -3428,9 +3445,29 @@ func (a *Array) Iterator() (ArrayIterator, error) {
 }
 
 // ReadOnlyIterator returns readonly iterator for array elements.
-// If elements are mutated, those changes are not guaranteed to persist.
-// NOTE: Use readonly iterator if mutation is not needed for better performance.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use ReadOnlyIteratorWithMutationCallback().
 func (a *Array) ReadOnlyIterator() (ArrayIterator, error) {
+	return a.ReadOnlyIteratorWithMutationCallback(nil)
+}
+
+// ReadOnlyIteratorWithMutationCallback returns readonly iterator for array elements.
+// valueMutationCallback is useful for logging, etc. with more context when mutation
+// occurs. Mutation handling here is the same with or without callback.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// - valueMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use ReadOnlyIterator().
+func (a *Array) ReadOnlyIteratorWithMutationCallback(
+	valueMutationCallback ReadOnlyArrayIteratorMutationCallback,
+) (ArrayIterator, error) {
 	if a.Count() == 0 {
 		return emptyReadOnlyArrayIterator, nil
 	}
@@ -3441,10 +3478,15 @@ func (a *Array) ReadOnlyIterator() (ArrayIterator, error) {
 		return nil, err
 	}
 
+	if valueMutationCallback == nil {
+		valueMutationCallback = defaultReadOnlyArrayIteratorMutationCallback
+	}
+
 	return &readOnlyArrayIterator{
-		array:          a,
-		dataSlab:       slab,
-		remainingCount: a.Count(),
+		array:                 a,
+		dataSlab:              slab,
+		remainingCount:        a.Count(),
+		valueMutationCallback: valueMutationCallback,
 	}, nil
 }
 
@@ -3470,7 +3512,37 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (ArrayIterator
 	}, nil
 }
 
-func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (ArrayIterator, error) {
+// ReadOnlyRangeIterator iterates readonly array elements from
+// specified startIndex to endIndex.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use ReadOnlyRangeIteratorWithMutationCallback().
+func (a *Array) ReadOnlyRangeIterator(
+	startIndex uint64,
+	endIndex uint64,
+) (ArrayIterator, error) {
+	return a.ReadOnlyRangeIteratorWithMutationCallback(startIndex, endIndex, nil)
+}
+
+// ReadOnlyRangeIteratorWithMutationCallback iterates readonly array elements
+// from specified startIndex to endIndex.
+// valueMutationCallback is useful for logging, etc. with more context when
+// mutation occurs. Mutation handling here is the same with or without callback.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// - valueMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use ReadOnlyRangeIterator().
+func (a *Array) ReadOnlyRangeIteratorWithMutationCallback(
+	startIndex uint64,
+	endIndex uint64,
+	valueMutationCallback ReadOnlyArrayIteratorMutationCallback,
+) (ArrayIterator, error) {
 	count := a.Count()
 
 	if startIndex > count || endIndex > count {
@@ -3511,11 +3583,16 @@ func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (Array
 		}
 	}
 
+	if valueMutationCallback == nil {
+		valueMutationCallback = defaultReadOnlyArrayIteratorMutationCallback
+	}
+
 	return &readOnlyArrayIterator{
-		array:           a,
-		dataSlab:        dataSlab,
-		indexInDataSlab: index,
-		remainingCount:  numberOfElements,
+		array:                 a,
+		dataSlab:              dataSlab,
+		indexInDataSlab:       index,
+		remainingCount:        numberOfElements,
+		valueMutationCallback: valueMutationCallback,
 	}, nil
 }
 
@@ -3551,8 +3628,33 @@ func (a *Array) Iterate(fn ArrayIterationFunc) error {
 	return iterateArray(iterator, fn)
 }
 
+// IterateReadOnly iterates readonly array elements.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyWithMutationCallback().
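For illustration (not part of the patch): a hedged sketch of the array-side API whose definitions follow. The panicking callback mirrors the commit message's note that callers may choose to fail fast on detected mutation; `parentArray` and the callback bodies are illustrative.

	err := parentArray.IterateReadOnlyWithMutationCallback(
		func(v Value) (resume bool, err error) {
			// Read-only visit; mutating a child array/map here returns
			// ReadOnlyIteratorElementMutationError.
			return true, nil
		},
		func(v Value) {
			// Optional: treat any mutation through a read-only iterator as a bug.
			panic(fmt.Sprintf("unexpected mutation of read-only element: %v", v))
		},
	)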
 func (a *Array) IterateReadOnly(fn ArrayIterationFunc) error {
-	iterator, err := a.ReadOnlyIterator()
+	return a.IterateReadOnlyWithMutationCallback(fn, nil)
+}
+
+// IterateReadOnlyWithMutationCallback iterates readonly array elements.
+// valueMutationCallback is useful for logging, etc. with more context
+// when mutation occurs. Mutation handling here is the same with or
+// without this callback.
+// If values are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// - valueMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use IterateReadOnly().
+func (a *Array) IterateReadOnlyWithMutationCallback(
+	fn ArrayIterationFunc,
+	valueMutationCallback ReadOnlyArrayIteratorMutationCallback,
+) error {
+	iterator, err := a.ReadOnlyIteratorWithMutationCallback(valueMutationCallback)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyIterator().
 		return err
@@ -3569,8 +3671,40 @@ func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterati
 	return iterateArray(iterator, fn)
 }
 
-func (a *Array) IterateReadOnlyRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error {
-	iterator, err := a.ReadOnlyRangeIterator(startIndex, endIndex)
+// IterateReadOnlyRange iterates readonly array elements from specified startIndex to endIndex.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyRangeWithMutationCallback().
+func (a *Array) IterateReadOnlyRange(
+	startIndex uint64,
+	endIndex uint64,
+	fn ArrayIterationFunc,
+) error {
+	return a.IterateReadOnlyRangeWithMutationCallback(startIndex, endIndex, fn, nil)
+}
+
+// IterateReadOnlyRangeWithMutationCallback iterates readonly array elements
+// from specified startIndex to endIndex.
+// valueMutationCallback is useful for logging, etc. with more context
+// when mutation occurs. Mutation handling here is the same with or
+// without this callback.
+// If values are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// - valueMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use IterateReadOnlyRange().
+func (a *Array) IterateReadOnlyRangeWithMutationCallback(
+	startIndex uint64,
+	endIndex uint64,
+	fn ArrayIterationFunc,
+	valueMutationCallback ReadOnlyArrayIteratorMutationCallback,
+) error {
+	iterator, err := a.ReadOnlyRangeIteratorWithMutationCallback(startIndex, endIndex, valueMutationCallback)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyRangeIterator().
return err diff --git a/array_test.go b/array_test.go index 41830fdb..4225e9f1 100644 --- a/array_test.go +++ b/array_test.go @@ -909,6 +909,178 @@ func TestReadOnlyArrayIterate(t *testing.T) { }) } +func TestMutateElementFromReadOnlyArrayIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + var mutationError *ReadOnlyIteratorElementMutationError + + t.Run("mutate inlined element from IterateReadOnly", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.True(t, c.Inlined()) + + err = c.Append(Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined element from IterateReadOnlyRange", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyRangeWithMutationCallback( + 0, + parentArray.Count(), + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.True(t, c.Inlined()) + + err = c.Append(Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined array element from IterateReadOnly", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Inserting elements into childArray so it can't be inlined + for i := 0; childArray.Inlined(); i++ { + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + } + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingStorable, err := c.Remove(0) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, 
&mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined array element from IterateReadOnlyRange", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Inserting elements into childArray so it can't be inlined + for i := 0; childArray.Inlined(); i++ { + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + } + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyRangeWithMutationCallback( + 0, + parentArray.Count(), + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingStorable, err := c.Remove(0) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) +} + func TestMutableArrayIterate(t *testing.T) { t.Run("empty", func(t *testing.T) { From 393e179015431b4cd2363dfce4f79a480815889f Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 24 May 2024 09:51:16 -0500 Subject: [PATCH 125/126] Update comment for NondeterministicFastCommit Make it clearer that encoded slabs are still deterministic, so array and map iterations will remain deterministic. Only the sequence of changed slabs getting committed is nondeterministic. This is useful for migration programs that don't require commit sequence of slabs to be deterministic while still preserving deterministic encoding of slabs (e.g. iteration of arrays and maps remain deterministic). --- storage.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/storage.go b/storage.go index 3270f978..1732451c 100644 --- a/storage.go +++ b/storage.go @@ -969,8 +969,10 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { return nil } -// NondeterministicFastCommit commits changes in nondeterministic order. -// This is used by migration program when ordering isn't required. +// NondeterministicFastCommit commits changed slabs in nondeterministic order. +// Encoded slab data is deterministic (e.g. array and map iteration is deterministic). +// IMPORTANT: This function is used by migration programs when commit order of slabs +// is not required to be deterministic (while preserving deterministic array and map iteration). 
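For illustration (not part of the patch): a sketch of the migration-style usage the updated comment describes; `storage` is assumed to be an existing *PersistentSlabStorage and the worker count is illustrative.

	const numWorkers = 8
	// Commit order of changed slabs is nondeterministic, but each slab's
	// encoding is the same as with FastCommit, so array/map iteration over
	// the committed state stays deterministic.
	if err := storage.NondeterministicFastCommit(numWorkers); err != nil {
		return err
	}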
func (s *PersistentSlabStorage) NondeterministicFastCommit(numWorkers int) error { // No changes if len(s.deltas) == 0 { From 7bcf32f87e0295f5ca46827c800b9d48c1711573 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:44:04 -0500 Subject: [PATCH 126/126] Update copyright notice to Flow Foundation --- NOTICE | 4 ++++ README.md | 2 +- array.go | 2 +- array_bench_test.go | 2 +- array_benchmark_test.go | 2 +- array_debug.go | 2 +- array_test.go | 2 +- basicarray.go | 2 +- basicarray_benchmark_test.go | 2 +- basicarray_test.go | 2 +- blake3_regression_test.go | 2 +- circlehash64_regression_test.go | 2 +- cmd/stress/array.go | 2 +- cmd/stress/main.go | 2 +- cmd/stress/map.go | 2 +- cmd/stress/storable.go | 2 +- cmd/stress/typeinfo.go | 2 +- cmd/stress/utils.go | 2 +- doc.go | 2 +- encode.go | 2 +- errors.go | 2 +- flag.go | 2 +- flag_test.go | 2 +- hash.go | 2 +- map.go | 2 +- map_debug.go | 2 +- map_test.go | 2 +- mapcollision_bench_test.go | 2 +- settings.go | 2 +- slab.go | 2 +- slab_test.go | 2 +- storable.go | 2 +- storable_slab.go | 2 +- storable_test.go | 2 +- storage.go | 2 +- storage_bench_test.go | 2 +- storage_test.go | 2 +- typeinfo.go | 2 +- utils_test.go | 2 +- value.go | 2 +- 40 files changed, 43 insertions(+), 39 deletions(-) create mode 100644 NOTICE diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..119b6eaa --- /dev/null +++ b/NOTICE @@ -0,0 +1,4 @@ +Atree +Copyright 2021-2024 Flow Foundation + +This product includes software developed at the Flow Foundation (https://flow.com/flow-foundation). \ No newline at end of file diff --git a/README.md b/README.md index 6539ca1f..d6a5d5fc 100644 --- a/README.md +++ b/README.md @@ -82,4 +82,4 @@ The Atree library is licensed under the terms of the Apache license. See [LICENS Logo is based on the artwork of Raisul Hadi licensed under Creative Commons. -Copyright © 2021-2022 Dapper Labs, Inc. +Copyright © 2021-2024 Flow Foundation diff --git a/array.go b/array.go index 4ed70c97..f8a3ae35 100644 --- a/array.go +++ b/array.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/array_bench_test.go b/array_bench_test.go index b8c06cd0..d3a35885 100644 --- a/array_bench_test.go +++ b/array_bench_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/array_benchmark_test.go b/array_benchmark_test.go index 2c6b3918..baba5559 100644 --- a/array_benchmark_test.go +++ b/array_benchmark_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/array_debug.go b/array_debug.go index 7ffa335d..89a17022 100644 --- a/array_debug.go +++ b/array_debug.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. 
+ * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/array_test.go b/array_test.go index 4225e9f1..172d02ca 100644 --- a/array_test.go +++ b/array_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/basicarray.go b/basicarray.go index 58b77e47..1c7c3991 100644 --- a/basicarray.go +++ b/basicarray.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/basicarray_benchmark_test.go b/basicarray_benchmark_test.go index e6ee564e..f5afa3b1 100644 --- a/basicarray_benchmark_test.go +++ b/basicarray_benchmark_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/basicarray_test.go b/basicarray_test.go index 7c9dfdc4..cb23f439 100644 --- a/basicarray_test.go +++ b/basicarray_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/blake3_regression_test.go b/blake3_regression_test.go index 0d5707bb..870930a8 100644 --- a/blake3_regression_test.go +++ b/blake3_regression_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2022 Dapper Labs, Inc. + * Copyright Flow Foundation * Copyright 2021 Faye Amacker * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/circlehash64_regression_test.go b/circlehash64_regression_test.go index ddd2991e..b5d424e9 100644 --- a/circlehash64_regression_test.go +++ b/circlehash64_regression_test.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2022 Dapper Labs, Inc. + * Copyright Flow Foundation * Copyright 2021 Faye Amacker * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 1a8e94d3..adbaf63d 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/stress/main.go b/cmd/stress/main.go index e93364c6..fbd735b9 100644 --- a/cmd/stress/main.go +++ b/cmd/stress/main.go @@ -1,7 +1,7 @@ /* * Atree - Scalable Arrays and Ordered Maps * - * Copyright 2021 Dapper Labs, Inc. + * Copyright Flow Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/cmd/stress/map.go b/cmd/stress/map.go
index c560bb5a..0e11b627 100644
--- a/cmd/stress/map.go
+++ b/cmd/stress/map.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/cmd/stress/storable.go b/cmd/stress/storable.go
index 0aaf1aa4..fcd57b66 100644
--- a/cmd/stress/storable.go
+++ b/cmd/stress/storable.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go
index ddeee106..b3c8120f 100644
--- a/cmd/stress/typeinfo.go
+++ b/cmd/stress/typeinfo.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go
index ba3653ca..fd7abe05 100644
--- a/cmd/stress/utils.go
+++ b/cmd/stress/utils.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/doc.go b/doc.go
index 2c66bb02..31512257 100644
--- a/doc.go
+++ b/doc.go
@@ -1,6 +1,6 @@
 // Atree - Scalable Arrays and Ordered Maps
 //
-// Copyright 2021 Dapper Labs, Inc.
+// Copyright Flow Foundation
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

diff --git a/encode.go b/encode.go
index d82d56aa..a416d121 100644
--- a/encode.go
+++ b/encode.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/errors.go b/errors.go
index 6ea2298a..60597862 100644
--- a/errors.go
+++ b/errors.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/flag.go b/flag.go
index 44230072..758a9b46 100644
--- a/flag.go
+++ b/flag.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/flag_test.go b/flag_test.go
index e4a81564..c506184a 100644
--- a/flag_test.go
+++ b/flag_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/hash.go b/hash.go
index 614d51d6..0e4bf087 100644
--- a/hash.go
+++ b/hash.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/map.go b/map.go
index e4531e10..51493c39 100644
--- a/map.go
+++ b/map.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/map_debug.go b/map_debug.go
index dbfd6834..7e5aea8a 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/map_test.go b/map_test.go
index f5361205..e7838114 100644
--- a/map_test.go
+++ b/map_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/mapcollision_bench_test.go b/mapcollision_bench_test.go
index fcc2cbf8..6b36d600 100644
--- a/mapcollision_bench_test.go
+++ b/mapcollision_bench_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2022 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/settings.go b/settings.go
index fed9aea0..ed134db2 100644
--- a/settings.go
+++ b/settings.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/slab.go b/slab.go
index 2f0a6a9b..9b26e71c 100644
--- a/slab.go
+++ b/slab.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/slab_test.go b/slab_test.go
index 433f2e81..62f45856 100644
--- a/slab_test.go
+++ b/slab_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/storable.go b/storable.go
index 634c4572..59e41ce1 100644
--- a/storable.go
+++ b/storable.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/storable_slab.go b/storable_slab.go
index 62c80308..162c2585 100644
--- a/storable_slab.go
+++ b/storable_slab.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/storable_test.go b/storable_test.go
index 912207dc..6072e839 100644
--- a/storable_test.go
+++ b/storable_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/storage.go b/storage.go
index 1732451c..b4cf7af7 100644
--- a/storage.go
+++ b/storage.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021-2022 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/storage_bench_test.go b/storage_bench_test.go
index 7008d2a3..0736b75d 100644
--- a/storage_bench_test.go
+++ b/storage_bench_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2024 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/storage_test.go b/storage_test.go
index ad8434a5..bba94619 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021-2022 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/typeinfo.go b/typeinfo.go
index a5dadeaa..ef7f2186 100644
--- a/typeinfo.go
+++ b/typeinfo.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/utils_test.go b/utils_test.go
index 84aba2c6..1e5b9805 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

diff --git a/value.go b/value.go
index c8be86e3..0652d1a3 100644
--- a/value.go
+++ b/value.go
@@ -1,7 +1,7 @@
 /*
  * Atree - Scalable Arrays and Ordered Maps
  *
- * Copyright 2021 Dapper Labs, Inc.
+ * Copyright Flow Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.