diff --git a/components/debugapi/blocks.go b/components/debugapi/blocks.go
index 4f9a07abb..948aa76e0 100644
--- a/components/debugapi/blocks.go
+++ b/components/debugapi/blocks.go
@@ -16,7 +16,13 @@ func getSlotBlockIDs(index iotago.SlotIndex) (*BlockChangesResponse, error) {
     }
 
     includedBlocks := make([]string, 0)
-    tangleTree := ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.BlockID.Bytes, iotago.BlockIDFromBytes)
+    tangleTree := ads.NewSet[iotago.Identifier](
+        mapdb.NewMapDB(),
+        iotago.Identifier.Bytes,
+        iotago.IdentifierFromBytes,
+        iotago.BlockID.Bytes,
+        iotago.BlockIDFromBytes,
+    )
 
     _ = blocksForSlot.StreamKeys(func(blockID iotago.BlockID) error {
         includedBlocks = append(includedBlocks, blockID.String())
diff --git a/components/debugapi/transactions.go b/components/debugapi/transactions.go
index 6724032c9..8a325e98c 100644
--- a/components/debugapi/transactions.go
+++ b/components/debugapi/transactions.go
@@ -21,7 +21,13 @@ func storeTransactionsPerSlot(scd *notarization.SlotCommittedDetails) error {
     if err != nil {
         return ierrors.Wrapf(err, "failed to retrieve state diff for slot %d", slot)
     }
-    mutationsTree := ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes)
+    mutationsTree := ads.NewSet[iotago.Identifier](
+        mapdb.NewMapDB(),
+        iotago.Identifier.Bytes,
+        iotago.IdentifierFromBytes,
+        iotago.TransactionID.Bytes,
+        iotago.TransactionIDFromBytes,
+    )
     tcs := &TransactionsChangesResponse{
         Index:                slot,
         IncludedTransactions: make([]string, 0),
diff --git a/go.mod b/go.mod
index 006f26e31..a9dd74376 100644
--- a/go.mod
+++ b/go.mod
@@ -10,22 +10,22 @@ require (
     github.com/google/uuid v1.4.0
     github.com/gorilla/websocket v1.5.0
     github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
-    github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/app v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/logger v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42
-    github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42
+    github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936
+    github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936
     github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5
     github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac
-    github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e
+    github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6
     github.com/labstack/echo/v4 v4.11.2
     github.com/labstack/gommon v0.4.0
     github.com/libp2p/go-libp2p v0.32.0
@@ -33,7 +33,6 @@ require (
     github.com/mr-tron/base58 v1.2.0
     github.com/multiformats/go-multiaddr v0.12.0
     github.com/multiformats/go-varint v0.0.7
-    github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e
     github.com/otiai10/copy v1.14.0
     github.com/prometheus/client_golang v1.17.0
     github.com/spf13/pflag v1.0.5
@@ -89,7 +88,7 @@ require (
     github.com/huin/goupnp v1.3.0 // indirect
     github.com/iancoleman/orderedmap v0.3.0 // indirect
     github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect
-    github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42 // indirect
+    github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 // indirect
     github.com/ipfs/boxo v0.13.1 // indirect
     github.com/ipfs/go-cid v0.4.1 // indirect
     github.com/ipfs/go-datastore v0.6.0 // indirect
diff --git a/go.sum b/go.sum
index 2af6a376f..8ca757948 100644
--- a/go.sum
+++ b/go.sum
@@ -275,40 +275,40 @@ github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJ
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42 h1:EOfxTuAiBmED1VHuVh7/UIeB27cCRe13gdSzyioNMBw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42/go.mod h1:IFh0gDfeMgZtfCo+5afK59IDR4xXh+cTR9YtLnZPcbY=
-github.com/iotaledger/hive.go/app v0.0.0-20231027195901-620bd7470e42 h1:xAER9M9Uoz2EOWT43E9wmXRe+RmAk8OBSUoboH4Su8M=
-github.com/iotaledger/hive.go/app v0.0.0-20231027195901-620bd7470e42/go.mod h1:8ZbIKR84oQd/3iQ5eeT7xpudO9/ytzXP7veIYnk7Orc=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42 h1:drmpgLlJy7kZ09Dt1qKSnbILU+27Qu2jp4VdPDNwbFk=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42 h1:BC5GkIHyXdoJGdw6Tu5ds2kjw9grFLtwQiuMaKfdLU8=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42/go.mod h1:Mc+ACqBGPxrPMIPUBOm6/HL0J6m0iVMwjtIEKW3uow8=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42 h1:r8TkdQJB7/bJd8cF8z5GQ+rX/7JpbPdPoN7wMoV1OCM=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42/go.mod h1:h3o6okvMSEK3KOX6pOp3yq1h9ohTkTfo6X8MzEadeb0=
-github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42 h1:ytzZZPtclAzLfjxv26frbinCGx3Z6ouUENbx5U7lFGg=
-github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42/go.mod h1:3XkUSKfHaVxGbT0XAvjNlVYqPzhfLTGhDtdNA5UBPco=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42 h1:QMxd32Y/veVhTDPCiOFgetjUbG7sr9MryF29/rSPkMA=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42 h1:/xPwStUckZ2V0XPoY496cXU+c5elpHyvYoT6JAmuvRY=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42/go.mod h1:O/U3jtiUDeqqM0MZQFu2UPqS9fUm0C5hNISxlmg/thE=
-github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42 h1:AvNLzONVMspwx7nD/NyYUgb5Hi7/zgzIOegr1uRD/M8=
-github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42/go.mod h1:s4kzx9QY1MVWHJralj+3q5kI0eARtrJhphYD/iBbPfo=
-github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42 h1:e1uJAlXE3zeXpa+c4uFOG+/AMFbUlLt2mcrSK5NMxVs=
-github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42/go.mod h1:JvokzmpmFZPDskMlUqqjgHtD8usVJU4nAY/TNMGge8M=
-github.com/iotaledger/hive.go/logger v0.0.0-20231027195901-620bd7470e42 h1:7wjs4t1snBDJ8LOTl+tZhr2ORywSOTgJMppxiIAMA0A=
-github.com/iotaledger/hive.go/logger v0.0.0-20231027195901-620bd7470e42/go.mod h1:aBfAfIB2GO/IblhYt5ipCbyeL9bXSNeAwtYVA3hZaHg=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42 h1:1QMJ39qXIx/IZVzus3+97IV7Pa++e+d340TvbMjhiBU=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42/go.mod h1:jRw8yFipiPaqmTPHh7hTcxAP9u6pjRGpByS3REJKkbY=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42 h1:hZli4E9kJUAEQ7gzZR1XbPcpgqvqMPYq8YBPMbrBuos=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42/go.mod h1:SdK26z8/VhWtxaqCuQrufm80SELgowQPmu9T/8eUQ8g=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42 h1:OlDhgvJ48bZxcvTeebJ1b96xtNnJAddejd2Q4rlH1mU=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 h1:2r4FgIGdc2lHcIbXiUFCCVq4+B0oZk9t6Z0SSLjrzCE=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
+github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936 h1:SnmQt9GxrWIvpW7pgQS049x1b8T+lQutTQbo35FImug=
+github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 h1:qkq0Wz+Y3J8QYRLd0fwTgHuur/A3k7d82BxOKSfvk8c=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 h1:GtsYwcCqRomhMo190TPxBrOzs6YnVmqkmQgT/lJrJRo=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 h1:Xeb4w0g0Kv2ZjdCZQqz8oiqAU5qAy8OXG8kGTXSPzuY=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 h1:NtQLSS0Lq5qg/w5nbMpXrlQpmcK3KiOaQmgZWoRc4mM=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 h1:o5S4KUAwToOLXoYYRj9ZgqeDsFv1VRM4+Mni0Tdj2Ck=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 h1:kXKJQ8UvbA8kI0Jx0EnlXbwDeZFY8pEX0Q6KaOPsYlQ=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 h1:coXPklQ7JgqTXIUXh3b4OHml1VIvI8x7pQsjsES/u/s=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 h1:VBvGnsVwqhoT9zMyMIlK5fPmz6fsbiPZOwdU1E8WU7o=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
+github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936 h1:05EbTaladbyo7mD8yBaWYJh9P8u/TUTmrjVmcUjoW8A=
+github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 h1:XbC1fmY87UJ/yMs8U2YqlUdJsqb0Xqj/ZYQKlZ7AUG8=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 h1:LXhLW2cN9bQYoHQsgmJRb/jiRBRU5s2rLoCNjZfgHdg=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 h1:Y4HgL5gm9S27usg5M2t6wi1BSdCxVorM62lwnpKuMd4=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
 github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5 h1:17JDzMKTMXKF3xys6gPURRddkZhg1LY+xwfhbr/sVqg=
 github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5/go.mod h1:LsJvoBUVVnY7tkwwByCVtAwmp5bFXdyJNGU/+KVQJVM=
 github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac h1:c7R33+TQGMYP6pvLUQQaqpdDFl+GZbhAcfGMI0285fo=
 github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac/go.mod h1:qPuMUvCTaghsnYRDnRoRuztTyEKFlmi2S7gb44rH7WM=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e h1:ZYRC1MHn/ghsqtjIpYGTxLQrh5n5eUmC0/YWnJiTRhk=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e/go.mod h1:jqbLYq4a/FwuiPBqFfkAwwxU8vs3+kReRq2/tyX5qRA=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6 h1:4kvG+BB4GOBsNYPY/enPo3xeC65A133L9cD73Kf1p9Q=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6/go.mod h1:8iDORW4/e4NztyAGqjW07uSMjbhs7snbxw+81IWOczY=
 github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI=
 github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk=
 github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
@@ -507,8 +507,6 @@ github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0=
 github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
 github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
diff --git a/pkg/core/account/accounts.go b/pkg/core/account/accounts.go
index 770a72de1..7cfdfa3bd 100644
--- a/pkg/core/account/accounts.go
+++ b/pkg/core/account/accounts.go
@@ -1,8 +1,6 @@
 package account
 
 import (
-    "bytes"
-    "encoding/binary"
     "io"
     "sync/atomic"
 
@@ -10,7 +8,8 @@ import (
     "github.com/iotaledger/hive.go/ds/shrinkingmap"
     "github.com/iotaledger/hive.go/ierrors"
     "github.com/iotaledger/hive.go/runtime/syncutils"
-    "github.com/iotaledger/hive.go/serializer/v2/marshalutil"
+    "github.com/iotaledger/hive.go/serializer/v2"
+    "github.com/iotaledger/hive.go/serializer/v2/stream"
 
     iotago "github.com/iotaledger/iota.go/v4"
 )
@@ -27,14 +26,9 @@ type Accounts struct {
 
 // NewAccounts creates a new Weights instance.
 func NewAccounts() *Accounts {
-    a := new(Accounts)
-    a.initialize()
-
-    return a
-}
-
-func (a *Accounts) initialize() {
-    a.accountPools = shrinkingmap.New[iotago.AccountID, *Pool]()
+    return &Accounts{
+        accountPools: shrinkingmap.New[iotago.AccountID, *Pool](),
+    }
 }
 
 func (a *Accounts) Has(id iotago.AccountID) bool {
@@ -137,91 +131,80 @@ func (a *Accounts) SelectCommittee(members ...iotago.AccountID) *SeatedAccounts
 }
 
 func AccountsFromBytes(b []byte) (*Accounts, int, error) {
-    return AccountsFromReader(bytes.NewReader(b))
-}
-
-func AccountsFromReader(readSeeker io.ReadSeeker) (*Accounts, int, error) {
-    a := new(Accounts)
-    n, err := a.readFromReadSeeker(readSeeker)
+    reader := stream.NewByteReader(b)
 
-    return a, n, err
-}
-
-func (a *Accounts) readFromReadSeeker(reader io.ReadSeeker) (n int, err error) {
-    a.mutex.Lock()
-    defer a.mutex.Unlock()
-
-    a.initialize()
-
-    var accountCount uint32
-    if err = binary.Read(reader, binary.LittleEndian, &accountCount); err != nil {
-        return n, ierrors.Wrap(err, "unable to read accounts count")
+    a, err := AccountsFromReader(reader)
+    if err != nil {
+        return nil, 0, ierrors.Wrap(err, "unable to read accounts from bytes")
     }
-    n += 4
-
-    for i := uint32(0); i < accountCount; i++ {
-        var accountID iotago.AccountID
-        if _, err = io.ReadFull(reader, accountID[:]); err != nil {
-            return 0, ierrors.Wrap(err, "unable to read accountID")
-        }
-        n += iotago.AccountIDLength
 
-        poolBytes := make([]byte, poolBytesLength)
-        if _, err = io.ReadFull(reader, poolBytes); err != nil {
-            return 0, ierrors.Wrap(err, "unable to read pool bytes")
-        }
-        n += poolBytesLength
+    return a, reader.BytesRead(), nil
+}
 
-        pool, c, err := PoolFromBytes(poolBytes)
+func AccountsFromReader(reader io.Reader) (*Accounts, error) {
+    a := NewAccounts()
+
+    if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error {
+        accountID, err := stream.Read[iotago.AccountID](reader)
         if err != nil {
-            return 0, ierrors.Wrap(err, "failed to parse pool")
+            return ierrors.Wrapf(err, "unable to read accountID at index %d", i)
         }
 
-        if c != poolBytesLength {
-            return 0, ierrors.Wrap(err, "invalid pool bytes length")
+        pool, err := stream.ReadObject(reader, poolBytesLength, PoolFromBytes)
+        if err != nil {
+            return ierrors.Wrapf(err, "unable to read pool at index %d", i)
         }
 
        if err := a.setWithoutLocking(accountID, pool); err != nil {
-            return 0, ierrors.Wrapf(err, "failed to set pool for account %s", accountID.String())
+            return ierrors.Wrapf(err, "failed to set pool for account %s", accountID.String())
        }
+
+        return nil
+    }); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read account data")
     }
 
-    var reused bool
-    if err = binary.Read(reader, binary.LittleEndian, &reused); err != nil {
-        return n, ierrors.Wrap(err, "unable to read reused flag")
+    reused, err := stream.Read[bool](reader)
+    if err != nil {
+        return nil, ierrors.Wrap(err, "failed to read reused flag")
     }
+
     a.reused.Store(reused)
-    n++
 
-    return n, nil
+    return a, nil
 }
 
-func (a *Accounts) Bytes() (bytes []byte, err error) {
+func (a *Accounts) Bytes() ([]byte, error) {
     a.mutex.RLock()
     defer a.mutex.RUnlock()
 
-    m := marshalutil.New()
+    byteBuffer := stream.NewByteBuffer()
 
-    m.WriteUint32(uint32(a.accountPools.Size()))
-    var innerErr error
-    a.ForEach(func(id iotago.AccountID, pool *Pool) bool {
-        m.WriteBytes(id[:])
-        poolBytes, err := pool.Bytes()
-        if err != nil {
-            innerErr = err
-            return false
-        }
-        m.WriteBytes(poolBytes)
+    if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) {
+        var innerErr error
+        a.ForEach(func(id iotago.AccountID, pool *Pool) bool {
+            if innerErr = stream.Write(byteBuffer, id); innerErr != nil {
+                return false
+            }
 
-        return true
-    })
+            if innerErr = stream.WriteObject(byteBuffer, pool, (*Pool).Bytes); innerErr != nil {
+                return false
+            }
 
-    m.WriteBool(a.reused.Load())
+            return true
+        })
+        if innerErr != nil {
+            return 0, innerErr
+        }
+
+        return a.accountPools.Size(), nil
+    }); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write accounts")
+    }
 
-    if innerErr != nil {
-        return nil, innerErr
+    if err := stream.Write(byteBuffer, a.reused.Load()); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write reused flag")
     }
 
-    return m.Bytes(), nil
+    return byteBuffer.Bytes()
 }
diff --git a/pkg/core/account/accounts_test.go b/pkg/core/account/accounts_test.go
index ffc7dbe8e..49639141d 100644
--- a/pkg/core/account/accounts_test.go
+++ b/pkg/core/account/accounts_test.go
@@ -99,7 +99,7 @@ func TestAccounts(t *testing.T) {
     require.Equal(t, accounts, accounts2)
 
     // check "AccountsFromReader"
-    accounts3, _, err := account.AccountsFromReader(bytes.NewReader(accountBytes))
+    accounts3, err := account.AccountsFromReader(bytes.NewReader(accountBytes))
     require.NoError(t, err)
 
     // check if the new account is the same
diff --git a/pkg/core/account/pool.go b/pkg/core/account/pool.go
index 5815420db..44827eb11 100644
--- a/pkg/core/account/pool.go
+++ b/pkg/core/account/pool.go
@@ -2,11 +2,12 @@ package account
 
 import (
     "github.com/iotaledger/hive.go/ierrors"
-    "github.com/iotaledger/hive.go/serializer/v2/marshalutil"
+    "github.com/iotaledger/hive.go/serializer/v2"
+    "github.com/iotaledger/hive.go/serializer/v2/stream"
     iotago "github.com/iotaledger/iota.go/v4"
 )
 
-const poolBytesLength = 3 * marshalutil.Uint64Size
+const poolBytesLength = 3 * serializer.UInt64ByteSize
 
 // Pool represents all the data we need for a given validator and epoch to calculate its rewards data.
 type Pool struct {
@@ -19,33 +20,35 @@ type Pool struct {
 
 func PoolFromBytes(bytes []byte) (*Pool, int, error) {
     p := new(Pool)
-    m := marshalutil.New(bytes)
-    poolStake, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse pool stake")
-    }
-    p.PoolStake = iotago.BaseToken(poolStake)
-    validatorStake, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse validator stake")
-    }
-    p.ValidatorStake = iotago.BaseToken(validatorStake)
+    var err error
+    byteReader := stream.NewByteReader(bytes)
 
-    fixedCost, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse fixed cost")
+    if p.PoolStake, err = stream.Read[iotago.BaseToken](byteReader); err != nil {
+        return nil, 0, ierrors.Wrap(err, "failed to read PoolStake")
+    }
+    if p.ValidatorStake, err = stream.Read[iotago.BaseToken](byteReader); err != nil {
+        return nil, 0, ierrors.Wrap(err, "failed to read ValidatorStake")
+    }
+    if p.FixedCost, err = stream.Read[iotago.Mana](byteReader); err != nil {
+        return nil, 0, ierrors.Wrap(err, "failed to read FixedCost")
     }
-    p.FixedCost = iotago.Mana(fixedCost)
 
-    return p, m.ReadOffset(), nil
+    return p, byteReader.BytesRead(), nil
 }
 
-func (p *Pool) Bytes() (bytes []byte, err error) {
-    m := marshalutil.New()
-    m.WriteUint64(uint64(p.PoolStake))
-    m.WriteUint64(uint64(p.ValidatorStake))
-    m.WriteUint64(uint64(p.FixedCost))
+func (p *Pool) Bytes() ([]byte, error) {
+    byteBuffer := stream.NewByteBuffer(poolBytesLength)
+
+    if err := stream.Write(byteBuffer, p.PoolStake); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write PoolStake")
+    }
+    if err := stream.Write(byteBuffer, p.ValidatorStake); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write ValidatorStake")
+    }
+    if err := stream.Write(byteBuffer, p.FixedCost); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write FixedCost")
+    }
 
-    return m.Bytes(), nil
+    return byteBuffer.Bytes()
 }
diff --git a/pkg/model/account_diff.go b/pkg/model/account_diff.go
index fadef8c74..80647fb5e 100644
--- a/pkg/model/account_diff.go
+++ b/pkg/model/account_diff.go
@@ -1,15 +1,10 @@
 package model
 
 import (
-    "bytes"
-    "context"
-    "encoding/binary"
     "io"
 
     "github.com/iotaledger/hive.go/ierrors"
     "github.com/iotaledger/hive.go/lo"
-    "github.com/iotaledger/hive.go/serializer/v2"
-    "github.com/iotaledger/hive.go/serializer/v2/marshalutil"
     "github.com/iotaledger/hive.go/serializer/v2/stream"
     iotago "github.com/iotaledger/iota.go/v4"
 )
@@ -18,7 +13,7 @@ import (
 type AccountDiff struct {
     BICChange iotago.BlockIssuanceCredits
 
-    PreviousUpdatedTime iotago.SlotIndex
+    PreviousUpdatedSlot iotago.SlotIndex
 
     NewExpirySlot iotago.SlotIndex
     PreviousExpirySlot iotago.SlotIndex
@@ -44,7 +39,7 @@ type AccountDiff struct {
 func NewAccountDiff() *AccountDiff {
     return &AccountDiff{
         BICChange: 0,
-        PreviousUpdatedTime: 0,
+        PreviousUpdatedSlot: 0,
         NewExpirySlot: 0,
         PreviousExpirySlot: 0,
         NewOutputID: iotago.EmptyOutputID,
@@ -60,37 +55,10 @@ func NewAccountDiff() *AccountDiff {
     }
 }
 
-func (d AccountDiff) Bytes() ([]byte, error) {
-    m := marshalutil.New()
-
-    m.WriteInt64(int64(d.BICChange))
-    m.WriteUint32(uint32(d.PreviousUpdatedTime))
-    m.WriteUint32(uint32(d.NewExpirySlot))
-    m.WriteUint32(uint32(d.PreviousExpirySlot))
-    m.WriteBytes(lo.PanicOnErr(d.NewOutputID.Bytes()))
-    m.WriteBytes(lo.PanicOnErr(d.PreviousOutputID.Bytes()))
-
-    if err := writeBlockIssuerKeys(m, d.BlockIssuerKeysAdded); err != nil {
-        return nil, err
-    }
-
-    if err := writeBlockIssuerKeys(m, d.BlockIssuerKeysRemoved); err != nil {
-        return nil, err
-    }
-
-    m.WriteInt64(d.ValidatorStakeChange)
-    m.WriteInt64(d.DelegationStakeChange)
-    m.WriteInt64(d.FixedCostChange)
-    m.WriteUint64(uint64(d.StakeEndEpochChange))
-    m.WriteBytes(lo.PanicOnErr(d.NewLatestSupportedVersionAndHash.Bytes()))
-    m.WriteBytes(lo.PanicOnErr(d.PrevLatestSupportedVersionAndHash.Bytes()))
-
-    return m.Bytes(), nil
-}
-
 func (d *AccountDiff) Clone() *AccountDiff {
     return &AccountDiff{
         BICChange: d.BICChange,
-        PreviousUpdatedTime: d.PreviousUpdatedTime,
+        PreviousUpdatedSlot: d.PreviousUpdatedSlot,
         NewExpirySlot: d.NewExpirySlot,
         PreviousExpirySlot: d.PreviousExpirySlot,
         NewOutputID: d.NewOutputID,
@@ -106,131 +74,113 @@ func (d *AccountDiff) Clone() *AccountDiff {
     }
 }
 
-func (d *AccountDiff) FromBytes(b []byte) (int, error) {
-    return d.readFromReadSeeker(bytes.NewReader(b))
-}
-
-func (d *AccountDiff) FromReader(readSeeker io.ReadSeeker) error {
-    return lo.Return2(d.readFromReadSeeker(readSeeker))
-}
+func (d *AccountDiff) Bytes() ([]byte, error) {
+    byteBuffer := stream.NewByteBuffer()
 
-func (d *AccountDiff) readFromReadSeeker(reader io.ReadSeeker) (offset int, err error) {
-    if err = binary.Read(reader, binary.LittleEndian, &d.BICChange); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read account BIC balance value in the diff")
+    if err := stream.Write(byteBuffer, d.BICChange); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write BICChange value in the diff")
     }
-    offset += 8
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.PreviousUpdatedTime); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read previous updated time in the diff")
+    if err := stream.Write(byteBuffer, d.PreviousUpdatedSlot); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write PreviousUpdatedSlot in the diff")
     }
-    offset += iotago.SlotIndexLength
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.NewExpirySlot); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read new expiry slot in the diff")
+    if err := stream.Write(byteBuffer, d.NewExpirySlot); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write NewExpirySlot in the diff")
     }
-    offset += iotago.SlotIndexLength
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.PreviousExpirySlot); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read previous expiry slot in the diff")
+    if err := stream.Write(byteBuffer, d.PreviousExpirySlot); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write PreviousExpirySlot in the diff")
    }
-    offset += iotago.SlotIndexLength
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.NewOutputID); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read new outputID in the diff")
+    if err := stream.Write(byteBuffer, d.NewOutputID); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write NewOutputID in the diff")
     }
-    offset += iotago.OutputIDLength
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.PreviousOutputID); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read previous outputID in the diff")
+    if err := stream.Write(byteBuffer, d.PreviousOutputID); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write PreviousOutputID in the diff")
     }
-    offset += iotago.OutputIDLength
 
-    keysAdded, bytesRead, err := readBlockIssuerKeys(reader)
-    if err != nil {
-        return offset, ierrors.Wrap(err, "unable to read added blockIssuerKeys in the diff")
+    if err := stream.WriteObject(byteBuffer, d.BlockIssuerKeysAdded, iotago.BlockIssuerKeys.Bytes); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write added blockIssuerKeys in the diff")
+    }
+    if err := stream.WriteObject(byteBuffer, d.BlockIssuerKeysRemoved, iotago.BlockIssuerKeys.Bytes); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write removed blockIssuerKeys in the diff")
     }
-    offset += bytesRead
-
-    d.BlockIssuerKeysAdded = keysAdded
 
-    keysRemoved, bytesRead, err := readBlockIssuerKeys(reader)
-    if err != nil {
-        return offset, ierrors.Wrap(err, "unable to read removed blockIssuerKeys in the diff")
+    if err := stream.Write(byteBuffer, d.ValidatorStakeChange); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write ValidatorStakeChange in the diff")
+    }
+    if err := stream.Write(byteBuffer, d.DelegationStakeChange); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write DelegationStakeChange in the diff")
+    }
+    if err := stream.Write(byteBuffer, d.FixedCostChange); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write FixedCostChange in the diff")
+    }
+    if err := stream.Write(byteBuffer, d.StakeEndEpochChange); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write StakeEndEpochChange in the diff")
+    }
+    if err := stream.WriteObject(byteBuffer, d.NewLatestSupportedVersionAndHash, VersionAndHash.Bytes); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write NewLatestSupportedVersionAndHash in the diff")
+    }
+    if err := stream.WriteObject(byteBuffer, d.PrevLatestSupportedVersionAndHash, VersionAndHash.Bytes); err != nil {
+        return nil, ierrors.Wrap(err, "unable to write PrevLatestSupportedVersionAndHash in the diff")
     }
-    offset += bytesRead
-    d.BlockIssuerKeysRemoved = keysRemoved
 
+    return byteBuffer.Bytes()
+}
 
-    if err = binary.Read(reader, binary.LittleEndian, &d.ValidatorStakeChange); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read validator stake change in the diff")
-    }
-    offset += 8
+func AccountDiffFromReader(reader io.ReadSeeker) (*AccountDiff, error) {
+    var err error
+    d := NewAccountDiff()
 
-    if err = binary.Read(reader, binary.LittleEndian, &d.DelegationStakeChange); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read delegation stake change in the diff")
+    if d.BICChange, err = stream.Read[iotago.BlockIssuanceCredits](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read account BIC balance value in the diff")
     }
-    offset += 8
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.FixedCostChange); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read fixed cost change in the diff")
+    if d.PreviousUpdatedSlot, err = stream.Read[iotago.SlotIndex](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read previous updated time in the diff")
     }
-    offset += 8
-
-    if err = binary.Read(reader, binary.LittleEndian, &d.StakeEndEpochChange); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read new stake end epoch in the diff")
+    if d.NewExpirySlot, err = stream.Read[iotago.SlotIndex](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read new expiry slot in the diff")
     }
-    offset += 8
-
-    newVersionAndHashBytes := make([]byte, VersionAndHashSize)
-    if err = binary.Read(reader, binary.LittleEndian, newVersionAndHashBytes); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read new version and hash bytes in the diff")
+    if d.PreviousExpirySlot, err = stream.Read[iotago.SlotIndex](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read previous expiry slot in the diff")
+    }
+    if d.NewOutputID, err = stream.Read[iotago.OutputID](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read new outputID in the diff")
     }
-    d.NewLatestSupportedVersionAndHash, _, err = VersionAndHashFromBytes(newVersionAndHashBytes)
-    if err != nil {
-        return offset, ierrors.Wrap(err, "unable to parse new version and hash bytes in the diff")
+    if d.PreviousOutputID, err = stream.Read[iotago.OutputID](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read previous outputID in the diff")
     }
-    offset += len(newVersionAndHashBytes)
 
-    prevVersionAndHashBytes := make([]byte, VersionAndHashSize)
-    if err = binary.Read(reader, binary.LittleEndian, prevVersionAndHashBytes); err != nil {
-        return offset, ierrors.Wrap(err, "unable to read prev version and hash bytes in the diff")
+    if d.BlockIssuerKeysAdded, err = stream.ReadObjectFromReader(reader, iotago.BlockIssuerKeysFromReader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read added blockIssuerKeys in the diff")
     }
-    d.PrevLatestSupportedVersionAndHash, _, err = VersionAndHashFromBytes(prevVersionAndHashBytes)
-    if err != nil {
-        return offset, ierrors.Wrap(err, "unable to parse prev version and hash bytes in the diff")
+    if d.BlockIssuerKeysRemoved, err = stream.ReadObjectFromReader(reader, iotago.BlockIssuerKeysFromReader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read removed blockIssuerKeys in the diff")
     }
-    offset += len(prevVersionAndHashBytes)
-
-    return offset, nil
-}
 
-func writeBlockIssuerKeys(m *marshalutil.MarshalUtil, blockIssuerKeys iotago.BlockIssuerKeys) error {
-    blockIssuerKeysBytes, err := iotago.CommonSerixAPI().Encode(context.TODO(), blockIssuerKeys)
-    if err != nil {
-        return ierrors.Wrap(err, "unable to encode blockIssuerKeys in the diff")
+    if d.ValidatorStakeChange, err = stream.Read[int64](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read validator stake change in the diff")
+    }
+    if d.DelegationStakeChange, err = stream.Read[int64](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read delegation stake change in the diff")
+    }
+    if d.FixedCostChange, err = stream.Read[int64](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read fixed cost change in the diff")
+    }
+    if d.StakeEndEpochChange, err = stream.Read[int64](reader); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read new stake end epoch in the diff")
+    }
+    if d.NewLatestSupportedVersionAndHash, err = stream.ReadObject(reader, VersionAndHashSize, VersionAndHashFromBytes); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read new latest supported version and hash in the diff")
+    }
+    if d.PrevLatestSupportedVersionAndHash, err = stream.ReadObject(reader, VersionAndHashSize, VersionAndHashFromBytes); err != nil {
+        return nil, ierrors.Wrap(err, "unable to read prev latest supported version and hash in the diff")
     }
-    m.WriteUint64(uint64(len(blockIssuerKeysBytes)))
-    m.WriteBytes(blockIssuerKeysBytes)
-
-    return nil
+    return d, nil
 }
 
-func readBlockIssuerKeys(reader io.ReadSeeker) (iotago.BlockIssuerKeys, int, error) {
-    var bytesConsumed int
-
-    blockIssuerKeysBytes, err := stream.ReadBlob(reader)
-    if err != nil {
-        return nil, bytesConsumed, ierrors.Wrap(err, "unable to read blockIssuerKeysBytes in the diff")
-    }
+func AccountDiffFromBytes(b []byte) (*AccountDiff, int, error) {
+    reader := stream.NewByteReader(b)
 
-    bytesConsumed += serializer.UInt64ByteSize // add the blob size
-    bytesConsumed += len(blockIssuerKeysBytes)
-
-    var blockIssuerKeys iotago.BlockIssuerKeys
-    if _, err := iotago.CommonSerixAPI().Decode(context.TODO(), blockIssuerKeysBytes, &blockIssuerKeys); err != nil {
-        return nil, bytesConsumed, ierrors.Wrap(err, "unable to decode blockIssuerKeys in the diff")
-    }
+    a, err := AccountDiffFromReader(reader)
 
-    return blockIssuerKeys, bytesConsumed, nil
+    return a, reader.BytesRead(), err
 }
diff --git a/pkg/model/commitment.go b/pkg/model/commitment.go
index cf17c8c4f..d7cafb979 100644
--- a/pkg/model/commitment.go
+++ b/pkg/model/commitment.go
@@ -50,28 +50,39 @@ func CommitmentFromCommitment(iotaCommitment *iotago.Commitment, api iotago.API,
     return newCommitment(commitmentID, iotaCommitment, data, api)
 }
 
-func CommitmentFromBytes(data []byte, apiProvider iotago.APIProvider, opts ...serix.Option) (*Commitment, error) {
-    version, _, err := iotago.VersionFromBytes(data)
-    if err != nil {
-        return nil, ierrors.Wrap(err, "failed to determine version")
-    }
-
-    apiForVersion, err := apiProvider.APIForVersion(version)
-    if err != nil {
-        return nil, ierrors.Wrapf(err, "failed to get API for version %d", version)
+func CommitmentFromBytes(apiProvider iotago.APIProvider) func([]byte) (*Commitment, int, error) {
+    return func(bytes []byte) (*Commitment, int, error) {
+        totalBytesRead := 0
+
+        // We read the version byte here to determine the API to use, but then we decode the entire commitment again.
+        // Thus, we don't count the version byte as read bytes.
+        version, _, err := iotago.VersionFromBytes(bytes)
+        if err != nil {
+            return nil, 0, ierrors.Wrap(err, "failed to determine version")
+        }
+
+        apiForVersion, err := apiProvider.APIForVersion(version)
+        if err != nil {
+            return nil, 0, ierrors.Wrapf(err, "failed to get API for version %d", version)
+        }
+
+        iotaCommitment := new(iotago.Commitment)
+        if totalBytesRead, err = apiForVersion.Decode(bytes, iotaCommitment, serix.WithValidation()); err != nil {
+            return nil, 0, ierrors.Wrap(err, "failed to decode commitment")
+        }
+
+        commitmentID, err := iotaCommitment.ID()
+        if err != nil {
+            return nil, 0, ierrors.Wrap(err, "failed to determine commitment ID")
+        }
+
+        commitment, err := newCommitment(commitmentID, iotaCommitment, bytes, apiForVersion)
+        if err != nil {
+            return nil, 0, ierrors.Wrap(err, "failed to create commitment")
+        }
+
+        return commitment, totalBytesRead, nil
     }
-
-    iotaCommitment := new(iotago.Commitment)
-    if _, err := apiForVersion.Decode(data, iotaCommitment, opts...); err != nil {
-        return nil, err
-    }
-
-    commitmentID, err := iotaCommitment.ID()
-    if err != nil {
-        return nil, err
-    }
-
-    return newCommitment(commitmentID, iotaCommitment, data, apiForVersion)
 }
 
 func (c *Commitment) ID() iotago.CommitmentID {
@@ -102,6 +113,10 @@ func (c *Commitment) Data() []byte {
     return c.data
 }
 
+func (c *Commitment) Bytes() ([]byte, error) {
+    return c.data, nil
+}
+
 func (c *Commitment) Commitment() *iotago.Commitment {
     return c.commitment
 }
diff --git a/pkg/model/poolstats.go b/pkg/model/poolstats.go
index b8ce0a5c0..eada06d32 100644
--- a/pkg/model/poolstats.go
+++ b/pkg/model/poolstats.go
@@ -1,8 +1,10 @@
 package model
 
 import (
+    "io"
+
     "github.com/iotaledger/hive.go/ierrors"
-    "github.com/iotaledger/hive.go/serializer/v2/marshalutil"
+    "github.com/iotaledger/hive.go/serializer/v2/stream"
     iotago "github.com/iotaledger/iota.go/v4"
 )
 
@@ -13,36 +15,48 @@ type PoolsStats struct {
     ProfitMargin uint64
 }
 
-func PoolsStatsFromBytes(bytes []byte) (*PoolsStats, int, error) {
+func PoolStatsFromReader(reader io.ReadSeeker) (*PoolsStats, error) {
     p := new(PoolsStats)
-    m := marshalutil.New(bytes)
-    totalStake, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse total stake")
-    }
-    p.TotalStake = iotago.BaseToken(totalStake)
 
-    totalValidatorStake, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse total validator stake")
+    var err error
+    if p.TotalStake, err = stream.Read[iotago.BaseToken](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read TotalStake")
+    }
+    if p.TotalValidatorStake, err = stream.Read[iotago.BaseToken](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read TotalValidatorStake")
+    }
+    if p.ProfitMargin, err = stream.Read[uint64](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read ProfitMargin")
     }
-    p.TotalValidatorStake = iotago.BaseToken(totalValidatorStake)
 
-    p.ProfitMargin, err = m.ReadUint64()
+    return p, nil
+}
+
+func PoolsStatsFromBytes(bytes []byte) (*PoolsStats, int, error) {
+    byteReader := stream.NewByteReader(bytes)
+
+    p, err := PoolStatsFromReader(byteReader)
     if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse profit margin")
+        return nil, 0, ierrors.Wrap(err, "failed to parse PoolStats")
     }
 
-    return p, m.ReadOffset(), nil
+    return p, byteReader.BytesRead(), nil
 }
 
 func (p *PoolsStats) Bytes() ([]byte, error) {
-    m := marshalutil.New()
-    m.WriteUint64(uint64(p.TotalStake))
-    m.WriteUint64(uint64(p.TotalValidatorStake))
-    m.WriteUint64(p.ProfitMargin)
+    byteBuffer := stream.NewByteBuffer()
+
+    if err := stream.Write(byteBuffer, p.TotalStake); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write TotalStake")
+    }
+    if err := stream.Write(byteBuffer, p.TotalValidatorStake); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write TotalValidatorStake")
+    }
+    if err := stream.Write(byteBuffer, p.ProfitMargin); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write ProfitMargin")
+    }
 
-    return m.Bytes(), nil
+    return byteBuffer.Bytes()
 }
 
 type PoolRewards struct {
@@ -54,36 +68,46 @@ type PoolRewards struct {
     FixedCost iotago.Mana
 }
 
-func PoolRewardsFromBytes(bytes []byte) (*PoolRewards, int, error) {
+func PoolRewardsFromReader(reader io.ReadSeeker) (*PoolRewards, error) {
+    var err error
     p := new(PoolRewards)
-    m := marshalutil.New(bytes)
-    poolStake, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse pool stake")
+    if p.PoolStake, err = stream.Read[iotago.BaseToken](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read PoolStake")
     }
-    p.PoolStake = iotago.BaseToken(poolStake)
-
-    poolRewards, err := m.ReadUint64()
-    if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse pool rewards")
+    if p.PoolRewards, err = stream.Read[iotago.Mana](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read PoolRewards")
+    }
+    if p.FixedCost, err = stream.Read[iotago.Mana](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read FixedCost")
     }
-    p.PoolRewards = iotago.Mana(poolRewards)
 
-    fixedCost, err := m.ReadUint64()
+    return p, nil
+}
+
+func PoolRewardsFromBytes(bytes []byte) (*PoolRewards, int, error) {
+    byteReader := stream.NewByteReader(bytes)
+
+    p, err := PoolRewardsFromReader(byteReader)
     if err != nil {
-        return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse fixed cost")
+        return nil, 0, ierrors.Wrap(err, "failed to parse PoolRewards")
     }
-    p.FixedCost = iotago.Mana(fixedCost)
 
-    return p, m.ReadOffset(), nil
+    return p, byteReader.BytesRead(), nil
 }
 
 func (p *PoolRewards) Bytes() ([]byte, error) {
-    m := marshalutil.New()
-    m.WriteUint64(uint64(p.PoolStake))
-    m.WriteUint64(uint64(p.PoolRewards))
-    m.WriteUint64(uint64(p.FixedCost))
+    byteBuffer := stream.NewByteBuffer()
+
+    if err := stream.Write(byteBuffer, p.PoolStake); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write PoolStake")
+    }
+    if err := stream.Write(byteBuffer, p.PoolRewards); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write PoolRewards")
+    }
+    if err := stream.Write(byteBuffer, p.FixedCost); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write FixedCost")
+    }
 
-    return m.Bytes(), nil
+    return byteBuffer.Bytes()
 }
diff --git a/pkg/model/validator_performance.go b/pkg/model/validator_performance.go
index bd72a1be1..c121cfc23 100644
--- a/pkg/model/validator_performance.go
+++ b/pkg/model/validator_performance.go
@@ -1,37 +1,68 @@
 package model
 
 import (
-    iotago "github.com/iotaledger/iota.go/v4"
+    "io"
+
+    "github.com/iotaledger/hive.go/ierrors"
+    "github.com/iotaledger/hive.go/serializer/v2/stream"
 )
 
 type ValidatorPerformance struct {
     // works if ValidatorBlocksPerSlot is less than 32 because we use it as bit vector
-    SlotActivityVector uint32 `serix:"0"`
+    SlotActivityVector uint32
     // can be uint8 because max count per slot is maximally ValidatorBlocksPerSlot + 1
-    BlockIssuedCount uint8 `serix:"1"`
-    HighestSupportedVersionAndHash VersionAndHash `serix:"2"`
+    BlocksIssuedCount uint8
+    HighestSupportedVersionAndHash VersionAndHash
 }
 
 func NewValidatorPerformance() *ValidatorPerformance {
     return &ValidatorPerformance{
         SlotActivityVector: 0,
-        BlockIssuedCount: 0,
+        BlocksIssuedCount: 0,
         HighestSupportedVersionAndHash: VersionAndHash{},
     }
 }
 
-func ValidatorPerformanceFromBytes(decodeAPI iotago.API) func([]byte) (*ValidatorPerformance, int, error) {
-    return func(bytes []byte) (*ValidatorPerformance, int, error) {
-        validatorPerformance := new(ValidatorPerformance)
-        consumedBytes, err := decodeAPI.Decode(bytes, validatorPerformance)
-        if err != nil {
-            return nil, 0, err
-        }
+func ValidatorPerformanceFromBytes(bytes []byte) (*ValidatorPerformance, int, error) {
+    byteReader := stream.NewByteReader(bytes)
+
+    v, err := ValidatorPerformanceFromReader(byteReader)
+    if err != nil {
+        return nil, 0, ierrors.Wrap(err, "failed to parse ValidatorPerformance")
+    }
 
-        return validatorPerformance, consumedBytes, nil
+    return v, byteReader.BytesRead(), nil
+}
+
+func ValidatorPerformanceFromReader(reader io.ReadSeeker) (*ValidatorPerformance, error) {
+    var err error
+    v := NewValidatorPerformance()
+
+    if v.SlotActivityVector, err = stream.Read[uint32](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read SlotActivityVector")
+    }
+    if v.BlocksIssuedCount, err = stream.Read[uint8](reader); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read BlocksIssuedCount")
     }
+    if v.HighestSupportedVersionAndHash, err = stream.ReadObject(reader, VersionAndHashSize, VersionAndHashFromBytes); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read HighestSupportedVersionAndHash")
+    }
+
+    return v, nil
 }
 
-func (p *ValidatorPerformance) Bytes(api iotago.API) ([]byte, error) {
-    return api.Encode(p)
+func (p *ValidatorPerformance) Bytes() ([]byte, error) {
+    byteBuffer := stream.NewByteBuffer()
+
+    if err := stream.Write(byteBuffer, p.SlotActivityVector); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write SlotActivityVector")
+    }
+    if err := stream.Write(byteBuffer, p.BlocksIssuedCount); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write BlocksIssuedCount")
+    }
+    if err := stream.WriteObject(byteBuffer, p.HighestSupportedVersionAndHash, VersionAndHash.Bytes); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write HighestSupportedVersionAndHash")
+    }
+
+    return byteBuffer.Bytes()
 }
diff --git a/pkg/model/version_and_hash.go b/pkg/model/version_and_hash.go
index 8475cc04c..46747799d 100644
--- a/pkg/model/version_and_hash.go
+++ b/pkg/model/version_and_hash.go
@@ -7,7 +7,7 @@ import (
     iotago "github.com/iotaledger/iota.go/v4"
 )
 
-const VersionAndHashSize = iotago.IdentifierLength + iotago.VersionLength
+const VersionAndHashSize = iotago.VersionLength + iotago.IdentifierLength
 
 type VersionAndHash struct {
     Version iotago.Version `serix:"0"`
diff --git a/pkg/network/peer.go b/pkg/network/peer.go
index 4b5b0a298..18554f10d 100644
--- a/pkg/network/peer.go
+++ b/pkg/network/peer.go
@@ -10,7 +10,8 @@ import (
 
     "github.com/iotaledger/hive.go/crypto/ed25519"
     "github.com/iotaledger/hive.go/ierrors"
-    "github.com/iotaledger/hive.go/serializer/v2/marshalutil"
+    "github.com/iotaledger/hive.go/serializer/v2"
+    "github.com/iotaledger/hive.go/serializer/v2/stream"
 )
 
 const DefaultReconnectInterval = 5 * time.Second
@@ -78,17 +79,29 @@ func (p *Peer) SetConnStatus(cs ConnectionStatus) {
 }
 
 func (p *Peer) Bytes() ([]byte, error) {
-    m := marshalutil.New()
-    m.WriteUint64(uint64(len(p.ID)))
-    m.WriteBytes([]byte(p.ID))
-    m.WriteUint8(uint8(len(p.PeerAddresses)))
-    for _, addr := range p.PeerAddresses {
-        addrBytes := addr.Bytes()
-        m.WriteUint64(uint64(len(addrBytes)))
-        m.WriteBytes(addrBytes)
+    byteBuffer := stream.NewByteBuffer()
+
+    if err := stream.WriteObjectWithSize(byteBuffer, p.ID, serializer.SeriLengthPrefixTypeAsUint16, func(id peer.ID) ([]byte, error) {
+        return []byte(id), nil
+    }); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write peer ID")
+    }
+
+    if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsByte, func() (elementsCount int, err error) {
+        for _, addr := range p.PeerAddresses {
+            if err = stream.WriteObjectWithSize(byteBuffer, addr, serializer.SeriLengthPrefixTypeAsUint16, func(m multiaddr.Multiaddr) ([]byte, error) {
+                return m.Bytes(), nil
+            }); err != nil {
+                return 0, ierrors.Wrap(err, "failed to write peer address")
+            }
+        }
+
+        return len(p.PeerAddresses), nil
+    }); err != nil {
+        return nil, ierrors.Wrap(err, "failed to write peer addresses")
     }
 
-    return m.Bytes(), nil
+    return byteBuffer.Bytes()
 }
 
 func (p *Peer) String() string {
@@ -97,46 +110,48 @@ func (p *Peer) String() string {
 
 // peerFromBytes parses a peer from a byte slice.
 func peerFromBytes(bytes []byte) (*Peer, error) {
-    m := marshalutil.New(bytes)
-    idLen, err := m.ReadUint64()
-    if err != nil {
-        return nil, err
-    }
-    idBytes, err := m.ReadBytes(int(idLen))
-    if err != nil {
-        return nil, err
-    }
-    id := peer.ID(idBytes)
-
-    peer := &Peer{
-        ID: id,
+    p := &Peer{
         PeerAddresses: make([]multiaddr.Multiaddr, 0),
         ConnStatus: &atomic.Value{},
         RemoveCh: make(chan struct{}),
         DoneCh: make(chan struct{}),
     }
-    peer.SetConnStatus(ConnStatusDisconnected)
+    var err error
+    byteReader := stream.NewByteReader(bytes)
 
-    peerAddrLen, err := m.ReadUint8()
-    if err != nil {
-        return nil, err
-    }
-    for i := 0; i < int(peerAddrLen); i++ {
-        addrLen, err := m.ReadUint64()
-        if err != nil {
-            return nil, err
-        }
-        addrBytes, err := m.ReadBytes(int(addrLen))
+    if p.ID, err = stream.ReadObjectWithSize(byteReader, serializer.SeriLengthPrefixTypeAsUint16, func(bytes []byte) (peer.ID, int, error) {
+        id, err := peer.IDFromBytes(bytes)
         if err != nil {
-            return nil, err
+            return "", 0, ierrors.Wrap(err, "failed to parse peerID")
         }
-        addr, err := multiaddr.NewMultiaddrBytes(addrBytes)
+
+        return id, len(bytes), nil
+    }); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read peer ID")
+    }
+
+    p.SetConnStatus(ConnStatusDisconnected)
+
+    if err = stream.ReadCollection(byteReader, serializer.SeriLengthPrefixTypeAsByte, func(i int) error {
+        addr, err := stream.ReadObjectWithSize(byteReader, serializer.SeriLengthPrefixTypeAsUint16, func(bytes []byte) (multiaddr.Multiaddr, int, error) {
+            m, err := multiaddr.NewMultiaddrBytes(bytes)
+            if err != nil {
+                return nil, 0, ierrors.Wrap(err, "failed to parse peer address")
+            }
+
+            return m, len(bytes), nil
+        })
         if err != nil {
-            return nil, err
+            return ierrors.Wrap(err, "failed to read peer address")
         }
-        peer.PeerAddresses = append(peer.PeerAddresses, addr)
+
+        p.PeerAddresses = append(p.PeerAddresses, addr)
+
+        return nil
+    }); err != nil {
+        return nil, ierrors.Wrap(err, "failed to read peer addresses")
     }
 
-    return peer, nil
+    return p, nil
 }
diff --git a/pkg/network/protocols/core/protocol.go b/pkg/network/protocols/core/protocol.go
index 0ced4c663..cdf696117 100644
--- a/pkg/network/protocols/core/protocol.go
+++ b/pkg/network/protocols/core/protocol.go
@@ -1,8 +1,6 @@
 package core
 
 import (
-    "encoding/binary"
-
     "github.com/libp2p/go-libp2p/core/peer"
     "google.golang.org/protobuf/proto"
 
@@ -14,8 +12,8 @@ import (
     "github.com/iotaledger/hive.go/runtime/options"
     "github.com/iotaledger/hive.go/runtime/syncutils"
     "github.com/iotaledger/hive.go/runtime/workerpool"
-    "github.com/iotaledger/hive.go/serializer/v2/marshalutil"
-    "github.com/iotaledger/hive.go/serializer/v2/serix"
+    "github.com/iotaledger/hive.go/serializer/v2"
+    "github.com/iotaledger/hive.go/serializer/v2/stream"
     "github.com/iotaledger/iota-core/pkg/model"
     "github.com/iotaledger/iota-core/pkg/network"
     nwmodels "github.com/iotaledger/iota-core/pkg/network/protocols/core/models"
@@ -73,16 +71,23 @@ func (p *Protocol) SendSlotCommitment(cm *model.Commitment, to ...peer.ID) {
 }
 
 func (p *Protocol) SendAttestations(cm *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], to ...peer.ID) {
-    encodedAttestations := marshalutil.New()
-    encodedAttestations.WriteUint32(uint32(len(attestations)))
-    for _, att := range attestations {
-        iotagoAPI := lo.PanicOnErr(p.apiProvider.APIForVersion(att.Header.ProtocolVersion))
-        encodedAttestations.WriteBytes(lo.PanicOnErr(iotagoAPI.Encode(att)))
+    byteBuffer := stream.NewByteBuffer()
+
+    if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) {
+        for _, att := range attestations {
+            if err = stream.WriteObjectWithSize(byteBuffer, att, serializer.SeriLengthPrefixTypeAsUint16, (*iotago.Attestation).Bytes); err != nil {
+                return 0, ierrors.Wrapf(err, "failed to write attestation %v", att)
+            }
+        }
+
+        return len(attestations), nil
+    }); err != nil {
+        panic(err)
     }
 
     p.network.Send(&nwmodels.Packet{Body: &nwmodels.Packet_Attestations{Attestations: &nwmodels.Attestations{
         Commitment: cm.Data(),
-        Attestations: encodedAttestations.Bytes(),
+        Attestations: lo.PanicOnErr(byteBuffer.Bytes()),
         MerkleProof: lo.PanicOnErr(merkleProof.Bytes()),
     }}}, to...)
 }
@@ -172,7 +177,7 @@ func (p *Protocol) onBlockRequest(idBytes []byte, id peer.ID) {
 }
 
 func (p *Protocol) onSlotCommitment(commitmentBytes []byte, id peer.ID) {
-    receivedCommitment, err := model.CommitmentFromBytes(commitmentBytes, p.apiProvider, serix.WithValidation())
+    receivedCommitment, err := lo.DropCount(model.CommitmentFromBytes(p.apiProvider)(commitmentBytes))
     if err != nil {
         p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize slot commitment"), id)
 
@@ -193,35 +198,38 @@ func (p *Protocol) onSlotCommitmentRequest(idBytes []byte, id peer.ID) {
 }
 
 func (p *Protocol) onAttestations(commitmentBytes []byte, attestationsBytes []byte, merkleProof []byte, id peer.ID) {
-    cm, err := model.CommitmentFromBytes(commitmentBytes, p.apiProvider, serix.WithValidation())
+    cm, err := lo.DropCount(model.CommitmentFromBytes(p.apiProvider)(commitmentBytes))
     if err != nil {
         p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize commitment"), id)
 
         return
     }
 
-    if len(attestationsBytes) < 4 {
-        p.Events.Error.Trigger(ierrors.Errorf("failed to deserialize attestations, invalid attestation count"), id)
+    reader := stream.NewByteReader(attestationsBytes)
+
+    attestationsCount, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32)
+    if err != nil {
+        p.Events.Error.Trigger(ierrors.Errorf("failed peek attestations count"), id)
 
         return
     }
-    attestationCount := binary.LittleEndian.Uint32(attestationsBytes[0:4])
-    readOffset := 4
-    attestations := make([]*iotago.Attestation, attestationCount)
-    for i := uint32(0); i < attestationCount; i++ {
-        attestation, consumed, err := iotago.AttestationFromBytes(p.apiProvider)(attestationsBytes[readOffset:])
+
+    attestations := make([]*iotago.Attestation, attestationsCount)
+    if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error {
+        attestations[i], err = stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint16, iotago.AttestationFromBytes(p.apiProvider))
         if err != nil {
-            p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize attestations"), id)
-
-            return
+            return ierrors.Wrapf(err, "failed to deserialize attestation %d", i)
         }
-        readOffset += consumed
-        attestations[i] = attestation
+
+        return nil
+    }); err != nil {
+        p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize attestations"), id)
+
+        return
     }
-    if readOffset != len(attestationsBytes) {
-        p.Events.Error.Trigger(ierrors.Errorf("failed to deserialize attestations: %d bytes remaining", len(attestationsBytes)-readOffset), id)
+
+    if reader.BytesRead() != len(attestationsBytes) {
+        p.Events.Error.Trigger(ierrors.Errorf("failed to deserialize attestations: %d bytes remaining", len(attestationsBytes)-reader.BytesRead()), id)
 
         return
     }
diff --git a/pkg/protocol/block_dispatcher.go b/pkg/protocol/block_dispatcher.go
index 15b2fcfa3..4c973de2b 100644
--- a/pkg/protocol/block_dispatcher.go
+++ b/pkg/protocol/block_dispatcher.go
@@ -220,7 +220,13 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment
         return nil
     }
 
-    acceptedBlocks := ads.NewSet[iotago.Identifier, iotago.BlockID](mapdb.NewMapDB(), iotago.BlockID.Bytes, iotago.BlockIDFromBytes)
+    acceptedBlocks := ads.NewSet[iotago.Identifier, iotago.BlockID](
+        mapdb.NewMapDB(),
+        iotago.Identifier.Bytes,
+        iotago.IdentifierFromBytes,
+        iotago.BlockID.Bytes,
+        iotago.BlockIDFromBytes,
+    )
     for _, blockID := range blockIDs {
         _ = acceptedBlocks.Add(blockID) // a mapdb can newer return an error
     }
@@ -229,7 +235,13 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment
         return ierrors.Errorf("failed to verify tangle merkle proof for %s", commitmentID)
     }
 
-    acceptedTransactionIDs := ads.NewSet[iotago.Identifier, iotago.TransactionID](mapdb.NewMapDB(), iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes)
+    acceptedTransactionIDs := ads.NewSet[iotago.Identifier, iotago.TransactionID](
+        mapdb.NewMapDB(),
+        iotago.Identifier.Bytes,
+        iotago.IdentifierFromBytes,
+        iotago.TransactionID.Bytes,
+        iotago.TransactionIDFromBytes,
+    )
     for _, transactionID := range transactionIDs {
         _ = acceptedTransactionIDs.Add(transactionID) // a mapdb can never return an error
     }
diff --git a/pkg/protocol/commitment_verifier.go b/pkg/protocol/commitment_verifier.go
index 7289fd86d..2172c92e7 100644
--- a/pkg/protocol/commitment_verifier.go
+++ b/pkg/protocol/commitment_verifier.go
@@ -41,31 +41,12 @@ func NewCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBefore
 
 func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier]) (blockIDsFromAttestations iotago.BlockIDs, cumulativeWeight uint64, err error) {
     // 1. Verify that the provided attestations are indeed the ones that were included in the commitment.
tree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(attestation *iotago.Attestation) ([]byte, error) { - apiForVersion, err := c.engine.APIForVersion(attestation.Header.ProtocolVersion) - if err != nil { - return nil, ierrors.Wrapf(err, "failed to get API for version %d", attestation.Header.ProtocolVersion) - } - - return apiForVersion.Encode(attestation) - }, - func(bytes []byte) (*iotago.Attestation, int, error) { - version, _, err := iotago.VersionFromBytes(bytes) - if err != nil { - return nil, 0, ierrors.Wrap(err, "failed to determine version") - } - - a := new(iotago.Attestation) - apiForVersion, err := c.engine.APIForVersion(version) - if err != nil { - return nil, 0, ierrors.Wrapf(err, "failed to get API for version %d", version) - } - n, err := apiForVersion.Decode(bytes, a) - - return a, n, err - }, + (*iotago.Attestation).Bytes, + iotago.AttestationFromBytes(c.engine), ) for _, att := range attestations { diff --git a/pkg/protocol/engine/accounts/accounts.go b/pkg/protocol/engine/accounts/accounts.go index 403f569c0..8acd50a53 100644 --- a/pkg/protocol/engine/accounts/accounts.go +++ b/pkg/protocol/engine/accounts/accounts.go @@ -1,15 +1,11 @@ package accounts import ( - "bytes" - "encoding/binary" "io" - "github.com/iotaledger/hive.go/crypto/ed25519" "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" iotago "github.com/iotaledger/iota.go/v4" ) @@ -64,7 +60,7 @@ func (a *AccountData) Clone() *AccountData { ID: a.ID, Credits: &BlockIssuanceCredits{ Value: a.Credits.Value, - UpdateTime: a.Credits.UpdateTime, + UpdateSlot: a.Credits.UpdateSlot, }, ExpirySlot: a.ExpirySlot, OutputID: a.OutputID, @@ -78,138 +74,96 @@ func (a *AccountData) Clone() *AccountData { } } -func (a *AccountData) FromBytes(b []byte) (int, error) { - return a.readFromReadSeeker(bytes.NewReader(b)) -} - -func (a *AccountData) FromReader(readSeeker io.ReadSeeker) error { - return lo.Return2(a.readFromReadSeeker(readSeeker)) -} - -func (a *AccountData) readFromReadSeeker(reader io.ReadSeeker) (int, error) { - var bytesConsumed int - - bytesRead, err := io.ReadFull(reader, a.ID[:]) +func AccountDataFromReader(reader io.ReadSeeker) (*AccountData, error) { + accountID, err := stream.Read[iotago.AccountID](reader) if err != nil { - return bytesConsumed, ierrors.Wrap(err, "unable to read accountID") + return nil, ierrors.Wrap(err, "unable to read accountID") } - bytesConsumed += bytesRead - - a.Credits = &BlockIssuanceCredits{} + a := NewAccountData(accountID) - if err := binary.Read(reader, binary.LittleEndian, &a.Credits.Value); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read account balance value for accountID %s", a.ID) + if a.Credits, err = stream.ReadObject(reader, BlockIssuanceCreditsBytesLength, BlockIssuanceCreditsFromBytes); err != nil { + return nil, ierrors.Wrap(err, "unable to read credits") } - bytesConsumed += 8 - - if err := binary.Read(reader, binary.LittleEndian, &a.Credits.UpdateTime); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read updatedTime for account balance for accountID %s", a.ID) + if a.ExpirySlot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to 
read expiry slot") } - bytesConsumed += iotago.SlotIndexLength - - if err := binary.Read(reader, binary.LittleEndian, &a.ExpirySlot); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read expiry slot for accountID %s", a.ID) - } - bytesConsumed += iotago.SlotIndexLength - - if err := binary.Read(reader, binary.LittleEndian, &a.OutputID); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read outputID for accountID %s", a.ID) + if a.OutputID, err = stream.Read[iotago.OutputID](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read outputID") } - bytesConsumed += iotago.OutputIDLength - var blockIssuerKeyCount uint8 - if err := binary.Read(reader, binary.LittleEndian, &blockIssuerKeyCount); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read blockIssuerKeyCount count for accountID %s", a.ID) + if a.BlockIssuerKeys, err = stream.ReadObjectFromReader(reader, iotago.BlockIssuerKeysFromReader); err != nil { + return nil, ierrors.Wrap(err, "unable to read block issuer keys") } - bytesConsumed++ - a.BlockIssuerKeys = iotago.NewBlockIssuerKeys() - for i := uint8(0); i < blockIssuerKeyCount; i++ { - var blockIssuerKeyType iotago.BlockIssuerKeyType - if err := binary.Read(reader, binary.LittleEndian, &blockIssuerKeyType); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read block issuer key type for accountID %s", a.ID) - } - bytesConsumed++ - - switch blockIssuerKeyType { - case iotago.BlockIssuerKeyEd25519PublicKey: - var ed25519PublicKey ed25519.PublicKey - bytesRead, err = io.ReadFull(reader, ed25519PublicKey[:]) - if err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read public key index %d for accountID %s", i, a.ID) - } - bytesConsumed += bytesRead - a.BlockIssuerKeys.Add(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519PublicKey)) - case iotago.BlockIssuerKeyPublicKeyHash: - var implicitAccountCreationAddress iotago.ImplicitAccountCreationAddress - bytesRead, err = io.ReadFull(reader, implicitAccountCreationAddress[:]) - if err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read address %d for accountID %s", i, a.ID) - } - bytesConsumed += bytesRead - a.BlockIssuerKeys.Add(iotago.Ed25519PublicKeyHashBlockIssuerKeyFromImplicitAccountCreationAddress(&implicitAccountCreationAddress)) - default: - return bytesConsumed, ierrors.Wrapf(err, "unsupported block issuer key type %d for accountID %s at offset %d", blockIssuerKeyType, a.ID, i) - } + if a.ValidatorStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read validator stake") } - if err := binary.Read(reader, binary.LittleEndian, &(a.ValidatorStake)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read validator stake for accountID %s", a.ID) + if a.DelegationStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read delegation stake") } - bytesConsumed += iotago.BaseTokenSize - if err := binary.Read(reader, binary.LittleEndian, &(a.DelegationStake)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read delegation stake for accountID %s", a.ID) + if a.FixedCost, err = stream.Read[iotago.Mana](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read fixed cost") } - bytesConsumed += iotago.BaseTokenSize - if err := binary.Read(reader, binary.LittleEndian, &(a.FixedCost)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read fixed cost for 
accountID %s", a.ID) + if a.StakeEndEpoch, err = stream.Read[iotago.EpochIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read stake end epoch") } - bytesConsumed += iotago.ManaSize - if err := binary.Read(reader, binary.LittleEndian, &(a.StakeEndEpoch)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read stake end epoch for accountID %s", a.ID) + if a.LatestSupportedProtocolVersionAndHash, err = stream.ReadObject(reader, model.VersionAndHashSize, model.VersionAndHashFromBytes); err != nil { + return nil, ierrors.Wrap(err, "unable to read latest supported protocol version and hash") } - bytesConsumed += iotago.EpochIndexLength - versionAndHashBytes := make([]byte, model.VersionAndHashSize) - if err := binary.Read(reader, binary.LittleEndian, versionAndHashBytes); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read latest supported protocol version for accountID %s", a.ID) - } + return a, nil +} - if a.LatestSupportedProtocolVersionAndHash, _, err = model.VersionAndHashFromBytes(versionAndHashBytes[:]); err != nil { - return 0, err - } +func AccountDataFromBytes(b []byte) (*AccountData, int, error) { + reader := stream.NewByteReader(b) - bytesConsumed += len(versionAndHashBytes) + a, err := AccountDataFromReader(reader) - return bytesConsumed, nil + return a, reader.BytesRead(), err } -func (a AccountData) Bytes() ([]byte, error) { - idBytes, err := a.ID.Bytes() - if err != nil { - return nil, ierrors.Wrap(err, "failed to marshal account id") +func (a *AccountData) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, a.ID); err != nil { + return nil, ierrors.Wrap(err, "failed to write AccountID") } - m := marshalutil.New() - m.WriteBytes(idBytes) - m.WriteBytes(lo.PanicOnErr(a.Credits.Bytes())) - m.WriteUint32(uint32(a.ExpirySlot)) - m.WriteBytes(lo.PanicOnErr(a.OutputID.Bytes())) - m.WriteByte(byte(len(a.BlockIssuerKeys))) - for _, key := range a.BlockIssuerKeys { - m.WriteBytes(key.Bytes()) + if err := stream.WriteObject(byteBuffer, a.Credits, (*BlockIssuanceCredits).Bytes); err != nil { + return nil, ierrors.Wrap(err, "failed to write Credits") + } + if err := stream.Write(byteBuffer, a.ExpirySlot); err != nil { + return nil, ierrors.Wrap(err, "failed to write ExpirySlot") + } + if err := stream.Write(byteBuffer, a.OutputID); err != nil { + return nil, ierrors.Wrap(err, "failed to write OutputID") } - m.WriteUint64(uint64(a.ValidatorStake)) - m.WriteUint64(uint64(a.DelegationStake)) - m.WriteUint64(uint64(a.FixedCost)) - m.WriteUint32(uint32(a.StakeEndEpoch)) - m.WriteBytes(lo.PanicOnErr(a.LatestSupportedProtocolVersionAndHash.Bytes())) + if err := stream.WriteObject(byteBuffer, a.BlockIssuerKeys, iotago.BlockIssuerKeys.Bytes); err != nil { + return nil, ierrors.Wrap(err, "failed to write BlockIssuerKeys") + } + + if err := stream.Write(byteBuffer, a.ValidatorStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write ValidatorStake") + } + if err := stream.Write(byteBuffer, a.DelegationStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write DelegationStake") + } + if err := stream.Write(byteBuffer, a.FixedCost); err != nil { + return nil, ierrors.Wrap(err, "failed to write FixedCost") + } + if err := stream.Write(byteBuffer, a.StakeEndEpoch); err != nil { + return nil, ierrors.Wrap(err, "failed to write StakeEndEpoch") + } + if err := stream.WriteObject(byteBuffer, a.LatestSupportedProtocolVersionAndHash, model.VersionAndHash.Bytes); err != nil { + 
return nil, ierrors.Wrap(err, "failed to write LatestSupportedProtocolVersionAndHash") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } func WithCredits(credits *BlockIssuanceCredits) options.Option[AccountData] { diff --git a/pkg/protocol/engine/accounts/accountsledger/manager.go b/pkg/protocol/engine/accounts/accountsledger/manager.go index 0d47ec95a..729295032 100644 --- a/pkg/protocol/engine/accounts/accountsledger/manager.go +++ b/pkg/protocol/engine/accounts/accountsledger/manager.go @@ -60,15 +60,13 @@ func New( blockBurns: shrinkingmap.New[iotago.SlotIndex, ds.Set[iotago.BlockID]](), latestSupportedVersionSignals: memstorage.NewIndexedStorage[iotago.SlotIndex, iotago.AccountID, *model.SignaledBlock](), accountsTree: ads.NewMap[iotago.Identifier](accountsStore, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*accounts.AccountData).Bytes, - func(bytes []byte) (object *accounts.AccountData, consumed int, err error) { - a := new(accounts.AccountData) - consumed, err = a.FromBytes(bytes) - - return a, consumed, err - }), + accounts.AccountDataFromBytes, + ), block: blockFunc, slotDiff: slotDiffFunc, } @@ -379,7 +377,7 @@ func (m *Manager) rollbackAccountTo(accountData *accounts.AccountData, targetSlo } // update the account data with the diff - accountData.Credits.Update(-diffChange.BICChange, diffChange.PreviousUpdatedTime) + accountData.Credits.Update(-diffChange.BICChange, diffChange.PreviousUpdatedSlot) // update the expiry slot of the account if it was changed if diffChange.PreviousExpirySlot != diffChange.NewExpirySlot { accountData.ExpirySlot = diffChange.PreviousExpirySlot @@ -444,7 +442,7 @@ func (m *Manager) preserveDestroyedAccountData(accountID iotago.AccountID) (acco slotDiff.PreviousExpirySlot = accountData.ExpirySlot slotDiff.NewOutputID = iotago.EmptyOutputID slotDiff.PreviousOutputID = accountData.OutputID - slotDiff.PreviousUpdatedTime = accountData.Credits.UpdateTime + slotDiff.PreviousUpdatedSlot = accountData.Credits.UpdateSlot slotDiff.BlockIssuerKeysRemoved = accountData.BlockIssuerKeys.Clone() slotDiff.ValidatorStakeChange = -int64(accountData.ValidatorStake) @@ -519,7 +517,7 @@ func (m *Manager) commitAccountTree(slot iotago.SlotIndex, accountDiffChanges ma if diffChange.BICChange != 0 || !exists { // decay the credits to the current slot if the account exists if exists { - decayedPreviousCredits, err := m.apiProvider.APIForSlot(slot).ManaDecayProvider().ManaWithDecay(iotago.Mana(accountData.Credits.Value), accountData.Credits.UpdateTime, slot) + decayedPreviousCredits, err := m.apiProvider.APIForSlot(slot).ManaDecayProvider().ManaWithDecay(iotago.Mana(accountData.Credits.Value), accountData.Credits.UpdateSlot, slot) if err != nil { return ierrors.Wrapf(err, "can't retrieve account, could not decay credits for account (%s) in slot (%d)", accountData.ID, slot) } diff --git a/pkg/protocol/engine/accounts/accountsledger/snapshot.go b/pkg/protocol/engine/accounts/accountsledger/snapshot.go index 8f8d565c4..de3dbe89a 100644 --- a/pkg/protocol/engine/accounts/accountsledger/snapshot.go +++ b/pkg/protocol/engine/accounts/accountsledger/snapshot.go @@ -1,14 +1,14 @@ package accountsledger import ( - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts" 
- "github.com/iotaledger/iota-core/pkg/utils" iotago "github.com/iotaledger/iota.go/v4" ) @@ -16,24 +16,23 @@ func (m *Manager) Import(reader io.ReadSeeker) error { m.mutex.Lock() defer m.mutex.Unlock() - var accountCount uint64 - var slotDiffCount uint64 - - // The number of accounts contained within this snapshot. - if err := binary.Read(reader, binary.LittleEndian, &accountCount); err != nil { - return ierrors.Wrap(err, "unable to read account count") - } + // populate the account tree, account tree should be empty at this point + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error { + accountData, err := stream.ReadObjectFromReader(reader, accounts.AccountDataFromReader) + if err != nil { + return ierrors.Wrapf(err, "unable to read account data at index %d", i) + } - // The number of slot diffs contained within this snapshot. - if err := binary.Read(reader, binary.LittleEndian, &slotDiffCount); err != nil { - return ierrors.Wrap(err, "unable to read slot diffs count") - } + if err := m.accountsTree.Set(accountData.ID, accountData); err != nil { + return ierrors.Wrapf(err, "unable to set account %s", accountData.ID) + } - if err := m.importAccountTree(reader, accountCount); err != nil { - return ierrors.Wrap(err, "unable to import account tree") + return nil + }); err != nil { + return ierrors.Wrap(err, "failed to read account data") } - if err := m.readSlotDiffs(reader, slotDiffCount); err != nil { + if err := m.readSlotDiffs(reader); err != nil { return ierrors.Wrap(err, "unable to import slot diffs") } @@ -44,64 +43,42 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er m.mutex.Lock() defer m.mutex.Unlock() - var accountCount uint64 - var slotDiffsCount uint64 - - pWriter := utils.NewPositionedWriter(writer) - - if err := pWriter.WriteValue("accounts count", accountCount, true); err != nil { - return ierrors.Wrap(err, "unable to write accounts count") - } - - if err := pWriter.WriteValue("slot diffs count", slotDiffsCount, true); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") - } - - accountCount, err := m.exportAccountTree(pWriter, targetIndex) - if err != nil { - return ierrors.Wrapf(err, "unable to export account for target index %d", targetIndex) - } - - if err = pWriter.WriteValueAtBookmark("accounts count", accountCount); err != nil { - return ierrors.Wrap(err, "unable to write accounts count") - } - - if slotDiffsCount, err = m.writeSlotDiffs(pWriter, targetIndex); err != nil { - return ierrors.Wrapf(err, "unable to export slot diffs for target index %d", targetIndex) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + elements, err := m.exportAccountTree(writer, targetIndex) + if err != nil { + return 0, ierrors.Wrap(err, "can't write account tree") + } - if err = pWriter.WriteValueAtBookmark("slot diffs count", slotDiffsCount); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") + return elements, nil + }); err != nil { + return ierrors.Wrapf(err, "unable to export accounts for slot %d", targetIndex) } - return nil -} - -func (m *Manager) importAccountTree(reader io.ReadSeeker, accountCount uint64) error { - // populate the account tree, account tree should be empty at this point - for i := uint64(0); i < accountCount; i++ { - accountData := &accounts.AccountData{} - if err := accountData.FromReader(reader); err != nil { - return ierrors.Wrap(err, "unable to read account data") + if err 
:= stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (elementsCount int, err error) { + elementsCount, err = m.writeSlotDiffs(writer, targetIndex) + if err != nil { + return 0, ierrors.Wrap(err, "can't write slot diffs") } - if err := m.accountsTree.Set(accountData.ID, accountData); err != nil { - return ierrors.Wrapf(err, "unable to set account %s", accountData.ID) - } + return elementsCount, nil + }); err != nil { + return ierrors.Wrapf(err, "unable to export slot diffs for slot %d", targetIndex) } return nil } // exportAccountTree exports the AccountTree at a certain target slot, returning the total amount of exported accounts. -func (m *Manager) exportAccountTree(pWriter *utils.PositionedWriter, targetIndex iotago.SlotIndex) (accountCount uint64, err error) { - if err = m.accountsTree.Stream(func(accountID iotago.AccountID, accountData *accounts.AccountData) error { - if _, err = m.rollbackAccountTo(accountData, targetIndex); err != nil { +func (m *Manager) exportAccountTree(writer io.WriteSeeker, targetIndex iotago.SlotIndex) (int, error) { + var accountCount int + + if err := m.accountsTree.Stream(func(accountID iotago.AccountID, accountData *accounts.AccountData) error { + if _, err := m.rollbackAccountTo(accountData, targetIndex); err != nil { return ierrors.Wrapf(err, "unable to rollback account %s", accountID) } - if err = writeAccountData(pWriter, accountData); err != nil { - return ierrors.Wrapf(err, "unable to write data for account %s", accountID) + if err := stream.WriteObject(writer, accountData, (*accounts.AccountData).Bytes); err != nil { + return ierrors.Wrapf(err, "unable to write account %s", accountID) } accountCount++ @@ -112,17 +89,18 @@ func (m *Manager) exportAccountTree(pWriter *utils.PositionedWriter, targetIndex } // we might have entries that were destroyed, that are present in diffs but not in the tree from the latestCommittedIndex we streamed above - recreatedAccountsCount, err := m.recreateDestroyedAccounts(pWriter, targetIndex) + recreatedAccountsCount, err := m.recreateDestroyedAccounts(writer, targetIndex) return accountCount + recreatedAccountsCount, err } -func (m *Manager) recreateDestroyedAccounts(pWriter *utils.PositionedWriter, targetSlot iotago.SlotIndex) (recreatedAccountsCount uint64, err error) { +func (m *Manager) recreateDestroyedAccounts(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (int, error) { + var recreatedAccountsCount int destroyedAccounts := make(map[iotago.AccountID]*accounts.AccountData) for slot := m.latestCommittedSlot; slot > targetSlot; slot-- { // it should be impossible that `m.slotDiff(slot)` returns an error, because it is impossible to export a pruned slot - err = lo.PanicOnErr(m.slotDiff(slot)).StreamDestroyed(func(accountID iotago.AccountID) bool { + err := lo.PanicOnErr(m.slotDiff(slot)).StreamDestroyed(func(accountID iotago.AccountID) bool { // actual data will be filled in by rollbackAccountTo accountData := accounts.NewAccountData(accountID) @@ -143,136 +121,127 @@ func (m *Manager) recreateDestroyedAccounts(pWriter *utils.PositionedWriter, tar return 0, ierrors.Errorf("account %s was not destroyed", accountID) } - if err = writeAccountData(pWriter, accountData); err != nil { - return 0, ierrors.Wrapf(err, "unable to write account %s to snapshot", accountID) + if err := stream.WriteObject(writer, accountData, (*accounts.AccountData).Bytes); err != nil { + return 0, ierrors.Wrapf(err, "unable to write account %s", accountID) } } return recreatedAccountsCount, nil } -func 
writeAccountData(writer *utils.PositionedWriter, accountData *accounts.AccountData) error { - accountBytes, err := accountData.Bytes() - if err != nil { - return ierrors.Wrapf(err, "unable to get account data snapshot bytes for accountID %s", accountData.ID) - } - - if err = writer.WriteBytes(accountBytes); err != nil { - return ierrors.Wrapf(err, "unable to write account data for accountID %s", accountData.ID) - } - - return nil -} - -func (m *Manager) readSlotDiffs(reader io.ReadSeeker, slotDiffCount uint64) error { - for i := uint64(0); i < slotDiffCount; i++ { - var slot iotago.SlotIndex - var accountsInDiffCount uint64 - - if err := binary.Read(reader, binary.LittleEndian, &slot); err != nil { - return ierrors.Wrap(err, "unable to read slot index") - } - - if err := binary.Read(reader, binary.LittleEndian, &accountsInDiffCount); err != nil { - return ierrors.Wrap(err, "unable to read accounts in diff count") - } - if accountsInDiffCount == 0 { - continue - } - - diffStore, err := m.slotDiff(slot) +func (m *Manager) readSlotDiffs(reader io.ReadSeeker) error { + // Read all the slots. + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error { + slot, err := stream.Read[iotago.SlotIndex](reader) if err != nil { - return ierrors.Errorf("unable to import account slot diffs for slot %d", slot) + return ierrors.Wrapf(err, "unable to read slot index at index %d", i) } - for j := uint64(0); j < accountsInDiffCount; j++ { - var accountID iotago.AccountID - if _, err := io.ReadFull(reader, accountID[:]); err != nil { + // Read all the slot diffs within each slot. + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(j int) error { + diffStore, err := m.slotDiff(slot) + if err != nil { + return ierrors.Wrapf(err, "unable to get account diff storage for slot %d", slot) + } + + accountID, err := stream.Read[iotago.AccountID](reader) + if err != nil { return ierrors.Wrapf(err, "unable to read accountID for index %d", j) } - var destroyed bool - if err := binary.Read(reader, binary.LittleEndian, &destroyed); err != nil { + destroyed, err := stream.Read[bool](reader) + if err != nil { return ierrors.Wrapf(err, "unable to read destroyed flag for accountID %s", accountID) } - accountDiff := model.NewAccountDiff() + var accountDiff *model.AccountDiff if !destroyed { - if err := accountDiff.FromReader(reader); err != nil { + if accountDiff, err = stream.ReadObjectFromReader(reader, model.AccountDiffFromReader); err != nil { return ierrors.Wrapf(err, "unable to read account diff for accountID %s", accountID) } + } else { + accountDiff = model.NewAccountDiff() } if err := diffStore.Store(accountID, accountDiff, destroyed); err != nil { return ierrors.Wrapf(err, "unable to store slot diff for accountID %s", accountID) } + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read accounts in diff count at index %d", i) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "failed to read slot diffs") } return nil } -func (m *Manager) writeSlotDiffs(pWriter *utils.PositionedWriter, targetSlot iotago.SlotIndex) (slotDiffsCount uint64, err error) { +func (m *Manager) writeSlotDiffs(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (int, error) { + var slotDiffsCount int + // write slot diffs until being able to reach targetSlot, where the exported tree is at slot := iotago.SlotIndex(1) maxCommittableAge := m.apiProvider.APIForSlot(targetSlot).ProtocolParameters().MaxCommittableAge() - if targetSlot > 
maxCommittableAge { slot = targetSlot - maxCommittableAge } for ; slot <= targetSlot; slot++ { - var accountsInDiffCount uint64 - - // The index of the slot diffs. - if err = pWriter.WriteValue("slot index", slot); err != nil { - return 0, err - } + var accountsInDiffCount int - // The number of account entries within this slot diff. - if err = pWriter.WriteValue("inDiff accounts count", accountsInDiffCount, true); err != nil { - return 0, err + if err := stream.Write(writer, slot); err != nil { + return 0, ierrors.Wrapf(err, "unable to write slot %d", slot) } - slotDiffsCount++ - - var innerErr error slotDiffs, err := m.slotDiff(slot) if err != nil { // if slot is already pruned, then don't write anything continue } - if err = slotDiffs.Stream(func(accountID iotago.AccountID, accountDiff *model.AccountDiff, destroyed bool) bool { - if err = pWriter.WriteBytes(lo.PanicOnErr(accountID.Bytes())); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write accountID for account %s", accountID) - } + if err = stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + var innerErr error - if err = pWriter.WriteValue("destroyed flag", destroyed); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write destroyed flag for account %s", accountID) - } + if err = slotDiffs.Stream(func(accountID iotago.AccountID, accountDiff *model.AccountDiff, destroyed bool) bool { - if !destroyed { - if err = pWriter.WriteBytes(lo.PanicOnErr(accountDiff.Bytes())); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write account diff for account %s", accountID) + if err = stream.Write(writer, accountID); err != nil { + innerErr = ierrors.Wrapf(err, "unable to write accountID for account %s", accountID) + return false + } + + if err = stream.Write(writer, destroyed); err != nil { + innerErr = ierrors.Wrapf(err, "unable to write destroyed flag for account %s", accountID) + return false + } + + if !destroyed { + if err = stream.WriteObject(writer, accountDiff, (*model.AccountDiff).Bytes); err != nil { + innerErr = ierrors.Wrapf(err, "unable to write account diff for account %s", accountID) + return false + } } + + accountsInDiffCount++ + + return true + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to stream slot diff for index %d", slot) } - accountsInDiffCount++ + if innerErr != nil { + return 0, ierrors.Wrapf(innerErr, "unable to stream slot diff for index %d", slot) + } - return true + return accountsInDiffCount, nil }); err != nil { - return 0, ierrors.Wrapf(err, "unable to stream slot diff for index %d", slot) + return 0, ierrors.Wrapf(err, "unable to write slot diff %d", slot) } - if innerErr != nil { - return 0, ierrors.Wrapf(innerErr, "unable to write slot diff for index %d", slot) - } - - // The number of diffs contained within this slot. 
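
The removed bookmark bookkeeping (writing a placeholder "inDiff accounts count" and patching it later via WriteValueAtBookmark) is what stream.WriteCollection takes over: the callback writes the elements and returns how many it wrote, and the helper emits the length prefix with the given SeriLengthPrefixType. A rough round-trip sketch under that assumption; the slot-index payload and function names are illustrative only:

package example

import (
	"github.com/iotaledger/hive.go/ierrors"
	"github.com/iotaledger/hive.go/serializer/v2"
	"github.com/iotaledger/hive.go/serializer/v2/stream"
	iotago "github.com/iotaledger/iota.go/v4"
)

// encodeSlots writes a uint64-prefixed collection of slot indices, mirroring
// how Export now writes its account and slot-diff collections.
func encodeSlots(slots []iotago.SlotIndex) ([]byte, error) {
	buf := stream.NewByteBuffer()

	if err := stream.WriteCollection(buf, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) {
		for _, slot := range slots {
			if err := stream.Write(buf, slot); err != nil {
				return 0, ierrors.Wrapf(err, "failed to write slot %d", slot)
			}
		}

		return len(slots), nil
	}); err != nil {
		return nil, err
	}

	return buf.Bytes()
}

// decodeSlots reads the collection back, mirroring Import/readSlotDiffs.
func decodeSlots(b []byte) ([]iotago.SlotIndex, error) {
	reader := stream.NewByteReader(b)
	var slots []iotago.SlotIndex

	if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error {
		slot, err := stream.Read[iotago.SlotIndex](reader)
		if err != nil {
			return ierrors.Wrapf(err, "failed to read slot at index %d", i)
		}
		slots = append(slots, slot)

		return nil
	}); err != nil {
		return nil, err
	}

	return slots, nil
}
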
- if err = pWriter.WriteValueAtBookmark("inDiff accounts count", accountsInDiffCount); err != nil { - return 0, err - } + slotDiffsCount++ } return slotDiffsCount, nil diff --git a/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go b/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go index 89b8f4058..c94f5d06c 100644 --- a/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go +++ b/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go @@ -3,9 +3,9 @@ package accountsledger_test import ( "testing" - "github.com/orcaman/writerseeker" "github.com/stretchr/testify/require" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/tpkg" @@ -176,15 +176,15 @@ func TestManager_Import_Export(t *testing.T) { }, }) - //// Export and import the account ledger into new manager for the latest slot. + // Export and import the account ledger into new manager for the latest slot. { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() err := ts.Instance.Export(writer, iotago.SlotIndex(3)) require.NoError(t, err) ts.Instance = ts.initAccountLedger() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) ts.Instance.SetLatestCommittedSlot(3) @@ -193,13 +193,13 @@ func TestManager_Import_Export(t *testing.T) { // Export and import for pre-latest slot. { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() err := ts.Instance.Export(writer, iotago.SlotIndex(2)) require.NoError(t, err) ts.Instance = ts.initAccountLedger() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) ts.Instance.SetLatestCommittedSlot(2) diff --git a/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go b/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go index 64d7f8152..3decfe1cb 100644 --- a/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go +++ b/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go @@ -131,7 +131,7 @@ func (t *TestSuite) ApplySlotActions(slot iotago.SlotIndex, rmc iotago.Mana, act BICChange: iotago.BlockIssuanceCredits(action.TotalAllotments), // manager takes AccountDiff only with allotments filled in when applyDiff is triggered BlockIssuerKeysAdded: t.BlockIssuerKeys(action.AddedKeys, true), BlockIssuerKeysRemoved: t.BlockIssuerKeys(action.RemovedKeys, true), - PreviousUpdatedTime: prevAccountFields.BICUpdatedAt, + PreviousUpdatedSlot: prevAccountFields.BICUpdatedAt, NewExpirySlot: prevAccountFields.ExpirySlot, DelegationStakeChange: action.DelegationStakeChange, @@ -265,7 +265,7 @@ func (t *TestSuite) assertDiff(slot iotago.SlotIndex, accountID iotago.AccountID expectedAccountDiff := accountsSlotBuildData.SlotDiff[accountID] require.Equal(t.T, expectedAccountDiff.PreviousOutputID, actualDiff.PreviousOutputID) - require.Equal(t.T, expectedAccountDiff.PreviousUpdatedTime, actualDiff.PreviousUpdatedTime) + require.Equal(t.T, expectedAccountDiff.PreviousUpdatedSlot, actualDiff.PreviousUpdatedSlot) require.Equal(t.T, expectedAccountDiff.NewExpirySlot, actualDiff.NewExpirySlot) require.Equal(t.T, expectedAccountDiff.PreviousExpirySlot, actualDiff.PreviousExpirySlot) diff --git a/pkg/protocol/engine/accounts/credits.go b/pkg/protocol/engine/accounts/credits.go index a4df3e5b8..3d04118c3 100644 --- a/pkg/protocol/engine/accounts/credits.go +++ 
b/pkg/protocol/engine/accounts/credits.go @@ -1,49 +1,64 @@ package accounts import ( - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) +const BlockIssuanceCreditsBytesLength = serializer.Int64ByteSize + iotago.SlotIndexLength + // BlockIssuanceCredits is a weight annotated with the slot it was last updated in. type BlockIssuanceCredits struct { Value iotago.BlockIssuanceCredits - UpdateTime iotago.SlotIndex + UpdateSlot iotago.SlotIndex } // NewBlockIssuanceCredits creates a new Credits instance. func NewBlockIssuanceCredits(value iotago.BlockIssuanceCredits, updateTime iotago.SlotIndex) (newCredits *BlockIssuanceCredits) { return &BlockIssuanceCredits{ Value: value, - UpdateTime: updateTime, + UpdateSlot: updateTime, } } // Bytes returns a serialized version of the Credits. -func (c BlockIssuanceCredits) Bytes() ([]byte, error) { - m := marshalutil.New() +func (c *BlockIssuanceCredits) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, c.Value); err != nil { + return nil, ierrors.Wrap(err, "failed to write value") + } - m.WriteInt64(int64(c.Value)) - m.WriteUint32(uint32(c.UpdateTime)) + if err := stream.Write(byteBuffer, c.UpdateSlot); err != nil { + return nil, ierrors.Wrap(err, "failed to write updateTime") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } -// FromBytes parses a serialized version of the Credits. -func (c *BlockIssuanceCredits) FromBytes(bytes []byte) (int, error) { - m := marshalutil.New(bytes) +func BlockIssuanceCreditsFromBytes(bytes []byte) (*BlockIssuanceCredits, int, error) { + c := new(BlockIssuanceCredits) - c.Value = iotago.BlockIssuanceCredits(lo.PanicOnErr(m.ReadInt64())) - c.UpdateTime = iotago.SlotIndex(lo.PanicOnErr(m.ReadUint32())) + var err error + byteReader := stream.NewByteReader(bytes) + + if c.Value, err = stream.Read[iotago.BlockIssuanceCredits](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read value") + } + + if c.UpdateSlot, err = stream.Read[iotago.SlotIndex](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read updateTime") + } - return m.ReadOffset(), nil + return c, byteReader.BytesRead(), nil } // Update updates the Credits increasing Value and updateTime. 
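
Since BlockIssuanceCredits now has a fixed wire size, exposed as BlockIssuanceCreditsBytesLength, callers can decode it with stream.ReadObject by passing that size together with BlockIssuanceCreditsFromBytes, which is what AccountDataFromReader does above. A small usage sketch of that pattern; the wrapper function is illustrative:

package example

import (
	"io"

	"github.com/iotaledger/hive.go/ierrors"
	"github.com/iotaledger/hive.go/serializer/v2/stream"
	"github.com/iotaledger/iota-core/pkg/protocol/engine/accounts"
)

// readCredits decodes one fixed-size BlockIssuanceCredits entry from a stream,
// the same pattern AccountDataFromReader uses for the Credits field.
func readCredits(reader io.ReadSeeker) (*accounts.BlockIssuanceCredits, error) {
	credits, err := stream.ReadObject(reader, accounts.BlockIssuanceCreditsBytesLength, accounts.BlockIssuanceCreditsFromBytes)
	if err != nil {
		return nil, ierrors.Wrap(err, "unable to read block issuance credits")
	}

	return credits, nil
}
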
-func (c *BlockIssuanceCredits) Update(change iotago.BlockIssuanceCredits, updateTime ...iotago.SlotIndex) { +func (c *BlockIssuanceCredits) Update(change iotago.BlockIssuanceCredits, updateSlot ...iotago.SlotIndex) { c.Value += change - if len(updateTime) > 0 { - c.UpdateTime = updateTime[0] + if len(updateSlot) > 0 { + c.UpdateSlot = updateSlot[0] } } diff --git a/pkg/protocol/engine/accounts/mana/manager.go b/pkg/protocol/engine/accounts/mana/manager.go index b53432329..d9c6ae32f 100644 --- a/pkg/protocol/engine/accounts/mana/manager.go +++ b/pkg/protocol/engine/accounts/mana/manager.go @@ -222,5 +222,5 @@ func (m *Manager) getBIC(accountID iotago.AccountID, slot iotago.SlotIndex) (bic return 0, 0, nil } - return iotago.Mana(accountBIC.Credits.Value), accountBIC.Credits.UpdateTime, nil + return iotago.Mana(accountBIC.Credits.Value), accountBIC.Credits.UpdateSlot, nil } diff --git a/pkg/protocol/engine/accounts/mana/manager_test.go b/pkg/protocol/engine/accounts/mana/manager_test.go index f4d58dd62..767367087 100644 --- a/pkg/protocol/engine/accounts/mana/manager_test.go +++ b/pkg/protocol/engine/accounts/mana/manager_test.go @@ -72,7 +72,7 @@ func TestManager_GetManaOnAccountOverflow(t *testing.T) { ID: id, Credits: &accounts.BlockIssuanceCredits{ Value: iotago.MaxBlockIssuanceCredits/2 + iotago.MaxBlockIssuanceCredits/4, - UpdateTime: 1, + UpdateSlot: 1, }, ExpirySlot: iotago.MaxSlotIndex, OutputID: iotago.OutputID{}, diff --git a/pkg/protocol/engine/attestation/slotattestation/snapshot.go b/pkg/protocol/engine/attestation/slotattestation/snapshot.go index ec2ae69fd..0b4b3c14e 100644 --- a/pkg/protocol/engine/attestation/slotattestation/snapshot.go +++ b/pkg/protocol/engine/attestation/slotattestation/snapshot.go @@ -4,6 +4,7 @@ import ( "io" "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -13,28 +14,14 @@ func (m *Manager) Import(reader io.ReadSeeker) error { defer m.commitmentMutex.Unlock() var attestations []*iotago.Attestation - if err := stream.ReadCollection(reader, func(i int) error { - attestationBytes, err := stream.ReadBlob(reader) - if err != nil { - return ierrors.Wrap(err, "failed to read attestation") - } - - version, _, err := iotago.VersionFromBytes(attestationBytes) - if err != nil { - return ierrors.Wrap(err, "failed to determine version") - } + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { - apiForVersion, err := m.apiProvider.APIForVersion(version) + attestation, err := stream.ReadObjectWithSize[*iotago.Attestation](reader, serializer.SeriLengthPrefixTypeAsUint16, iotago.AttestationFromBytes(m.apiProvider)) if err != nil { - return ierrors.Wrapf(err, "failed to get API for version %d", version) + return ierrors.Wrapf(err, "failed to read attestation %d", i) } - importedAttestation := new(iotago.Attestation) - if _, err = apiForVersion.Decode(attestationBytes, importedAttestation); err != nil { - return ierrors.Wrapf(err, "failed to decode attestation %d", i) - } - - attestations = append(attestations, importedAttestation) + attestations = append(attestations, attestation) return nil }); err != nil { @@ -88,23 +75,14 @@ func (m *Manager) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) err return ierrors.Wrapf(err, "failed to stream attestations of slot %d", targetSlot) } - if err = stream.WriteCollection(writer, func() (uint64, error) { + if err = 
stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { for _, a := range attestations { - apiForVersion, err := m.apiProvider.APIForVersion(a.Header.ProtocolVersion) - if err != nil { - return 0, ierrors.Wrapf(err, "failed to get API for version %d", a.Header.ProtocolVersion) - } - bytes, err := apiForVersion.Encode(a) - if err != nil { - return 0, ierrors.Wrapf(err, "failed to encode attestation %v", a) - } - - if writeErr := stream.WriteBlob(writer, bytes); writeErr != nil { - return 0, ierrors.Wrapf(writeErr, "failed to write attestation %v", a) + if err := stream.WriteObjectWithSize(writer, a, serializer.SeriLengthPrefixTypeAsUint16, (*iotago.Attestation).Bytes); err != nil { + return 0, ierrors.Wrapf(err, "failed to write attestation %v", a) } } - return uint64(len(attestations)), nil + return len(attestations), nil }); err != nil { return ierrors.Wrapf(err, "failed to write attestations of slot %d", targetSlot) } diff --git a/pkg/protocol/engine/attestation/slotattestation/storage.go b/pkg/protocol/engine/attestation/slotattestation/storage.go index 6f74e60c0..55e9b5c6b 100644 --- a/pkg/protocol/engine/attestation/slotattestation/storage.go +++ b/pkg/protocol/engine/attestation/slotattestation/storage.go @@ -72,20 +72,11 @@ func (m *Manager) trackerStorage(index iotago.SlotIndex) (*kvstore.TypedStore[io return nil, ierrors.Wrapf(err, "failed to get extended realm for tracker of slot %d", index) } - api := m.apiProvider.APIForSlot(index) - return kvstore.NewTypedStore[iotago.AccountID, *iotago.Attestation](trackerStorage, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(v *iotago.Attestation) ([]byte, error) { - return api.Encode(v) - }, - func(bytes []byte) (object *iotago.Attestation, consumed int, err error) { - attestation := new(iotago.Attestation) - consumed, err = api.Decode(bytes, attestation) - - return attestation, consumed, err - }, + (*iotago.Attestation).Bytes, + iotago.AttestationFromBytes(m.apiProvider), ), nil } @@ -99,7 +90,10 @@ func (m *Manager) attestationsForSlot(index iotago.SlotIndex) (ads.Map[iotago.Id return nil, ierrors.Wrapf(err, "failed to get extended realm for attestations of slot %d", index) } - return ads.NewMap[iotago.Identifier](attestationsStorage, + return ads.NewMap[iotago.Identifier]( + attestationsStorage, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*iotago.Attestation).Bytes, diff --git a/pkg/protocol/engine/attestation/slotattestation/testframework_test.go b/pkg/protocol/engine/attestation/slotattestation/testframework_test.go index 1214dd6b3..07fa7abe1 100644 --- a/pkg/protocol/engine/attestation/slotattestation/testframework_test.go +++ b/pkg/protocol/engine/attestation/slotattestation/testframework_test.go @@ -150,7 +150,10 @@ func (t *TestFramework) AssertCommit(slot iotago.SlotIndex, expectedCW uint64, e require.EqualValues(t.test, expectedCW, cw) - expectedTree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), + expectedTree := ads.NewMap[iotago.Identifier]( + mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*iotago.Attestation).Bytes, diff --git a/pkg/protocol/engine/committed_slot_api.go b/pkg/protocol/engine/committed_slot_api.go index edb604814..63f750a6d 100644 --- a/pkg/protocol/engine/committed_slot_api.go +++ b/pkg/protocol/engine/committed_slot_api.go @@ -83,7 +83,13 @@ func (c *CommittedSlotAPI) TransactionIDs() (iotago.TransactionIDs, 
error) { return nil, ierrors.Errorf("failed to get mutations of slot index %d", c.CommitmentID.Slot()) } - set := ads.NewSet[iotago.Identifier](store, iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes) + set := ads.NewSet[iotago.Identifier]( + store, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.TransactionID.Bytes, + iotago.TransactionIDFromBytes, + ) transactionIDs := make(iotago.TransactionIDs, 0, set.Size()) if err = set.Stream(func(txID iotago.TransactionID) error { diff --git a/pkg/protocol/engine/eviction/state.go b/pkg/protocol/engine/eviction/state.go index 104af2c3e..f107aa71b 100644 --- a/pkg/protocol/engine/eviction/state.go +++ b/pkg/protocol/engine/eviction/state.go @@ -10,6 +10,7 @@ import ( "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/storage/prunable/slotstore" iotago "github.com/iotaledger/iota.go/v4" @@ -185,18 +186,18 @@ func (s *State) Export(writer io.WriteSeeker, lowerTarget iotago.SlotIndex, targ latestNonEmptySlot := iotago.SlotIndex(0) - if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { for currentSlot := start; currentSlot <= targetSlot; currentSlot++ { storage, err := s.rootBlockStorageFunc(currentSlot) if err != nil { continue } if err = storage.StreamBytes(func(rootBlockIDBytes []byte, commitmentIDBytes []byte) (err error) { - if err = stream.WriteBlob(writer, rootBlockIDBytes); err != nil { + if err = stream.WriteBytes(writer, rootBlockIDBytes); err != nil { return ierrors.Wrapf(err, "failed to write root block ID %s", rootBlockIDBytes) } - if err = stream.WriteBlob(writer, commitmentIDBytes); err != nil { + if err = stream.WriteBytes(writer, commitmentIDBytes); err != nil { return ierrors.Wrapf(err, "failed to write root block's %s commitment %s", rootBlockIDBytes, commitmentIDBytes) } @@ -221,7 +222,7 @@ func (s *State) Export(writer io.WriteSeeker, lowerTarget iotago.SlotIndex, targ latestNonEmptySlot = 0 } - if err := stream.WriteSerializable(writer, latestNonEmptySlot, iotago.SlotIndexLength); err != nil { + if err := stream.Write(writer, latestNonEmptySlot); err != nil { return ierrors.Wrap(err, "failed to write latest non empty slot") } @@ -230,26 +231,15 @@ func (s *State) Export(writer io.WriteSeeker, lowerTarget iotago.SlotIndex, targ // Import imports the root blocks from the given reader. 
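
In the Import below, the ReadBlob-then-parse steps are replaced by the generic stream.Read[T], which decodes fixed-size types such as BlockID, CommitmentID and SlotIndex directly from the reader. A compact sketch of that pattern; the helper function is illustrative only:

package example

import (
	"io"

	"github.com/iotaledger/hive.go/ierrors"
	"github.com/iotaledger/hive.go/serializer/v2/stream"
	iotago "github.com/iotaledger/iota.go/v4"
)

// readRootBlockEntry reads one (BlockID, CommitmentID) pair the way the new
// Import does: each fixed-size value is decoded via the generic stream.Read.
func readRootBlockEntry(reader io.ReadSeeker) (iotago.BlockID, iotago.CommitmentID, error) {
	rootBlockID, err := stream.Read[iotago.BlockID](reader)
	if err != nil {
		return iotago.BlockID{}, iotago.CommitmentID{}, ierrors.Wrap(err, "failed to read root block id")
	}

	commitmentID, err := stream.Read[iotago.CommitmentID](reader)
	if err != nil {
		return iotago.BlockID{}, iotago.CommitmentID{}, ierrors.Wrap(err, "failed to read commitment id")
	}

	return rootBlockID, commitmentID, nil
}
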
func (s *State) Import(reader io.ReadSeeker) error { - if err := stream.ReadCollection(reader, func(i int) error { - - blockIDBytes, err := stream.ReadBlob(reader) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + rootBlockID, err := stream.Read[iotago.BlockID](reader) if err != nil { return ierrors.Wrapf(err, "failed to read root block id %d", i) } - rootBlockID, _, err := iotago.BlockIDFromBytes(blockIDBytes) - if err != nil { - return ierrors.Wrapf(err, "failed to parse root block id %d", i) - } - - commitmentIDBytes, err := stream.ReadBlob(reader) + commitmentID, err := stream.Read[iotago.CommitmentID](reader) if err != nil { - return ierrors.Wrapf(err, "failed to read root block's %s commitment id", rootBlockID) - } - - commitmentID, _, err := iotago.CommitmentIDFromBytes(commitmentIDBytes) - if err != nil { - return ierrors.Wrapf(err, "failed to parse root block's %s commitment id", rootBlockID) + return ierrors.Wrapf(err, "failed to read root block's %s commitment id %d", rootBlockID, i) } if s.rootBlocks.Get(rootBlockID.Slot(), true).Set(rootBlockID, commitmentID) { @@ -263,16 +253,11 @@ func (s *State) Import(reader io.ReadSeeker) error { return ierrors.Wrap(err, "failed to read root blocks") } - latestNonEmptySlotBytes, err := stream.ReadBytes(reader, iotago.SlotIndexLength) + latestNonEmptySlot, err := stream.Read[iotago.SlotIndex](reader) if err != nil { return ierrors.Wrap(err, "failed to read latest non empty slot") } - latestNonEmptySlot, _, err := iotago.SlotIndexFromBytes(latestNonEmptySlotBytes) - if err != nil { - return ierrors.Wrap(err, "failed to parse latest non empty slot") - } - s.setLatestNonEmptySlot(latestNonEmptySlot) return nil diff --git a/pkg/protocol/engine/ledger/ledger/ledger.go b/pkg/protocol/engine/ledger/ledger/ledger.go index 31632f082..764fdf2be 100644 --- a/pkg/protocol/engine/ledger/ledger/ledger.go +++ b/pkg/protocol/engine/ledger/ledger/ledger.go @@ -656,7 +656,7 @@ func (l *Ledger) processStateDiffTransactions(stateDiff mempool.StateDiff) (spen } accountDiff.BICChange += iotago.BlockIssuanceCredits(allotment.Mana) - accountDiff.PreviousUpdatedTime = accountData.Credits.UpdateTime + accountDiff.PreviousUpdatedSlot = accountData.Credits.UpdateSlot // we are not transitioning the allotted account, so the new and previous expiry slots are the same accountDiff.PreviousExpirySlot = accountData.ExpirySlot diff --git a/pkg/protocol/engine/mempool/v1/state_diff.go b/pkg/protocol/engine/mempool/v1/state_diff.go index 3fe5bb794..f5cbf214b 100644 --- a/pkg/protocol/engine/mempool/v1/state_diff.go +++ b/pkg/protocol/engine/mempool/v1/state_diff.go @@ -31,7 +31,13 @@ func NewStateDiff(slot iotago.SlotIndex, kv kvstore.KVStore) *StateDiff { createdOutputs: shrinkingmap.New[mempool.StateID, mempool.StateMetadata](), executedTransactions: orderedmap.New[iotago.TransactionID, mempool.TransactionMetadata](), stateUsageCounters: shrinkingmap.New[mempool.StateID, int](), - mutations: ads.NewSet[iotago.Identifier](kv, iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes), + mutations: ads.NewSet[iotago.Identifier]( + kv, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.TransactionID.Bytes, + iotago.TransactionIDFromBytes, + ), } } diff --git a/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go b/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go index 8e60dd53f..08aa10799 100644 --- a/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go +++ 
b/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go @@ -76,7 +76,13 @@ func (m *SlotMutations) Reset(index iotago.SlotIndex) { func (m *SlotMutations) AcceptedBlocks(index iotago.SlotIndex, createIfMissing ...bool) ads.Set[iotago.Identifier, iotago.BlockID] { if len(createIfMissing) > 0 && createIfMissing[0] { return lo.Return1(m.acceptedBlocksBySlot.GetOrCreate(index, func() ads.Set[iotago.Identifier, iotago.BlockID] { - return ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.BlockID.Bytes, iotago.BlockIDFromBytes) + return ads.NewSet[iotago.Identifier]( + mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.BlockID.Bytes, + iotago.BlockIDFromBytes, + ) })) } diff --git a/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go b/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go index 3d31872ea..f4b6a1926 100644 --- a/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go +++ b/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go @@ -4,6 +4,7 @@ import ( "io" "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" @@ -21,13 +22,13 @@ func (o *Orchestrator) Import(reader io.ReadSeeker) error { o.lastCommittedSlot = slot upgradeSignalMap := make(map[account.SeatIndex]*model.SignaledBlock) - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { seat, err := stream.Read[account.SeatIndex](reader) if err != nil { return ierrors.Wrap(err, "failed to read seat") } - signaledBlock, err := stream.ReadFunc(reader, model.SignaledBlockFromBytesFunc(o.apiProvider.APIForSlot(slot))) + signaledBlock, err := stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint16, model.SignaledBlockFromBytesFunc(o.apiProvider.APIForSlot(slot))) if err != nil { return ierrors.Wrap(err, "failed to read signaled block") } @@ -55,13 +56,13 @@ func (o *Orchestrator) Import(reader io.ReadSeeker) error { latestSignals.Set(seat, signaledBlock) } - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsByte, func(i int) error { epoch, err := stream.Read[iotago.EpochIndex](reader) if err != nil { return ierrors.Wrap(err, "failed to read epoch") } - versionAndHash, err := stream.ReadFunc(reader, model.VersionAndHashFromBytes) + versionAndHash, err := stream.ReadObject(reader, model.VersionAndHashSize, model.VersionAndHashFromBytes) if err != nil { return ierrors.Wrap(err, "failed to read versionAndHash") } @@ -87,19 +88,19 @@ func (o *Orchestrator) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex } // Export the upgrade signals for the target slot. Since these are rolled forward exporting the last slot is sufficient. 
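
In this orchestrator snapshot code, variable-size values now carry an explicit length-prefix type: writes go through WriteBytesWithSize with a uint16 prefix and the corresponding reads use ReadObjectWithSize/ReadBytesWithSize with the same prefix type, so both sides must agree or decoding misaligns. A minimal round-trip sketch under that assumption, using an in-memory buffer for illustration:

package example

import (
	"github.com/iotaledger/hive.go/serializer/v2"
	"github.com/iotaledger/hive.go/serializer/v2/stream"
)

// roundTripBlob writes a byte slice with a uint16 length prefix and reads it
// back again, the same prefix type used for signaled blocks in this change.
func roundTripBlob(blob []byte) ([]byte, error) {
	buf := stream.NewByteBuffer()

	if err := stream.WriteBytesWithSize(buf, blob, serializer.SeriLengthPrefixTypeAsUint16); err != nil {
		return nil, err
	}

	encoded, err := buf.Bytes()
	if err != nil {
		return nil, err
	}

	// The reader must use the same prefix type that was used for writing.
	return stream.ReadBytesWithSize(stream.NewByteReader(encoded), serializer.SeriLengthPrefixTypeAsUint16)
}
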
- if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { - var exportedCount uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + var exportedCount int upgradeSignals, err := o.upgradeSignalsPerSlotFunc(targetSlot) if err != nil { return 0, ierrors.Wrapf(err, "failed to get upgrade signals for target slot %d", targetSlot) } if err := upgradeSignals.StreamBytes(func(seatBytes []byte, signaledBlockBytes []byte) error { - if err := stream.Write(writer, seatBytes); err != nil { + if err := stream.WriteBytes(writer, seatBytes); err != nil { return ierrors.Wrap(err, "failed to write seat") } - if err := stream.WriteBlob(writer, signaledBlockBytes); err != nil { + if err := stream.WriteBytesWithSize(writer, signaledBlockBytes, serializer.SeriLengthPrefixTypeAsUint16); err != nil { return ierrors.Wrap(err, "failed to write signaled block") } @@ -116,8 +117,8 @@ func (o *Orchestrator) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex } // Export the successfully signaled epochs for the signaling window. - if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { - var exportedCount uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsByte, func() (elementsCount int, err error) { + var exportedCount int apiForSlot := o.apiProvider.APIForSlot(targetSlot) currentEpoch := apiForSlot.TimeProvider().EpochFromSlot(targetSlot) @@ -137,7 +138,7 @@ func (o *Orchestrator) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex if err := stream.Write(writer, epoch); err != nil { return 0, ierrors.Wrapf(err, "failed to write epoch %d", epoch) } - if err := stream.WriteSerializable(writer, versionAndHash); err != nil { + if err := stream.WriteObject(writer, versionAndHash, model.VersionAndHash.Bytes); err != nil { return 0, ierrors.Wrapf(err, "failed to write versionAndHash for epoch %d", epoch) } diff --git a/pkg/protocol/engine/utxoledger/database_prefixes.go b/pkg/protocol/engine/utxoledger/database_prefixes.go index 12dcd559e..8ff909c73 100644 --- a/pkg/protocol/engine/utxoledger/database_prefixes.go +++ b/pkg/protocol/engine/utxoledger/database_prefixes.go @@ -36,7 +36,7 @@ const ( 1 byte + 34 bytes Value: - BlockID + iotago.SlotIndex + TransactionCreationSlot (time.Time) + iotago.Output.Serialized() + BlockID + iotago.SlotIndex + TransactionCreationSlot (time.Slot) + iotago.Output.Serialized() 40 bytes + 4 bytes + 8 byte s + 1 byte type + X bytes Spent Output: @@ -46,7 +46,7 @@ const ( 1 byte + 34 bytes Value: - TargetTransactionID (iotago.SignedTransactionID) + TransactionAcceptedSlotIndex (iotago.SlotIndex) + TransactionCreationSlot (time.Time) + TargetTransactionID (iotago.SignedTransactionID) + TransactionAcceptedSlotIndex (iotago.SlotIndex) + TransactionCreationSlot (time.Slot) 32 bytes + 8 bytes + 8 bytes Unspent Output: diff --git a/pkg/protocol/engine/utxoledger/manager.go b/pkg/protocol/engine/utxoledger/manager.go index 83fd376f5..e4109254c 100644 --- a/pkg/protocol/engine/utxoledger/manager.go +++ b/pkg/protocol/engine/utxoledger/manager.go @@ -2,13 +2,13 @@ package utxoledger import ( "crypto/sha256" - "encoding/binary" "github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -28,6 
+28,8 @@ func New(store kvstore.KVStore, apiProvider iotago.APIProvider) *Manager { return &Manager{ store: store, stateTree: ads.NewMap[iotago.Identifier](lo.PanicOnErr(store.WithExtendedRealm(kvstore.Realm{StoreKeyPrefixStateTree})), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.OutputID.Bytes, iotago.OutputIDFromBytes, (*stateTreeMetadata).Bytes, @@ -368,7 +370,8 @@ func (m *Manager) LedgerStateSHA256Sum() ([]byte, error) { if err != nil { return nil, err } - if err := binary.Write(ledgerStateHash, binary.LittleEndian, ledgerSlot); err != nil { + + if err := stream.Write(ledgerStateHash, ledgerSlot); err != nil { return nil, err } @@ -384,22 +387,16 @@ func (m *Manager) LedgerStateSHA256Sum() ([]byte, error) { return nil, err } - if _, err := ledgerStateHash.Write(output.outputID[:]); err != nil { + if err := stream.Write(ledgerStateHash, outputID); err != nil { return nil, err } - if _, err := ledgerStateHash.Write(output.KVStorableValue()); err != nil { + if err := stream.WriteBytes(ledgerStateHash, output.KVStorableValue()); err != nil { return nil, err } } - // Add root of the state tree - stateTreeBytes, err := m.StateTreeRoot().Bytes() - if err != nil { - return nil, err - } - - if _, err := ledgerStateHash.Write(stateTreeBytes); err != nil { + if err := stream.Write(ledgerStateHash, m.StateTreeRoot()); err != nil { return nil, err } diff --git a/pkg/protocol/engine/utxoledger/marshalutils_helper.go b/pkg/protocol/engine/utxoledger/marshalutils_helper.go deleted file mode 100644 index b19e747fc..000000000 --- a/pkg/protocol/engine/utxoledger/marshalutils_helper.go +++ /dev/null @@ -1,43 +0,0 @@ -package utxoledger - -import ( - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" - iotago "github.com/iotaledger/iota.go/v4" -) - -func ParseOutputID(ms *marshalutil.MarshalUtil) (iotago.OutputID, error) { - bytes, err := ms.ReadBytes(iotago.OutputIDLength) - if err != nil { - return iotago.EmptyOutputID, err - } - - return iotago.OutputID(bytes), nil -} - -func parseTransactionID(ms *marshalutil.MarshalUtil) (iotago.TransactionID, error) { - bytes, err := ms.ReadBytes(iotago.TransactionIDLength) - if err != nil { - return iotago.EmptyTransactionID, err - } - - return iotago.TransactionID(bytes), nil -} - -func ParseBlockID(ms *marshalutil.MarshalUtil) (iotago.BlockID, error) { - bytes, err := ms.ReadBytes(iotago.BlockIDLength) - if err != nil { - return iotago.EmptyBlockID, err - } - - return iotago.BlockID(bytes), nil -} - -func parseSlotIndex(ms *marshalutil.MarshalUtil) (iotago.SlotIndex, error) { - bytes, err := ms.ReadBytes(iotago.SlotIndexLength) - if err != nil { - return 0, err - } - - return lo.DropCount(iotago.SlotIndexFromBytes(bytes)) -} diff --git a/pkg/protocol/engine/utxoledger/output.go b/pkg/protocol/engine/utxoledger/output.go index 3b6b5e396..0194b65c1 100644 --- a/pkg/protocol/engine/utxoledger/output.go +++ b/pkg/protocol/engine/utxoledger/output.go @@ -8,7 +8,7 @@ import ( "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/serializer/v2" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -193,11 +193,13 @@ func (o *Output) CopyWithBlockIDAndSlotBooked(blockID iotago.BlockID, slotBooked // - kvStorable func outputStorageKeyForOutputID(outputID iotago.OutputID) []byte { - ms := marshalutil.New(iotago.OutputIDLength + 1) - 
ms.WriteByte(StoreKeyPrefixOutput) // 1 byte - ms.WriteBytes(outputID[:]) // iotago.OutputIDLength bytes + byteBuffer := stream.NewByteBuffer(iotago.OutputIDLength + serializer.OneByte) - return ms.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixOutput) + _ = stream.Write(byteBuffer, outputID) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (o *Output) KVStorableKey() (key []byte) { @@ -205,68 +207,41 @@ func (o *Output) KVStorableKey() (key []byte) { } func (o *Output) KVStorableValue() (value []byte) { - ms := marshalutil.New() - ms.WriteBytes(o.blockID[:]) // BlockIDLength bytes - ms.WriteBytes(o.slotBooked.MustBytes()) // 4 bytes - - ms.WriteUint32(uint32(len(o.encodedOutput))) // 4 bytes - ms.WriteBytes(o.encodedOutput) + byteBuffer := stream.NewByteBuffer() - ms.WriteUint32(uint32(len(o.encodedProof))) // 4 bytes - ms.WriteBytes(o.encodedProof) + // There can't be any errors. + _ = stream.Write(byteBuffer, o.blockID) + _ = stream.Write(byteBuffer, o.slotBooked) + _ = stream.WriteBytesWithSize(byteBuffer, o.encodedOutput, serializer.SeriLengthPrefixTypeAsUint32) + _ = stream.WriteBytesWithSize(byteBuffer, o.encodedProof, serializer.SeriLengthPrefixTypeAsUint32) - return ms.Bytes() + return lo.PanicOnErr(byteBuffer.Bytes()) } func (o *Output) kvStorableLoad(_ *Manager, key []byte, value []byte) error { - // Parse key - keyUtil := marshalutil.New(key) - - // Read prefix output - _, err := keyUtil.ReadByte() - if err != nil { - return err - } - - // Read OutputID - if o.outputID, err = ParseOutputID(keyUtil); err != nil { - return err - } + var err error - // Parse value - valueUtil := marshalutil.New(value) + keyReader := stream.NewByteReader(key) - // Read BlockID - if o.blockID, err = ParseBlockID(valueUtil); err != nil { - return err + if _, err = stream.Read[byte](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read prefix") } - - // Read Slot - o.slotBooked, err = parseSlotIndex(valueUtil) - if err != nil { - return err + if o.outputID, err = stream.Read[iotago.OutputID](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read outputID") } - // Read Output - outputLen, err := valueUtil.ReadUint32() - if err != nil { - return err + valueReader := stream.NewByteReader(value) + if o.blockID, err = stream.Read[iotago.BlockID](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read blockID") } - - o.encodedOutput, err = valueUtil.ReadBytes(int(outputLen)) - if err != nil { - return err + if o.slotBooked, err = stream.Read[iotago.SlotIndex](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read slotBooked") } - - // Read Output proof - proofLen, err := valueUtil.ReadUint32() - if err != nil { - return err + if o.encodedOutput, err = stream.ReadBytesWithSize(valueReader, serializer.SeriLengthPrefixTypeAsUint32); err != nil { + return ierrors.Wrap(err, "unable to read encodedOutput") } - - o.encodedProof, err = valueUtil.ReadBytes(int(proofLen)) - if err != nil { - return err + if o.encodedProof, err = stream.ReadBytesWithSize(valueReader, serializer.SeriLengthPrefixTypeAsUint32); err != nil { + return ierrors.Wrap(err, "unable to read encodedProof") } return nil diff --git a/pkg/protocol/engine/utxoledger/slot_diff.go b/pkg/protocol/engine/utxoledger/slot_diff.go index dd54bd492..be304e321 100644 --- a/pkg/protocol/engine/utxoledger/slot_diff.go +++ b/pkg/protocol/engine/utxoledger/slot_diff.go @@ -2,12 +2,13 @@ package utxoledger import ( "crypto/sha256" - "encoding/binary" "sort" 
"github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -21,12 +22,14 @@ type SlotDiff struct { Spents Spents } -func slotDiffKeyForIndex(index iotago.SlotIndex) []byte { - m := marshalutil.New(iotago.SlotIndexLength + 1) - m.WriteByte(StoreKeyPrefixSlotDiffs) - m.WriteBytes(index.MustBytes()) +func slotDiffKeyForIndex(slot iotago.SlotIndex) []byte { + byteBuffer := stream.NewByteBuffer(serializer.OneByte + iotago.SlotIndexLength) - return m.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixSlotDiffs) + _ = stream.Write(byteBuffer, slot) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (sd *SlotDiff) KVStorableKey() []byte { @@ -34,40 +37,48 @@ func (sd *SlotDiff) KVStorableKey() []byte { } func (sd *SlotDiff) KVStorableValue() []byte { - m := marshalutil.New() + byteBuffer := stream.NewByteBuffer() - m.WriteUint32(uint32(len(sd.Outputs))) - for _, output := range sd.sortedOutputs() { - m.WriteBytes(output.outputID[:]) - } + // There can't be any errors. + _ = stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, output := range sd.sortedOutputs() { + _ = stream.Write(byteBuffer, output.outputID) + } - m.WriteUint32(uint32(len(sd.Spents))) - for _, spent := range sd.sortedSpents() { - m.WriteBytes(spent.output.outputID[:]) - } + return len(sd.Outputs), nil + }) - return m.Bytes() + _ = stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, spent := range sd.sortedSpents() { + _ = stream.Write(byteBuffer, spent.output.outputID) + } + + return len(sd.Spents), nil + }) + + return lo.PanicOnErr(byteBuffer.Bytes()) } // note that this method relies on the data being available within other "tables". 
func (sd *SlotDiff) kvStorableLoad(manager *Manager, key []byte, value []byte) error { - slot, _, err := iotago.SlotIndexFromBytes(key[1:]) - if err != nil { + var err error + + if sd.Slot, _, err = iotago.SlotIndexFromBytes(key[1:]); err != nil { return err } - marshalUtil := marshalutil.New(value) + byteReader := stream.NewByteReader(value) - outputCount, err := marshalUtil.ReadUint32() + outputsCount, err := stream.PeekSize(byteReader, serializer.SeriLengthPrefixTypeAsUint32) if err != nil { - return err + return ierrors.Wrap(err, "unable to peek outputs count") } - outputs := make(Outputs, int(outputCount)) - for i := 0; i < int(outputCount); i++ { - var outputID iotago.OutputID - if outputID, err = ParseOutputID(marshalUtil); err != nil { - return err + outputs := make(Outputs, outputsCount) + if err = stream.ReadCollection(byteReader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + outputID, err := stream.Read[iotago.OutputID](byteReader) + if err != nil { + return ierrors.Wrap(err, "unable to read outputID") } output, err := manager.ReadOutputByOutputIDWithoutLocking(outputID) @@ -76,18 +87,22 @@ func (sd *SlotDiff) kvStorableLoad(manager *Manager, key []byte, value []byte) e } outputs[i] = output + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read slot diff outputs") } - spentCount, err := marshalUtil.ReadUint32() + spentsCount, err := stream.PeekSize(byteReader, serializer.SeriLengthPrefixTypeAsUint32) if err != nil { - return err + return ierrors.Wrap(err, "unable to peek spents count") } - spents := make(Spents, spentCount) - for i := 0; i < int(spentCount); i++ { - var outputID iotago.OutputID - if outputID, err = ParseOutputID(marshalUtil); err != nil { - return err + spents := make(Spents, spentsCount) + if err = stream.ReadCollection(byteReader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + outputID, err := stream.Read[iotago.OutputID](byteReader) + if err != nil { + return ierrors.Wrap(err, "unable to read outputID") } spent, err := manager.ReadSpentForOutputIDWithoutLocking(outputID) @@ -96,9 +111,12 @@ func (sd *SlotDiff) kvStorableLoad(manager *Manager, key []byte, value []byte) e } spents[i] = spent + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read slot diff spents") } - sd.Slot = slot sd.Outputs = outputs sd.Spents = spents @@ -127,11 +145,11 @@ func (sd *SlotDiff) sortedSpents() LexicalOrderedSpents { func (sd *SlotDiff) SHA256Sum() ([]byte, error) { sdDiffHash := sha256.New() - if err := binary.Write(sdDiffHash, binary.LittleEndian, sd.KVStorableKey()); err != nil { + if err := stream.WriteBytes(sdDiffHash, sd.KVStorableKey()); err != nil { return nil, ierrors.Errorf("unable to serialize slot diff: %w", err) } - if err := binary.Write(sdDiffHash, binary.LittleEndian, sd.KVStorableValue()); err != nil { + if err := stream.WriteBytes(sdDiffHash, sd.KVStorableValue()); err != nil { return nil, ierrors.Errorf("unable to serialize slot diff: %w", err) } diff --git a/pkg/protocol/engine/utxoledger/snapshot.go b/pkg/protocol/engine/utxoledger/snapshot.go index 719a69b30..a6af13e95 100644 --- a/pkg/protocol/engine/utxoledger/snapshot.go +++ b/pkg/protocol/engine/utxoledger/snapshot.go @@ -1,87 +1,76 @@ package utxoledger import ( - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/byteutils" 
"github.com/iotaledger/hive.go/serializer/v2/serix" - "github.com/iotaledger/iota-core/pkg/utils" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) // Helpers to serialize/deserialize into/from snapshots func (o *Output) SnapshotBytes() []byte { - m := marshalutil.New() - m.WriteBytes(o.outputID[:]) - m.WriteBytes(o.blockID[:]) - m.WriteUint32(uint32(o.slotBooked)) - m.WriteUint32(uint32(len(o.encodedOutput))) - m.WriteBytes(o.encodedOutput) - m.WriteUint32(uint32(len(o.encodedProof))) - m.WriteBytes(o.encodedProof) - - return m.Bytes() + return byteutils.ConcatBytes(o.outputID[:], o.KVStorableValue()) } func OutputFromSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvider) (*Output, error) { - outputID := iotago.OutputID{} - if _, err := io.ReadFull(reader, outputID[:]); err != nil { - return nil, ierrors.Errorf("unable to read LS output ID: %w", err) + outputID, err := stream.Read[iotago.OutputID](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS output ID") } - blockID := iotago.BlockID{} - if _, err := io.ReadFull(reader, blockID[:]); err != nil { - return nil, ierrors.Errorf("unable to read LS block ID: %w", err) + blockID, err := stream.Read[iotago.BlockID](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS block ID") } - var slotBooked iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &slotBooked); err != nil { - return nil, ierrors.Errorf("unable to read LS output milestone index booked: %w", err) + slotBooked, err := stream.Read[iotago.SlotIndex](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS output slot booked") } - var outputLength uint32 - if err := binary.Read(reader, binary.LittleEndian, &outputLength); err != nil { - return nil, ierrors.Errorf("unable to read LS output length: %w", err) - } + var outputBytes []byte + output, err := stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint32, func(bytes []byte) (iotago.TxEssenceOutput, int, error) { + outputBytes = bytes - outputBytes := make([]byte, outputLength) - if _, err := io.ReadFull(reader, outputBytes); err != nil { - return nil, ierrors.Errorf("unable to read LS output bytes: %w", err) - } + var o iotago.TxEssenceOutput + readBytes, err := apiProvider.APIForSlot(blockID.Slot()).Decode(bytes, &o, serix.WithValidation()) + if err != nil { + return nil, 0, ierrors.Wrap(err, "invalid LS output address") + } - var output iotago.TxEssenceOutput - if _, err := apiProvider.APIForSlot(blockID.Slot()).Decode(outputBytes, &output, serix.WithValidation()); err != nil { - return nil, ierrors.Errorf("invalid LS output address: %w", err) + return o, readBytes, nil + }) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS output") } - var proofLength uint32 - if err := binary.Read(reader, binary.LittleEndian, &proofLength); err != nil { - return nil, ierrors.Errorf("unable to read LS output proof length: %w", err) - } + var proofBytes []byte + proof, err := stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint32, func(bytes []byte) (*iotago.OutputIDProof, int, error) { + proofBytes = bytes - proofBytes := make([]byte, proofLength) - if _, err := io.ReadFull(reader, proofBytes); err != nil { - return nil, ierrors.Errorf("unable to read LS output proof bytes: %w", err) - } + proof, readBytes, err := iotago.OutputIDProofFromBytes(apiProvider.APIForSlot(blockID.Slot()))(proofBytes) + if err != nil { + return nil, 0, 
ierrors.Wrap(err, "invalid LS output proof") + } - proof, _, err := iotago.OutputIDProofFromBytes(apiProvider.APIForSlot(blockID.Slot()))(proofBytes) + return proof, readBytes, nil + }) if err != nil { - return nil, ierrors.Errorf("invalid LS output proof: %w", err) + return nil, ierrors.Wrap(err, "unable to read LS output proof") } return NewOutput(apiProvider, outputID, blockID, slotBooked, output, outputBytes, proof, proofBytes), nil } func (s *Spent) SnapshotBytes() []byte { - m := marshalutil.New() - m.WriteBytes(s.Output().SnapshotBytes()) - m.WriteBytes(s.transactionIDSpent[:]) - // we don't need to write indexSpent because this info is available in the milestoneDiff that consumes the output - return m.Bytes() + + return byteutils.ConcatBytes(s.Output().SnapshotBytes(), s.transactionIDSpent[:]) } func SpentFromSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvider, indexSpent iotago.SlotIndex) (*Spent, error) { @@ -90,84 +79,89 @@ func SpentFromSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvide return nil, err } - transactionIDSpent := iotago.TransactionID{} - if _, err := io.ReadFull(reader, transactionIDSpent[:]); err != nil { - return nil, ierrors.Errorf("unable to read LS transaction ID spent: %w", err) + transactionIDSpent, err := stream.Read[iotago.TransactionID](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS transaction ID spent") } return NewSpent(output, transactionIDSpent, indexSpent), nil } func ReadSlotDiffToSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvider) (*SlotDiff, error) { + var err error slotDiff := &SlotDiff{} - var diffIndex iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &diffIndex); err != nil { - return nil, ierrors.Errorf("unable to read slot diff index: %w", err) + if slotDiff.Slot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read slot diff index") } - slotDiff.Slot = diffIndex - var createdCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &createdCount); err != nil { - return nil, ierrors.Errorf("unable to read slot diff created count: %w", err) + createdCount, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32) + if err != nil { + return nil, ierrors.Wrap(err, "unable to peek slot diff created count") } - slotDiff.Outputs = make(Outputs, createdCount) - for i := uint64(0); i < createdCount; i++ { - var err error + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { slotDiff.Outputs[i], err = OutputFromSnapshotReader(reader, apiProvider) if err != nil { - return nil, ierrors.Errorf("unable to read slot diff output: %w", err) + return ierrors.Wrap(err, "unable to read slot diff output") } - } - var consumedCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &consumedCount); err != nil { - return nil, ierrors.Errorf("unable to read slot diff consumed count: %w", err) + return nil + }); err != nil { + return nil, ierrors.Wrap(err, "unable to read slot diff created collection") } + consumedCount, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32) + if err != nil { + return nil, ierrors.Wrap(err, "unable to peek slot diff consumed count") + } slotDiff.Spents = make(Spents, consumedCount) - for i := uint64(0); i < consumedCount; i++ { - var err error + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { slotDiff.Spents[i], err = 
SpentFromSnapshotReader(reader, apiProvider, slotDiff.Slot) if err != nil { - return nil, ierrors.Errorf("unable to read slot diff spent: %w", err) + return ierrors.Wrap(err, "unable to read slot diff spent") } + + return nil + }); err != nil { + return nil, ierrors.Wrap(err, "unable to read slot diff consumed collection") } return slotDiff, nil } -func WriteSlotDiffToSnapshotWriter(writer io.WriteSeeker, diff *SlotDiff) (written int64, err error) { - var totalBytesWritten int64 - - if err := utils.WriteValueFunc(writer, diff.Slot.MustBytes(), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff index") +func WriteSlotDiffToSnapshotWriter(writer io.WriteSeeker, diff *SlotDiff) error { + if err := stream.Write(writer, diff.Slot); err != nil { + return ierrors.Wrap(err, "unable to write slot diff index") } - if err := utils.WriteValueFunc(writer, uint64(len(diff.Outputs)), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff created count") - } - - for _, output := range diff.sortedOutputs() { - if err := utils.WriteBytesFunc(writer, output.SnapshotBytes(), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff created output") + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, output := range diff.sortedOutputs() { + if err := stream.WriteBytes(writer, output.SnapshotBytes()); err != nil { + return 0, ierrors.Wrap(err, "unable to write slot diff created output") + } } - } - if err := utils.WriteValueFunc(writer, uint64(len(diff.Spents)), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff consumed count") + return len(diff.Outputs), nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot diff created collection") } - for _, spent := range diff.sortedSpents() { - if err := utils.WriteBytesFunc(writer, spent.SnapshotBytes(), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff created output") + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, spent := range diff.sortedSpents() { + if err := stream.WriteBytes(writer, spent.SnapshotBytes()); err != nil { + return 0, ierrors.Wrap(err, "unable to write slot diff spent output") + } } + + return len(diff.Spents), nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot diff spent collection") } - return totalBytesWritten, nil + return nil } // Import imports the ledger state from the given reader. 
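Illustrative sketch (assumed `example` package and wrapper name): WriteSlotDiffToSnapshotWriter now returns only an error, so callers that need the serialized form in memory pair it with stream.NewByteBuffer and its Reader(), as the reworked snapshot tests further below do. Only calls shown in this diff are used.

package example

import (
	"github.com/iotaledger/hive.go/ierrors"
	"github.com/iotaledger/hive.go/serializer/v2/stream"
	iotago "github.com/iotaledger/iota.go/v4"

	"github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger"
)

// roundTripSlotDiff serializes a SlotDiff into an in-memory buffer and reads
// it back, which is how the updated tests exercise the new writer/reader pair.
func roundTripSlotDiff(diff *utxoledger.SlotDiff, apiProvider iotago.APIProvider) (*utxoledger.SlotDiff, error) {
	writer := stream.NewByteBuffer()

	if err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, diff); err != nil {
		return nil, ierrors.Wrap(err, "unable to write slot diff")
	}

	// Reader() exposes the buffered bytes as an io.ReadSeeker for the snapshot reader.
	return utxoledger.ReadSlotDiffToSnapshotReader(writer.Reader(), apiProvider)
}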
@@ -175,40 +169,33 @@ func (m *Manager) Import(reader io.ReadSeeker) error { m.WriteLockLedger() defer m.WriteUnlockLedger() - var snapshotLedgerIndex iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &snapshotLedgerIndex); err != nil { - return ierrors.Errorf("unable to read LS ledger index: %w", err) + snapshotLedgerIndex, err := stream.Read[iotago.SlotIndex](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read LS ledger index") } - if err := m.StoreLedgerIndexWithoutLocking(snapshotLedgerIndex); err != nil { return err } - var outputCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &outputCount); err != nil { - return ierrors.Errorf("unable to read LS output count: %w", err) - } - - var slotDiffCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &slotDiffCount); err != nil { - return ierrors.Errorf("unable to read LS slot diff count: %w", err) - } - - for i := uint64(0); i < outputCount; i++ { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error { output, err := OutputFromSnapshotReader(reader, m.apiProvider) if err != nil { - return ierrors.Errorf("at pos %d: %w", i, err) + return ierrors.Wrapf(err, "at pos %d", i) } if err := m.importUnspentOutputWithoutLocking(output); err != nil { - return err + return ierrors.Wrap(err, "unable to import LS output") } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read LS output collection") } - for i := uint64(0); i < slotDiffCount; i++ { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { slotDiff, err := ReadSlotDiffToSnapshotReader(reader, m.apiProvider) if err != nil { - return err + return ierrors.Wrapf(err, "unable to read LS slot diff at index %d", i) } if slotDiff.Slot != snapshotLedgerIndex-iotago.SlotIndex(i) { @@ -216,8 +203,12 @@ func (m *Manager) Import(reader io.ReadSeeker) error { } if err := m.RollbackDiffWithoutLocking(slotDiff.Slot, slotDiff.Outputs, slotDiff.Spents); err != nil { - return err + return ierrors.Wrapf(err, "unable to rollback LS slot diff at index %d", i) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read LS slot diff collection") } if err := m.stateTree.Commit(); err != nil { @@ -236,83 +227,55 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er if err != nil { return err } - if err := utils.WriteValueFunc(writer, ledgerIndex); err != nil { - return ierrors.Wrap(err, "unable to write ledger index") - } - - var relativeCountersPosition int64 - - var outputCount uint64 - var slotDiffCount uint64 - - // Outputs Count - // The amount of UTXOs contained within this snapshot. - if err := utils.WriteValueFunc(writer, outputCount, &relativeCountersPosition); err != nil { - return ierrors.Wrap(err, "unable to write outputs count") - } - - // Slot Diffs Count - // The amount of slot diffs contained within this snapshot. 
- if err := utils.WriteValueFunc(writer, slotDiffCount, &relativeCountersPosition); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") - } - // Get all UTXOs and sort them by outputID - outputIDs, err := m.UnspentOutputsIDs(ReadLockLedger(false)) - if err != nil { - return ierrors.Wrap(err, "error while retrieving unspent outputIDs") + if err := stream.Write(writer, ledgerIndex); err != nil { + return ierrors.Wrap(err, "unable to write ledger index") } - for _, outputID := range outputIDs.RemoveDupsAndSort() { - output, err := m.ReadOutputByOutputIDWithoutLocking(outputID) + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + // Get all UTXOs and sort them by outputID + outputIDs, err := m.UnspentOutputsIDs(ReadLockLedger(false)) if err != nil { - return ierrors.Wrapf(err, "error while retrieving output %s", outputID) + return 0, ierrors.Wrap(err, "error while retrieving unspent outputIDs") } - if err := utils.WriteBytesFunc(writer, output.SnapshotBytes(), &relativeCountersPosition); err != nil { - return ierrors.Wrap(err, "unable to write output ID") - } + var outputCount int + for _, outputID := range outputIDs.RemoveDupsAndSort() { + output, err := m.ReadOutputByOutputIDWithoutLocking(outputID) + if err != nil { + return 0, ierrors.Wrapf(err, "error while retrieving output %s", outputID) + } - outputCount++ - } + if err := stream.WriteBytes(writer, output.SnapshotBytes()); err != nil { + return 0, ierrors.Wrapf(err, "unable to write output with ID %s", outputID) + } - for diffIndex := ledgerIndex; diffIndex > targetIndex; diffIndex-- { - slotDiff, err := m.SlotDiffWithoutLocking(diffIndex) - if err != nil { - return ierrors.Wrapf(err, "error while retrieving slot diffs for slot %s", diffIndex) + outputCount++ } - written, err := WriteSlotDiffToSnapshotWriter(writer, slotDiff) - if err != nil { - return ierrors.Wrapf(err, "error while writing slot diffs for slot %s", diffIndex) - } - - relativeCountersPosition += written - slotDiffCount++ + return outputCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write unspent output collection") } - // seek back to the file position of the counters - if _, err := writer.Seek(-relativeCountersPosition, io.SeekCurrent); err != nil { - return ierrors.Errorf("unable to seek to LS counter placeholders: %w", err) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var slotDiffCount int + for diffIndex := ledgerIndex; diffIndex > targetIndex; diffIndex-- { + slotDiff, err := m.SlotDiffWithoutLocking(diffIndex) + if err != nil { + return 0, ierrors.Wrapf(err, "error while retrieving slot diffs for slot %s", diffIndex) + } - var countersSize int64 + if WriteSlotDiffToSnapshotWriter(writer, slotDiff) != nil { + return 0, ierrors.Wrapf(err, "error while writing slot diffs for slot %s", diffIndex) + } - // Outputs Count - // The amount of UTXOs contained within this snapshot. - if err := utils.WriteValueFunc(writer, outputCount, &countersSize); err != nil { - return ierrors.Wrap(err, "unable to write outputs count") - } - - // Slot Diffs Count - // The amount of slot diffs contained within this snapshot. 
- if err := utils.WriteValueFunc(writer, slotDiffCount, &countersSize); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") - } + slotDiffCount++ + } - // seek back to the last write position - if _, err := writer.Seek(relativeCountersPosition-countersSize, io.SeekCurrent); err != nil { - return ierrors.Errorf("unable to seek to LS last written position: %w", err) + return slotDiffCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot diff collection") } return nil diff --git a/pkg/protocol/engine/utxoledger/snapshot_test.go b/pkg/protocol/engine/utxoledger/snapshot_test.go index 5e174887a..4f24112bc 100644 --- a/pkg/protocol/engine/utxoledger/snapshot_test.go +++ b/pkg/protocol/engine/utxoledger/snapshot_test.go @@ -5,12 +5,12 @@ import ( "encoding/binary" "testing" - "github.com/orcaman/writerseeker" "github.com/stretchr/testify/require" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/kvstore/mapdb" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger/tpkg" "github.com/iotaledger/iota-core/pkg/utils" @@ -151,13 +151,11 @@ func TestReadSlotDiffToSnapshotReader(t *testing.T) { }, } - writer := &writerseeker.WriterSeeker{} - written, err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) + writer := stream.NewByteBuffer() + err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) require.NoError(t, err) - require.Equal(t, int64(writer.BytesReader().Len()), written) - - reader := writer.BytesReader() + reader := writer.Reader() readSlotDiff, err := utxoledger.ReadSlotDiffToSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, err) @@ -181,21 +179,19 @@ func TestWriteSlotDiffToSnapshotWriter(t *testing.T) { }, } - writer := &writerseeker.WriterSeeker{} - written, err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) + writer := stream.NewByteBuffer() + err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) require.NoError(t, err) - require.Equal(t, int64(writer.BytesReader().Len()), written) - - reader := writer.BytesReader() + reader := writer.Reader() var readSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &readSlot)) require.Equal(t, slot, readSlot) - var createdCount uint64 + var createdCount uint32 require.NoError(t, binary.Read(reader, binary.LittleEndian, &createdCount)) - require.Equal(t, uint64(len(slotDiff.Outputs)), createdCount) + require.Equal(t, uint32(len(slotDiff.Outputs)), createdCount) var snapshotOutputs utxoledger.Outputs for i := 0; i < len(slotDiff.Outputs); i++ { @@ -206,9 +202,9 @@ func TestWriteSlotDiffToSnapshotWriter(t *testing.T) { tpkg.EqualOutputs(t, slotDiff.Outputs, snapshotOutputs) - var consumedCount uint64 + var consumedCount uint32 require.NoError(t, binary.Read(reader, binary.LittleEndian, &consumedCount)) - require.Equal(t, uint64(len(slotDiff.Spents)), consumedCount) + require.Equal(t, uint32(len(slotDiff.Spents)), consumedCount) var snapshotSpents utxoledger.Spents for i := 0; i < len(slotDiff.Spents); i++ { @@ -271,10 +267,10 @@ func TestManager_Import(t *testing.T) { // Test exporting and importing at the current slot 2 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 2)) - reader := writer.BytesReader() + reader := writer.Reader() 
importedSlot2 := utxoledger.New(mapdb.NewMapDB(), api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, importedSlot2.Import(reader)) @@ -285,10 +281,10 @@ func TestManager_Import(t *testing.T) { // Test exporting and importing at slot 1 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 1)) - reader := writer.BytesReader() + reader := writer.Reader() importedSlot1 := utxoledger.New(mapdb.NewMapDB(), api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, importedSlot1.Import(reader)) @@ -302,10 +298,10 @@ func TestManager_Import(t *testing.T) { // Test exporting and importing at slot 0 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 0)) - reader := writer.BytesReader() + reader := writer.Reader() importedSlot0 := utxoledger.New(mapdb.NewMapDB(), api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, importedSlot0.Import(reader)) @@ -358,10 +354,10 @@ func TestManager_Export(t *testing.T) { // Test exporting at the current slot 2 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 2)) - reader := writer.BytesReader() + reader := writer.Reader() var snapshotLedgerSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &snapshotLedgerSlot)) @@ -371,10 +367,6 @@ func TestManager_Export(t *testing.T) { require.NoError(t, binary.Read(reader, binary.LittleEndian, &outputCount)) require.Equal(t, uint64(8), outputCount) - var slotDiffCount uint64 - require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) - require.Equal(t, uint64(0), slotDiffCount) - var snapshotOutputs utxoledger.Outputs for i := uint64(0); i < outputCount; i++ { output, err := utxoledger.OutputFromSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) @@ -387,14 +379,18 @@ func TestManager_Export(t *testing.T) { require.NoError(t, err) tpkg.EqualOutputs(t, unspentOutputs, snapshotOutputs) + + var slotDiffCount uint32 + require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) + require.Equal(t, uint32(0), slotDiffCount) } // Test exporting at slot 1 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 1)) - reader := writer.BytesReader() + reader := writer.Reader() var snapshotLedgerSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &snapshotLedgerSlot)) @@ -404,10 +400,6 @@ func TestManager_Export(t *testing.T) { require.NoError(t, binary.Read(reader, binary.LittleEndian, &outputCount)) require.Equal(t, uint64(8), outputCount) - var slotDiffCount uint64 - require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) - require.Equal(t, uint64(1), slotDiffCount) - var snapshotOutputs utxoledger.Outputs for i := uint64(0); i < outputCount; i++ { output, err := utxoledger.OutputFromSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) @@ -420,7 +412,11 @@ func TestManager_Export(t *testing.T) { tpkg.EqualOutputs(t, unspentOutputs, snapshotOutputs) - for i := uint64(0); i < slotDiffCount; i++ { + var slotDiffCount uint32 + require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) + require.Equal(t, uint32(1), slotDiffCount) + + for i := uint32(0); i < slotDiffCount; i++ { diff, err := utxoledger.ReadSlotDiffToSnapshotReader(reader, 
api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, err) require.Equal(t, snapshotLedgerSlot-iotago.SlotIndex(i), diff.Slot) @@ -429,10 +425,10 @@ func TestManager_Export(t *testing.T) { // Test exporting at slot 0 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 0)) - reader := writer.BytesReader() + reader := writer.Reader() var snapshotLedgerSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &snapshotLedgerSlot)) @@ -442,10 +438,6 @@ func TestManager_Export(t *testing.T) { require.NoError(t, binary.Read(reader, binary.LittleEndian, &outputCount)) require.Equal(t, uint64(8), outputCount) - var slotDiffCount uint64 - require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) - require.Equal(t, uint64(2), slotDiffCount) - var snapshotOutputs utxoledger.Outputs for i := uint64(0); i < outputCount; i++ { output, err := utxoledger.OutputFromSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) @@ -458,7 +450,11 @@ func TestManager_Export(t *testing.T) { tpkg.EqualOutputs(t, unspentOutputs, snapshotOutputs) - for i := uint64(0); i < slotDiffCount; i++ { + var slotDiffCount uint32 + require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) + require.Equal(t, uint32(2), slotDiffCount) + + for i := uint32(0); i < slotDiffCount; i++ { diff, err := utxoledger.ReadSlotDiffToSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, err) require.Equal(t, snapshotLedgerSlot-iotago.SlotIndex(i), diff.Slot) diff --git a/pkg/protocol/engine/utxoledger/spent.go b/pkg/protocol/engine/utxoledger/spent.go index cdb41baeb..1b1a0a888 100644 --- a/pkg/protocol/engine/utxoledger/spent.go +++ b/pkg/protocol/engine/utxoledger/spent.go @@ -3,8 +3,11 @@ package utxoledger import ( "bytes" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -84,11 +87,13 @@ func NewSpent(output *Output, transactionIDSpent iotago.TransactionID, slotSpent } func spentStorageKeyForOutputID(outputID iotago.OutputID) []byte { - ms := marshalutil.New(iotago.OutputIDLength + 1) - ms.WriteByte(StoreKeyPrefixOutputSpent) // 1 byte - ms.WriteBytes(outputID[:]) // iotago.OutputIDLength bytes + byteBuffer := stream.NewByteBuffer(iotago.OutputIDLength + serializer.OneByte) - return ms.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixOutputSpent) // 1 byte + _ = stream.Write(byteBuffer, outputID) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (s *Spent) KVStorableKey() (key []byte) { @@ -96,40 +101,33 @@ func (s *Spent) KVStorableKey() (key []byte) { } func (s *Spent) KVStorableValue() (value []byte) { - ms := marshalutil.New(iotago.TransactionIDLength + iotago.SlotIndexLength) - ms.WriteBytes(s.transactionIDSpent[:]) // iotago.TransactionIDLength bytes - ms.WriteBytes(s.slotSpent.MustBytes()) // iotago.SlotIndexLength bytes + byteBuffer := stream.NewByteBuffer(iotago.TransactionIDLength + iotago.SlotIndexLength) + + // There can't be any errors. 
+ _ = stream.Write(byteBuffer, s.transactionIDSpent) + _ = stream.Write(byteBuffer, s.slotSpent) - return ms.Bytes() + return lo.PanicOnErr(byteBuffer.Bytes()) } func (s *Spent) kvStorableLoad(_ *Manager, key []byte, value []byte) error { - // Parse key - keyUtil := marshalutil.New(key) + var err error + keyReader := stream.NewByteReader(key) - // Read prefix output - _, err := keyUtil.ReadByte() - if err != nil { - return err + if _, err = stream.Read[byte](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read prefix") } - - // Read OutputID - if s.outputID, err = ParseOutputID(keyUtil); err != nil { - return err + if s.outputID, err = stream.Read[iotago.OutputID](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read outputID") } - // Parse value - valueUtil := marshalutil.New(value) + valueReader := stream.NewByteReader(value) - // Read transaction ID - if s.transactionIDSpent, err = parseTransactionID(valueUtil); err != nil { - return err + if s.transactionIDSpent, err = stream.Read[iotago.TransactionID](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read transactionIDSpent") } - - // Read slot index spent index - s.slotSpent, err = parseSlotIndex(valueUtil) - if err != nil { - return err + if s.slotSpent, err = stream.Read[iotago.SlotIndex](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read slotSpent") } return nil diff --git a/pkg/protocol/engine/utxoledger/spent_status.go b/pkg/protocol/engine/utxoledger/spent_status.go index 2a2bb6fc4..bfd34409e 100644 --- a/pkg/protocol/engine/utxoledger/spent_status.go +++ b/pkg/protocol/engine/utxoledger/spent_status.go @@ -2,7 +2,9 @@ package utxoledger import ( "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -17,11 +19,13 @@ type OutputConsumer func(output *Output) bool type LookupKey []byte func lookupKeyUnspentOutput(outputID iotago.OutputID) LookupKey { - ms := marshalutil.New(iotago.OutputIDLength + 1) - ms.WriteByte(StoreKeyPrefixOutputUnspent) // 1 byte - ms.WriteBytes(outputID[:]) // iotago.OutputIDLength bytes + byteBuffer := stream.NewByteBuffer(serializer.OneByte + iotago.OutputIDLength) - return ms.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixOutputUnspent) + _ = stream.Write(byteBuffer, outputID) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (o *Output) UnspentLookupKey() LookupKey { @@ -29,14 +33,10 @@ func (o *Output) UnspentLookupKey() LookupKey { } func outputIDFromDatabaseKey(key LookupKey) (iotago.OutputID, error) { - ms := marshalutil.New([]byte(key)) - - // prefix - if _, err := ms.ReadByte(); err != nil { - return iotago.OutputID{}, err - } + // Skip 1 byte prefix. 
+ outputID, _, err := iotago.OutputIDFromBytes(key[1:]) - return ParseOutputID(ms) + return outputID, err } func markAsUnspent(output *Output, mutations kvstore.BatchedMutations) error { diff --git a/pkg/protocol/engine/utxoledger/state_tree.go b/pkg/protocol/engine/utxoledger/state_tree.go index 06f04fab5..1e5f3af3f 100644 --- a/pkg/protocol/engine/utxoledger/state_tree.go +++ b/pkg/protocol/engine/utxoledger/state_tree.go @@ -6,17 +6,16 @@ import ( "github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore/mapdb" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" iotago "github.com/iotaledger/iota.go/v4" ) type stateTreeMetadata struct { - Time iotago.SlotIndex + Slot iotago.SlotIndex } func newStateMetadata(output *Output) *stateTreeMetadata { return &stateTreeMetadata{ - Time: output.SlotCreated(), + Slot: output.SlotCreated(), } } @@ -25,7 +24,7 @@ func stateMetadataFromBytes(b []byte) (*stateTreeMetadata, int, error) { var err error var n int - s.Time, n, err = iotago.SlotIndexFromBytes(b) + s.Slot, n, err = iotago.SlotIndexFromBytes(b) if err != nil { return nil, 0, err } @@ -34,10 +33,7 @@ func stateMetadataFromBytes(b []byte) (*stateTreeMetadata, int, error) { } func (s *stateTreeMetadata) Bytes() ([]byte, error) { - ms := marshalutil.New(iotago.SlotIndexLength) - ms.WriteBytes(s.Time.MustBytes()) - - return ms.Bytes(), nil + return s.Slot.Bytes() } func (m *Manager) StateTreeRoot() iotago.Identifier { @@ -46,6 +42,8 @@ func (m *Manager) StateTreeRoot() iotago.Identifier { func (m *Manager) CheckStateTree() bool { comparisonTree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.OutputID.Bytes, iotago.OutputIDFromBytes, (*stateTreeMetadata).Bytes, diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go index f5f024a56..ceac2fdf5 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go @@ -296,7 +296,7 @@ func (t *Tracker) aggregatePerformanceFactors(slotActivityVector []*model.Valida // we reward not only total number of blocks issued, but also regularity based on block timestamp slotPerformanceFactor := bits.OnesCount32(pf.SlotActivityVector) - if pf.BlockIssuedCount > protoParamsForEpoch.ValidationBlocksPerSlot() { + if pf.BlocksIssuedCount > protoParamsForEpoch.ValidationBlocksPerSlot() { // we harshly punish validators that issue any blocks more than allowed return 0 @@ -345,8 +345,8 @@ func (t *Tracker) trackCommitteeMemberPerformance(validationBlock *iotago.Valida // we restrict the number up to ValidatorBlocksPerSlot + 1 to know later if the validator issued more blocks than allowed and be able to punish for it // also it can fint into uint8 - if validatorPerformance.BlockIssuedCount < apiForSlot.ProtocolParameters().ValidationBlocksPerSlot()+1 { - validatorPerformance.BlockIssuedCount++ + if validatorPerformance.BlocksIssuedCount < apiForSlot.ProtocolParameters().ValidationBlocksPerSlot()+1 { + validatorPerformance.BlocksIssuedCount++ } validatorPerformance.HighestSupportedVersionAndHash = model.VersionAndHash{ diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go index a2dbdeaad..498d00d99 100644 --- 
a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go @@ -190,6 +190,8 @@ func (t *Tracker) rewardsMap(epoch iotago.EpochIndex) (ads.Map[iotago.Identifier } return ads.NewMap[iotago.Identifier](kv, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*model.PoolRewards).Bytes, diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go index cf02cd05f..585aff575 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go @@ -1,14 +1,14 @@ package performance import ( - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" - "github.com/iotaledger/iota-core/pkg/utils" iotago "github.com/iotaledger/iota.go/v4" ) @@ -46,26 +46,25 @@ func (t *Tracker) Export(writer io.WriteSeeker, targetSlotIndex iotago.SlotIndex timeProvider := t.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() targetEpoch := timeProvider.EpochFromSlot(targetSlotIndex) - positionedWriter := utils.NewPositionedWriter(writer) // if the target index is the last slot of the epoch, the epoch was committed if timeProvider.EpochEnd(targetEpoch) != targetSlotIndex { targetEpoch-- } - if err := t.exportPerformanceFactor(positionedWriter, timeProvider.EpochStart(targetEpoch+1), targetSlotIndex); err != nil { + if err := t.exportPerformanceFactor(writer, timeProvider.EpochStart(targetEpoch+1), targetSlotIndex); err != nil { return ierrors.Wrap(err, "unable to export performance factor") } - if err := t.exportPoolRewards(positionedWriter, targetEpoch); err != nil { + if err := t.exportPoolRewards(writer, targetEpoch); err != nil { return ierrors.Wrap(err, "unable to export pool rewards") } - if err := t.exportPoolsStats(positionedWriter, targetEpoch); err != nil { + if err := t.exportPoolsStats(writer, targetEpoch); err != nil { return ierrors.Wrap(err, "unable to export pool stats") } - if err := t.exportCommittees(positionedWriter, targetSlotIndex); err != nil { + if err := t.exportCommittees(writer, targetSlotIndex); err != nil { return ierrors.Wrap(err, "unable to export committees") } @@ -73,20 +72,10 @@ func (t *Tracker) Export(writer io.WriteSeeker, targetSlotIndex iotago.SlotIndex } func (t *Tracker) importPerformanceFactor(reader io.ReadSeeker) error { - var slotCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &slotCount); err != nil { - return ierrors.Wrap(err, "unable to read slot count") - } - - for i := uint64(0); i < slotCount; i++ { - var slot iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &slot); err != nil { - return ierrors.Wrap(err, "unable to read slot index") - } - - var accountsCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &accountsCount); err != nil { - return ierrors.Wrapf(err, "unable to read accounts count for slot index %d", slot) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + slot, err := stream.Read[iotago.SlotIndex](reader) + if err != nil { + return ierrors.Wrapf(err, "unable to read slot index at index 
%d", i) } performanceFactors, err := t.validatorPerformancesFunc(slot) @@ -94,36 +83,37 @@ func (t *Tracker) importPerformanceFactor(reader io.ReadSeeker) error { return ierrors.Wrapf(err, "unable to get performance factors for slot index %d", slot) } - for j := uint64(0); j < accountsCount; j++ { - var accountID iotago.AccountID - if err = binary.Read(reader, binary.LittleEndian, &accountID); err != nil { - return ierrors.Wrapf(err, "unable to read account id for the slot index %d", slot) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(j int) error { + accountID, err := stream.Read[iotago.AccountID](reader) + if err != nil { + return ierrors.Wrapf(err, "unable to read account id at index %d", j) } - - var performanceFactor model.ValidatorPerformance - if err = binary.Read(reader, binary.LittleEndian, &performanceFactor); err != nil { - return ierrors.Wrapf(err, "unable to read performance factor for account %s and slot index %d", accountID, slot) + performanceFactor, err := stream.ReadObjectFromReader(reader, model.ValidatorPerformanceFromReader) + if err != nil { + return ierrors.Wrapf(err, "unable to read performance factor for account %s and slot %d", accountID, slot) } - - if err = performanceFactors.Store(accountID, &performanceFactor); err != nil { + if err = performanceFactors.Store(accountID, performanceFactor); err != nil { return ierrors.Wrapf(err, "unable to store performance factor for account %s and slot index %d", accountID, slot) } + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read performance factors for slot %d", slot) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read performance factors collection") } return nil } func (t *Tracker) importPoolRewards(reader io.ReadSeeker) error { - var epochCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &epochCount); err != nil { - return ierrors.Wrap(err, "unable to read epoch count") - } - - for i := uint64(0); i < epochCount; i++ { - var epoch iotago.EpochIndex - if err := binary.Read(reader, binary.LittleEndian, &epoch); err != nil { - return ierrors.Wrap(err, "unable to read epoch index") + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(int) error { + epoch, err := stream.Read[iotago.EpochIndex](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read epoch") } rewardsTree, err := t.rewardsMap(epoch) @@ -131,307 +121,280 @@ func (t *Tracker) importPoolRewards(reader io.ReadSeeker) error { return ierrors.Wrapf(err, "unable to get rewards tree for epoch index %d", epoch) } - var accountsCount uint64 - if err = binary.Read(reader, binary.LittleEndian, &accountsCount); err != nil { - return ierrors.Wrapf(err, "unable to read accounts count for epoch index %d", epoch) - } - - for j := uint64(0); j < accountsCount; j++ { - var accountID iotago.AccountID - if err = binary.Read(reader, binary.LittleEndian, &accountID); err != nil { - return ierrors.Wrapf(err, "unable to read account id for the epoch index %d", epoch) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(int) error { + accountID, err := stream.Read[iotago.AccountID](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read account id") } - var reward model.PoolRewards - if err = binary.Read(reader, binary.LittleEndian, &reward); err != nil { + reward, err := stream.ReadObjectFromReader(reader, model.PoolRewardsFromReader) + if err != nil { return ierrors.Wrapf(err, 
"unable to read reward for account %s and epoch index %d", accountID, epoch) } - if err = rewardsTree.Set(accountID, &reward); err != nil { + if err = rewardsTree.Set(accountID, reward); err != nil { return ierrors.Wrapf(err, "unable to set reward for account %s and epoch index %d", accountID, epoch) } + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read rewards collection for epoch %d", epoch) } - if err = rewardsTree.Commit(); err != nil { + if err := rewardsTree.Commit(); err != nil { return ierrors.Wrapf(err, "unable to commit rewards for epoch index %d", epoch) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read pool rewards collection") } return nil } func (t *Tracker) importPoolsStats(reader io.ReadSeeker) error { - var epochCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &epochCount); err != nil { - return ierrors.Wrap(err, "unable to read epoch count") - } - - for i := uint64(0); i < epochCount; i++ { - var epoch iotago.EpochIndex - if err := binary.Read(reader, binary.LittleEndian, &epoch); err != nil { - return ierrors.Wrap(err, "unable to read epoch index") + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(int) error { + epoch, err := stream.Read[iotago.EpochIndex](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read epoch") } - var poolStats model.PoolsStats - if err := binary.Read(reader, binary.LittleEndian, &poolStats); err != nil { - return ierrors.Wrapf(err, "unable to read pool stats for epoch index %d", epoch) + poolStats, err := stream.ReadObjectFromReader(reader, model.PoolStatsFromReader) + if err != nil { + return ierrors.Wrapf(err, "unable to read pool stats for epoch %d", epoch) } - if err := t.poolStatsStore.Store(epoch, &poolStats); err != nil { + if err := t.poolStatsStore.Store(epoch, poolStats); err != nil { return ierrors.Wrapf(err, "unable to store pool stats for the epoch index %d", epoch) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read pool stats collection") } return nil } func (t *Tracker) importCommittees(reader io.ReadSeeker) error { - var epochCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &epochCount); err != nil { - return ierrors.Wrap(err, "unable to read committees epoch count") - } - for i := uint64(0); i < epochCount; i++ { - var epoch iotago.EpochIndex - if err := binary.Read(reader, binary.LittleEndian, &epoch); err != nil { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(int) error { + epoch, err := stream.Read[iotago.EpochIndex](reader) + if err != nil { return ierrors.Wrap(err, "unable to read epoch index") } - committee, _, err := account.AccountsFromReader(reader) + committee, err := account.AccountsFromReader(reader) if err != nil { - return ierrors.Wrapf(err, "unable to read committee for the epoch index %d", epoch) + return ierrors.Wrapf(err, "unable to read committee for the epoch %d", epoch) } if err = t.committeeStore.Store(epoch, committee); err != nil { return ierrors.Wrap(err, "unable to store committee") } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read committees collection") } return nil } -func (t *Tracker) exportPerformanceFactor(pWriter *utils.PositionedWriter, startSlot, targetSlot iotago.SlotIndex) error { +func (t *Tracker) exportPerformanceFactor(writer io.WriteSeeker, startSlot, targetSlot iotago.SlotIndex) error { t.performanceFactorsMutex.RLock() defer 
t.performanceFactorsMutex.RUnlock() - var slotCount uint64 - if err := pWriter.WriteValue("pf slot count", slotCount, true); err != nil { - return ierrors.Wrap(err, "unable to write pf slot count") - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var slotCount int - for currentSlot := startSlot; currentSlot <= targetSlot; currentSlot++ { - if err := pWriter.WriteValue("slot index", currentSlot); err != nil { - return ierrors.Wrapf(err, "unable to write slot index %d", currentSlot) - } + for currentSlot := startSlot; currentSlot <= targetSlot; currentSlot++ { + if err := stream.Write(writer, currentSlot); err != nil { + return 0, ierrors.Wrapf(err, "unable to write slot index %d", currentSlot) + } - var accountsCount uint64 - if err := pWriter.WriteValue("pf account count", accountsCount, true); err != nil { - return ierrors.Wrapf(err, "unable to write pf accounts count for slot index %d", currentSlot) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + var accountsCount int - performanceFactors, err := t.validatorPerformancesFunc(currentSlot) - if err != nil { - return ierrors.Wrapf(err, "unable to get performance factors for slot index %d", currentSlot) - } + performanceFactors, err := t.validatorPerformancesFunc(currentSlot) + if err != nil { + return 0, ierrors.Wrapf(err, "unable to get performance factors for slot index %d", currentSlot) + } - if err = performanceFactors.Stream(func(accountID iotago.AccountID, pf *model.ValidatorPerformance) error { - if err = pWriter.WriteValue("account id", accountID); err != nil { - return ierrors.Wrapf(err, "unable to write account id %s for slot %d", accountID, currentSlot) - } + if err = performanceFactors.Stream(func(accountID iotago.AccountID, pf *model.ValidatorPerformance) error { + if err := stream.Write(writer, accountID); err != nil { + return ierrors.Wrapf(err, "unable to write account id %s for slot %d", accountID, currentSlot) + } - bytes, err := t.apiProvider.APIForSlot(currentSlot).Encode(pf) - if err != nil { - return ierrors.Wrapf(err, "unable to encode performance factor for accountID %s and slot index %d", accountID, currentSlot) - } + if err := stream.WriteObject(writer, pf, (*model.ValidatorPerformance).Bytes); err != nil { + return ierrors.Wrapf(err, "unable to write performance factor for accountID %s and slot index %d", accountID, currentSlot) + } - if err = pWriter.WriteBytes(bytes); err != nil { - return ierrors.Wrapf(err, "unable to write performance factor for accountID %s and slot index %d", accountID, currentSlot) - } + accountsCount++ - accountsCount++ + return nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to write performance factors for slot index %d", currentSlot) + } - return nil - }); err != nil { - return ierrors.Wrapf(err, "unable to write performance factors for slot index %d", currentSlot) - } + return accountsCount, nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to write accounts for slot %d", currentSlot) + } - if err = pWriter.WriteValueAtBookmark("pf account count", accountsCount); err != nil { - return ierrors.Wrap(err, "unable to write pf accounts count") + slotCount++ } - slotCount++ - } - - if err := pWriter.WriteValueAtBookmark("pf slot count", slotCount); err != nil { - return ierrors.Wrap(err, "unable to write pf slot count at bookmarked position") + return slotCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot count") } 
return nil } -func (t *Tracker) exportPoolRewards(pWriter *utils.PositionedWriter, targetEpoch iotago.EpochIndex) error { +func (t *Tracker) exportPoolRewards(writer io.WriteSeeker, targetEpoch iotago.EpochIndex) error { // export all stored pools // in theory we could save the epoch count only once, because stats and rewards should be the same length - var epochCount uint64 - if err := pWriter.WriteValue("pool rewards epoch count", epochCount, true); err != nil { - return ierrors.Wrap(err, "unable to write epoch count") - } - for epoch := targetEpoch; epoch > iotago.EpochIndex(lo.Max(0, int(targetEpoch)-daysInYear)); epoch-- { - rewardsMap, err := t.rewardsMap(epoch) - if err != nil { - return ierrors.Wrapf(err, "unable to get rewards tree for epoch index %d", epoch) - } - // if the map was not present in storage we can skip this epoch and the previous ones, as we never stored any rewards - if !rewardsMap.WasRestoredFromStorage() { - break - } - - if err = pWriter.WriteValue("epoch index", epoch); err != nil { - return ierrors.Wrapf(err, "unable to write epoch index for epoch index %d", epoch) - } - - var accountCount uint64 - if err = pWriter.WriteValue("pool rewards account count", accountCount, true); err != nil { - return ierrors.Wrapf(err, "unable to write account count for epoch index %d", epoch) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var epochCount int - if err = rewardsMap.Stream(func(key iotago.AccountID, value *model.PoolRewards) error { - if err = pWriter.WriteValue("account id", key); err != nil { - return ierrors.Wrapf(err, "unable to write account id for epoch index %d and accountID %s", epoch, key) + for epoch := targetEpoch; epoch > iotago.EpochIndex(lo.Max(0, int(targetEpoch)-daysInYear)); epoch-- { + rewardsMap, err := t.rewardsMap(epoch) + if err != nil { + return 0, ierrors.Wrapf(err, "unable to get rewards tree for epoch %d", epoch) + } + // if the map was not present in storage we can skip this epoch and the previous ones, as we never stored any rewards + if !rewardsMap.WasRestoredFromStorage() { + break } - if err = pWriter.WriteValue("account rewards", value); err != nil { - return ierrors.Wrapf(err, "unable to write account rewards for epoch index %d and accountID %s", epoch, key) + if err := stream.Write(writer, epoch); err != nil { + return 0, ierrors.Wrapf(err, "unable to write epoch index for epoch index %d", epoch) } - accountCount++ + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + var accountCount int - return nil - }); err != nil { - return ierrors.Wrapf(err, "unable to stream rewards for epoch index %d", epoch) - } + if err = rewardsMap.Stream(func(key iotago.AccountID, value *model.PoolRewards) error { + if err := stream.Write(writer, key); err != nil { + return ierrors.Wrapf(err, "unable to write account id for epoch %d and accountID %s", epoch, key) + } - if err = pWriter.WriteValueAtBookmark("pool rewards account count", accountCount); err != nil { - return ierrors.Wrapf(err, "unable to write account count for epoch index %d at bookmarked position", epoch) - } + if err := stream.WriteObject(writer, value, (*model.PoolRewards).Bytes); err != nil { + return ierrors.Wrapf(err, "unable to write account rewards for epoch index %d and accountID %s", epoch, key) + } - epochCount++ - } + accountCount++ - if err := pWriter.WriteValueAtBookmark("pool rewards epoch count", epochCount); err != nil { - return ierrors.Wrap(err, "unable 
to write epoch count at bookmarked position") + return nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to stream rewards for epoch index %d", epoch) + } + + return accountCount, nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to write rewards for epoch index %d", epoch) + } + + epochCount++ + } + + return epochCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write pool rewards collection") } return nil } -func (t *Tracker) exportPoolsStats(pWriter *utils.PositionedWriter, targetEpoch iotago.EpochIndex) error { - var epochCount uint64 - if err := pWriter.WriteValue("pools stats epoch count", epochCount, true); err != nil { - return ierrors.Wrap(err, "unable to write epoch count") - } - // export all stored pools - var innerErr error - if err := t.poolStatsStore.StreamBytes(func(key []byte, value []byte) error { - epoch, _, err := iotago.EpochIndexFromBytes(key) - if err != nil { - innerErr = err +func (t *Tracker) exportPoolsStats(writer io.WriteSeeker, targetEpoch iotago.EpochIndex) error { + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var epochCount int - return innerErr - } + // export all stored pools + if err := t.poolStatsStore.StreamBytes(func(key []byte, value []byte) error { + epoch, _, err := iotago.EpochIndexFromBytes(key) + if err != nil { + return err + } - if epoch > targetEpoch { - // continue - return nil - } - if err := pWriter.WriteBytes(key); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write epoch index %d", epoch) + if epoch > targetEpoch { + // continue + return nil + } - return innerErr - } + if err := stream.WriteBytes(writer, key); err != nil { + return ierrors.Wrapf(err, "unable to write epoch index %d", epoch) + } + + if err := stream.WriteBytes(writer, value); err != nil { + return ierrors.Wrapf(err, "unable to write pools stats for epoch %d", epoch) + } - if err := pWriter.WriteBytes(value); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write pools stats for epoch %d", epoch) + epochCount++ - return innerErr + return nil + }); err != nil { + return 0, ierrors.Wrap(err, "unable to iterate over pools stats") } - epochCount++ - - return nil + return epochCount, nil }); err != nil { - return ierrors.Wrap(err, "unable to iterate over pools stats") - } else if innerErr != nil { - return ierrors.Wrap(innerErr, "error while iterating over pools stats") - } - if err := pWriter.WriteValueAtBookmark("pools stats epoch count", epochCount); err != nil { - return ierrors.Wrap(err, "unable to write stats epoch count at bookmarked position") + return ierrors.Wrap(err, "unable to write pool stats collection") } return nil } -func (t *Tracker) exportCommittees(pWriter *utils.PositionedWriter, targetSlot iotago.SlotIndex) error { - var epochCount uint64 - if err := pWriter.WriteValue("committees epoch count", epochCount, true); err != nil { - return ierrors.Wrap(err, "unable to write committees epoch count") - } - +func (t *Tracker) exportCommittees(writer io.WriteSeeker, targetSlot iotago.SlotIndex) error { apiForSlot := t.apiProvider.APIForSlot(targetSlot) epochFromTargetSlot := apiForSlot.TimeProvider().EpochFromSlot(targetSlot) pointOfNoReturn := apiForSlot.TimeProvider().EpochEnd(epochFromTargetSlot) - apiForSlot.ProtocolParameters().MaxCommittableAge() - var innerErr error - err := t.committeeStore.StreamBytes(func(epochBytes []byte, committeeBytes []byte) error { - epoch, _, err := iotago.EpochIndexFromBytes(epochBytes) - if err != nil { - 
innerErr = err - - return innerErr - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var epochCount int - // We have a committee for an epoch higher than the targetSlot - // 1. we trust the point of no return, we export the committee for the next epoch - // 2. if we don't trust the point-of-no-return - // - we were able to rotate a committee, then we export it - // - we were not able to rotate a committee (reused), then we don't export it - if epoch > epochFromTargetSlot && targetSlot < pointOfNoReturn { - committee, _, err := account.AccountsFromBytes(committeeBytes) + if err := t.committeeStore.StreamBytes(func(epochBytes []byte, committeeBytes []byte) error { + epoch, _, err := iotago.EpochIndexFromBytes(epochBytes) if err != nil { - innerErr = ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) - - return innerErr + return err } - if committee.IsReused() { - return nil + + // We have a committee for an epoch higher than the targetSlot + // 1. we trust the point of no return, we export the committee for the next epoch + // 2. if we don't trust the point-of-no-return + // - we were able to rotate a committee, then we export it + // - we were not able to rotate a committee (reused), then we don't export it + if epoch > epochFromTargetSlot && targetSlot < pointOfNoReturn { + committee, _, err := account.AccountsFromBytes(committeeBytes) + if err != nil { + return ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) + } + if committee.IsReused() { + return nil + } } - } - if err := pWriter.WriteBytes(epochBytes); err != nil { - innerErr = ierrors.Wrap(err, "unable to write epoch index") + if err := stream.WriteBytes(writer, epochBytes); err != nil { + return ierrors.Wrapf(err, "unable to write epoch index %d", epoch) + } + if err := stream.WriteBytes(writer, committeeBytes); err != nil { + return ierrors.Wrapf(err, "unable to write committee for epoch %d", epoch) + } - return innerErr - } - if err := pWriter.WriteBytes(committeeBytes); err != nil { - innerErr = ierrors.Wrap(err, "unable to write epoch committee") + epochCount++ - return innerErr + return nil + }); err != nil { + return 0, ierrors.Wrap(err, "unable to iterate over committee base store") } - epochCount++ - - return nil - }) - if err != nil { - return ierrors.Wrapf(err, "unable to iterate over committee base store: %w", innerErr) - } - if innerErr != nil { - return ierrors.Wrap(err, "error while iterating over committee base store") - } - - if err = pWriter.WriteValueAtBookmark("committees epoch count", epochCount); err != nil { - return ierrors.Wrap(err, "unable to write committee epoch count at bookmarked position") + return epochCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write committees collection") } return nil diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go index 2f5ccc8c8..d74d8c2bd 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go @@ -3,9 +3,9 @@ package performance import ( "testing" - "github.com/orcaman/writerseeker" "github.com/stretchr/testify/require" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -37,7 +37,7 @@ func TestManager_Import_Export(t *testing.T) { } { - writer := 
&writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() delegatorRewardBeforeImport, validatorRewardBeforeImport := ts.calculateExpectedRewards(epochsCount, epochActions) // export two full epochs @@ -47,14 +47,14 @@ func TestManager_Import_Export(t *testing.T) { ts.InitPerformanceTracker() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) delegatorRewardAfterImport, validatorRewardAfterImport := ts.calculateExpectedRewards(epochsCount, epochActions) require.Equal(t, delegatorRewardBeforeImport, delegatorRewardAfterImport) require.Equal(t, validatorRewardBeforeImport, validatorRewardAfterImport) } { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() delegatorRewardBeforeImport, validatorRewardBeforeImport := ts.calculateExpectedRewards(epochsCount, epochActions) // export at the beginning of epoch 2, skip epoch 3 at all @@ -64,7 +64,7 @@ func TestManager_Import_Export(t *testing.T) { ts.InitPerformanceTracker() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) delegatorRewardAfterImport, validatorRewardAfterImport := ts.calculateExpectedRewards(epochsCount, epochActions) diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go index 62c5c1e48..be9f2961b 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go @@ -61,10 +61,8 @@ func (t *TestSuite) InitPerformanceTracker() { p := slotstore.NewStore(slot, prunableStores[slot], iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(s *model.ValidatorPerformance) ([]byte, error) { - return s.Bytes(t.api) - }, - model.ValidatorPerformanceFromBytes(t.api), + (*model.ValidatorPerformance).Bytes, + model.ValidatorPerformanceFromBytes, ) return p, nil diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index fd3ff4c70..320f1e5cb 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -226,6 +226,8 @@ func (o *SybilProtection) committeeRoot(targetCommitteeEpoch iotago.EpochIndex) committeeTree := ads.NewSet[iotago.Identifier]( mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, ) diff --git a/pkg/storage/permanent/commitments.go b/pkg/storage/permanent/commitments.go index abc4b59c2..4f832ee22 100644 --- a/pkg/storage/permanent/commitments.go +++ b/pkg/storage/permanent/commitments.go @@ -6,6 +6,7 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" iotago "github.com/iotaledger/iota.go/v4" @@ -22,13 +23,8 @@ func NewCommitments(store kvstore.KVStore, apiProvider iotago.APIProvider) *Comm store: kvstore.NewTypedStore(store, iotago.SlotIndex.Bytes, iotago.SlotIndexFromBytes, - func(c *model.Commitment) ([]byte, error) { - return c.Data(), nil - }, - func(bytes []byte) (*model.Commitment, int, error) { - c, err := model.CommitmentFromBytes(bytes, apiProvider) - return c, len(bytes), 
err - }, + (*model.Commitment).Bytes, + model.CommitmentFromBytes(apiProvider), ), } } @@ -42,14 +38,15 @@ func (c *Commitments) Load(slot iotago.SlotIndex) (commitment *model.Commitment, } func (c *Commitments) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (err error) { - if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { - var count uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + var count int for slot := iotago.SlotIndex(0); slot <= targetSlot; slot++ { commitmentBytes, err := c.store.KVStore().Get(lo.PanicOnErr(slot.Bytes())) if err != nil { return 0, ierrors.Wrapf(err, "failed to load commitment for slot %d", slot) } - if err := stream.WriteBlob(writer, commitmentBytes); err != nil { + + if err := stream.WriteBytesWithSize(writer, commitmentBytes, serializer.SeriLengthPrefixTypeAsUint16); err != nil { return 0, ierrors.Wrapf(err, "failed to write commitment for slot %d", slot) } @@ -65,17 +62,12 @@ func (c *Commitments) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) } func (c *Commitments) Import(reader io.ReadSeeker) (err error) { - if err := stream.ReadCollection(reader, func(i int) error { - commitmentBytes, err := stream.ReadBlob(reader) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + commitment, err := stream.ReadObjectWithSize[*model.Commitment](reader, serializer.SeriLengthPrefixTypeAsUint16, model.CommitmentFromBytes(c.apiProvider)) if err != nil { return ierrors.Wrapf(err, "failed to read commitment at index %d", i) } - commitment, err := model.CommitmentFromBytes(commitmentBytes, c.apiProvider) - if err != nil { - return ierrors.Wrapf(err, "failed to parse commitment at index %d", i) - } - if err := c.Store(commitment); err != nil { return ierrors.Wrapf(err, "failed to store commitment at index %d", i) } diff --git a/pkg/storage/permanent/settings.go b/pkg/storage/permanent/settings.go index d1ad5fe82..be4459ed1 100644 --- a/pkg/storage/permanent/settings.go +++ b/pkg/storage/permanent/settings.go @@ -10,6 +10,7 @@ import ( "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/byteutils" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/hive.go/stringify" @@ -65,17 +66,8 @@ func NewSettings(store kvstore.KVStore, opts ...options.Option[api.EpochBasedPro storeLatestCommitment: kvstore.NewTypedValue( store, []byte{latestCommitmentKey}, - func(commitment *model.Commitment) ([]byte, error) { - return commitment.Data(), nil - }, - func(bytes []byte) (*model.Commitment, int, error) { - commitment, err := model.CommitmentFromBytes(bytes, apiProvider) - if err != nil { - return nil, 0, err - } - - return commitment, len(bytes), nil - }, + (*model.Commitment).Bytes, + model.CommitmentFromBytes(apiProvider), ), storeLatestFinalizedSlot: kvstore.NewTypedValue( store, @@ -346,7 +338,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit commitmentBytes = s.LatestCommitment().Data() } - if err := stream.WriteBlob(writer, commitmentBytes); err != nil { + if err := stream.WriteBytesWithSize(writer, commitmentBytes, serializer.SeriLengthPrefixTypeAsUint16); err != nil { return ierrors.Wrap(err, "failed to write commitment") } @@ -358,8 +350,8 @@ func (s *Settings) 
Export(writer io.WriteSeeker, targetCommitment *iotago.Commit defer s.mutex.RUnlock() // Export protocol versions - if err := stream.WriteCollection(writer, func() (uint64, error) { - var count uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint16, func() (int, error) { + var count int var innerErr error if err := s.storeProtocolVersionEpochMapping.Iterate(kvstore.EmptyPrefix, func(version iotago.Version, epoch iotago.EpochIndex) bool { @@ -391,8 +383,8 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit // TODO: rollback future protocol parameters if it was added after targetCommitment.Slot() // Export future protocol parameters - if err := stream.WriteCollection(writer, func() (uint64, error) { - var count uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint16, func() (int, error) { + var count int var innerErr error if err := s.storeFutureProtocolParameters.Iterate(kvstore.EmptyPrefix, func(version iotago.Version, tuple *types.Tuple[iotago.EpochIndex, iotago.Identifier]) bool { @@ -428,8 +420,8 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit } // Export protocol parameters: we only export the parameters up until the current active ones. - if err := stream.WriteCollection(writer, func() (uint64, error) { - var paramsCount uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint16, func() (int, error) { + var paramsCount int var innerErr error if err := s.storeProtocolParameters.KVStore().Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, value kvstore.Value) bool { @@ -444,7 +436,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit return true } - if err := stream.WriteBlob(writer, value); err != nil { + if err := stream.WriteBytesWithSize(writer, value, serializer.SeriLengthPrefixTypeAsUint32); err != nil { innerErr = err return false } @@ -467,7 +459,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit } func (s *Settings) Import(reader io.ReadSeeker) (err error) { - commitmentBytes, err := stream.ReadBlob(reader) + commitmentBytes, err := stream.ReadBytesWithSize(reader, serializer.SeriLengthPrefixTypeAsUint16) if err != nil { return ierrors.Wrap(err, "failed to read commitment") } @@ -482,7 +474,7 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Read protocol version epoch mapping - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint16, func(i int) error { version, err := stream.Read[iotago.Version](reader) if err != nil { return ierrors.Wrap(err, "failed to parse version") @@ -504,7 +496,7 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Read future protocol parameters - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint16, func(i int) error { version, err := stream.Read[iotago.Version](reader) if err != nil { return ierrors.Wrap(err, "failed to parse version") @@ -530,8 +522,8 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Read protocol parameters - if err := stream.ReadCollection(reader, func(i int) error { - paramsBytes, err := stream.ReadBlob(reader) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint16, func(i int) error { + paramsBytes, err := stream.ReadBytesWithSize(reader, 
serializer.SeriLengthPrefixTypeAsUint32) if err != nil { return ierrors.Wrapf(err, "failed to read protocol parameters bytes at index %d", i) } @@ -550,7 +542,7 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Now that we parsed the protocol parameters, we can parse the commitment since there will be an API available - commitment, err := model.CommitmentFromBytes(commitmentBytes, s.apiProvider) + commitment, err := lo.DropCount(model.CommitmentFromBytes(s.apiProvider)(commitmentBytes)) if err != nil { return ierrors.Wrap(err, "failed to parse commitment") } diff --git a/pkg/storage/prunable/prunable_slot.go b/pkg/storage/prunable/prunable_slot.go index 0549278b8..9297c5977 100644 --- a/pkg/storage/prunable/prunable_slot.go +++ b/pkg/storage/prunable/prunable_slot.go @@ -82,15 +82,11 @@ func (p *Prunable) ValidatorPerformances(slot iotago.SlotIndex) (*slotstore.Stor return nil, ierrors.Wrapf(database.ErrEpochPruned, "could not get performance factors with slot %d", slot) } - apiForSlot := p.apiProvider.APIForSlot(slot) - return slotstore.NewStore(slot, kv, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(s *model.ValidatorPerformance) ([]byte, error) { - return s.Bytes(apiForSlot) - }, - model.ValidatorPerformanceFromBytes(apiForSlot), + (*model.ValidatorPerformance).Bytes, + model.ValidatorPerformanceFromBytes, ), nil } diff --git a/pkg/storage/prunable/slotstore/accountdiffs.go b/pkg/storage/prunable/slotstore/accountdiffs.go index 8aa889fa4..e04eb5242 100644 --- a/pkg/storage/prunable/slotstore/accountdiffs.go +++ b/pkg/storage/prunable/slotstore/accountdiffs.go @@ -31,19 +31,14 @@ func NewAccountDiffs(slot iotago.SlotIndex, store kvstore.KVStore, api iotago.AP iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*model.AccountDiff).Bytes, - func(bytes []byte) (object *model.AccountDiff, consumed int, err error) { - diff := new(model.AccountDiff) - n, err := diff.FromBytes(bytes) - - return diff, n, err - }), + model.AccountDiffFromBytes, + ), destroyedAccounts: kvstore.NewTypedStore[iotago.AccountID, types.Empty](lo.PanicOnErr(store.WithExtendedRealm(kvstore.Realm{destroyedAccountsPrefix})), iotago.AccountID.Bytes, iotago.AccountIDFromBytes, types.Empty.Bytes, - func(bytes []byte) (object types.Empty, consumed int, err error) { - return types.Void, 0, nil - }), + types.EmptyFromBytes, + ), } } diff --git a/pkg/storage/prunable/slotstore/retainer.go b/pkg/storage/prunable/slotstore/retainer.go index 27a46c5d3..646ed7913 100644 --- a/pkg/storage/prunable/slotstore/retainer.go +++ b/pkg/storage/prunable/slotstore/retainer.go @@ -4,7 +4,7 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/nodeclient/apimodels" ) @@ -20,28 +20,32 @@ type BlockRetainerData struct { } func (b *BlockRetainerData) Bytes() ([]byte, error) { - marshalUtil := marshalutil.New(2) - marshalUtil.WriteUint8(uint8(b.State)) - marshalUtil.WriteUint8(uint8(b.FailureReason)) + byteBuffer := stream.NewByteBuffer(2) - return marshalUtil.Bytes(), nil + if err := stream.Write(byteBuffer, b.State); err != nil { + return nil, ierrors.Wrap(err, "failed to write block state") + } + if err := stream.Write(byteBuffer, b.FailureReason); err != nil { + return nil, ierrors.Wrap(err, "failed to write block failure reason") + } + + return 
byteBuffer.Bytes() } -func (b *BlockRetainerData) FromBytes(bytes []byte) (int, error) { - marshalUtil := marshalutil.New(bytes) - state, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err - } - b.State = apimodels.BlockState(state) +func BlockRetainerDataFromBytes(bytes []byte) (*BlockRetainerData, int, error) { + byteReader := stream.NewByteReader(bytes) - reason, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err + var err error + b := new(BlockRetainerData) + + if b.State, err = stream.Read[apimodels.BlockState](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read block state") + } + if b.FailureReason, err = stream.Read[apimodels.BlockFailureReason](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read block failure reason") } - b.FailureReason = apimodels.BlockFailureReason(reason) - return marshalUtil.ReadOffset(), nil + return b, byteReader.BytesRead(), nil } type TransactionRetainerData struct { @@ -50,28 +54,32 @@ type TransactionRetainerData struct { } func (t *TransactionRetainerData) Bytes() ([]byte, error) { - marshalUtil := marshalutil.New(2) - marshalUtil.WriteUint8(uint8(t.State)) - marshalUtil.WriteUint8(uint8(t.FailureReason)) + byteBuffer := stream.NewByteBuffer(2) + + if err := stream.Write(byteBuffer, t.State); err != nil { + return nil, ierrors.Wrap(err, "failed to write transaction state") + } + if err := stream.Write(byteBuffer, t.FailureReason); err != nil { + return nil, ierrors.Wrap(err, "failed to write transaction failure reason") + } - return marshalUtil.Bytes(), nil + return byteBuffer.Bytes() } -func (t *TransactionRetainerData) FromBytes(bytes []byte) (int, error) { - marshalUtil := marshalutil.New(bytes) - state, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err - } - t.State = apimodels.TransactionState(state) +func TransactionRetainerDataFromBytes(bytes []byte) (*TransactionRetainerData, int, error) { + byteReader := stream.NewByteReader(bytes) - reason, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err + var err error + t := new(TransactionRetainerData) + + if t.State, err = stream.Read[apimodels.TransactionState](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read transaction state") + } + if t.FailureReason, err = stream.Read[apimodels.TransactionFailureReason](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read transaction failure reason") } - t.FailureReason = apimodels.TransactionFailureReason(reason) - return marshalUtil.ReadOffset(), nil + return t, byteReader.BytesRead(), nil } type Retainer struct { @@ -88,23 +96,13 @@ func NewRetainer(slot iotago.SlotIndex, store kvstore.KVStore) (newRetainer *Ret iotago.BlockID.Bytes, iotago.BlockIDFromBytes, (*BlockRetainerData).Bytes, - func(bytes []byte) (*BlockRetainerData, int, error) { - b := new(BlockRetainerData) - c, err := b.FromBytes(bytes) - - return b, c, err - }, + BlockRetainerDataFromBytes, ), transactionStore: kvstore.NewTypedStore(lo.PanicOnErr(store.WithExtendedRealm(kvstore.Realm{transactionStorePrefix})), iotago.BlockID.Bytes, iotago.BlockIDFromBytes, (*TransactionRetainerData).Bytes, - func(bytes []byte) (*TransactionRetainerData, int, error) { - t := new(TransactionRetainerData) - c, err := t.FromBytes(bytes) - - return t, c, err - }, + TransactionRetainerDataFromBytes, ), } } diff --git a/pkg/tests/accounts_test.go b/pkg/tests/accounts_test.go index 99886583d..a4e84dc01 100644 --- a/pkg/tests/accounts_test.go +++ 
b/pkg/tests/accounts_test.go @@ -108,7 +108,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { // assert diff of the genesis account, it should have a new output ID, new expiry slot and a new block issuer key. ts.AssertAccountDiff(genesisAccountOutput.AccountID, block1Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, PreviousExpirySlot: iotago.MaxSlotIndex, NewExpirySlot: newExpirySlot, NewOutputID: ts.DefaultWallet().Output("TX1:0").OutputID(), @@ -139,7 +139,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { // assert diff of the destroyed account. ts.AssertAccountDiff(genesisAccountOutput.AccountID, block2Slot, &model.AccountDiff{ BICChange: -iotago.BlockIssuanceCredits(123), - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewExpirySlot: 0, PreviousExpirySlot: newExpirySlot, NewOutputID: iotago.EmptyOutputID, @@ -229,7 +229,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { ts.AssertAccountDiff(newAccountOutput.AccountID, block1Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewExpirySlot: newAccountExpirySlot, PreviousExpirySlot: 0, NewOutputID: newAccount.OutputID(), @@ -271,7 +271,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { ts.AssertAccountDiff(newAccountOutput.AccountID, block2Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewOutputID: iotago.EmptyOutputID, PreviousOutputID: iotago.EmptyOutputID, BlockIssuerKeysAdded: iotago.NewBlockIssuerKeys(), @@ -304,7 +304,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { // Transitioning to delayed claiming effectively removes the delegation, so we expect a negative delegation stake change. ts.AssertAccountDiff(newAccountOutput.AccountID, block3Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewOutputID: iotago.EmptyOutputID, PreviousOutputID: iotago.EmptyOutputID, BlockIssuerKeysAdded: iotago.NewBlockIssuerKeys(), @@ -429,7 +429,7 @@ func Test_ImplicitAccounts(t *testing.T) { // the implicit account should now have been transitioned to a full account in the accounts ledger. 
ts.AssertAccountDiff(implicitAccountID, block2Slot, &model.AccountDiff{ BICChange: allotted - burned, - PreviousUpdatedTime: block1Slot, + PreviousUpdatedSlot: block1Slot, NewOutputID: fullAccountOutputID, PreviousOutputID: implicitAccountOutputID, PreviousExpirySlot: iotago.MaxSlotIndex, diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go index 8f5ed29f3..057b89835 100644 --- a/pkg/tests/upgrade_signaling_test.go +++ b/pkg/tests/upgrade_signaling_test.go @@ -132,7 +132,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -145,7 +145,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: wallet.BlockIssuer.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:5").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(wallet.BlockIssuer.PublicKey))), @@ -166,7 +166,7 @@ func Test_Upgrade_Signaling(t *testing.T) { // check account data before all nodes set the current version ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -179,7 +179,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeD").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:4").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").Validator.PublicKey))), @@ -200,7 +200,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -365,7 +365,7 @@ func Test_Upgrade_Signaling(t *testing.T) 
{ // check account data at the end of the test ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -378,7 +378,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeD").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:4").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").Validator.PublicKey))), diff --git a/pkg/testsuite/accounts.go b/pkg/testsuite/accounts.go index b3683761a..715276ef4 100644 --- a/pkg/testsuite/accounts.go +++ b/pkg/testsuite/accounts.go @@ -29,8 +29,8 @@ func (t *TestSuite) AssertAccountData(accountData *accounts.AccountData, nodes . return ierrors.Errorf("AssertAccountData: %s: accountID %s expected credits value %d, got %d", node.Name, accountData.ID, accountData.Credits.Value, actualAccountData.Credits.Value) } - if accountData.Credits.UpdateTime != actualAccountData.Credits.UpdateTime { - return ierrors.Errorf("AssertAccountData: %s: accountID %s expected credits update time %d, got %d", node.Name, accountData.ID, accountData.Credits.UpdateTime, actualAccountData.Credits.UpdateTime) + if accountData.Credits.UpdateSlot != actualAccountData.Credits.UpdateSlot { + return ierrors.Errorf("AssertAccountData: %s: accountID %s expected credits update time %d, got %d", node.Name, accountData.ID, accountData.Credits.UpdateSlot, actualAccountData.Credits.UpdateSlot) } if accountData.OutputID != actualAccountData.OutputID { @@ -98,8 +98,8 @@ func (t *TestSuite) AssertAccountDiff(accountID iotago.AccountID, index iotago.S return ierrors.Errorf("AssertAccountDiff: %s: expected change %d but actual %d for account %s at slot %d", node.Name, accountDiff.BICChange, actualAccountDiff.BICChange, accountID, index) } - if accountDiff.PreviousUpdatedTime != actualAccountDiff.PreviousUpdatedTime { - return ierrors.Errorf("AssertAccountDiff: %s: expected previous updated time %d but actual %d for account %s at slot %d", node.Name, accountDiff.PreviousUpdatedTime, actualAccountDiff.PreviousUpdatedTime, accountID, index) + if accountDiff.PreviousUpdatedSlot != actualAccountDiff.PreviousUpdatedSlot { + return ierrors.Errorf("AssertAccountDiff: %s: expected previous updated time %d but actual %d for account %s at slot %d", node.Name, accountDiff.PreviousUpdatedSlot, actualAccountDiff.PreviousUpdatedSlot, accountID, index) } if accountDiff.NewExpirySlot != actualAccountDiff.NewExpirySlot { diff --git a/pkg/testsuite/snapshotcreator/snapshotcreator.go b/pkg/testsuite/snapshotcreator/snapshotcreator.go index c09f9d622..62bc321cf 100644 --- a/pkg/testsuite/snapshotcreator/snapshotcreator.go +++ b/pkg/testsuite/snapshotcreator/snapshotcreator.go @@ -82,7 +82,7 @@ func CreateSnapshot(opts ...options.Option[Options]) error { accountID := 
blake2b.Sum256(ed25519PubKey[:]) committeeAccountsData = append(committeeAccountsData, &accounts.AccountData{ ID: accountID, - Credits: &accounts.BlockIssuanceCredits{Value: snapshotAccountDetails.BlockIssuanceCredits, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: snapshotAccountDetails.BlockIssuanceCredits, UpdateSlot: 0}, ExpirySlot: snapshotAccountDetails.ExpirySlot, OutputID: iotago.OutputID{}, BlockIssuerKeys: iotago.BlockIssuerKeys{snapshotAccountDetails.IssuerKey}, diff --git a/pkg/utils/ioutils.go b/pkg/utils/ioutils.go deleted file mode 100644 index 5c7d810b4..000000000 --- a/pkg/utils/ioutils.go +++ /dev/null @@ -1,98 +0,0 @@ -package utils - -import ( - "encoding/binary" - "io" - - "github.com/iotaledger/hive.go/ierrors" -) - -func increaseOffsets(amount int64, offsets ...*int64) { - for _, offset := range offsets { - *offset += amount - } -} - -func WriteValueFunc(writeSeeker io.WriteSeeker, value any, offsetsToIncrease ...*int64) error { - length := binary.Size(value) - if length == -1 { - return ierrors.New("unable to determine length of value") - } - - if err := binary.Write(writeSeeker, binary.LittleEndian, value); err != nil { - return ierrors.Wrap(err, "unable to write value") - } - - increaseOffsets(int64(length), offsetsToIncrease...) - - return nil -} - -func WriteBytesFunc(writeSeeker io.WriteSeeker, bytes []byte, offsetsToIncrease ...*int64) error { - length, err := writeSeeker.Write(bytes) - if err != nil { - return ierrors.Wrap(err, "unable to write bytes") - } - - increaseOffsets(int64(length), offsetsToIncrease...) - - return nil -} - -type PositionedWriter struct { - bookmarks map[string]int64 - writer io.WriteSeeker -} - -func NewPositionedWriter(writer io.WriteSeeker) *PositionedWriter { - p := &PositionedWriter{ - bookmarks: make(map[string]int64), - writer: writer, - } - - return p -} - -func (p *PositionedWriter) WriteBytes(bytes []byte) error { - return WriteBytesFunc(p.writer, bytes) -} - -func (p *PositionedWriter) WriteValue(name string, value interface{}, saveBookmark ...bool) error { - if len(saveBookmark) > 0 && saveBookmark[0] { - currentPosition, err := p.writer.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - p.bookmarks[name] = currentPosition - } - if err := WriteValueFunc(p.writer, value); err != nil { - return ierrors.Wrapf(err, "unable to write value %s", name) - } - - return nil -} - -func (p *PositionedWriter) WriteValueAtBookmark(name string, value interface{}) error { - bookmarkPosition, ok := p.bookmarks[name] - if !ok { - return ierrors.Errorf("unable to find saved position for bookmark %s", name) - } - originalPosition, err := p.writer.Seek(0, io.SeekCurrent) - if err != nil { - return ierrors.Wrap(err, "unable to obtain current seek position") - } - if bookmarkPosition >= originalPosition { - return ierrors.Errorf("cannot write into the future, current write position %d is greater than or equal to the bookmark position %d", originalPosition, bookmarkPosition) - } - if _, err := p.writer.Seek(bookmarkPosition, io.SeekStart); err != nil { - return ierrors.Wrapf(err, "unable to seek back to bookmark %s position", name) - } - if err := WriteValueFunc(p.writer, value); err != nil { - return ierrors.Wrapf(err, "unable to write value %s", name) - } - if _, err := p.writer.Seek(originalPosition, io.SeekStart); err != nil { - return ierrors.Wrap(err, "unable to seek to original position") - } - - return nil -} diff --git a/tools/docker-network/run.sh b/tools/docker-network/run.sh index 323a6bdbb..87697914f 100755 
--- a/tools/docker-network/run.sh +++ b/tools/docker-network/run.sh @@ -49,7 +49,7 @@ docker run --rm \ -e GOCACHE="/go-cache" \ -e GOMODCACHE="/go-mod-cache" \ -w "/workspace/tools/genesis-snapshot" \ - golang:1.21 go run -tags=rocksdb . --config docker --seed 7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih + golang:1.21-bookworm go run -tags=rocksdb . --config docker --seed 7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih # Move and set permissions for the .snapshot file mv -f ../genesis-snapshot/*.snapshot . diff --git a/tools/gendoc/go.mod b/tools/gendoc/go.mod index 4fdc2c0b0..7e15edb04 100644 --- a/tools/gendoc/go.mod +++ b/tools/gendoc/go.mod @@ -5,7 +5,7 @@ go 1.21 replace github.com/iotaledger/iota-core => ../../ require ( - github.com/iotaledger/hive.go/app v0.0.0-20231027195901-620bd7470e42 + github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936 github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3 github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000 ) @@ -57,22 +57,22 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect - github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/logger v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42 // indirect - github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42 // indirect + github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 // indirect + github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 // indirect github.com/iotaledger/inx-app 
v1.0.0-rc.3.0.20231031135002-4c79ea5193f5 // indirect github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac // indirect - github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e // indirect + github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6 // indirect github.com/ipfs/boxo v0.13.1 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect diff --git a/tools/gendoc/go.sum b/tools/gendoc/go.sum index b16ac173a..17f6c8f9f 100644 --- a/tools/gendoc/go.sum +++ b/tools/gendoc/go.sum @@ -277,42 +277,42 @@ github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys= github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw= -github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42 h1:EOfxTuAiBmED1VHuVh7/UIeB27cCRe13gdSzyioNMBw= -github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42/go.mod h1:IFh0gDfeMgZtfCo+5afK59IDR4xXh+cTR9YtLnZPcbY= -github.com/iotaledger/hive.go/app v0.0.0-20231027195901-620bd7470e42 h1:xAER9M9Uoz2EOWT43E9wmXRe+RmAk8OBSUoboH4Su8M= -github.com/iotaledger/hive.go/app v0.0.0-20231027195901-620bd7470e42/go.mod h1:8ZbIKR84oQd/3iQ5eeT7xpudO9/ytzXP7veIYnk7Orc= +github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 h1:2r4FgIGdc2lHcIbXiUFCCVq4+B0oZk9t6Z0SSLjrzCE= +github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= +github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936 h1:SnmQt9GxrWIvpW7pgQS049x1b8T+lQutTQbo35FImug= +github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3 h1:4aVJTc0KS77uEw0Tny4r0n1ORwcbAQDECaCclgf/6lE= github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3/go.mod h1:TZeAqieDu+xDOZp2e9+S+8pZp1PrfgcwLUnxmd8IgLU= -github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42 h1:drmpgLlJy7kZ09Dt1qKSnbILU+27Qu2jp4VdPDNwbFk= -github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42 h1:BC5GkIHyXdoJGdw6Tu5ds2kjw9grFLtwQiuMaKfdLU8= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42/go.mod h1:Mc+ACqBGPxrPMIPUBOm6/HL0J6m0iVMwjtIEKW3uow8= -github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42 h1:r8TkdQJB7/bJd8cF8z5GQ+rX/7JpbPdPoN7wMoV1OCM= -github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42/go.mod h1:h3o6okvMSEK3KOX6pOp3yq1h9ohTkTfo6X8MzEadeb0= -github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42 h1:ytzZZPtclAzLfjxv26frbinCGx3Z6ouUENbx5U7lFGg= -github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42/go.mod h1:3XkUSKfHaVxGbT0XAvjNlVYqPzhfLTGhDtdNA5UBPco= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42 h1:QMxd32Y/veVhTDPCiOFgetjUbG7sr9MryF29/rSPkMA= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= 
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42 h1:/xPwStUckZ2V0XPoY496cXU+c5elpHyvYoT6JAmuvRY= -github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42/go.mod h1:O/U3jtiUDeqqM0MZQFu2UPqS9fUm0C5hNISxlmg/thE= -github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42 h1:AvNLzONVMspwx7nD/NyYUgb5Hi7/zgzIOegr1uRD/M8= -github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42/go.mod h1:s4kzx9QY1MVWHJralj+3q5kI0eARtrJhphYD/iBbPfo= -github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42 h1:e1uJAlXE3zeXpa+c4uFOG+/AMFbUlLt2mcrSK5NMxVs= -github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42/go.mod h1:JvokzmpmFZPDskMlUqqjgHtD8usVJU4nAY/TNMGge8M= -github.com/iotaledger/hive.go/logger v0.0.0-20231027195901-620bd7470e42 h1:7wjs4t1snBDJ8LOTl+tZhr2ORywSOTgJMppxiIAMA0A= -github.com/iotaledger/hive.go/logger v0.0.0-20231027195901-620bd7470e42/go.mod h1:aBfAfIB2GO/IblhYt5ipCbyeL9bXSNeAwtYVA3hZaHg= -github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42 h1:1QMJ39qXIx/IZVzus3+97IV7Pa++e+d340TvbMjhiBU= -github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42/go.mod h1:jRw8yFipiPaqmTPHh7hTcxAP9u6pjRGpByS3REJKkbY= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42 h1:hZli4E9kJUAEQ7gzZR1XbPcpgqvqMPYq8YBPMbrBuos= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42/go.mod h1:SdK26z8/VhWtxaqCuQrufm80SELgowQPmu9T/8eUQ8g= -github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42 h1:OlDhgvJ48bZxcvTeebJ1b96xtNnJAddejd2Q4rlH1mU= -github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= +github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 h1:qkq0Wz+Y3J8QYRLd0fwTgHuur/A3k7d82BxOKSfvk8c= +github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 h1:GtsYwcCqRomhMo190TPxBrOzs6YnVmqkmQgT/lJrJRo= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= +github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 h1:Xeb4w0g0Kv2ZjdCZQqz8oiqAU5qAy8OXG8kGTXSPzuY= +github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= +github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 h1:NtQLSS0Lq5qg/w5nbMpXrlQpmcK3KiOaQmgZWoRc4mM= +github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 h1:o5S4KUAwToOLXoYYRj9ZgqeDsFv1VRM4+Mni0Tdj2Ck= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= +github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 h1:kXKJQ8UvbA8kI0Jx0EnlXbwDeZFY8pEX0Q6KaOPsYlQ= +github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= +github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 h1:coXPklQ7JgqTXIUXh3b4OHml1VIvI8x7pQsjsES/u/s= +github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= +github.com/iotaledger/hive.go/log 
v0.0.0-20231108050255-98e0fa35e936 h1:VBvGnsVwqhoT9zMyMIlK5fPmz6fsbiPZOwdU1E8WU7o= +github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= +github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936 h1:05EbTaladbyo7mD8yBaWYJh9P8u/TUTmrjVmcUjoW8A= +github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= +github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 h1:XbC1fmY87UJ/yMs8U2YqlUdJsqb0Xqj/ZYQKlZ7AUG8= +github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 h1:LXhLW2cN9bQYoHQsgmJRb/jiRBRU5s2rLoCNjZfgHdg= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= +github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 h1:Y4HgL5gm9S27usg5M2t6wi1BSdCxVorM62lwnpKuMd4= +github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5 h1:17JDzMKTMXKF3xys6gPURRddkZhg1LY+xwfhbr/sVqg= github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5/go.mod h1:LsJvoBUVVnY7tkwwByCVtAwmp5bFXdyJNGU/+KVQJVM= github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac h1:c7R33+TQGMYP6pvLUQQaqpdDFl+GZbhAcfGMI0285fo= github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac/go.mod h1:qPuMUvCTaghsnYRDnRoRuztTyEKFlmi2S7gb44rH7WM= -github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e h1:ZYRC1MHn/ghsqtjIpYGTxLQrh5n5eUmC0/YWnJiTRhk= -github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e/go.mod h1:jqbLYq4a/FwuiPBqFfkAwwxU8vs3+kReRq2/tyX5qRA= +github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6 h1:4kvG+BB4GOBsNYPY/enPo3xeC65A133L9cD73Kf1p9Q= +github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6/go.mod h1:8iDORW4/e4NztyAGqjW07uSMjbhs7snbxw+81IWOczY= github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI= github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -511,8 +511,6 @@ github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw= -github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= diff --git a/tools/genesis-snapshot/go.mod b/tools/genesis-snapshot/go.mod index 1ec9104e2..edbf18aae 100644 --- a/tools/genesis-snapshot/go.mod +++ b/tools/genesis-snapshot/go.mod @@ -5,12 +5,12 @@ go 1.21 replace 
github.com/iotaledger/iota-core => ../../

 require (
-	github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42
-	github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42
-	github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42
-	github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42
+	github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936
 	github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000
-	github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e
+	github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6
 	github.com/mr-tron/base58 v1.2.0
 	github.com/spf13/pflag v1.0.5
 	golang.org/x/crypto v0.14.0
@@ -26,14 +26,14 @@ require (
 	github.com/holiman/uint256 v1.2.3 // indirect
 	github.com/iancoleman/orderedmap v0.3.0 // indirect
 	github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect
-	github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42 // indirect
-	github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42 // indirect
+	github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 // indirect
 	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/kr/text v0.2.0 // indirect
diff --git a/tools/genesis-snapshot/go.sum b/tools/genesis-snapshot/go.sum
index 00953620a..0a0bf91ab 100644
--- a/tools/genesis-snapshot/go.sum
+++ b/tools/genesis-snapshot/go.sum
@@ -28,32 +28,32 @@ github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJ
 github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42 h1:EOfxTuAiBmED1VHuVh7/UIeB27cCRe13gdSzyioNMBw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231027195901-620bd7470e42/go.mod h1:IFh0gDfeMgZtfCo+5afK59IDR4xXh+cTR9YtLnZPcbY=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42 h1:drmpgLlJy7kZ09Dt1qKSnbILU+27Qu2jp4VdPDNwbFk=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231027195901-620bd7470e42/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42 h1:BC5GkIHyXdoJGdw6Tu5ds2kjw9grFLtwQiuMaKfdLU8=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231027195901-620bd7470e42/go.mod h1:Mc+ACqBGPxrPMIPUBOm6/HL0J6m0iVMwjtIEKW3uow8=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42 h1:r8TkdQJB7/bJd8cF8z5GQ+rX/7JpbPdPoN7wMoV1OCM=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231027195901-620bd7470e42/go.mod h1:h3o6okvMSEK3KOX6pOp3yq1h9ohTkTfo6X8MzEadeb0=
-github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42 h1:ytzZZPtclAzLfjxv26frbinCGx3Z6ouUENbx5U7lFGg=
-github.com/iotaledger/hive.go/ds v0.0.0-20231027195901-620bd7470e42/go.mod h1:3XkUSKfHaVxGbT0XAvjNlVYqPzhfLTGhDtdNA5UBPco=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42 h1:QMxd32Y/veVhTDPCiOFgetjUbG7sr9MryF29/rSPkMA=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231027195901-620bd7470e42/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42 h1:/xPwStUckZ2V0XPoY496cXU+c5elpHyvYoT6JAmuvRY=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231027195901-620bd7470e42/go.mod h1:O/U3jtiUDeqqM0MZQFu2UPqS9fUm0C5hNISxlmg/thE=
-github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42 h1:AvNLzONVMspwx7nD/NyYUgb5Hi7/zgzIOegr1uRD/M8=
-github.com/iotaledger/hive.go/lo v0.0.0-20231027195901-620bd7470e42/go.mod h1:s4kzx9QY1MVWHJralj+3q5kI0eARtrJhphYD/iBbPfo=
-github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42 h1:e1uJAlXE3zeXpa+c4uFOG+/AMFbUlLt2mcrSK5NMxVs=
-github.com/iotaledger/hive.go/log v0.0.0-20231027195901-620bd7470e42/go.mod h1:JvokzmpmFZPDskMlUqqjgHtD8usVJU4nAY/TNMGge8M=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42 h1:1QMJ39qXIx/IZVzus3+97IV7Pa++e+d340TvbMjhiBU=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231027195901-620bd7470e42/go.mod h1:jRw8yFipiPaqmTPHh7hTcxAP9u6pjRGpByS3REJKkbY=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42 h1:hZli4E9kJUAEQ7gzZR1XbPcpgqvqMPYq8YBPMbrBuos=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231027195901-620bd7470e42/go.mod h1:SdK26z8/VhWtxaqCuQrufm80SELgowQPmu9T/8eUQ8g=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42 h1:OlDhgvJ48bZxcvTeebJ1b96xtNnJAddejd2Q4rlH1mU=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231027195901-620bd7470e42/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e h1:ZYRC1MHn/ghsqtjIpYGTxLQrh5n5eUmC0/YWnJiTRhk=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e/go.mod h1:jqbLYq4a/FwuiPBqFfkAwwxU8vs3+kReRq2/tyX5qRA=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 h1:2r4FgIGdc2lHcIbXiUFCCVq4+B0oZk9t6Z0SSLjrzCE=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 h1:qkq0Wz+Y3J8QYRLd0fwTgHuur/A3k7d82BxOKSfvk8c=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 h1:GtsYwcCqRomhMo190TPxBrOzs6YnVmqkmQgT/lJrJRo=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 h1:Xeb4w0g0Kv2ZjdCZQqz8oiqAU5qAy8OXG8kGTXSPzuY=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 h1:NtQLSS0Lq5qg/w5nbMpXrlQpmcK3KiOaQmgZWoRc4mM=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 h1:o5S4KUAwToOLXoYYRj9ZgqeDsFv1VRM4+Mni0Tdj2Ck=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 h1:kXKJQ8UvbA8kI0Jx0EnlXbwDeZFY8pEX0Q6KaOPsYlQ=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 h1:coXPklQ7JgqTXIUXh3b4OHml1VIvI8x7pQsjsES/u/s=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 h1:VBvGnsVwqhoT9zMyMIlK5fPmz6fsbiPZOwdU1E8WU7o=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 h1:XbC1fmY87UJ/yMs8U2YqlUdJsqb0Xqj/ZYQKlZ7AUG8=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 h1:LXhLW2cN9bQYoHQsgmJRb/jiRBRU5s2rLoCNjZfgHdg=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 h1:Y4HgL5gm9S27usg5M2t6wi1BSdCxVorM62lwnpKuMd4=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6 h1:4kvG+BB4GOBsNYPY/enPo3xeC65A133L9cD73Kf1p9Q=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108050608-afce96cfe8a6/go.mod h1:8iDORW4/e4NztyAGqjW07uSMjbhs7snbxw+81IWOczY=
 github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
 github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
 github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
@@ -84,8 +84,6 @@ github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7B
 github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
 github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
 github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0=
 github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
 github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=