diff --git a/Dockerfile b/Dockerfile index 13188b839f..68d9a11223 100644 --- a/Dockerfile +++ b/Dockerfile @@ -54,6 +54,7 @@ COPY ./arbos ./arbos COPY ./arbstate ./arbstate COPY ./blsSignatures ./blsSignatures COPY ./cmd/replay ./cmd/replay +COPY ./das/dastree ./das/dastree COPY ./precompiles ./precompiles COPY ./statetransfer ./statetransfer COPY ./util ./util diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 93cb768e2a..081f820c73 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -12,14 +12,15 @@ import ( "fmt" "io" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/das/dastree" ) type DataAvailabilityReader interface { - GetByHash(ctx context.Context, hash []byte) ([]byte, error) + GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) HealthCheck(ctx context.Context) error ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error) } @@ -30,6 +31,10 @@ var ErrHashMismatch = errors.New("Result does not match expected hash") // which will retrieve the full batch data. const DASMessageHeaderFlag byte = 0x80 +// Indicates that this DAS certificate data employs the new merkleization strategy. +// Ignored when DASMessageHeaderFlag is not set. +const TreeDASMessageHeaderFlag byte = 0x08 + // Indicates that this message was authenticated by L1. Currently unused. 
const L1AuthenticatedMessageHeaderFlag byte = 0x40 @@ -43,6 +48,10 @@ func IsDASMessageHeaderByte(header byte) bool { return (DASMessageHeaderFlag & header) > 0 } +func IsTreeDASMessageHeaderByte(header byte) bool { + return (TreeDASMessageHeaderFlag & header) > 0 +} + func IsZeroheavyEncodedHeaderByte(header byte) bool { return (ZeroheavyMessageHeaderFlag & header) > 0 } @@ -57,6 +66,7 @@ type DataAvailabilityCertificate struct { Timeout uint64 SignersMask uint64 Sig blsSignatures.Signature + Version uint8 } func DeserializeDASCertFrom(rd io.Reader) (c *DataAvailabilityCertificate, err error) { @@ -88,6 +98,15 @@ func DeserializeDASCertFrom(rd io.Reader) (c *DataAvailabilityCertificate, err e } c.Timeout = binary.BigEndian.Uint64(timeoutBuf[:]) + if IsTreeDASMessageHeaderByte(header) { + var versionBuf [1]byte + _, err = io.ReadFull(r, versionBuf[:]) + if err != nil { + return nil, err + } + c.Version = versionBuf[0] + } + var signersMaskBuf [8]byte _, err = io.ReadFull(r, signersMaskBuf[:]) if err != nil { @@ -109,13 +128,17 @@ func DeserializeDASCertFrom(rd io.Reader) (c *DataAvailabilityCertificate, err e } func (c *DataAvailabilityCertificate) SerializeSignableFields() []byte { - buf := make([]byte, 0, 32+8) + buf := make([]byte, 0, 32+9) buf = append(buf, c.DataHash[:]...) var intData [8]byte binary.BigEndian.PutUint64(intData[:], c.Timeout) buf = append(buf, intData[:]...) 
+ if c.Version != 0 { + buf = append(buf, c.Version) + } + return buf } @@ -123,11 +146,11 @@ func (cert *DataAvailabilityCertificate) RecoverKeyset( ctx context.Context, da DataAvailabilityReader, ) (*DataAvailabilityKeyset, error) { - keysetBytes, err := da.GetByHash(ctx, cert.KeysetHash[:]) + keysetBytes, err := da.GetByHash(ctx, cert.KeysetHash) if err != nil { return nil, err } - if !bytes.Equal(crypto.Keccak256(keysetBytes), cert.KeysetHash[:]) { + if !dastree.ValidHash(cert.KeysetHash, keysetBytes) { return nil, errors.New("keyset hash does not match cert") } return DeserializeKeyset(bytes.NewReader(keysetBytes)) @@ -168,12 +191,15 @@ func (keyset *DataAvailabilityKeyset) Serialize(wr io.Writer) error { return nil } -func (keyset *DataAvailabilityKeyset) Hash() ([]byte, error) { +func (keyset *DataAvailabilityKeyset) Hash() (common.Hash, error) { wr := bytes.NewBuffer([]byte{}) if err := keyset.Serialize(wr); err != nil { - return nil, err + return common.Hash{}, err + } + if wr.Len() > dastree.BinSize { + return common.Hash{}, errors.New("keyset too large") } - return crypto.Keccak256(wr.Bytes()), nil + return dastree.Hash(wr.Bytes()), nil } func DeserializeKeyset(rd io.Reader) (*DataAvailabilityKeyset, error) { diff --git a/arbstate/inbox.go b/arbstate/inbox.go index dfe0539ad6..cfff184cb5 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/zeroheavy" "github.com/ethereum/go-ethereum/log" @@ -138,17 +139,41 @@ func RecoverPayloadFromDasBatch( log.Error("Failed to deserialize DAS message", "err", err) return nil, nil } - keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash[:]) - if err == nil && !bytes.Equal(cert.KeysetHash[:], crypto.Keccak256(keysetPreimage)) { - err = ErrHashMismatch + version := cert.Version + + checkPreimage := func(hash common.Hash, 
preimage []byte, message string) error { + switch { + case version == 0 && crypto.Keccak256Hash(preimage) != hash: + fallthrough + case version == 1 && dastree.Hash(preimage) != hash: + log.Error(message, "err", ErrHashMismatch, "version", version) + return ErrHashMismatch + case version >= 2: + log.Error( + "Committee signed unsuported certificate format", + "version", version, "hash", hash, "payload", preimage, + ) + panic("node software out of date") + } + return nil + } + recordPreimage := func(key common.Hash, value []byte) { + preimages[key] = value + } + + keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash) + keysetHash := cert.KeysetHash + if err == nil { + err = checkPreimage(keysetHash, keysetPreimage, "Keyset hash mismatch") } if err != nil { log.Error("Couldn't get keyset", "err", err) return nil, err } if preimages != nil { - preimages[common.BytesToHash(cert.KeysetHash[:])] = keysetPreimage + dastree.RecordHash(recordPreimage, keysetPreimage) } + keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage)) if err != nil { log.Error("Couldn't deserialize keyset", "err", err) @@ -159,21 +184,29 @@ func RecoverPayloadFromDasBatch( log.Error("Bad signature on DAS batch", "err", err) return nil, nil } + maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16]) if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert { log.Error("Data availability cert expires too soon", "err", "") return nil, nil } - payload, err := dasReader.GetByHash(ctx, cert.DataHash[:]) - if err == nil && !bytes.Equal(crypto.Keccak256(payload), cert.DataHash[:]) { - err = ErrHashMismatch + + dataHash := cert.DataHash + payload, err := dasReader.GetByHash(ctx, dataHash) + if err == nil { + err = checkPreimage(dataHash, payload, "batch hash mismatch") } if err != nil { log.Error("Couldn't fetch DAS batch contents", "err", err) return nil, err } + if preimages != nil { - preimages[common.BytesToHash(cert.DataHash[:])] = payload + if version == 0 { + 
preimages[dataHash] = payload + } else { + dastree.RecordHash(recordPreimage, payload) + } } return payload, nil diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 9b15066135..092f1ca941 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -174,7 +175,7 @@ func startRPCClientGetByHash(args []string) error { } ctx := context.Background() - message, err := client.GetByHash(ctx, decodedHash) + message, err := client.GetByHash(ctx, common.BytesToHash(decodedHash)) if err != nil { return err } @@ -235,7 +236,7 @@ func startRESTClientGetByHash(args []string) error { } ctx := context.Background() - message, err := client.GetByHash(ctx, decodedHash) + message, err := client.GetByHash(ctx, common.BytesToHash(decodedHash)) if err != nil { return err } diff --git a/cmd/one-time-das-upgrade/upgrade.go b/cmd/one-time-das-upgrade/upgrade.go new file mode 100644 index 0000000000..daee01e51e --- /dev/null +++ b/cmd/one-time-das-upgrade/upgrade.go @@ -0,0 +1,87 @@ +// Copyright 2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package main + +import ( + "encoding/hex" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/colors" +) + +func main() { + args := os.Args + if len(args) < 2 { + panic("Usage: upgrade ") + } + + path := filepath.FromSlash(args[1]) + info, err := os.Stat(path) + if err != nil { + panic(fmt.Sprintf("failed to open directory: %v\n%v", path, err)) + } + if !info.IsDir() { + panic(fmt.Sprintf("path %v is not a directory", path)) + } + + println("upgrading das files in directory", path) + + renames := make(map[string]string) + + err = filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + if err != nil { + colors.PrintRed("skipping ", path, err) + return nil + } + if info.IsDir() { + return nil + } + stem := filepath.Dir(path) + "/" + name := info.Name() + zero := false + if name[:2] == "0x" { + name = name[2:] + zero = true + } + + hashbytes, err := hex.DecodeString(name) + if err != nil || len(hashbytes) != 32 { + panic(fmt.Sprintf("filename %v isn't a hash", path)) + } + hash := *(*common.Hash)(hashbytes) + tree := dastree.FlatHashToTreeHash(hash) + + contents, err := os.ReadFile(path) + if err != nil { + panic(fmt.Sprintf("failed to read file %v %v", path, err)) + } + if crypto.Keccak256Hash(contents) != hash { + panic(fmt.Sprintf("file hash %v does not match its contents", path)) + } + + newName := tree.Hex() + if !zero { + newName = newName[2:] + } + renames[path] = stem + newName + return nil + }) + if err != nil { + panic(err) + } + + for name, rename := range renames { + println(name, colors.Grey, "=>", colors.Clear, rename) + err := os.Rename(name, rename) + if err != nil { + panic("failed to mv file") + } + } +} diff --git a/cmd/replay/main.go b/cmd/replay/main.go index be8558c1ee..906d119748 
100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/wavmio" ) @@ -85,8 +86,8 @@ func (i WavmInbox) ReadDelayedInbox(seqNum uint64) ([]byte, error) { type PreimageDASReader struct { } -func (dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - return wavmio.ResolvePreImage(common.BytesToHash(hash)), nil +func (dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + return dastree.Content(hash, wavmio.ResolvePreImage) } func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { @@ -96,6 +97,7 @@ func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { return arbstate.DiscardImmediately, nil } + func main() { wavmio.StubInit() diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol index 06445d49b5..cc1de7e794 100644 --- a/contracts/src/bridge/SequencerInbox.sol +++ b/contracts/src/bridge/SequencerInbox.sol @@ -353,7 +353,9 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox * @param keysetBytes bytes of the serialized keyset */ function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner { - bytes32 ksHash = keccak256(keysetBytes); + bytes32 ksHash = bytes32(keccak256(bytes.concat(hex"fe", keccak256(keysetBytes)))); + require(keysetBytes.length < 64 * 1024, "keyset is too large"); + if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); dasKeySetInfo[ksHash] = DasKeySetInfo({ isValidKeyset: true, diff --git a/das/aggregator.go b/das/aggregator.go index b1215b9671..2cac67ebcc 100644 
--- a/das/aggregator.go +++ b/das/aggregator.go @@ -14,12 +14,12 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/pretty" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" @@ -136,12 +136,10 @@ func NewAggregatorWithSeqInboxCaller( if err := keyset.Serialize(ksBuf); err != nil { return nil, err } - keysetHashBuf, err := keyset.Hash() + keysetHash, err := keyset.Hash() if err != nil { return nil, err } - var keysetHash [32]byte - copy(keysetHash[:], keysetHashBuf) if config.DumpKeyset { fmt.Printf("Keyset: %s\n", hexutil.Encode(ksBuf.Bytes())) fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:])) @@ -164,7 +162,7 @@ func NewAggregatorWithSeqInboxCaller( }, nil } -func (a *Aggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (a *Aggregator) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { // Query all services, even those that didn't sign. // They may have been late in returning a response after storing the data, // or got the data by some other means. 
@@ -179,7 +177,7 @@ func (a *Aggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) errorChan <- err return } - if bytes.Equal(crypto.Keccak256(blob), hash) { + if dastree.ValidHash(hash, blob) { blobChan <- blob } else { errorChan <- fmt.Errorf("DAS (mask %X) returned data that doesn't match requested hash!", d.signersMask) @@ -247,7 +245,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, responses := make(chan storeResponse, len(a.services)) - expectedHash := crypto.Keccak256(message) + expectedHash := dastree.Hash(message) for _, d := range a.services { go func(ctx context.Context, d ServiceDetails) { cert, err := d.service.Store(ctx, message, timeout, sig) @@ -256,7 +254,9 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, return } - verified, err := blsSignatures.VerifySignature(cert.Sig, serializeSignableFields(cert), d.pubKey) + verified, err := blsSignatures.VerifySignature( + cert.Sig, cert.SerializeSignableFields(), d.pubKey, + ) if err != nil { responses <- storeResponse{d, nil, err} return @@ -268,7 +268,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, // SignersMask from backend DAS is ignored. 
- if !bytes.Equal(cert.DataHash[:], expectedHash) { + if cert.DataHash != expectedHash { responses <- storeResponse{d, nil, errors.New("Hash verification failed.")} return } @@ -312,11 +312,12 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, aggCert.Sig = blsSignatures.AggregateSignatures(sigs) aggPubKey := blsSignatures.AggregatePublicKeys(pubKeys) aggCert.SignersMask = aggSignersMask - copy(aggCert.DataHash[:], expectedHash) + aggCert.DataHash = expectedHash aggCert.Timeout = timeout aggCert.KeysetHash = a.keysetHash + aggCert.Version = 1 - verified, err := blsSignatures.VerifySignature(aggCert.Sig, serializeSignableFields(&aggCert), aggPubKey) + verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey) if err != nil { return nil, err } diff --git a/das/aggregator_test.go b/das/aggregator_test.go index ff665046fa..21242967be 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -15,6 +15,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" ) @@ -62,7 +63,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { cert, err := aggregator.Store(ctx, rawMsg, 0, []byte{}) Require(t, err, "Error storing message") - messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash[:]) + messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(rawMsg, messageRetrieved) { Fail(t, "Retrieved message is not the same as stored one.") @@ -132,7 +133,7 @@ type WrapGetByHash struct { DataAvailabilityService } -func (w *WrapGetByHash) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (w *WrapGetByHash) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { switch w.injector.shouldFail() { case success: return w.DataAvailabilityService.GetByHash(ctx, hash) @@ -256,7 +257,7 @@ func 
testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { return } - messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash[:]) + messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(rawMsg, messageRetrieved) { Fail(t, "Retrieved message is not the same as stored one.") @@ -366,7 +367,7 @@ func testConfigurableRetrieveFailures(t *testing.T, shouldFail bool) { cert, err := aggregator.Store(ctx, rawMsg, 0, []byte{}) Require(t, err, "Error storing message") - messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash[:]) + messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash) if !shouldFail { Require(t, err, "Error retrieving message") } else { diff --git a/das/archiving_storage_service.go b/das/archiving_storage_service.go index 60b751f484..cd49756ee1 100644 --- a/das/archiving_storage_service.go +++ b/das/archiving_storage_service.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/util/arbmath" @@ -80,8 +81,8 @@ func NewArchivingStorageService( return ret, nil } -func (serv *ArchivingStorageService) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.ArchivingStorageService.GetByHash", "key", pretty.FirstFewBytes(hash), "this", serv) +func (serv *ArchivingStorageService) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.ArchivingStorageService.GetByHash", "key", pretty.PrettyHash(hash), "this", serv) data, err := serv.inner.GetByHash(ctx, hash) if err != nil { @@ -174,7 +175,7 @@ func NewArchivingSimpleDASReader( return &ArchivingSimpleDASReader{arch}, nil } -func (asdr *ArchivingSimpleDASReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (asdr *ArchivingSimpleDASReader) GetByHash(ctx context.Context, hash common.Hash) 
([]byte, error) { return asdr.wrapped.GetByHash(ctx, hash) } diff --git a/das/archiving_storage_service_test.go b/das/archiving_storage_service_test.go index 95de8c3906..8625bda670 100644 --- a/das/archiving_storage_service_test.go +++ b/das/archiving_storage_service_test.go @@ -6,10 +6,11 @@ package das import ( "bytes" "context" - "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" "testing" "time" + + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" ) func TestArchivingStorageService(t *testing.T) { @@ -18,9 +19,9 @@ func TestArchivingStorageService(t *testing.T) { futureTime := uint64(time.Now().Add(time.Hour).Unix()) val1 := []byte("First value") - hash1 := crypto.Keccak256(val1) + hash1 := dastree.Hash(val1) val2 := []byte("Second value") - hash2 := crypto.Keccak256(val2) + hash2 := dastree.Hash(val2) firstStorage := NewMemoryBackedStorageService(ctx) archiveTo := NewMemoryBackedStorageService(ctx) diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go index c675ec4802..1bc37a3047 100644 --- a/das/bigcache_storage_service.go +++ b/das/bigcache_storage_service.go @@ -10,10 +10,11 @@ import ( "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -50,17 +51,17 @@ func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService }, nil } -func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", bcs) +func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.BigCacheStorageService.GetByHash", "key", 
pretty.PrettyHash(key), "this", bcs) - ret, err := bcs.bigCache.Get(string(key)) + ret, err := bcs.bigCache.Get(string(key.Bytes())) if err != nil { ret, err = bcs.baseStorageService.GetByHash(ctx, key) if err != nil { return nil, err } - err = bcs.bigCache.Set(string(key), ret) + err = bcs.bigCache.Set(string(key.Bytes()), ret) if err != nil { return nil, err } @@ -71,14 +72,12 @@ func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key []byte) ([ } func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - log.Trace("das.BigCacheStorageService.Put", "message", pretty.FirstFewBytes(value), "timeout", time.Unix(int64(timeout), 0), "this", bcs) - + logPut("das.BigCacheStorageService.Put", value, timeout, bcs) err := bcs.baseStorageService.Put(ctx, value, timeout) if err != nil { return err } - err = bcs.bigCache.Set(string(crypto.Keccak256(value)), value) - return err + return bcs.bigCache.Set(string(dastree.HashBytes(value)), value) } func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { diff --git a/das/bigcache_storage_service_test.go b/das/bigcache_storage_service_test.go index a1b3ddea05..e9071f7f87 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/bigcache_storage_service_test.go @@ -11,8 +11,7 @@ import ( "time" "github.com/allegro/bigcache" - - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" ) func TestBigCacheStorageService(t *testing.T) { @@ -29,8 +28,8 @@ func TestBigCacheStorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - val1CorrectKey := crypto.Keccak256(val1) - val1IncorrectKey := crypto.Keccak256(append(val1, 0)) + val1CorrectKey := dastree.Hash(val1) + val1IncorrectKey := dastree.Hash(append(val1, 0)) _, err = bigCacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { @@ -52,8 +51,8 @@ func TestBigCacheStorageService(t *testing.T) { // For Case where the value is present in the 
base storage but not present in the cache. val2 := []byte("The Second value") - val2CorrectKey := crypto.Keccak256(val2) - val2IncorrectKey := crypto.Keccak256(append(val2, 0)) + val2CorrectKey := dastree.Hash(val2) + val2IncorrectKey := dastree.Hash(append(val2, 0)) err = baseStorageService.Put(ctx, val2, timeout) Require(t, err) diff --git a/das/cache_storage_to_das_adapter.go b/das/cache_storage_to_das_adapter.go index c2d1891dee..5764a2fbcd 100644 --- a/das/cache_storage_to_das_adapter.go +++ b/das/cache_storage_to_das_adapter.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/util/pretty" @@ -28,8 +29,8 @@ func NewCacheStorageToDASAdapter( } } -func (a *CacheStorageToDASAdapter) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.CacheStorageToDASAdapter.GetByHash", "key", pretty.FirstFewBytes(hash), "this", a) +func (a *CacheStorageToDASAdapter) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.CacheStorageToDASAdapter.GetByHash", "key", pretty.PrettyHash(hash), "this", a) ret, err := a.cache.GetByHash(ctx, hash) if err != nil { ret, err = a.DataAvailabilityService.GetByHash(ctx, hash) @@ -46,7 +47,9 @@ func (a *CacheStorageToDASAdapter) GetByHash(ctx context.Context, hash []byte) ( return ret, nil } -func (a *CacheStorageToDASAdapter) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (a *CacheStorageToDASAdapter) Store( + ctx context.Context, message []byte, timeout uint64, sig []byte, +) (*arbstate.DataAvailabilityCertificate, error) { log.Trace("das.CacheStorageToDASAdapter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", a) cert, err := a.DataAvailabilityService.Store(ctx, message, timeout, sig) if 
err != nil { @@ -72,7 +75,7 @@ func NewEmptyStorageService() *emptyStorageService { return &emptyStorageService{} } -func (s *emptyStorageService) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (s *emptyStorageService) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { return nil, ErrNotFound } diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index 672b668d17..643c98fa5d 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -4,7 +4,6 @@ package das import ( - "bytes" "context" "errors" "sync" @@ -14,9 +13,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -87,13 +86,13 @@ func NewChainFetchReaderWithSeqInbox(inner arbstate.DataAvailabilityReader, seqI }, nil } -func (this *ChainFetchDAS) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.ChainFetchDAS.GetByHash", "hash", pretty.FirstFewBytes(hash)) +func (this *ChainFetchDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.ChainFetchDAS.GetByHash", "hash", pretty.PrettyHash(hash)) return chainFetchGetByHash(ctx, this.DataAvailabilityService, &this.keysetCache, this.seqInboxCaller, this.seqInboxFilterer, hash) } -func (this *ChainFetchReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.FirstFewBytes(hash)) +func (this *ChainFetchReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.PrettyHash(hash)) return chainFetchGetByHash(ctx, this.DataAvailabilityReader, &this.keysetCache, this.seqInboxCaller, this.seqInboxFilterer, hash) } @@ -103,24 +102,22 @@ func 
chainFetchGetByHash( cache *syncedKeysetCache, seqInboxCaller *bridgegen.SequencerInboxCaller, seqInboxFilterer *bridgegen.SequencerInboxFilterer, - hash []byte, + hash common.Hash, ) ([]byte, error) { // try to fetch from the cache - var hash32 [32]byte - copy(hash32[:], hash) - res, ok := cache.get(hash32) + res, ok := cache.get(hash) if ok { return res, nil } // try to fetch from the inner DAS innerRes, err := daReader.GetByHash(ctx, hash) - if err == nil && bytes.Equal(hash, crypto.Keccak256(innerRes)) { + if err == nil && dastree.ValidHash(hash, innerRes) { return innerRes, nil } // try to fetch from the L1 chain - blockNumBig, err := seqInboxCaller.GetKeysetCreationBlock(&bind.CallOpts{Context: ctx}, hash32) + blockNumBig, err := seqInboxCaller.GetKeysetCreationBlock(&bind.CallOpts{Context: ctx}, hash) if err != nil { return nil, err } @@ -135,13 +132,13 @@ func chainFetchGetByHash( End: &blockNumPlus1, Context: ctx, } - iter, err := seqInboxFilterer.FilterSetValidKeyset(filterOpts, [][32]byte{hash32}) + iter, err := seqInboxFilterer.FilterSetValidKeyset(filterOpts, [][32]byte{hash}) if err != nil { return nil, err } for iter.Next() { - if bytes.Equal(hash, crypto.Keccak256(iter.Event.KeysetBytes)) { - cache.put(hash32, iter.Event.KeysetBytes) + if dastree.ValidHash(hash, iter.Event.KeysetBytes) { + cache.put(hash, iter.Event.KeysetBytes) return iter.Event.KeysetBytes, nil } } diff --git a/das/das.go b/das/das.go index ff3bb6a462..97c7778004 100644 --- a/das/das.go +++ b/das/das.go @@ -64,37 +64,6 @@ var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ PanicOnError: false, } -/* TODO put these checks somewhere -func (c *DataAvailabilityConfig) Mode() (DataAvailabilityMode, error) { - if c.ModeImpl == "" { - return 0, errors.New("--data-availability.mode missing") - } - - if c.ModeImpl == OnchainDataAvailabilityString { - return OnchainDataAvailability, nil - } - - if c.ModeImpl == DASDataAvailabilityString { - if c.DASConfig.LocalConfig.DataDir == "" 
|| (c.DASConfig.KeyDir == "" && c.DASConfig.PrivKey == "") { - flag.Usage() - return 0, errors.New("--data-availability.das.local.data-dir and --data-availability.das.key-dir must be specified if mode is set to das") - } - return DASDataAvailability, nil - } - - if c.ModeImpl == AggregatorDataAvailabilityString { - if reflect.DeepEqual(c.AggregatorConfig, DefaultAggregatorConfig) { - flag.Usage() - return 0, errors.New("--data-availability.aggregator.X config options must be specified if mode is set to aggregator") - } - return AggregatorDataAvailability, nil - } - - flag.Usage() - return 0, errors.New("--data-availability.mode " + c.ModeImpl + " not recognized") -} -*/ - func OptionalAddressFromString(s string) (*common.Address, error) { if s == "none" { return nil, nil @@ -137,25 +106,17 @@ func DataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "L1 address of SequencerInbox contract") } -func serializeSignableFields(c *arbstate.DataAvailabilityCertificate) []byte { - buf := make([]byte, 0, 32+8) - buf = append(buf, c.DataHash[:]...) - - var intData [8]byte - binary.BigEndian.PutUint64(intData[:], c.Timeout) - buf = append(buf, intData[:]...) - - return buf -} - func Serialize(c *arbstate.DataAvailabilityCertificate) []byte { - buf := make([]byte, 0) - buf = append(buf, arbstate.DASMessageHeaderFlag) + flags := arbstate.DASMessageHeaderFlag + if c.Version != 0 { + flags |= arbstate.TreeDASMessageHeaderFlag + } + buf := make([]byte, 0) + buf = append(buf, flags) buf = append(buf, c.KeysetHash[:]...) - - buf = append(buf, serializeSignableFields(c)...) + buf = append(buf, c.SerializeSignableFields()...) 
var intData [8]byte binary.BigEndian.PutUint64(intData[:], c.SignersMask) diff --git a/das/das_test.go b/das/das_test.go index cdb8566d0d..1bd79c7ba7 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -60,7 +60,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Fail(t, fmt.Sprintf("Expected timeout of %d in cert, was %d", timeout, cert.Timeout)) } - messageRetrieved, err := das.GetByHash(firstCtx, cert.DataHash[:]) + messageRetrieved, err := das.GetByHash(firstCtx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(messageSaved, messageRetrieved) { Fail(t, "Retrieved message is not the same as stored one.") @@ -79,13 +79,13 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { das2, err := NewSignAfterStoreDAS(secondCtx, config, storageService2) Require(t, err, "no das") - messageRetrieved2, err := das2.GetByHash(secondCtx, cert.DataHash[:]) + messageRetrieved2, err := das2.GetByHash(secondCtx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(messageSaved, messageRetrieved2) { Fail(t, "Retrieved message is not the same as stored one.") } - messageRetrieved2, err = das2.GetByHash(secondCtx, cert.DataHash[:]) + messageRetrieved2, err = das2.GetByHash(secondCtx, cert.DataHash) Require(t, err, "Failed to getByHash message") if !bytes.Equal(messageSaved, messageRetrieved2) { Fail(t, "Retrieved message is not the same as stored one.") @@ -151,12 +151,12 @@ func testDASMissingMessage(t *testing.T, storageType string) { // Change the hash to look up cert.DataHash[0] += 1 - _, err = das.GetByHash(ctx, cert.DataHash[:]) + _, err = das.GetByHash(ctx, cert.DataHash) if err == nil { Fail(t, "Expected an error when retrieving message that is not in the store.") } - _, err = das.GetByHash(ctx, cert.DataHash[:]) + _, err = das.GetByHash(ctx, cert.DataHash) if err == nil { Fail(t, "Expected an error when getting by hash a message that is not in the store.") } diff 
--git a/das/dasrpc/dasRpcClient.go b/das/dasrpc/dasRpcClient.go index edd6736367..334f97d9ea 100644 --- a/das/dasrpc/dasRpcClient.go +++ b/das/dasrpc/dasRpcClient.go @@ -4,18 +4,18 @@ package dasrpc import ( - "bytes" "context" "fmt" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" ) @@ -35,15 +35,15 @@ func NewDASRPCClient(target string) (*DASRPCClient, error) { }, nil } -func (c *DASRPCClient) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (c *DASRPCClient) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { if len(hash) != 32 { return nil, fmt.Errorf("Hash must be 32 bytes long, was %d", len(hash)) } var ret hexutil.Bytes - if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hexutil.Bytes(hash)); err != nil { + if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hash); err != nil { return nil, err } - if !bytes.Equal(hash, crypto.Keccak256(ret)) { // check hash because RPC server might be untrusted + if !dastree.ValidHash(hash, ret) { // check hash because RPC server might be untrusted return nil, arbstate.ErrHashMismatch } return ret, nil @@ -55,20 +55,17 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 if err := c.clnt.CallContext(ctx, &ret, "das_store", hexutil.Bytes(message), hexutil.Uint64(timeout), hexutil.Bytes(reqSig)); err != nil { return nil, err } - var keysetHash [32]byte - copy(keysetHash[:], ret.KeysetHash) - var dataHash [32]byte - copy(dataHash[:], ret.DataHash) respSig, err := blsSignatures.SignatureFromBytes(ret.Sig) if err != nil { return nil, err } return &arbstate.DataAvailabilityCertificate{ - DataHash: dataHash, + 
DataHash: common.BytesToHash(ret.DataHash), Timeout: uint64(ret.Timeout), SignersMask: uint64(ret.SignersMask), Sig: respSig, - KeysetHash: keysetHash, + KeysetHash: common.BytesToHash(ret.KeysetHash), + Version: byte(ret.Version), }, nil } diff --git a/das/dasrpc/dasRpcServer.go b/das/dasrpc/dasRpcServer.go index 5cf11f6552..780f879c9d 100644 --- a/das/dasrpc/dasRpcServer.go +++ b/das/dasrpc/dasRpcServer.go @@ -10,6 +10,7 @@ import ( "net/http" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -89,6 +90,7 @@ type StoreResult struct { SignersMask hexutil.Uint64 `json:"signersMask,omitempty"` KeysetHash hexutil.Bytes `json:"keysetHash,omitempty"` Sig hexutil.Bytes `json:"sig,omitempty"` + Version hexutil.Uint64 `json:"version,omitempty"` } func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) { @@ -117,6 +119,7 @@ func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, time Timeout: hexutil.Uint64(cert.Timeout), SignersMask: hexutil.Uint64(cert.SignersMask), Sig: blsSignatures.SignatureToBytes(cert.Sig), + Version: hexutil.Uint64(cert.Version), }, nil } @@ -133,7 +136,7 @@ func (serv *DASRPCServer) GetByHash(ctx context.Context, certBytes hexutil.Bytes rpcGetByHashDurationHistogram.Update(time.Since(start).Nanoseconds()) }() - bytes, err := serv.localDAS.GetByHash(ctx, certBytes) + bytes, err := serv.localDAS.GetByHash(ctx, common.BytesToHash(certBytes)) if err != nil { return nil, err } diff --git a/das/dasrpc/rpc_test.go b/das/dasrpc/rpc_test.go index be16c9553c..e2f49191c3 100644 --- a/das/dasrpc/rpc_test.go +++ b/das/dasrpc/rpc_test.go @@ -75,14 +75,14 @@ func TestRPC(t *testing.T) { cert, err := rpcAgg.Store(ctx, msg, 0, nil) testhelpers.RequireImpl(t, err) - retrievedMessage, err := rpcAgg.GetByHash(ctx, cert.DataHash[:]) + 
retrievedMessage, err := rpcAgg.GetByHash(ctx, cert.DataHash) testhelpers.RequireImpl(t, err) if !bytes.Equal(msg, retrievedMessage) { testhelpers.FailImpl(t, "failed to retrieve correct message") } - retrievedMessage, err = rpcAgg.GetByHash(ctx, cert.DataHash[:]) + retrievedMessage, err = rpcAgg.GetByHash(ctx, cert.DataHash) testhelpers.RequireImpl(t, err) if !bytes.Equal(msg, retrievedMessage) { diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go new file mode 100644 index 0000000000..dfafb48b21 --- /dev/null +++ b/das/dastree/dastree.go @@ -0,0 +1,210 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package dastree + +import ( + "encoding/binary" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/util/arbmath" +) + +const BinSize = 64 * 1024 // 64 kB +const NodeByte = byte(0xff) +const LeafByte = byte(0xfe) + +type bytes32 = common.Hash + +type node struct { + hash bytes32 + size uint32 +} + +func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { + // Algorithm + // 1. split the preimage into 64kB bins and double hash them to produce the tree's leaves + // 2. repeatedly hash pairs and their combined length, bubbling up any odd-one's out, to form the root + // + // r <=> H(0xff, H(0xff, 0, 1, L(0:1)), 2, L(0:2)) step 3 + // / \ + // * 2 <=> H(0xff, 0, 1, L(0:1)), 2 step 2 + // / \ + // 0 1 <=> 0, 1, 2 step 1 + // + // 0 1 2 <=> leaf n = H(0xfe, H(bin n)) step 0 + // + // Where H is keccak and L is the length + // Intermediate hashes like '*' from above may be recorded via the `record` closure + // + + keccord := func(value []byte) bytes32 { + hash := crypto.Keccak256Hash(value) + record(hash, value) + return hash + } + prepend := func(before byte, slice []byte) []byte { + return append([]byte{before}, slice...) + } + + unrolled := arbmath.ConcatByteSlices(preimage...) 
+ if len(unrolled) == 0 { + return keccord(prepend(LeafByte, keccord([]byte{}).Bytes())) + } + + length := uint32(len(unrolled)) + leaves := []node{} + for bin := uint32(0); bin < length; bin += BinSize { + end := arbmath.MinUint32(bin+BinSize, length) + hash := keccord(prepend(LeafByte, keccord(unrolled[bin:end]).Bytes())) + leaves = append(leaves, node{hash, end - bin}) + } + + layer := leaves + for len(layer) > 1 { + prior := len(layer) + after := prior/2 + prior%2 + paired := make([]node, after) + for i := 0; i < prior-1; i += 2 { + firstHash := layer[i].hash.Bytes() + otherHash := layer[i+1].hash.Bytes() + sizeUnder := layer[i].size + layer[i+1].size + dataUnder := arbmath.ConcatByteSlices(firstHash, otherHash, arbmath.Uint32ToBytes(sizeUnder)) + parent := node{ + keccord(prepend(NodeByte, dataUnder)), + sizeUnder, + } + paired[i/2] = parent + } + if prior%2 == 1 { + paired[after-1] = layer[prior-1] + } + layer = paired + } + return layer[0].hash +} + +func Hash(preimage ...[]byte) bytes32 { + // Merkelizes without recording anything. All but the validator's DAS will call this + return RecordHash(func(bytes32, []byte) {}, preimage...) +} + +func HashBytes(preimage ...[]byte) []byte { + return Hash(preimage...).Bytes() +} + +func FlatHashToTreeHash(flat bytes32) bytes32 { + // Forms a degenerate dastree that's just a single leaf + // note: the inner preimage may be larger than the 64 kB standard + return crypto.Keccak256Hash(append([]byte{LeafByte}, flat[:]...)) +} + +func ValidHash(hash bytes32, preimage []byte) bool { + if hash == Hash(preimage) { + return true + } + if len(preimage) > 0 { + kind := preimage[0] + return kind != NodeByte && kind != LeafByte && hash == crypto.Keccak256Hash(preimage) + } + return false +} + +func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { + // Reverses hashes to reveal the full preimage under the root using the preimage oracle. 
+ // This function also checks that the size-data is consistent and that the hash is canonical. + // + // Notes + // 1. Because we accept degenerate dastrees, we can't check that single-leaf trees are canonical. + // 2. For any canonical dastree, there exists a degenerate single-leaf equivalent that we accept. + // 3. We also accept old-style flat hashes + // 4. Only the committee can produce trees unwrapped by this function + // 5. Only the replay binary calls this + // + + unpeal := func(hash bytes32) (byte, []byte, error) { + data := oracle(hash) + size := len(data) + if size == 0 { + return 0, nil, fmt.Errorf("invalid node %v", hash) + } + kind := data[0] + if (kind == LeafByte && size != 33) || (kind == NodeByte && size != 69) { + return 0, nil, fmt.Errorf("invalid node for hash %v: %v", hash, data) + } + return kind, data[1:], nil + } + + total := uint32(0) + kind, upper, err := unpeal(root) + if err != nil { + return nil, err + } + switch kind { + case LeafByte: + return oracle(common.BytesToHash(upper)), nil + case NodeByte: + total = binary.BigEndian.Uint32(upper[64:]) + default: + return oracle(root), nil // accept old-style hashes + } + + leaves := []node{} + stack := []node{{hash: root, size: total}} + + for len(stack) > 0 { + place := stack[len(stack)-1] + stack = stack[:len(stack)-1] + kind, data, err := unpeal(place.hash) + if err != nil { + return nil, err + } + + switch kind { + case LeafByte: + leaf := node{ + hash: common.BytesToHash(data), + size: place.size, + } + leaves = append(leaves, leaf) + case NodeByte: + count := binary.BigEndian.Uint32(data[64:]) + power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) + + if place.size != count { + return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, data) + } + + prior := node{ + hash: common.BytesToHash(data[:32]), + size: power / 2, + } + after := node{ + hash: common.BytesToHash(data[32:64]), + size: count - power/2, + } + + // we want to expand leftward so we 
reverse their order + stack = append(stack, after, prior) + default: + return nil, fmt.Errorf("failed to resolve preimage %v %v", place.hash, data) + } + } + + preimage := []byte{} + for i, leaf := range leaves { + bin := oracle(leaf.hash) + if len(bin) != int(leaf.size) { + return nil, fmt.Errorf("leaf %v has an incorrectly sized bin: %v vs %v", i, len(bin), leaf.size) + } + preimage = append(preimage, bin...) + } + + // Check the hash matches. Given the size data this should never fail but we'll check anyway + if Hash(preimage) != root { + return nil, fmt.Errorf("preimage not canonically hashed") + } + return preimage, nil +} diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go new file mode 100644 index 0000000000..d03560a4e0 --- /dev/null +++ b/das/dastree/dastree_test.go @@ -0,0 +1,72 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package dastree + +import ( + "bytes" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/util/colors" + "github.com/offchainlabs/nitro/util/pretty" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +func TestDASTree(t *testing.T) { + store := make(map[bytes32][]byte) + tests := [][]byte{ + {}, {0x32}, crypto.Keccak256(), + make([]byte, BinSize), make([]byte, BinSize+1), make([]byte, 4*BinSize), + } + for i := 0; i < 64; i++ { + large := make([]byte, rand.Intn(12*BinSize)) + tests = append(tests, large) + } + + record := func(key bytes32, value []byte) { + colors.PrintGrey("storing ", key, " ", pretty.PrettyBytes(value)) + store[key] = value + if crypto.Keccak256Hash(value) != key { + Fail(t, "key not the hash of value") + } + } + oracle := func(key bytes32) []byte { + preimage, ok := store[key] + if !ok { + Fail(t, "no preimage for key", key) + } + if crypto.Keccak256Hash(preimage) != key { + Fail(t, "key not the hash of preimage") + } + colors.PrintBlue("loading ", key, " ", 
pretty.PrettyBytes(preimage)) + return preimage + } + + hashes := map[bytes32][]byte{} + for _, test := range tests { + hash := RecordHash(record, test) + hashes[hash] = test + } + + for key, value := range hashes { + colors.PrintMint("testing ", key) + preimage, err := Content(key, oracle) + Require(t, err, key) + + if !bytes.Equal(preimage, value) || !ValidHash(key, preimage) { + Fail(t, "incorrect preimage", pretty.FirstFewBytes(preimage), pretty.FirstFewBytes(value)) + } + } +} + +func Require(t *testing.T, err error, printables ...interface{}) { + t.Helper() + testhelpers.RequireImpl(t, err, printables...) +} + +func Fail(t *testing.T, printables ...interface{}) { + t.Helper() + testhelpers.FailImpl(t, printables...) +} diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 31eab54b4a..267bff97ac 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -10,9 +10,10 @@ import ( "time" badger "github.com/dgraph-io/badger/v3" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" @@ -81,12 +82,12 @@ func NewDBStorageService(ctx context.Context, dirPath string, discardAfterTimeou return ret, nil } -func (dbs *DBStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.DBStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", dbs) +func (dbs *DBStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.DBStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", dbs) var ret []byte err := dbs.db.View(func(txn *badger.Txn) error { - item, err := txn.Get(key) + item, err := txn.Get(key.Bytes()) if err != nil { return err } @@ -102,10 +103,10 @@ func (dbs 
*DBStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, } func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - log.Trace("das.DBStorageService.Put", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", dbs) + logPut("das.DBStorageService.Put", data, timeout, dbs) return dbs.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry(crypto.Keccak256(data), data) + e := badger.NewEntry(dastree.HashBytes(data), data) if dbs.discardAfterTimeout { e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0))) } @@ -140,7 +141,7 @@ func (dbs *DBStorageService) HealthCheck(ctx context.Context) error { if err != nil { return err } - res, err := dbs.GetByHash(ctx, crypto.Keccak256(testData)) + res, err := dbs.GetByHash(ctx, dastree.Hash(testData)) if err != nil { return err } diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index 8df0c7cb90..923cbe9b35 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -4,14 +4,14 @@ package das import ( - "bytes" "context" "sync" "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/pretty" ) @@ -47,13 +47,11 @@ func NewFallbackStorageService( } } -func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.FallbackStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", f) - var key32 [32]byte +func (f *FallbackStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.FallbackStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", f) if f.preventRecursiveGets { f.currentlyFetchingMutex.RLock() - copy(key32[:], key) - if 
f.currentlyFetching[key32] { + if f.currentlyFetching[key] { // This is a recursive call, so return not-found f.currentlyFetchingMutex.RUnlock() return nil, ErrNotFound @@ -66,8 +64,8 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]b doDelete := false if f.preventRecursiveGets { f.currentlyFetchingMutex.Lock() - if !f.currentlyFetching[key32] { - f.currentlyFetching[key32] = true + if !f.currentlyFetching[key] { + f.currentlyFetching[key] = true doDelete = true } f.currentlyFetchingMutex.Unlock() @@ -76,14 +74,16 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]b data, err = f.backup.GetByHash(ctx, key) if doDelete { f.currentlyFetchingMutex.Lock() - delete(f.currentlyFetching, key32) + delete(f.currentlyFetching, key) f.currentlyFetchingMutex.Unlock() } if err != nil { return nil, err } - if bytes.Equal(key, crypto.Keccak256(data)) { - putErr := f.StorageService.Put(ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds)) + if dastree.ValidHash(key, data) { + putErr := f.StorageService.Put( + ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds), + ) if putErr != nil && !f.ignoreRetentionWriteErrors { return nil, err } diff --git a/das/fallback_storage_service_test.go b/das/fallback_storage_service_test.go index c7254e2a05..14eed2d0c4 100644 --- a/das/fallback_storage_service_test.go +++ b/das/fallback_storage_service_test.go @@ -7,9 +7,10 @@ import ( "bytes" "context" "errors" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" "testing" + + "github.com/ethereum/go-ethereum/common/math" + "github.com/offchainlabs/nitro/das/dastree" ) func TestFallbackStorageService(t *testing.T) { @@ -17,9 +18,9 @@ func TestFallbackStorageService(t *testing.T) { defer cancel() val1 := []byte("First value") - hash1 := crypto.Keccak256(val1) + hash1 := dastree.Hash(val1) val2 := []byte("Second value") - hash2 := 
crypto.Keccak256(val2) + hash2 := dastree.Hash(val2) primary := NewMemoryBackedStorageService(ctx) err := primary.Put(ctx, val1, math.MaxUint64) @@ -53,7 +54,7 @@ func TestFallbackStorageServiceRecursive(t *testing.T) { defer cancel() val1 := []byte("First value") - hash1 := crypto.Keccak256(val1) + hash1 := dastree.Hash(val1) ss := NewMemoryBackedStorageService(ctx) fss := NewFallbackStorageService(ss, ss, 60*60, true, true) diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 12c0c130de..8cd82bd99d 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -12,9 +12,10 @@ import ( "os" "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" "golang.org/x/sys/unix" @@ -45,13 +46,13 @@ func NewLocalFileStorageService(dataDir string) (StorageService, error) { return &LocalFileStorageService{dataDir: dataDir}, nil } -func (s *LocalFileStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.LocalFileStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", s) +func (s *LocalFileStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.LocalFileStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", s) pathname := s.dataDir + "/" + EncodeStorageServiceKey(key) data, err := os.ReadFile(pathname) if err != nil { // Just for backward compatability. 
- pathname = s.dataDir + "/" + base32.StdEncoding.EncodeToString(key) + pathname = s.dataDir + "/" + base32.StdEncoding.EncodeToString(key.Bytes()) data, err = os.ReadFile(pathname) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -65,8 +66,8 @@ func (s *LocalFileStorageService) GetByHash(ctx context.Context, key []byte) ([] } func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - log.Trace("das.LocalFileStorageService.Store", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", s) - fileName := EncodeStorageServiceKey(crypto.Keccak256(data)) + logPut("das.LocalFileStorageService.Store", data, timeout, s) + fileName := EncodeStorageServiceKey(dastree.Hash(data)) finalPath := s.dataDir + "/" + fileName // Use a temp file and rename to achieve atomic writes. @@ -113,7 +114,7 @@ func (s *LocalFileStorageService) HealthCheck(ctx context.Context) error { if err != nil { return err } - res, err := s.GetByHash(ctx, crypto.Keccak256(testData)) + res, err := s.GetByHash(ctx, dastree.Hash(testData)) if err != nil { return err } diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 31b8f90d17..7931cccce0 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -7,12 +7,11 @@ import ( "context" "errors" "sync" - "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/util/pretty" + "github.com/offchainlabs/nitro/das/dastree" ) type MemoryBackedStorageService struct { // intended for testing and debugging @@ -29,16 +28,14 @@ func NewMemoryBackedStorageService(ctx context.Context) StorageService { } } -func (m *MemoryBackedStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.MemoryBackedStorageService.GetByHash", "key", 
pretty.FirstFewBytes(key), "this", m) +func (m *MemoryBackedStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.MemoryBackedStorageService.GetByHash", "key", key, "this", m) m.rwmutex.RLock() defer m.rwmutex.RUnlock() if m.closed { return nil, ErrClosed } - var h32 [32]byte - copy(h32[:], key) - res, found := m.contents[h32] + res, found := m.contents[key] if !found { return nil, ErrNotFound } @@ -46,16 +43,13 @@ func (m *MemoryBackedStorageService) GetByHash(ctx context.Context, key []byte) } func (m *MemoryBackedStorageService) Put(ctx context.Context, data []byte, expirationTime uint64) error { - log.Trace("das.MemoryBackedStorageService.Store", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(expirationTime), 0), "this", m) + logPut("das.MemoryBackedStorageService.Store", data, expirationTime, m) m.rwmutex.Lock() defer m.rwmutex.Unlock() if m.closed { return ErrClosed } - var h32 [32]byte - h := crypto.Keccak256(data) - copy(h32[:], h) - m.contents[h32] = append([]byte{}, data...) + m.contents[dastree.Hash(data)] = append([]byte{}, data...) 
return nil } diff --git a/das/panic_wrapper.go b/das/panic_wrapper.go index cc6d577544..567be07431 100644 --- a/das/panic_wrapper.go +++ b/das/panic_wrapper.go @@ -7,6 +7,7 @@ import ( "context" "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbstate" ) @@ -20,7 +21,7 @@ func NewPanicWrapper(dataAvailabilityService DataAvailabilityService) DataAvaila } } -func (w *PanicWrapper) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (w *PanicWrapper) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { data, err := w.DataAvailabilityService.GetByHash(ctx, hash) if err != nil { panic(fmt.Sprintf("panic wrapper GetByHash: %v", err)) diff --git a/das/reader_aggregator_strategies_test.go b/das/reader_aggregator_strategies_test.go index 83fc41bbfb..987bc08938 100644 --- a/das/reader_aggregator_strategies_test.go +++ b/das/reader_aggregator_strategies_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbstate" ) @@ -17,7 +18,7 @@ type dummyReader struct { int } -func (*dummyReader) GetByHash(context.Context, []byte) ([]byte, error) { +func (*dummyReader) GetByHash(context.Context, common.Hash) ([]byte, error) { return nil, errors.New("not implemented") } diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index 2d6b3b2ec3..0afaa219c0 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -13,12 +13,12 @@ import ( "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" ) @@ -71,8 +71,7 @@ func (rs *RedisStorageService) verifyMessageSignature(data []byte) ([]byte, erro return nil, errors.New("data is too short to 
contain message signature") } message := data[:len(data)-32] - var haveHmac common.Hash - copy(haveHmac[:], data[len(data)-32:]) + haveHmac := common.BytesToHash(data[len(data)-32:]) mac := hmac.New(sha3.NewLegacyKeccak256, rs.signingKey[:]) mac.Write(message) expectHmac := mac.Sum(nil) @@ -82,8 +81,8 @@ func (rs *RedisStorageService) verifyMessageSignature(data []byte) ([]byte, erro return message, nil } -func (rs *RedisStorageService) getVerifiedData(ctx context.Context, key []byte) ([]byte, error) { - data, err := rs.client.Get(ctx, string(key)).Bytes() +func (rs *RedisStorageService) getVerifiedData(ctx context.Context, key common.Hash) ([]byte, error) { + data, err := rs.client.Get(ctx, string(key.Bytes())).Bytes() if err != nil { log.Error("das.RedisStorageService.getVerifiedData", "err", err) return nil, err @@ -101,8 +100,8 @@ func (rs *RedisStorageService) signMessage(message []byte) []byte { return mac.Sum(message) } -func (rs *RedisStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.RedisStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", rs) +func (rs *RedisStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.RedisStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", rs) ret, err := rs.getVerifiedData(ctx, key) if err != nil { ret, err = rs.baseStorageService.GetByHash(ctx, key) @@ -110,7 +109,7 @@ func (rs *RedisStorageService) GetByHash(ctx context.Context, key []byte) ([]byt return nil, err } - err = rs.client.Set(ctx, string(key), rs.signMessage(ret), rs.redisConfig.Expiration).Err() + err = rs.client.Set(ctx, string(key.Bytes()), rs.signMessage(ret), rs.redisConfig.Expiration).Err() if err != nil { return nil, err } @@ -121,12 +120,14 @@ func (rs *RedisStorageService) GetByHash(ctx context.Context, key []byte) ([]byt } func (rs *RedisStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - 
log.Trace("das.RedisStorageService.Store", "message", pretty.FirstFewBytes(value), "timeout", timeout, "this", rs) + logPut("das.RedisStorageService.Store", value, timeout, rs) err := rs.baseStorageService.Put(ctx, value, timeout) if err != nil { return err } - err = rs.client.Set(ctx, string(crypto.Keccak256(value)), rs.signMessage(value), rs.redisConfig.Expiration).Err() + err = rs.client.Set( + ctx, string(dastree.Hash(value).Bytes()), rs.signMessage(value), rs.redisConfig.Expiration, + ).Err() if err != nil { log.Error("das.RedisStorageService.Store", "err", err) } diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go index 3f68a6c9f0..2481358cf6 100644 --- a/das/redis_storage_service_test.go +++ b/das/redis_storage_service_test.go @@ -11,8 +11,7 @@ import ( "time" "github.com/alicebob/miniredis/v2" - - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" ) func TestRedisStorageService(t *testing.T) { @@ -32,8 +31,8 @@ func TestRedisStorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - val1CorrectKey := crypto.Keccak256(val1) - val1IncorrectKey := crypto.Keccak256(append(val1, 0)) + val1CorrectKey := dastree.Hash(val1) + val1IncorrectKey := dastree.Hash(append(val1, 0)) _, err = redisService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { @@ -55,8 +54,8 @@ func TestRedisStorageService(t *testing.T) { // For Case where the value is present in the base storage but not present in the cache. 
val2 := []byte("The Second value") - val2CorrectKey := crypto.Keccak256(val2) - val2IncorrectKey := crypto.Keccak256(append(val2, 0)) + val2CorrectKey := dastree.Hash(val2) + val2IncorrectKey := dastree.Hash(append(val2, 0)) err = baseStorageService.Put(ctx, val2, timeout) Require(t, err) diff --git a/das/redundant_simple_das_reader.go b/das/redundant_simple_das_reader.go index f4ceb549a0..ed8b8c1096 100644 --- a/das/redundant_simple_das_reader.go +++ b/das/redundant_simple_das_reader.go @@ -7,6 +7,7 @@ import ( "context" "errors" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/util/pretty" @@ -25,8 +26,8 @@ type rsdrResponse struct { err error } -func (r RedundantSimpleDASReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.RedundantSimpleDASReader.GetByHash", "key", pretty.FirstFewBytes(hash), "this", r) +func (r RedundantSimpleDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.RedundantSimpleDASReader.GetByHash", "key", pretty.PrettyHash(hash), "this", r) subCtx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/das/redundant_storage_service.go b/das/redundant_storage_service.go index e348dd4288..74d32bd819 100644 --- a/das/redundant_storage_service.go +++ b/das/redundant_storage_service.go @@ -7,8 +7,8 @@ import ( "context" "errors" "sync" - "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/util/pretty" @@ -32,8 +32,8 @@ type readResponse struct { err error } -func (r *RedundantStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.RedundantStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", r) +func (r *RedundantStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + 
log.Trace("das.RedundantStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", r) subCtx, cancel := context.WithCancel(ctx) defer cancel() var anyError error @@ -61,7 +61,7 @@ func (r *RedundantStorageService) GetByHash(ctx context.Context, key []byte) ([] } func (r *RedundantStorageService) Put(ctx context.Context, data []byte, expirationTime uint64) error { - log.Trace("das.RedundantStorageService.Store", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(expirationTime), 0), "this", r) + logPut("das.RedundantStorageService.Store", data, expirationTime, r) var wg sync.WaitGroup var errorMutex sync.Mutex var anyError error diff --git a/das/redundant_storage_test.go b/das/redundant_storage_test.go index aa582d2d27..b56f62ee24 100644 --- a/das/redundant_storage_test.go +++ b/das/redundant_storage_test.go @@ -7,9 +7,10 @@ import ( "bytes" "context" "errors" - "github.com/ethereum/go-ethereum/crypto" "testing" "time" + + "github.com/offchainlabs/nitro/das/dastree" ) const NumServices = 3 @@ -25,8 +26,8 @@ func TestRedundantStorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - key1 := crypto.Keccak256(val1) - key2 := crypto.Keccak256(append(val1, 0)) + key1 := dastree.Hash(val1) + key2 := dastree.Hash(append(val1, 0)) _, err = redundantService.GetByHash(ctx, key1) if !errors.Is(err, ErrNotFound) { diff --git a/das/restful_client.go b/das/restful_client.go index c23fb77cd9..70fa09bbdb 100644 --- a/das/restful_client.go +++ b/das/restful_client.go @@ -13,8 +13,9 @@ import ( "net/http" "strings" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" ) // Implements DataAvailabilityReader @@ -38,10 +39,7 @@ func NewRestfulDasClientFromURL(url string) (*RestfulDasClient, error) { }, nil } -func (c *RestfulDasClient) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - if len(hash) != 32 { - 
return nil, fmt.Errorf("Hash must be 32 bytes long, was %d", len(hash)) - } +func (c *RestfulDasClient) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { res, err := http.Get(c.url + getByHashRequestPath + EncodeStorageServiceKey(hash)) if err != nil { return nil, err @@ -66,7 +64,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash []byte) ([]byte, if err != nil { return nil, err } - if !bytes.Equal(hash, crypto.Keccak256(decodedBytes)) { + if !dastree.ValidHash(hash, decodedBytes) { return nil, arbstate.ErrHashMismatch } diff --git a/das/restful_server.go b/das/restful_server.go index 72c1102288..dda909a7ad 100644 --- a/das/restful_server.go +++ b/das/restful_server.go @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbstate" @@ -163,7 +164,7 @@ func (rds *RestfulDasServer) GetByHashHandler(w http.ResponseWriter, r *http.Req return } - responseData, err := rds.storage.GetByHash(r.Context(), hashBytes[:32]) + responseData, err := rds.storage.GetByHash(r.Context(), common.BytesToHash(hashBytes[:32])) if err != nil { log.Warn("Unable to find data", "path", requestPath, "err", err) w.WriteHeader(http.StatusNotFound) diff --git a/das/restful_server_test.go b/das/restful_server_test.go index 671e26f459..2467048771 100644 --- a/das/restful_server_test.go +++ b/das/restful_server_test.go @@ -13,9 +13,9 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/das/dastree" ) const LocalServerAddressForTest = "localhost" @@ -44,7 +44,7 @@ func TestRestfulClientServer(t *testing.T) { storage := NewMemoryBackedStorageService(ctx) data := []byte("Testing a restful server now.") - dataHash := crypto.Keccak256(data) + dataHash := dastree.Hash(data) server, port, err 
:= NewRestfulDasServerOnRandomPort(LocalServerAddressForTest, storage) Require(t, err) @@ -61,7 +61,7 @@ func TestRestfulClientServer(t *testing.T) { Fail(t, fmt.Sprintf("Returned data '%s' does not match expected '%s'", returnedData, data)) } - _, err = client.GetByHash(ctx, crypto.Keccak256([]byte("absent data"))) + _, err = client.GetByHash(ctx, dastree.Hash([]byte("absent data"))) if err == nil || !strings.Contains(err.Error(), "404") { Fail(t, "Expected a 404 error") } diff --git a/das/retry_wrapper.go b/das/retry_wrapper.go index 0767127828..0f0ba9830d 100644 --- a/das/retry_wrapper.go +++ b/das/retry_wrapper.go @@ -9,6 +9,7 @@ import ( "time" "github.com/cenkalti/backoff/v4" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbstate" ) @@ -27,7 +28,7 @@ func NewRetryWrapper(dataAvailabilityService DataAvailabilityService) DataAvaila } } -func (w *RetryWrapper) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (w *RetryWrapper) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { var res []byte err := backoff.Retry(func() error { if ctx.Err() != nil { diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index fe75375d8d..65d19ee00d 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -15,9 +15,10 @@ import ( "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" @@ -64,9 +65,12 @@ type S3StorageService struct { } func NewS3StorageService(config S3StorageServiceConfig) (StorageService, error) { + credCache := aws.NewCredentialsCache( + credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey, ""), + ) client := s3.New(s3.Options{ 
Region: config.Region, - Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey, "")), + Credentials: credCache, }) return &S3StorageService{ client: client, @@ -78,8 +82,8 @@ func NewS3StorageService(config S3StorageServiceConfig) (StorageService, error) }, nil } -func (s3s *S3StorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.S3StorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", s3s) +func (s3s *S3StorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.S3StorageService.GetByHash", "key", pretty.PrettyHash(key), "this", s3s) buf := manager.NewWriteAtBuffer([]byte{}) _, err := s3s.downloader.Download(ctx, buf, &s3.GetObjectInput{ @@ -90,11 +94,10 @@ func (s3s *S3StorageService) GetByHash(ctx context.Context, key []byte) ([]byte, } func (s3s *S3StorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - log.Trace("das.S3StorageService.Store", "message", pretty.FirstFewBytes(value), "timeout", timeout, "this", s3s) - + logPut("das.S3StorageService.Store", value, timeout, s3s) putObjectInput := s3.PutObjectInput{ Bucket: aws.String(s3s.bucket), - Key: aws.String(s3s.objectPrefix + EncodeStorageServiceKey(crypto.Keccak256(value))), + Key: aws.String(s3s.objectPrefix + EncodeStorageServiceKey(dastree.Hash(value))), Body: bytes.NewReader(value)} if !s3s.discardAfterTimeout { expires := time.Unix(int64(timeout), 0) diff --git a/das/s3_storage_service_test.go b/das/s3_storage_service_test.go index df12b1c724..183b6f94be 100644 --- a/das/s3_storage_service_test.go +++ b/das/s3_storage_service_test.go @@ -14,9 +14,8 @@ import ( "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/das/dastree" ) type mockS3Uploader struct { @@ -70,8 
+69,8 @@ func TestS3StorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - val1CorrectKey := crypto.Keccak256(val1) - val2IncorrectKey := crypto.Keccak256(append(val1, 0)) + val1CorrectKey := dastree.Hash(val1) + val2IncorrectKey := dastree.Hash(append(val1, 0)) _, err = s3Service.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { diff --git a/das/sign_after_store_das.go b/das/sign_after_store_das.go index c0e8405a3d..fb48f44bd6 100644 --- a/das/sign_after_store_das.go +++ b/das/sign_after_store_das.go @@ -11,11 +11,12 @@ import ( "os" "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/pretty" @@ -116,12 +117,10 @@ func NewSignAfterStoreDASWithSeqInboxCaller( if err := keyset.Serialize(ksBuf); err != nil { return nil, err } - ksHashBuf, err := keyset.Hash() + ksHash, err := keyset.Hash() if err != nil { return nil, err } - var ksHash [32]byte - copy(ksHash[:], ksHashBuf) var bpVerifier *BatchPosterVerifier if seqInboxCaller != nil { @@ -138,7 +137,9 @@ func NewSignAfterStoreDASWithSeqInboxCaller( }, nil } -func (d *SignAfterStoreDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (c *arbstate.DataAvailabilityCertificate, err error) { +func (d *SignAfterStoreDAS) Store( + ctx context.Context, message []byte, timeout uint64, sig []byte, +) (c *arbstate.DataAvailabilityCertificate, err error) { log.Trace("das.SignAfterStoreDAS.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", d) if d.bpVerifier != nil { actualSigner, err := DasRecoverSigner(message, timeout, sig) @@ -154,11 +155,12 @@ func (d *SignAfterStoreDAS) Store(ctx 
context.Context, message []byte, timeout u } } - c = &arbstate.DataAvailabilityCertificate{} - copy(c.DataHash[:], crypto.Keccak256(message)) - - c.Timeout = timeout - c.SignersMask = 1 // The aggregator will override this if we're part of a committee. + c = &arbstate.DataAvailabilityCertificate{ + Timeout: timeout, + DataHash: dastree.Hash(message), + Version: 1, + SignersMask: 1, // The aggregator will override this if we're part of a committee. + } fields := c.SerializeSignableFields() c.Sig, err = blsSignatures.SignMessage(*d.privKey, fields) @@ -180,7 +182,7 @@ func (d *SignAfterStoreDAS) Store(ctx context.Context, message []byte, timeout u return c, nil } -func (d *SignAfterStoreDAS) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (d *SignAfterStoreDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { return d.storageService.GetByHash(ctx, hash) } diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index 780fb1f5dc..f63a7e085e 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -4,7 +4,6 @@ package das import ( - "bytes" "context" "errors" "fmt" @@ -13,9 +12,10 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" @@ -178,10 +178,10 @@ type SimpleDASReaderAggregator struct { statMessages chan readerStatMessage } -func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { a.readersMutex.RLock() defer a.readersMutex.RUnlock() - log.Trace("das.SimpleDASReaderAggregator.GetByHash", "key", 
pretty.FirstFewBytes(hash), "this", a) + log.Trace("das.SimpleDASReaderAggregator.GetByHash", "key", pretty.PrettyHash(hash), "this", a) type dataErrorPair struct { data []byte @@ -242,14 +242,16 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash []byte) return nil, fmt.Errorf("Data wasn't able to be retrieved from any DAS Reader: %v", errorCollection) } -func (a *SimpleDASReaderAggregator) tryGetByHash(ctx context.Context, hash []byte, reader arbstate.DataAvailabilityReader) ([]byte, error) { +func (a *SimpleDASReaderAggregator) tryGetByHash( + ctx context.Context, hash common.Hash, reader arbstate.DataAvailabilityReader, +) ([]byte, error) { stat := readerStatMessage{reader: reader} stat.success = false start := time.Now() result, err := reader.GetByHash(ctx, hash) if err == nil { - if bytes.Equal(crypto.Keccak256(result), hash) { + if dastree.ValidHash(hash, result) { stat.success = true } else { err = fmt.Errorf("SimpleDASReaderAggregator got result from reader(%v) not matching hash", reader) diff --git a/das/simple_das_reader_aggregator_test.go b/das/simple_das_reader_aggregator_test.go index 4b088d436b..d189568c7c 100644 --- a/das/simple_das_reader_aggregator_test.go +++ b/das/simple_das_reader_aggregator_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" ) func TestSimpleDASReaderAggregator(t *testing.T) { //nolint @@ -24,7 +24,7 @@ func TestSimpleDASReaderAggregator(t *testing.T) { //nolint storage1, storage2, storage3 := NewMemoryBackedStorageService(ctx), NewMemoryBackedStorageService(ctx), NewMemoryBackedStorageService(ctx) data1 := []byte("Testing a restful server now.") - dataHash1 := crypto.Keccak256(data1) + dataHash1 := dastree.Hash(data1) server1, port1, err := NewRestfulDasServerOnRandomPort(LocalServerAddressForTest, storage1) Require(t, err) @@ -59,13 +59,13 @@ func TestSimpleDASReaderAggregator(t *testing.T) { //nolint Fail(t, 
fmt.Sprintf("Returned data '%s' does not match expected '%s'", returnedData, data1)) } - _, err = agg.GetByHash(ctx, crypto.Keccak256([]byte("absent data"))) + _, err = agg.GetByHash(ctx, dastree.Hash([]byte("absent data"))) if err == nil || !strings.Contains(err.Error(), "404") { Fail(t, "Expected a 404 error") } data2 := []byte("Testing data that is only on the last REST endpoint.") - dataHash2 := crypto.Keccak256(data2) + dataHash2 := dastree.Hash(data2) err = storage3.Put(ctx, data2, uint64(time.Now().Add(time.Hour).Unix())) Require(t, err) diff --git a/das/storage_service.go b/das/storage_service.go index 045f11ae38..d2732dfb98 100644 --- a/das/storage_service.go +++ b/das/storage_service.go @@ -9,6 +9,7 @@ import ( "fmt" "strings" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/offchainlabs/nitro/arbstate" ) @@ -24,13 +25,17 @@ type StorageService interface { HealthCheck(ctx context.Context) error } -func EncodeStorageServiceKey(b []byte) string { - return hexutil.Encode(b)[2:] +func EncodeStorageServiceKey(key common.Hash) string { + return key.Hex()[2:] } -func DecodeStorageServiceKey(input string) ([]byte, error) { - if strings.HasPrefix(input, "0x") { - return hexutil.Decode(input) +func DecodeStorageServiceKey(input string) (common.Hash, error) { + if !strings.HasPrefix(input, "0x") { + input = "0x" + input } - return hexutil.Decode("0x" + input) + key, err := hexutil.Decode(input) + if err != nil { + return common.Hash{}, err + } + return common.BytesToHash(key), nil } diff --git a/das/store_signing.go b/das/store_signing.go index e368ba9fff..aa4ab6717d 100644 --- a/das/store_signing.go +++ b/das/store_signing.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" ) @@ -41,7 +42,7 @@ func DasRecoverSigner(data 
[]byte, timeout uint64, sig []byte) (common.Address, func dasStoreHash(data []byte, timeout uint64) []byte { var buf8 [8]byte binary.BigEndian.PutUint64(buf8[:], timeout) - return crypto.Keccak256(uniquifyingPrefix, buf8[:], data) + return dastree.HashBytes(uniquifyingPrefix, buf8[:], data) } type StoreSigningDAS struct { diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 27201e3cf8..d53d0eb82d 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -189,7 +189,7 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere return err } for hash, contents := range preimages { - _, err := s.syncTo.GetByHash(ctx, hash.Bytes()) + _, err := s.syncTo.GetByHash(ctx, hash) if errors.Is(err, ErrNotFound) { if err := s.syncTo.Put(ctx, contents, storeUntil); err != nil { return err diff --git a/das/timeout_wrapper.go b/das/timeout_wrapper.go index 37297925c6..031ff6330f 100644 --- a/das/timeout_wrapper.go +++ b/das/timeout_wrapper.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbstate" ) @@ -23,7 +24,7 @@ func NewTimeoutWrapper(dataAvailabilityService DataAvailabilityService, t time.D } } -func (w *TimeoutWrapper) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (w *TimeoutWrapper) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { deadlineCtx, cancel := context.WithDeadline(ctx, time.Now().Add(w.t)) // For Retrieve we want fast cancellation of all goroutines started by // the aggregator as soon as one returns. diff --git a/das/util.go b/das/util.go new file mode 100644 index 0000000000..7142bf2d62 --- /dev/null +++ b/das/util.go @@ -0,0 +1,19 @@ +// Copyright 2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/util/pretty" +) + +func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader, more ...interface{}) { + log.Trace( + store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), + "this", reader, more, + ) +} diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 814a037979..6fc1a26c55 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -58,7 +58,9 @@ func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client, } } -func TransferBalance(t *testing.T, from, to string, amount *big.Int, l2info info, client client, ctx context.Context) (*types.Transaction, *types.Receipt) { +func TransferBalance( + t *testing.T, from, to string, amount *big.Int, l2info info, client client, ctx context.Context, +) (*types.Transaction, *types.Receipt) { tx := l2info.PrepareTx(from, to, l2info.TransferGas, amount, nil) err := client.SendTransaction(ctx, tx) Require(t, err) @@ -67,7 +69,14 @@ func TransferBalance(t *testing.T, from, to string, amount *big.Int, l2info info return tx, res } -func SendSignedTxViaL1(t *testing.T, ctx context.Context, l1info *BlockchainTestInfo, l1client arbutil.L1Interface, l2client arbutil.L1Interface, delayedTx *types.Transaction) *types.Receipt { +func SendSignedTxViaL1( + t *testing.T, + ctx context.Context, + l1info *BlockchainTestInfo, + l1client arbutil.L1Interface, + l2client arbutil.L1Interface, + delayedTx *types.Transaction, +) *types.Receipt { delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client) Require(t, err) usertxopts := l1info.GetDefaultTransactOpts("User", ctx) @@ -149,7 +158,9 @@ func CreateTestL1BlockChain(t *testing.T, l1info info) (info, 
*ethclient.Client, return l1info, l1Client, l1backend, stack } -func DeployOnTestL1(t *testing.T, ctx context.Context, l1info info, l1client client, chainId *big.Int) *arbnode.RollupAddresses { +func DeployOnTestL1( + t *testing.T, ctx context.Context, l1info info, l1client client, chainId *big.Int, +) *arbnode.RollupAddresses { l1info.GenerateAccount("RollupOwner") l1info.GenerateAccount("Sequencer") l1info.GenerateAccount("User") @@ -179,7 +190,9 @@ func DeployOnTestL1(t *testing.T, ctx context.Context, l1info info, l1client cli return addresses } -func createL2BlockChain(t *testing.T, l2info *BlockchainTestInfo, chainConfig *params.ChainConfig) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { +func createL2BlockChain( + t *testing.T, l2info *BlockchainTestInfo, chainConfig *params.ChainConfig, +) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { if l2info == nil { l2info = NewArbTestInfo(t, chainConfig.ChainID) } @@ -207,12 +220,28 @@ func ClientForArbBackend(t *testing.T, backend *arbitrum.Backend) *ethclient.Cli } // Create and deploy L1 and arbnode for L2 -func CreateTestNodeOnL1(t *testing.T, ctx context.Context, isSequencer bool) (l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { +func CreateTestNodeOnL1( + t *testing.T, + ctx context.Context, + isSequencer bool, +) ( + l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, + l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, +) { conf := arbnode.ConfigDefaultL1Test() return CreateTestNodeOnL1WithConfig(t, ctx, isSequencer, conf, params.ArbitrumDevTestChainConfig()) } -func CreateTestNodeOnL1WithConfig(t *testing.T, ctx context.Context, isSequencer bool, nodeConfig *arbnode.Config, chainConfig *params.ChainConfig) (l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend 
*eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { +func CreateTestNodeOnL1WithConfig( + t *testing.T, + ctx context.Context, + isSequencer bool, + nodeConfig *arbnode.Config, + chainConfig *params.ChainConfig, +) ( + l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, + l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, +) { l1info, l1client, l1backend, l1stack = CreateTestL1BlockChain(t, nil) l2info, l2stack, l2chainDb, l2arbDb, l2blockchain := createL2BlockChain(t, nil, chainConfig) addresses := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig.ChainID) @@ -242,7 +271,9 @@ func CreateTestL2(t *testing.T, ctx context.Context) (*BlockchainTestInfo, *arbn return CreateTestL2WithConfig(t, ctx, nil, arbnode.ConfigDefaultL2Test(), true) } -func CreateTestL2WithConfig(t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, takeOwnership bool) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { +func CreateTestL2WithConfig( + t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, takeOwnership bool, +) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, params.ArbitrumDevTestChainConfig()) node, err := arbnode.CreateNode(ctx, stack, chainDb, arbDb, nodeConfig, blockchain, nil, nil, nil, nil) Require(t, err) @@ -281,7 +312,14 @@ func Fail(t *testing.T, printables ...interface{}) { testhelpers.FailImpl(t, printables...) 
} -func Create2ndNode(t *testing.T, ctx context.Context, first *arbnode.Node, l1stack *node.Node, l2InitData *statetransfer.ArbosInitializationInfo, dasConfig *das.DataAvailabilityConfig) (*ethclient.Client, *arbnode.Node) { +func Create2ndNode( + t *testing.T, + ctx context.Context, + first *arbnode.Node, + l1stack *node.Node, + l2InitData *statetransfer.ArbosInitializationInfo, + dasConfig *das.DataAvailabilityConfig, +) (*ethclient.Client, *arbnode.Node) { nodeConf := arbnode.ConfigDefaultL1NonSequencerTest() if dasConfig == nil { nodeConf.DataAvailability.Enable = false @@ -291,10 +329,17 @@ func Create2ndNode(t *testing.T, ctx context.Context, first *arbnode.Node, l1sta return Create2ndNodeWithConfig(t, ctx, first, l1stack, l2InitData, nodeConf) } -func Create2ndNodeWithConfig(t *testing.T, ctx context.Context, first *arbnode.Node, l1stack *node.Node, l2InitData *statetransfer.ArbosInitializationInfo, nodeConfig *arbnode.Config) (*ethclient.Client, *arbnode.Node) { +func Create2ndNodeWithConfig( + t *testing.T, + ctx context.Context, + first *arbnode.Node, + l1stack *node.Node, + l2InitData *statetransfer.ArbosInitializationInfo, + nodeConfig *arbnode.Config, +) (*ethclient.Client, *arbnode.Node) { l1rpcClient, err := l1stack.Attach() if err != nil { - t.Fatal(err) + Fail(t, err) } l1client := ethclient.NewClient(l1rpcClient) l2stack, err := arbnode.CreateDefaultStack() @@ -322,7 +367,13 @@ func GetBalance(t *testing.T, ctx context.Context, client *ethclient.Client, acc return balance } -func authorizeDASKeyset(t *testing.T, ctx context.Context, dasSignerKey *blsSignatures.PublicKey, l1info info, l1client arbutil.L1Interface) { +func authorizeDASKeyset( + t *testing.T, + ctx context.Context, + dasSignerKey *blsSignatures.PublicKey, + l1info info, + l1client arbutil.L1Interface, +) { if dasSignerKey == nil { return } @@ -343,7 +394,9 @@ func authorizeDASKeyset(t *testing.T, ctx context.Context, dasSignerKey *blsSign Require(t, err) } -func setupConfigWithDAS(t 
*testing.T, dasModeString string) (*params.ChainConfig, *arbnode.Config, string, *blsSignatures.PublicKey) { +func setupConfigWithDAS( + t *testing.T, dasModeString string, +) (*params.ChainConfig, *arbnode.Config, string, *blsSignatures.PublicKey) { l1NodeConfigA := arbnode.ConfigDefaultL1Test() chainConfig := params.ArbitrumDevTestChainConfig() var dbPath string @@ -379,10 +432,10 @@ func setupConfigWithDAS(t *testing.T, dasModeString string) (*params.ChainConfig Enable: enableDbStorage, DataDir: dbPath, }, + RequestTimeout: 5 * time.Second, + L1NodeURL: "none", PanicOnError: true, DisableSignatureChecking: true, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, } l1NodeConfigA.DataAvailability = dasConfig diff --git a/util/arbmath/bits.go b/util/arbmath/bits.go new file mode 100644 index 0000000000..d3d14bda91 --- /dev/null +++ b/util/arbmath/bits.go @@ -0,0 +1,36 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbmath + +import ( + "encoding/binary" +) + +// unrolls a series of slices into a singular, concatenated slice +func ConcatByteSlices(slices ...[]byte) []byte { + unrolled := []byte{} + for _, slice := range slices { + unrolled = append(unrolled, slice...) 
+ } + return unrolled +} + +// the number of eth-words needed to store n bytes +func WordsForBytes(nbytes uint64) uint64 { + return (nbytes + 31) / 32 +} + +// casts a uint64 to its big-endian representation +func UintToBytes(value uint64) []byte { + result := make([]byte, 8) + binary.BigEndian.PutUint64(result, value) + return result +} + +// casts a uint32 to its big-endian representation +func Uint32ToBytes(value uint32) []byte { + result := make([]byte, 4) + binary.BigEndian.PutUint32(result, value) + return result +} diff --git a/util/arbmath/math.go b/util/arbmath/math.go index ec0f00f6d5..9b296534ec 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -4,7 +4,6 @@ package arbmath import ( - "encoding/binary" "math" "math/big" "math/bits" @@ -15,6 +14,15 @@ func NextPowerOf2(value uint64) uint64 { return 1 << Log2ceil(value) } +// the smallest power of 2 no less than the input +func NextOrCurrentPowerOf2(value uint64) uint64 { + power := NextPowerOf2(value) + if power == 2*value { + power /= 2 + } + return power +} + // the log2 of the int, rounded up func Log2ceil(value uint64) uint64 { return uint64(64 - bits.LeadingZeros64(value)) @@ -36,6 +44,14 @@ func MinUint(value, ceiling uint64) uint64 { return value } +// the minimum of two 32-bit uints +func MinUint32(value, ceiling uint32) uint32 { + if value > ceiling { + return ceiling + } + return value +} + // the maximum of two ints func MaxInt(value, floor int64) int64 { if value < floor { @@ -279,18 +295,6 @@ func SaturatingCastToUint(value *big.Int) uint64 { return value.Uint64() } -// the number of eth-words needed to store n bytes -func WordsForBytes(nbytes uint64) uint64 { - return (nbytes + 31) / 32 -} - -// casts a uint64 to its big-endian representation -func UintToBytes(value uint64) []byte { - result := make([]byte, 8) - binary.BigEndian.PutUint64(result, value) - return result -} - // Return the Maclaurin series approximation of e^x, where x is denominated in basis points.
// This quartic polynomial will underestimate e^x by about 5% as x approaches 20000 bips. func ApproxExpBasisPoints(value Bips) Bips { diff --git a/util/pretty/pretty_printing.go b/util/pretty/pretty_printing.go index a0df13ea36..72a8690eb7 100644 --- a/util/pretty/pretty_printing.go +++ b/util/pretty/pretty_printing.go @@ -3,7 +3,11 @@ package pretty -import "fmt" +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) func FirstFewBytes(b []byte) string { if len(b) < 9 { @@ -13,6 +17,18 @@ func FirstFewBytes(b []byte) string { } } +func PrettyBytes(b []byte) string { + hex := common.Bytes2Hex(b) + if len(hex) > 24 { + return fmt.Sprintf("%v...", hex[:24]) + } + return hex +} + +func PrettyHash(hash common.Hash) string { + return FirstFewBytes(hash.Bytes()) +} + func FirstFewChars(s string) string { if len(s) < 9 { return fmt.Sprintf("\"%s\"", s)