From 17632926ad3d22f656fea104c9dc5e04dfd9e1d0 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 29 Jun 2022 21:40:11 -0500 Subject: [PATCH 01/23] add dastree data structure --- das/aggregator.go | 6 +-- das/archiving_storage_service_test.go | 9 ++-- das/bigcache_storage_service.go | 7 ++- das/bigcache_storage_service_test.go | 11 ++--- das/chain_fetch_das.go | 6 +-- das/dasrpc/dasRpcClient.go | 4 +- das/dastree/dastree.go | 58 ++++++++++++++++++++++++ das/db_storage_service.go | 6 +-- das/fallback_storage_service.go | 4 +- das/fallback_storage_service_test.go | 11 +++-- das/local_file_storage_service.go | 8 ++-- das/memory_backed_storage_service.go | 10 ++-- das/redis_storage_service.go | 9 ++-- das/redis_storage_service_test.go | 11 ++--- das/redundant_storage_test.go | 7 +-- das/restful_client.go | 4 +- das/restful_server_test.go | 6 +-- das/s3_storage_service.go | 7 ++- das/s3_storage_service_test.go | 7 ++- das/sign_after_store_das.go | 8 ++-- das/simple_das_reader_aggregator.go | 8 ++-- das/simple_das_reader_aggregator_test.go | 8 ++-- das/store_signing.go | 3 +- das/util.go | 18 ++++++++ 24 files changed, 156 insertions(+), 80 deletions(-) create mode 100644 das/dastree/dastree.go create mode 100644 das/util.go diff --git a/das/aggregator.go b/das/aggregator.go index ff55f1582a..dcf83ef60b 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -15,12 +15,12 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/pretty" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" @@ -180,7 +180,7 @@ func (a *Aggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) errorChan <- err return } - if bytes.Equal(crypto.Keccak256(blob), hash) { + if bytes.Equal(dastree.Hash(blob), hash) { blobChan <- blob } else { errorChan <- fmt.Errorf("DAS (mask %X) returned data that doesn't match requested hash!", d.signersMask) @@ -248,7 +248,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, responses := make(chan storeResponse, len(a.services)) - expectedHash := crypto.Keccak256(message) + expectedHash := dastree.Hash(message) for _, d := range a.services { go func(ctx context.Context, d ServiceDetails) { cert, err := d.service.Store(ctx, message, timeout, sig) diff --git a/das/archiving_storage_service_test.go b/das/archiving_storage_service_test.go index 95de8c3906..8625bda670 100644 --- a/das/archiving_storage_service_test.go +++ b/das/archiving_storage_service_test.go @@ -6,10 +6,11 @@ package das import ( "bytes" "context" - "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" "testing" "time" + + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" ) func TestArchivingStorageService(t *testing.T) { @@ -18,9 +19,9 @@ func TestArchivingStorageService(t *testing.T) { futureTime := uint64(time.Now().Add(time.Hour).Unix()) val1 := []byte("First value") - hash1 := crypto.Keccak256(val1) + hash1 := dastree.Hash(val1) val2 := []byte("Second value") - hash2 := crypto.Keccak256(val2) + hash2 := dastree.Hash(val2) firstStorage := NewMemoryBackedStorageService(ctx) archiveTo := NewMemoryBackedStorageService(ctx) diff 
--git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go index c675ec4802..fde597044e 100644 --- a/das/bigcache_storage_service.go +++ b/das/bigcache_storage_service.go @@ -10,10 +10,10 @@ import ( "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" ) @@ -71,13 +71,12 @@ func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key []byte) ([ } func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - log.Trace("das.BigCacheStorageService.Put", "message", pretty.FirstFewBytes(value), "timeout", time.Unix(int64(timeout), 0), "this", bcs) - + logPut("das.BigCacheStorageService.Put", value, timeout, bcs) err := bcs.baseStorageService.Put(ctx, value, timeout) if err != nil { return err } - err = bcs.bigCache.Set(string(crypto.Keccak256(value)), value) + err = bcs.bigCache.Set(string(dastree.Hash(value)), value) return err } diff --git a/das/bigcache_storage_service_test.go b/das/bigcache_storage_service_test.go index a1b3ddea05..e9071f7f87 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/bigcache_storage_service_test.go @@ -11,8 +11,7 @@ import ( "time" "github.com/allegro/bigcache" - - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" ) func TestBigCacheStorageService(t *testing.T) { @@ -29,8 +28,8 @@ func TestBigCacheStorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - val1CorrectKey := crypto.Keccak256(val1) - val1IncorrectKey := crypto.Keccak256(append(val1, 0)) + val1CorrectKey := dastree.Hash(val1) + val1IncorrectKey := dastree.Hash(append(val1, 0)) _, err = bigCacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { @@ -52,8 +51,8 @@ func TestBigCacheStorageService(t *testing.T) { // For Case where the value is present in the base storage but not present in the cache. 
val2 := []byte("The Second value") - val2CorrectKey := crypto.Keccak256(val2) - val2IncorrectKey := crypto.Keccak256(append(val2, 0)) + val2CorrectKey := dastree.Hash(val2) + val2IncorrectKey := dastree.Hash(append(val2, 0)) err = baseStorageService.Put(ctx, val2, timeout) Require(t, err) diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index 672b668d17..5c80b82d9e 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -106,8 +107,7 @@ func chainFetchGetByHash( hash []byte, ) ([]byte, error) { // try to fetch from the cache - var hash32 [32]byte - copy(hash32[:], hash) + hash32 := common.BytesToHash(hash) res, ok := cache.get(hash32) if ok { return res, nil @@ -115,7 +115,7 @@ func chainFetchGetByHash( // try to fetch from the inner DAS innerRes, err := daReader.GetByHash(ctx, hash) - if err == nil && bytes.Equal(hash, crypto.Keccak256(innerRes)) { + if err == nil && bytes.Equal(hash, dastree.Hash(innerRes)) { return innerRes, nil } diff --git a/das/dasrpc/dasRpcClient.go b/das/dasrpc/dasRpcClient.go index edd6736367..ae7b6f017a 100644 --- a/das/dasrpc/dasRpcClient.go +++ b/das/dasrpc/dasRpcClient.go @@ -10,12 +10,12 @@ import ( "time" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" ) @@ -43,7 +43,7 @@ func (c *DASRPCClient) GetByHash(ctx context.Context, hash []byte) ([]byte, erro if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hexutil.Bytes(hash)); err != nil { return nil, err } - if !bytes.Equal(hash, crypto.Keccak256(ret)) { // check hash because RPC server might be untrusted + if !bytes.Equal(hash, dastree.Hash(ret)) { // check hash because RPC server might be untrusted return nil, arbstate.ErrHashMismatch } return ret, nil diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go new file mode 100644 index 0000000000..7a45be0e91 --- /dev/null +++ b/das/dastree/dastree.go @@ -0,0 +1,58 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package dastree + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/util/arbmath" +) + +const binSize = 64 * 1024 // 64 kB + +type bytes32 = common.Hash + +func Hash(preimage ...[]byte) []byte { + // Algorithm + // 1. split the preimage into 64kB bins and hash them to produces the tree's leaves + // 2. repeatedly hash pairs over and over, bubbling up any odd-one's out, forming the root + // + // r <=> hash(hash(0, 1), 2) step 2 + // / \ + // * 2 <=> hash(0, 1), 2 step 1 + // / \ + // 0 1 <=> 0, 1, 2 step 0 + + unrolled := []byte{} + for _, slice := range preimage { + unrolled = append(unrolled, slice...) 
+ } + if len(unrolled) == 0 { + return crypto.Keccak256([]byte{}) + } + + length := int64(len(unrolled)) + leaves := []bytes32{} + for bin := int64(0); bin < length; bin += binSize { + end := arbmath.MinInt(bin+binSize, length) + keccak := crypto.Keccak256Hash(unrolled[bin:end]) + leaves = append(leaves, keccak) + } + + layer := leaves + for len(layer) > 1 { + prior := len(layer) + after := prior/2 + prior%2 + paired := make([]bytes32, after) + for i := 0; i < prior-1; i += 2 { + paired[i/2] = crypto.Keccak256Hash(layer[i][:], layer[i+1][:]) + } + if prior%2 == 1 { + paired[after-1] = layer[prior-1] + } + layer = paired + } + + return layer[0][:] +} diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 31eab54b4a..df9a42518a 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -10,9 +10,9 @@ import ( "time" badger "github.com/dgraph-io/badger/v3" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" @@ -105,7 +105,7 @@ func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint6 log.Trace("das.DBStorageService.Put", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", dbs) return dbs.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry(crypto.Keccak256(data), data) + e := badger.NewEntry(dastree.Hash(data), data) if dbs.discardAfterTimeout { e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0))) } @@ -140,7 +140,7 @@ func (dbs *DBStorageService) HealthCheck(ctx context.Context) error { if err != nil { return err } - res, err := dbs.GetByHash(ctx, crypto.Keccak256(testData)) + res, err := dbs.GetByHash(ctx, dastree.Hash(testData)) if err != nil { return err } diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index 8df0c7cb90..52c5903f67 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -9,9 +9,9 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/pretty" ) @@ -82,7 +82,7 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]b if err != nil { return nil, err } - if bytes.Equal(key, crypto.Keccak256(data)) { + if bytes.Equal(key, dastree.Hash(data)) { putErr := f.StorageService.Put(ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds)) if putErr != nil && !f.ignoreRetentionWriteErrors { return nil, err diff --git a/das/fallback_storage_service_test.go b/das/fallback_storage_service_test.go index c7254e2a05..14eed2d0c4 100644 --- a/das/fallback_storage_service_test.go +++ b/das/fallback_storage_service_test.go @@ -7,9 +7,10 @@ import ( "bytes" "context" "errors" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" "testing" + + "github.com/ethereum/go-ethereum/common/math" + "github.com/offchainlabs/nitro/das/dastree" ) func TestFallbackStorageService(t *testing.T) { @@ -17,9 +18,9 @@ func TestFallbackStorageService(t *testing.T) { defer cancel() val1 := []byte("First value") - hash1 := crypto.Keccak256(val1) + hash1 := dastree.Hash(val1) val2 := []byte("Second value") - hash2 
:= crypto.Keccak256(val2) + hash2 := dastree.Hash(val2) primary := NewMemoryBackedStorageService(ctx) err := primary.Put(ctx, val1, math.MaxUint64) @@ -53,7 +54,7 @@ func TestFallbackStorageServiceRecursive(t *testing.T) { defer cancel() val1 := []byte("First value") - hash1 := crypto.Keccak256(val1) + hash1 := dastree.Hash(val1) ss := NewMemoryBackedStorageService(ctx) fss := NewFallbackStorageService(ss, ss, 60*60, true, true) diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 12c0c130de..bc620df48d 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -12,9 +12,9 @@ import ( "os" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" "golang.org/x/sys/unix" @@ -65,8 +65,8 @@ func (s *LocalFileStorageService) GetByHash(ctx context.Context, key []byte) ([] } func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - log.Trace("das.LocalFileStorageService.Store", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", s) - fileName := EncodeStorageServiceKey(crypto.Keccak256(data)) + logPut("das.LocalFileStorageService.Store", data, timeout, s) + fileName := EncodeStorageServiceKey(dastree.Hash(data)) finalPath := s.dataDir + "/" + fileName // Use a temp file and rename to achieve atomic writes. @@ -113,7 +113,7 @@ func (s *LocalFileStorageService) HealthCheck(ctx context.Context) error { if err != nil { return err } - res, err := s.GetByHash(ctx, crypto.Keccak256(testData)) + res, err := s.GetByHash(ctx, dastree.Hash(testData)) if err != nil { return err } diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 31b8f90d17..1e1b57de02 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -7,11 +7,11 @@ import ( "context" "errors" "sync" - "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" ) @@ -46,15 +46,13 @@ func (m *MemoryBackedStorageService) GetByHash(ctx context.Context, key []byte) } func (m *MemoryBackedStorageService) Put(ctx context.Context, data []byte, expirationTime uint64) error { - log.Trace("das.MemoryBackedStorageService.Store", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(expirationTime), 0), "this", m) + logPut("das.MemoryBackedStorageService.Store", data, expirationTime, m) m.rwmutex.Lock() defer m.rwmutex.Unlock() if m.closed { return ErrClosed } - var h32 [32]byte - h := crypto.Keccak256(data) - copy(h32[:], h) + h32 := common.BytesToHash(dastree.Hash(data)) m.contents[h32] = append([]byte{}, data...) 
return nil } diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index 2d6b3b2ec3..8c1cda9c70 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -13,12 +13,12 @@ import ( "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" ) @@ -71,8 +71,7 @@ func (rs *RedisStorageService) verifyMessageSignature(data []byte) ([]byte, erro return nil, errors.New("data is too short to contain message signature") } message := data[:len(data)-32] - var haveHmac common.Hash - copy(haveHmac[:], data[len(data)-32:]) + haveHmac := common.BytesToHash(data[len(data)-32:]) mac := hmac.New(sha3.NewLegacyKeccak256, rs.signingKey[:]) mac.Write(message) expectHmac := mac.Sum(nil) @@ -121,12 +120,12 @@ func (rs *RedisStorageService) GetByHash(ctx context.Context, key []byte) ([]byt } func (rs *RedisStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - log.Trace("das.RedisStorageService.Store", "message", pretty.FirstFewBytes(value), "timeout", timeout, "this", rs) + logPut("das.RedisStorageService.Store", value, timeout, rs) err := rs.baseStorageService.Put(ctx, value, timeout) if err != nil { return err } - err = rs.client.Set(ctx, string(crypto.Keccak256(value)), rs.signMessage(value), rs.redisConfig.Expiration).Err() + err = rs.client.Set(ctx, string(dastree.Hash(value)), rs.signMessage(value), rs.redisConfig.Expiration).Err() if err != nil { log.Error("das.RedisStorageService.Store", "err", err) } diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go index 3f68a6c9f0..2481358cf6 100644 --- a/das/redis_storage_service_test.go +++ b/das/redis_storage_service_test.go @@ -11,8 +11,7 @@ import ( "time" "github.com/alicebob/miniredis/v2" - - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" ) func TestRedisStorageService(t *testing.T) { @@ -32,8 +31,8 @@ func TestRedisStorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - val1CorrectKey := crypto.Keccak256(val1) - val1IncorrectKey := crypto.Keccak256(append(val1, 0)) + val1CorrectKey := dastree.Hash(val1) + val1IncorrectKey := dastree.Hash(append(val1, 0)) _, err = redisService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { @@ -55,8 +54,8 @@ func TestRedisStorageService(t *testing.T) { // For Case where the value is present in the base storage but not present in the cache. 
val2 := []byte("The Second value") - val2CorrectKey := crypto.Keccak256(val2) - val2IncorrectKey := crypto.Keccak256(append(val2, 0)) + val2CorrectKey := dastree.Hash(val2) + val2IncorrectKey := dastree.Hash(append(val2, 0)) err = baseStorageService.Put(ctx, val2, timeout) Require(t, err) diff --git a/das/redundant_storage_test.go b/das/redundant_storage_test.go index aa582d2d27..b56f62ee24 100644 --- a/das/redundant_storage_test.go +++ b/das/redundant_storage_test.go @@ -7,9 +7,10 @@ import ( "bytes" "context" "errors" - "github.com/ethereum/go-ethereum/crypto" "testing" "time" + + "github.com/offchainlabs/nitro/das/dastree" ) const NumServices = 3 @@ -25,8 +26,8 @@ func TestRedundantStorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - key1 := crypto.Keccak256(val1) - key2 := crypto.Keccak256(append(val1, 0)) + key1 := dastree.Hash(val1) + key2 := dastree.Hash(append(val1, 0)) _, err = redundantService.GetByHash(ctx, key1) if !errors.Is(err, ErrNotFound) { diff --git a/das/restful_client.go b/das/restful_client.go index c23fb77cd9..ecdaa7626e 100644 --- a/das/restful_client.go +++ b/das/restful_client.go @@ -13,8 +13,8 @@ import ( "net/http" "strings" - "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" ) // Implements DataAvailabilityReader @@ -66,7 +66,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash []byte) ([]byte, if err != nil { return nil, err } - if !bytes.Equal(hash, crypto.Keccak256(decodedBytes)) { + if !bytes.Equal(hash, dastree.Hash(decodedBytes)) { return nil, arbstate.ErrHashMismatch } diff --git a/das/restful_server_test.go b/das/restful_server_test.go index c2cf9cb5ca..9071602fce 100644 --- a/das/restful_server_test.go +++ b/das/restful_server_test.go @@ -13,8 +13,8 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" ) const LocalServerAddressForTest = "localhost" @@ -43,7 +43,7 @@ func TestRestfulClientServer(t *testing.T) { storage := NewMemoryBackedStorageService(ctx) data := []byte("Testing a restful server now.") - dataHash := crypto.Keccak256(data) + dataHash := dastree.Hash(data) server, port, err := NewRestfulDasServerOnRandomPort(LocalServerAddressForTest, storage) Require(t, err) @@ -60,7 +60,7 @@ func TestRestfulClientServer(t *testing.T) { Fail(t, fmt.Sprintf("Returned data '%s' does not match expected '%s'", returnedData, data)) } - _, err = client.GetByHash(ctx, crypto.Keccak256([]byte("absent data"))) + _, err = client.GetByHash(ctx, dastree.Hash([]byte("absent data"))) if err == nil || !strings.Contains(err.Error(), "404") { Fail(t, "Expected a 404 error") } diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index fe75375d8d..f07f0747d4 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -15,9 +15,9 @@ import ( "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" @@ -90,11 +90,10 @@ func (s3s *S3StorageService) GetByHash(ctx context.Context, key []byte) ([]byte, } func (s3s *S3StorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - log.Trace("das.S3StorageService.Store", "message", 
pretty.FirstFewBytes(value), "timeout", timeout, "this", s3s) - + logPut("das.S3StorageService.Store", value, timeout, s3s) putObjectInput := s3.PutObjectInput{ Bucket: aws.String(s3s.bucket), - Key: aws.String(s3s.objectPrefix + EncodeStorageServiceKey(crypto.Keccak256(value))), + Key: aws.String(s3s.objectPrefix + EncodeStorageServiceKey(dastree.Hash(value))), Body: bytes.NewReader(value)} if !s3s.discardAfterTimeout { expires := time.Unix(int64(timeout), 0) diff --git a/das/s3_storage_service_test.go b/das/s3_storage_service_test.go index df12b1c724..183b6f94be 100644 --- a/das/s3_storage_service_test.go +++ b/das/s3_storage_service_test.go @@ -14,9 +14,8 @@ import ( "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/das/dastree" ) type mockS3Uploader struct { @@ -70,8 +69,8 @@ func TestS3StorageService(t *testing.T) { Require(t, err) val1 := []byte("The first value") - val1CorrectKey := crypto.Keccak256(val1) - val2IncorrectKey := crypto.Keccak256(append(val1, 0)) + val1CorrectKey := dastree.Hash(val1) + val2IncorrectKey := dastree.Hash(append(val1, 0)) _, err = s3Service.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { diff --git a/das/sign_after_store_das.go b/das/sign_after_store_das.go index 6fcb4e0348..f4f53684b1 100644 --- a/das/sign_after_store_das.go +++ b/das/sign_after_store_das.go @@ -11,12 +11,12 @@ import ( "os" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/pretty" @@ -139,7 +139,9 @@ func NewSignAfterStoreDASWithSeqInboxCaller( }, nil } -func (d *SignAfterStoreDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (c *arbstate.DataAvailabilityCertificate, err error) { +func (d *SignAfterStoreDAS) Store( + ctx context.Context, message []byte, timeout uint64, sig []byte, +) (c *arbstate.DataAvailabilityCertificate, err error) { log.Trace("das.SignAfterStoreDAS.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", d) if d.bpVerifier != nil { actualSigner, err := DasRecoverSigner(message, timeout, sig) @@ -156,7 +158,7 @@ func (d *SignAfterStoreDAS) Store(ctx context.Context, message []byte, timeout u } c = &arbstate.DataAvailabilityCertificate{} - copy(c.DataHash[:], crypto.Keccak256(message)) + copy(c.DataHash[:], dastree.Hash(message)) c.Timeout = timeout c.SignersMask = 1 // The aggregator will override this if we're part of a committee. 
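The dastree.Hash function these call sites now use reduces any preimage to a single 32-byte root. Inputs of at most 64 kB occupy one bin, so the root equals the plain keccak of the data and the crypto.Keccak256-to-dastree.Hash swaps above preserve behavior for small payloads; larger inputs are split into 64 kB bins whose keccak hashes are paired level by level, with an odd leaf bubbling up unchanged. A minimal sketch, not part of this patch, checking both cases against the diagram in the Hash comment (using this commit's []byte-returning signature):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/offchainlabs/nitro/das/dastree"
)

const binSize = 64 * 1024 // mirrors dastree's bin width

func main() {
	// One bin: the tree is a single leaf, so the root is just keccak(data),
	// which is why values up to 64 kB hash identically before and after this patch.
	small := []byte("fits in a single 64 kB bin")
	fmt.Println(bytes.Equal(dastree.Hash(small), crypto.Keccak256(small))) // true

	// Three bins: leaves l0, l1, l2 combine as hash(hash(l0, l1), l2);
	// the odd leaf l2 bubbles up to the next level unchanged.
	big := make([]byte, 3*binSize)
	l0 := crypto.Keccak256(big[:binSize])
	l1 := crypto.Keccak256(big[binSize : 2*binSize])
	l2 := crypto.Keccak256(big[2*binSize:])
	root := crypto.Keccak256(crypto.Keccak256(l0, l1), l2)
	fmt.Println(bytes.Equal(dastree.Hash(big), root)) // true
}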
diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index 1dc41d4d8b..57c67b4171 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -13,9 +13,9 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" @@ -236,14 +236,16 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash []byte) return nil, fmt.Errorf("Data wasn't able to be retrieved from any DAS Reader: %v", errorCollection) } -func (a *SimpleDASReaderAggregator) tryGetByHash(ctx context.Context, hash []byte, reader arbstate.DataAvailabilityReader) ([]byte, error) { +func (a *SimpleDASReaderAggregator) tryGetByHash( + ctx context.Context, hash []byte, reader arbstate.DataAvailabilityReader, +) ([]byte, error) { stat := readerStatMessage{reader: reader} stat.success = false start := time.Now() result, err := reader.GetByHash(ctx, hash) if err == nil { - if bytes.Equal(crypto.Keccak256(result), hash) { + if bytes.Equal(dastree.Hash(result), hash) { stat.success = true } else { err = fmt.Errorf("SimpleDASReaderAggregator got result from reader(%v) not matching hash", reader) diff --git a/das/simple_das_reader_aggregator_test.go b/das/simple_das_reader_aggregator_test.go index 04eb978122..6262c37c5f 100644 --- a/das/simple_das_reader_aggregator_test.go +++ b/das/simple_das_reader_aggregator_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" ) func TestSimpleDASReaderAggregator(t *testing.T) { //nolint @@ -23,7 +23,7 @@ func TestSimpleDASReaderAggregator(t *testing.T) { //nolint storage1, storage2, storage3 := NewMemoryBackedStorageService(ctx), NewMemoryBackedStorageService(ctx), NewMemoryBackedStorageService(ctx) data1 := []byte("Testing a restful server now.") - dataHash1 := crypto.Keccak256(data1) + dataHash1 := dastree.Hash(data1) server1, err := NewRestfulDasServer(LocalServerAddressForTest, 9888, storage1) Require(t, err) @@ -58,13 +58,13 @@ func TestSimpleDASReaderAggregator(t *testing.T) { //nolint Fail(t, fmt.Sprintf("Returned data '%s' does not match expected '%s'", returnedData, data1)) } - _, err = agg.GetByHash(ctx, crypto.Keccak256([]byte("absent data"))) + _, err = agg.GetByHash(ctx, dastree.Hash([]byte("absent data"))) if err == nil || !strings.Contains(err.Error(), "404") { Fail(t, "Expected a 404 error") } data2 := []byte("Testing data that is only on the last REST endpoint.") - dataHash2 := crypto.Keccak256(data2) + dataHash2 := dastree.Hash(data2) err = storage3.Put(ctx, data2, uint64(time.Now().Add(time.Hour).Unix())) Require(t, err) diff --git a/das/store_signing.go b/das/store_signing.go index e368ba9fff..5cf1e5b493 100644 --- a/das/store_signing.go +++ b/das/store_signing.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" ) @@ -41,7 +42,7 @@ func DasRecoverSigner(data []byte, timeout uint64, sig []byte) (common.Address, func dasStoreHash(data []byte, timeout uint64) []byte { var buf8 [8]byte binary.BigEndian.PutUint64(buf8[:], timeout) - return crypto.Keccak256(uniquifyingPrefix, buf8[:], 
data) + return dastree.Hash(uniquifyingPrefix, buf8[:], data) } type StoreSigningDAS struct { diff --git a/das/util.go b/das/util.go new file mode 100644 index 0000000000..fac15389cf --- /dev/null +++ b/das/util.go @@ -0,0 +1,18 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/util/pretty" +) + +func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader) { + log.Trace( + store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", reader, + ) +} From df972f5139eddff95bac892390bb1f17d03887b3 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 30 Jun 2022 17:35:37 -0500 Subject: [PATCH 02/23] refactor GetByHash to use common.Hash --- arbstate/das_reader.go | 5 +++-- arbstate/inbox.go | 4 ++-- cmd/datool/datool.go | 5 +++-- cmd/replay/main.go | 5 +++-- das/aggregator.go | 8 ++++---- das/aggregator_test.go | 9 +++++---- das/archiving_storage_service.go | 7 ++++--- das/bigcache_storage_service.go | 12 ++++++------ das/cache_storage_to_das_adapter.go | 11 +++++++---- das/chain_fetch_das.go | 24 +++++++++++------------- das/das_test.go | 10 +++++----- das/dasrpc/dasRpcClient.go | 16 ++++++---------- das/dasrpc/dasRpcServer.go | 3 ++- das/dasrpc/rpc_test.go | 4 ++-- das/dastree/dastree.go | 11 +++++++---- das/db_storage_service.go | 11 ++++++----- das/fallback_storage_service.go | 22 +++++++++++----------- das/local_file_storage_service.go | 7 ++++--- das/memory_backed_storage_service.go | 12 ++++-------- das/reader_aggregator_strategies_test.go | 3 ++- das/redis_storage_service.go | 14 ++++++++------ das/redundant_simple_das_reader.go | 5 +++-- das/redundant_storage_service.go | 8 ++++---- das/restful_client.go | 8 +++----- das/restful_server.go | 3 ++- das/retry_wrapper.go | 3 ++- das/s3_storage_service.go | 10 +++++++--- das/sign_after_store_das.go | 8 +++++--- das/simple_das_reader_aggregator.go | 10 +++++----- das/storage_service.go | 17 +++++++++++------ das/store_signing.go | 2 +- das/sync_from_l1_chain.go | 2 +- das/timeout_wrapper.go | 3 ++- util/pretty/pretty_printing.go | 10 +++++++++- 34 files changed, 160 insertions(+), 132 deletions(-) diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 93cb768e2a..b75388689c 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -12,6 +12,7 @@ import ( "fmt" "io" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbos/util" @@ -19,7 +20,7 @@ import ( ) type DataAvailabilityReader interface { - GetByHash(ctx context.Context, hash []byte) ([]byte, error) + GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) HealthCheck(ctx context.Context) error ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error) } @@ -123,7 +124,7 @@ func (cert *DataAvailabilityCertificate) RecoverKeyset( ctx context.Context, da DataAvailabilityReader, ) (*DataAvailabilityKeyset, error) { - keysetBytes, err := da.GetByHash(ctx, cert.KeysetHash[:]) + keysetBytes, err := da.GetByHash(ctx, cert.KeysetHash) if err != nil { return nil, err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 5f0c3be13e..0c7af5b9ca 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -138,7 +138,7 @@ func RecoverPayloadFromDasBatch( log.Error("Failed to deserialize DAS message", "err", 
err) return nil, nil } - keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash[:]) + keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash) if err == nil && !bytes.Equal(cert.KeysetHash[:], crypto.Keccak256(keysetPreimage)) { err = ErrHashMismatch } @@ -164,7 +164,7 @@ func RecoverPayloadFromDasBatch( log.Error("Data availability cert expires too soon", "err", "") return nil, nil } - payload, err := dasReader.GetByHash(ctx, cert.DataHash[:]) + payload, err := dasReader.GetByHash(ctx, cert.DataHash) if err == nil && !bytes.Equal(crypto.Keccak256(payload), cert.DataHash[:]) { err = ErrHashMismatch } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 2fe9052986..f61e0e9751 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -179,7 +180,7 @@ func startRPCClientGetByHash(args []string) error { } ctx := context.Background() - message, err := client.GetByHash(ctx, decodedHash) + message, err := client.GetByHash(ctx, common.BytesToHash((decodedHash))) if err != nil { return err } @@ -240,7 +241,7 @@ func startRESTClientGetByHash(args []string) error { } ctx := context.Background() - message, err := client.GetByHash(ctx, decodedHash) + message, err := client.GetByHash(ctx, common.BytesToHash(decodedHash)) if err != nil { return err } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index be8558c1ee..82ad46516e 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -85,8 +85,8 @@ func (i WavmInbox) ReadDelayedInbox(seqNum uint64) ([]byte, error) { type PreimageDASReader struct { } -func (dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - return wavmio.ResolvePreImage(common.BytesToHash(hash)), nil +func (dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + return wavmio.ResolvePreImage(hash), nil } func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { @@ -96,6 +96,7 @@ func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { return arbstate.DiscardImmediately, nil } + func main() { wavmio.StubInit() diff --git a/das/aggregator.go b/das/aggregator.go index dcf83ef60b..13d6bebfe3 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -165,7 +165,7 @@ func NewAggregatorWithSeqInboxCaller( }, nil } -func (a *Aggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (a *Aggregator) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { // Query all services, even those that didn't sign. // They may have been late in returning a response after storing the data, // or got the data by some other means. @@ -180,7 +180,7 @@ func (a *Aggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) errorChan <- err return } - if bytes.Equal(dastree.Hash(blob), hash) { + if dastree.Hash(blob) == hash { blobChan <- blob } else { errorChan <- fmt.Errorf("DAS (mask %X) returned data that doesn't match requested hash!", d.signersMask) @@ -269,7 +269,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, // SignersMask from backend DAS is ignored. 
- if !bytes.Equal(cert.DataHash[:], expectedHash) { + if cert.DataHash != expectedHash { responses <- storeResponse{d, nil, errors.New("Hash verification failed.")} return } @@ -313,7 +313,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, aggCert.Sig = blsSignatures.AggregateSignatures(sigs) aggPubKey := blsSignatures.AggregatePublicKeys(pubKeys) aggCert.SignersMask = aggSignersMask - copy(aggCert.DataHash[:], expectedHash) + aggCert.DataHash = expectedHash aggCert.Timeout = timeout aggCert.KeysetHash = a.keysetHash diff --git a/das/aggregator_test.go b/das/aggregator_test.go index ff665046fa..21242967be 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -15,6 +15,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" ) @@ -62,7 +63,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { cert, err := aggregator.Store(ctx, rawMsg, 0, []byte{}) Require(t, err, "Error storing message") - messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash[:]) + messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(rawMsg, messageRetrieved) { Fail(t, "Retrieved message is not the same as stored one.") @@ -132,7 +133,7 @@ type WrapGetByHash struct { DataAvailabilityService } -func (w *WrapGetByHash) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (w *WrapGetByHash) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { switch w.injector.shouldFail() { case success: return w.DataAvailabilityService.GetByHash(ctx, hash) @@ -256,7 +257,7 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { return } - messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash[:]) + messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(rawMsg, messageRetrieved) { Fail(t, "Retrieved message is not the same as stored one.") @@ -366,7 +367,7 @@ func testConfigurableRetrieveFailures(t *testing.T, shouldFail bool) { cert, err := aggregator.Store(ctx, rawMsg, 0, []byte{}) Require(t, err, "Error storing message") - messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash[:]) + messageRetrieved, err := aggregator.GetByHash(ctx, cert.DataHash) if !shouldFail { Require(t, err, "Error retrieving message") } else { diff --git a/das/archiving_storage_service.go b/das/archiving_storage_service.go index 60b751f484..cd49756ee1 100644 --- a/das/archiving_storage_service.go +++ b/das/archiving_storage_service.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/util/arbmath" @@ -80,8 +81,8 @@ func NewArchivingStorageService( return ret, nil } -func (serv *ArchivingStorageService) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.ArchivingStorageService.GetByHash", "key", pretty.FirstFewBytes(hash), "this", serv) +func (serv *ArchivingStorageService) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.ArchivingStorageService.GetByHash", "key", pretty.PrettyHash(hash), "this", serv) data, err := serv.inner.GetByHash(ctx, hash) if err != nil { @@ -174,7 +175,7 @@ func NewArchivingSimpleDASReader( return &ArchivingSimpleDASReader{arch}, nil } -func (asdr 
*ArchivingSimpleDASReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (asdr *ArchivingSimpleDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { return asdr.wrapped.GetByHash(ctx, hash) } diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go index fde597044e..1bc37a3047 100644 --- a/das/bigcache_storage_service.go +++ b/das/bigcache_storage_service.go @@ -14,6 +14,7 @@ import ( "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -50,17 +51,17 @@ func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService }, nil } -func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", bcs) +func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs) - ret, err := bcs.bigCache.Get(string(key)) + ret, err := bcs.bigCache.Get(string(key.Bytes())) if err != nil { ret, err = bcs.baseStorageService.GetByHash(ctx, key) if err != nil { return nil, err } - err = bcs.bigCache.Set(string(key), ret) + err = bcs.bigCache.Set(string(key.Bytes()), ret) if err != nil { return nil, err } @@ -76,8 +77,7 @@ func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeou if err != nil { return err } - err = bcs.bigCache.Set(string(dastree.Hash(value)), value) - return err + return bcs.bigCache.Set(string(dastree.HashBytes(value)), value) } func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { diff --git a/das/cache_storage_to_das_adapter.go b/das/cache_storage_to_das_adapter.go index c2d1891dee..5764a2fbcd 100644 --- a/das/cache_storage_to_das_adapter.go +++ b/das/cache_storage_to_das_adapter.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/util/pretty" @@ -28,8 +29,8 @@ func NewCacheStorageToDASAdapter( } } -func (a *CacheStorageToDASAdapter) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.CacheStorageToDASAdapter.GetByHash", "key", pretty.FirstFewBytes(hash), "this", a) +func (a *CacheStorageToDASAdapter) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.CacheStorageToDASAdapter.GetByHash", "key", pretty.PrettyHash(hash), "this", a) ret, err := a.cache.GetByHash(ctx, hash) if err != nil { ret, err = a.DataAvailabilityService.GetByHash(ctx, hash) @@ -46,7 +47,9 @@ func (a *CacheStorageToDASAdapter) GetByHash(ctx context.Context, hash []byte) ( return ret, nil } -func (a *CacheStorageToDASAdapter) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (a *CacheStorageToDASAdapter) Store( + ctx context.Context, message []byte, timeout uint64, sig []byte, +) (*arbstate.DataAvailabilityCertificate, error) { log.Trace("das.CacheStorageToDASAdapter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", a) cert, err := a.DataAvailabilityService.Store(ctx, message, timeout, sig) if err != nil { @@ -72,7 +75,7 @@ func NewEmptyStorageService() *emptyStorageService { return 
&emptyStorageService{} } -func (s *emptyStorageService) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (s *emptyStorageService) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { return nil, ErrNotFound } diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index 5c80b82d9e..7aa2fd8cf3 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -4,7 +4,6 @@ package das import ( - "bytes" "context" "errors" "sync" @@ -88,13 +87,13 @@ func NewChainFetchReaderWithSeqInbox(inner arbstate.DataAvailabilityReader, seqI }, nil } -func (this *ChainFetchDAS) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.ChainFetchDAS.GetByHash", "hash", pretty.FirstFewBytes(hash)) +func (this *ChainFetchDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.ChainFetchDAS.GetByHash", "hash", pretty.PrettyHash(hash)) return chainFetchGetByHash(ctx, this.DataAvailabilityService, &this.keysetCache, this.seqInboxCaller, this.seqInboxFilterer, hash) } -func (this *ChainFetchReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { - log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.FirstFewBytes(hash)) +func (this *ChainFetchReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.PrettyHash(hash)) return chainFetchGetByHash(ctx, this.DataAvailabilityReader, &this.keysetCache, this.seqInboxCaller, this.seqInboxFilterer, hash) } @@ -104,23 +103,22 @@ func chainFetchGetByHash( cache *syncedKeysetCache, seqInboxCaller *bridgegen.SequencerInboxCaller, seqInboxFilterer *bridgegen.SequencerInboxFilterer, - hash []byte, + hash common.Hash, ) ([]byte, error) { // try to fetch from the cache - hash32 := common.BytesToHash(hash) - res, ok := cache.get(hash32) + res, ok := cache.get(hash) if ok { return res, nil } // try to fetch from the inner DAS innerRes, err := daReader.GetByHash(ctx, hash) - if err == nil && bytes.Equal(hash, dastree.Hash(innerRes)) { + if err == nil && hash == dastree.Hash(innerRes) { return innerRes, nil } // try to fetch from the L1 chain - blockNumBig, err := seqInboxCaller.GetKeysetCreationBlock(&bind.CallOpts{Context: ctx}, hash32) + blockNumBig, err := seqInboxCaller.GetKeysetCreationBlock(&bind.CallOpts{Context: ctx}, hash) if err != nil { return nil, err } @@ -135,13 +133,13 @@ func chainFetchGetByHash( End: &blockNumPlus1, Context: ctx, } - iter, err := seqInboxFilterer.FilterSetValidKeyset(filterOpts, [][32]byte{hash32}) + iter, err := seqInboxFilterer.FilterSetValidKeyset(filterOpts, [][32]byte{hash}) if err != nil { return nil, err } for iter.Next() { - if bytes.Equal(hash, crypto.Keccak256(iter.Event.KeysetBytes)) { - cache.put(hash32, iter.Event.KeysetBytes) + if hash == crypto.Keccak256Hash(iter.Event.KeysetBytes) { + cache.put(hash, iter.Event.KeysetBytes) return iter.Event.KeysetBytes, nil } } diff --git a/das/das_test.go b/das/das_test.go index cdb8566d0d..1bd79c7ba7 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -60,7 +60,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Fail(t, fmt.Sprintf("Expected timeout of %d in cert, was %d", timeout, cert.Timeout)) } - messageRetrieved, err := das.GetByHash(firstCtx, cert.DataHash[:]) + messageRetrieved, err := das.GetByHash(firstCtx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(messageSaved, messageRetrieved) { Fail(t, "Retrieved message is not the same as stored 
one.") @@ -79,13 +79,13 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { das2, err := NewSignAfterStoreDAS(secondCtx, config, storageService2) Require(t, err, "no das") - messageRetrieved2, err := das2.GetByHash(secondCtx, cert.DataHash[:]) + messageRetrieved2, err := das2.GetByHash(secondCtx, cert.DataHash) Require(t, err, "Failed to retrieve message") if !bytes.Equal(messageSaved, messageRetrieved2) { Fail(t, "Retrieved message is not the same as stored one.") } - messageRetrieved2, err = das2.GetByHash(secondCtx, cert.DataHash[:]) + messageRetrieved2, err = das2.GetByHash(secondCtx, cert.DataHash) Require(t, err, "Failed to getByHash message") if !bytes.Equal(messageSaved, messageRetrieved2) { Fail(t, "Retrieved message is not the same as stored one.") @@ -151,12 +151,12 @@ func testDASMissingMessage(t *testing.T, storageType string) { // Change the hash to look up cert.DataHash[0] += 1 - _, err = das.GetByHash(ctx, cert.DataHash[:]) + _, err = das.GetByHash(ctx, cert.DataHash) if err == nil { Fail(t, "Expected an error when retrieving message that is not in the store.") } - _, err = das.GetByHash(ctx, cert.DataHash[:]) + _, err = das.GetByHash(ctx, cert.DataHash) if err == nil { Fail(t, "Expected an error when getting by hash a message that is not in the store.") } diff --git a/das/dasrpc/dasRpcClient.go b/das/dasrpc/dasRpcClient.go index ae7b6f017a..080c420b75 100644 --- a/das/dasrpc/dasRpcClient.go +++ b/das/dasrpc/dasRpcClient.go @@ -4,11 +4,11 @@ package dasrpc import ( - "bytes" "context" "fmt" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -35,15 +35,15 @@ func NewDASRPCClient(target string) (*DASRPCClient, error) { }, nil } -func (c *DASRPCClient) GetByHash(ctx context.Context, hash []byte) ([]byte, error) { +func (c *DASRPCClient) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { if len(hash) != 32 { return nil, fmt.Errorf("Hash must be 32 bytes long, was %d", len(hash)) } var ret hexutil.Bytes - if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hexutil.Bytes(hash)); err != nil { + if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hexutil.Bytes(hash[:])); err != nil { return nil, err } - if !bytes.Equal(hash, dastree.Hash(ret)) { // check hash because RPC server might be untrusted + if hash != dastree.Hash(ret) { // check hash because RPC server might be untrusted return nil, arbstate.ErrHashMismatch } return ret, nil @@ -55,20 +55,16 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 if err := c.clnt.CallContext(ctx, &ret, "das_store", hexutil.Bytes(message), hexutil.Uint64(timeout), hexutil.Bytes(reqSig)); err != nil { return nil, err } - var keysetHash [32]byte - copy(keysetHash[:], ret.KeysetHash) - var dataHash [32]byte - copy(dataHash[:], ret.DataHash) respSig, err := blsSignatures.SignatureFromBytes(ret.Sig) if err != nil { return nil, err } return &arbstate.DataAvailabilityCertificate{ - DataHash: dataHash, + DataHash: common.BytesToHash(ret.DataHash), Timeout: uint64(ret.Timeout), SignersMask: uint64(ret.SignersMask), Sig: respSig, - KeysetHash: keysetHash, + KeysetHash: common.BytesToHash(ret.KeysetHash), }, nil } diff --git a/das/dasrpc/dasRpcServer.go b/das/dasrpc/dasRpcServer.go index 4dd3adcee8..c011a081c3 100644 --- a/das/dasrpc/dasRpcServer.go +++ b/das/dasrpc/dasRpcServer.go @@ -10,6 +10,7 @@ import ( "net/http" "time" + "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -81,7 +82,7 @@ func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, time } func (serv *DASRPCServer) GetByHash(ctx context.Context, certBytes hexutil.Bytes) (hexutil.Bytes, error) { - return serv.localDAS.GetByHash(ctx, certBytes) + return serv.localDAS.GetByHash(ctx, common.BytesToHash(certBytes)) } func (serv *DASRPCServer) HealthCheck(ctx context.Context) error { diff --git a/das/dasrpc/rpc_test.go b/das/dasrpc/rpc_test.go index 533b39efdb..9fa798a2d1 100644 --- a/das/dasrpc/rpc_test.go +++ b/das/dasrpc/rpc_test.go @@ -73,14 +73,14 @@ func TestRPC(t *testing.T) { cert, err := rpcAgg.Store(ctx, msg, 0, nil) testhelpers.RequireImpl(t, err) - retrievedMessage, err := rpcAgg.GetByHash(ctx, cert.DataHash[:]) + retrievedMessage, err := rpcAgg.GetByHash(ctx, cert.DataHash) testhelpers.RequireImpl(t, err) if !bytes.Equal(msg, retrievedMessage) { testhelpers.FailImpl(t, "failed to retrieve correct message") } - retrievedMessage, err = rpcAgg.GetByHash(ctx, cert.DataHash[:]) + retrievedMessage, err = rpcAgg.GetByHash(ctx, cert.DataHash) testhelpers.RequireImpl(t, err) if !bytes.Equal(msg, retrievedMessage) { diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index 7a45be0e91..b61d2119b3 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -13,10 +13,10 @@ const binSize = 64 * 1024 // 64 kB type bytes32 = common.Hash -func Hash(preimage ...[]byte) []byte { +func Hash(preimage ...[]byte) bytes32 { // Algorithm // 1. split the preimage into 64kB bins and hash them to produces the tree's leaves - // 2. repeatedly hash pairs over and over, bubbling up any odd-one's out, forming the root + // 2. repeatedly hash pairs over and over, bubbling up any odd-one's out, to form the root // // r <=> hash(hash(0, 1), 2) step 2 // / \ @@ -29,7 +29,7 @@ func Hash(preimage ...[]byte) []byte { unrolled = append(unrolled, slice...) 
} if len(unrolled) == 0 { - return crypto.Keccak256([]byte{}) + return crypto.Keccak256Hash([]byte{}) } length := int64(len(unrolled)) @@ -53,6 +53,9 @@ func Hash(preimage ...[]byte) []byte { } layer = paired } + return layer[0] +} - return layer[0][:] +func HashBytes(preimage ...[]byte) []byte { + return Hash(preimage...).Bytes() } diff --git a/das/db_storage_service.go b/das/db_storage_service.go index df9a42518a..267bff97ac 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -10,6 +10,7 @@ import ( "time" badger "github.com/dgraph-io/badger/v3" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/das/dastree" @@ -81,12 +82,12 @@ func NewDBStorageService(ctx context.Context, dirPath string, discardAfterTimeou return ret, nil } -func (dbs *DBStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.DBStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", dbs) +func (dbs *DBStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.DBStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", dbs) var ret []byte err := dbs.db.View(func(txn *badger.Txn) error { - item, err := txn.Get(key) + item, err := txn.Get(key.Bytes()) if err != nil { return err } @@ -102,10 +103,10 @@ func (dbs *DBStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, } func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - log.Trace("das.DBStorageService.Put", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", dbs) + logPut("das.DBStorageService.Put", data, timeout, dbs) return dbs.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry(dastree.Hash(data), data) + e := badger.NewEntry(dastree.HashBytes(data), data) if dbs.discardAfterTimeout { e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0))) } diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index 52c5903f67..9ab892d436 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -4,11 +4,11 @@ package das import ( - "bytes" "context" "sync" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/das/dastree" @@ -47,13 +47,11 @@ func NewFallbackStorageService( } } -func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.FallbackStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", f) - var key32 [32]byte +func (f *FallbackStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.FallbackStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", f) if f.preventRecursiveGets { f.currentlyFetchingMutex.RLock() - copy(key32[:], key) - if f.currentlyFetching[key32] { + if f.currentlyFetching[key] { // This is a recursive call, so return not-found f.currentlyFetchingMutex.RUnlock() return nil, ErrNotFound @@ -66,8 +64,8 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]b doDelete := false if f.preventRecursiveGets { f.currentlyFetchingMutex.Lock() - if !f.currentlyFetching[key32] { - f.currentlyFetching[key32] = true + if !f.currentlyFetching[key] { + f.currentlyFetching[key] = true doDelete = true } f.currentlyFetchingMutex.Unlock() @@ -76,14 +74,16 @@ 
func (f *FallbackStorageService) GetByHash(ctx context.Context, key []byte) ([]b data, err = f.backup.GetByHash(ctx, key) if doDelete { f.currentlyFetchingMutex.Lock() - delete(f.currentlyFetching, key32) + delete(f.currentlyFetching, key) f.currentlyFetchingMutex.Unlock() } if err != nil { return nil, err } - if bytes.Equal(key, dastree.Hash(data)) { - putErr := f.StorageService.Put(ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds)) + if key == dastree.Hash(data) { + putErr := f.StorageService.Put( + ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds), + ) if putErr != nil && !f.ignoreRetentionWriteErrors { return nil, err } diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index bc620df48d..8cd82bd99d 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -12,6 +12,7 @@ import ( "os" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/das/dastree" @@ -45,13 +46,13 @@ func NewLocalFileStorageService(dataDir string) (StorageService, error) { return &LocalFileStorageService{dataDir: dataDir}, nil } -func (s *LocalFileStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.LocalFileStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", s) +func (s *LocalFileStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.LocalFileStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", s) pathname := s.dataDir + "/" + EncodeStorageServiceKey(key) data, err := os.ReadFile(pathname) if err != nil { // Just for backward compatability. - pathname = s.dataDir + "/" + base32.StdEncoding.EncodeToString(key) + pathname = s.dataDir + "/" + base32.StdEncoding.EncodeToString(key.Bytes()) data, err = os.ReadFile(pathname) if err != nil { if errors.Is(err, os.ErrNotExist) { diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 1e1b57de02..7931cccce0 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" ) type MemoryBackedStorageService struct { // intended for testing and debugging @@ -29,16 +28,14 @@ func NewMemoryBackedStorageService(ctx context.Context) StorageService { } } -func (m *MemoryBackedStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) { - log.Trace("das.MemoryBackedStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", m) +func (m *MemoryBackedStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.MemoryBackedStorageService.GetByHash", "key", key, "this", m) m.rwmutex.RLock() defer m.rwmutex.RUnlock() if m.closed { return nil, ErrClosed } - var h32 [32]byte - copy(h32[:], key) - res, found := m.contents[h32] + res, found := m.contents[key] if !found { return nil, ErrNotFound } @@ -52,8 +49,7 @@ func (m *MemoryBackedStorageService) Put(ctx context.Context, data []byte, expir if m.closed { return ErrClosed } - h32 := common.BytesToHash(dastree.Hash(data)) - m.contents[h32] = append([]byte{}, data...) + m.contents[dastree.Hash(data)] = append([]byte{}, data...) 
diff --git a/das/reader_aggregator_strategies_test.go b/das/reader_aggregator_strategies_test.go
index 83fc41bbfb..987bc08938 100644
--- a/das/reader_aggregator_strategies_test.go
+++ b/das/reader_aggregator_strategies_test.go
@@ -10,6 +10,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/offchainlabs/nitro/arbstate"
 )
 
@@ -17,7 +18,7 @@ type dummyReader struct {
 	int
 }
 
-func (*dummyReader) GetByHash(context.Context, []byte) ([]byte, error) {
+func (*dummyReader) GetByHash(context.Context, common.Hash) ([]byte, error) {
 	return nil, errors.New("not implemented")
 }
 
diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go
index 8c1cda9c70..0afaa219c0 100644
--- a/das/redis_storage_service.go
+++ b/das/redis_storage_service.go
@@ -81,8 +81,8 @@ func (rs *RedisStorageService) verifyMessageSignature(data []byte) ([]byte, erro
 	return message, nil
 }
 
-func (rs *RedisStorageService) getVerifiedData(ctx context.Context, key []byte) ([]byte, error) {
-	data, err := rs.client.Get(ctx, string(key)).Bytes()
+func (rs *RedisStorageService) getVerifiedData(ctx context.Context, key common.Hash) ([]byte, error) {
+	data, err := rs.client.Get(ctx, string(key.Bytes())).Bytes()
 	if err != nil {
 		log.Error("das.RedisStorageService.getVerifiedData", "err", err)
 		return nil, err
@@ -100,8 +100,8 @@ func (rs *RedisStorageService) signMessage(message []byte) []byte {
 	return mac.Sum(message)
 }
 
-func (rs *RedisStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) {
-	log.Trace("das.RedisStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", rs)
+func (rs *RedisStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) {
+	log.Trace("das.RedisStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", rs)
 
 	ret, err := rs.getVerifiedData(ctx, key)
 	if err != nil {
 		ret, err = rs.baseStorageService.GetByHash(ctx, key)
@@ -109,7 +109,7 @@ func (rs *RedisStorageService) GetByHash(ctx context.Context, key []byte) ([]byt
 			return nil, err
 		}
 
-		err = rs.client.Set(ctx, string(key), rs.signMessage(ret), rs.redisConfig.Expiration).Err()
+		err = rs.client.Set(ctx, string(key.Bytes()), rs.signMessage(ret), rs.redisConfig.Expiration).Err()
 		if err != nil {
 			return nil, err
 		}
@@ -125,7 +125,9 @@ func (rs *RedisStorageService) Put(ctx context.Context, value []byte, timeout ui
 	if err != nil {
 		return err
 	}
-	err = rs.client.Set(ctx, string(dastree.Hash(value)), rs.signMessage(value), rs.redisConfig.Expiration).Err()
+	err = rs.client.Set(
+		ctx, string(dastree.Hash(value).Bytes()), rs.signMessage(value), rs.redisConfig.Expiration,
+	).Err()
 	if err != nil {
 		log.Error("das.RedisStorageService.Store", "err", err)
 	}
diff --git a/das/redundant_simple_das_reader.go b/das/redundant_simple_das_reader.go
index f4ceb549a0..ed8b8c1096 100644
--- a/das/redundant_simple_das_reader.go
+++ b/das/redundant_simple_das_reader.go
@@ -7,6 +7,7 @@ import (
 	"context"
 	"errors"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/nitro/arbstate"
 	"github.com/offchainlabs/nitro/util/pretty"
@@ -25,8 +26,8 @@ type rsdrResponse struct {
 	err  error
 }
 
-func (r RedundantSimpleDASReader) GetByHash(ctx context.Context, hash []byte) ([]byte, error) {
-	log.Trace("das.RedundantSimpleDASReader.GetByHash", "key", pretty.FirstFewBytes(hash), "this", r)
+func (r RedundantSimpleDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
+	log.Trace("das.RedundantSimpleDASReader.GetByHash", "key", pretty.PrettyHash(hash), "this", r)
 
 	subCtx, cancel := context.WithCancel(ctx)
 	defer cancel()
diff --git a/das/redundant_storage_service.go b/das/redundant_storage_service.go
index e348dd4288..74d32bd819 100644
--- a/das/redundant_storage_service.go
+++ b/das/redundant_storage_service.go
@@ -7,8 +7,8 @@ import (
 	"context"
 	"errors"
 	"sync"
-	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/nitro/arbstate"
 	"github.com/offchainlabs/nitro/util/pretty"
@@ -32,8 +32,8 @@ type readResponse struct {
 	err  error
 }
 
-func (r *RedundantStorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) {
-	log.Trace("das.RedundantStorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", r)
+func (r *RedundantStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) {
+	log.Trace("das.RedundantStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", r)
 	subCtx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	var anyError error
@@ -61,7 +61,7 @@ func (r *RedundantStorageService) GetByHash(ctx context.Context, key []byte) ([]
 }
 
 func (r *RedundantStorageService) Put(ctx context.Context, data []byte, expirationTime uint64) error {
-	log.Trace("das.RedundantStorageService.Store", "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(expirationTime), 0), "this", r)
+	logPut("das.RedundantStorageService.Store", data, expirationTime, r)
 	var wg sync.WaitGroup
 	var errorMutex sync.Mutex
 	var anyError error
diff --git a/das/restful_client.go b/das/restful_client.go
index ecdaa7626e..8d00f82892 100644
--- a/das/restful_client.go
+++ b/das/restful_client.go
@@ -13,6 +13,7 @@ import (
 	"net/http"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/offchainlabs/nitro/arbstate"
 	"github.com/offchainlabs/nitro/das/dastree"
 )
@@ -38,10 +39,7 @@ func NewRestfulDasClientFromURL(url string) (*RestfulDasClient, error) {
 	}, nil
 }
 
-func (c *RestfulDasClient) GetByHash(ctx context.Context, hash []byte) ([]byte, error) {
-	if len(hash) != 32 {
-		return nil, fmt.Errorf("Hash must be 32 bytes long, was %d", len(hash))
-	}
+func (c *RestfulDasClient) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
 	res, err := http.Get(c.url + getByHashRequestPath + EncodeStorageServiceKey(hash))
 	if err != nil {
 		return nil, err
@@ -66,7 +64,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash []byte) ([]byte,
 	if err != nil {
 		return nil, err
 	}
-	if !bytes.Equal(hash, dastree.Hash(decodedBytes)) {
+	if hash != dastree.Hash(decodedBytes) {
 		return nil, arbstate.ErrHashMismatch
 	}
 
diff --git a/das/restful_server.go b/das/restful_server.go
index 1d12482ef7..e1ae898f3c 100644
--- a/das/restful_server.go
+++ b/das/restful_server.go
@@ -13,6 +13,7 @@ import (
 	"path"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/nitro/arbstate"
 	"github.com/offchainlabs/nitro/util/pretty"
@@ -132,7 +133,7 @@ func (rds *RestfulDasServer) GetByHashHandler(w http.ResponseWriter, r *http.Req
 		return
 	}
 
-	responseData, err := rds.storage.GetByHash(r.Context(), hashBytes[:32])
+	responseData, err := rds.storage.GetByHash(r.Context(), common.BytesToHash(hashBytes[:32]))
 	if err != nil {
 		log.Warn("Unable to find data", "path", requestPath, "err", err)
 		w.WriteHeader(http.StatusNotFound)
diff --git a/das/retry_wrapper.go b/das/retry_wrapper.go
index 0767127828..0f0ba9830d 100644
--- a/das/retry_wrapper.go
+++ b/das/retry_wrapper.go
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/cenkalti/backoff/v4"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/offchainlabs/nitro/arbstate"
 )
 
@@ -27,7 +28,7 @@ func NewRetryWrapper(dataAvailabilityService DataAvailabilityService) DataAvaila
 	}
 }
 
-func (w *RetryWrapper) GetByHash(ctx context.Context, hash []byte) ([]byte, error) {
+func (w *RetryWrapper) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
 	var res []byte
 	err := backoff.Retry(func() error {
 		if ctx.Err() != nil {
diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go
index f07f0747d4..65d19ee00d 100644
--- a/das/s3_storage_service.go
+++ b/das/s3_storage_service.go
@@ -18,6 +18,7 @@ import (
 	"github.com/offchainlabs/nitro/das/dastree"
 	"github.com/offchainlabs/nitro/util/pretty"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 
 	flag "github.com/spf13/pflag"
@@ -64,9 +65,12 @@ type S3StorageService struct {
 }
 
 func NewS3StorageService(config S3StorageServiceConfig) (StorageService, error) {
+	credCache := aws.NewCredentialsCache(
+		credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey, ""),
+	)
 	client := s3.New(s3.Options{
 		Region:      config.Region,
-		Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey, "")),
+		Credentials: credCache,
 	})
 	return &S3StorageService{
 		client: client,
@@ -78,8 +82,8 @@ func NewS3StorageService(config S3StorageServiceConfig) (StorageService, error)
 	}, nil
 }
 
-func (s3s *S3StorageService) GetByHash(ctx context.Context, key []byte) ([]byte, error) {
-	log.Trace("das.S3StorageService.GetByHash", "key", pretty.FirstFewBytes(key), "this", s3s)
+func (s3s *S3StorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) {
+	log.Trace("das.S3StorageService.GetByHash", "key", pretty.PrettyHash(key), "this", s3s)
 
 	buf := manager.NewWriteAtBuffer([]byte{})
 	_, err := s3s.downloader.Download(ctx, buf, &s3.GetObjectInput{
diff --git a/das/sign_after_store_das.go b/das/sign_after_store_das.go
index f4f53684b1..66a66b84d4 100644
--- a/das/sign_after_store_das.go
+++ b/das/sign_after_store_das.go
@@ -11,6 +11,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
 
@@ -157,8 +158,9 @@ func (d *SignAfterStoreDAS) Store(
 		}
 	}
 
-	c = &arbstate.DataAvailabilityCertificate{}
-	copy(c.DataHash[:], dastree.Hash(message))
+	c = &arbstate.DataAvailabilityCertificate{
+		DataHash: dastree.Hash(message),
+	}
 
 	c.Timeout = timeout
 	c.SignersMask = 1 // The aggregator will override this if we're part of a committee.
@@ -183,7 +185,7 @@ func (d *SignAfterStoreDAS) Store(
 	return c, nil
 }
 
-func (d *SignAfterStoreDAS) GetByHash(ctx context.Context, hash []byte) ([]byte, error) {
+func (d *SignAfterStoreDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
 	return d.storageService.GetByHash(ctx, hash)
 }
 
diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go
index 57c67b4171..e60d149de8 100644
--- a/das/simple_das_reader_aggregator.go
+++ b/das/simple_das_reader_aggregator.go
@@ -4,7 +4,6 @@
 package das
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -13,6 +12,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/nitro/arbstate"
 	"github.com/offchainlabs/nitro/das/dastree"
@@ -174,8 +174,8 @@ type SimpleDASReaderAggregator struct {
 	statMessages chan readerStatMessage
 }
 
-func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash []byte) ([]byte, error) {
-	log.Trace("das.SimpleDASReaderAggregator.GetByHash", "key", pretty.FirstFewBytes(hash), "this", a)
+func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
+	log.Trace("das.SimpleDASReaderAggregator.GetByHash", "key", pretty.PrettyHash(hash), "this", a)
 
 	type dataErrorPair struct {
 		data []byte
@@ -237,7 +237,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash []byte)
 }
 
 func (a *SimpleDASReaderAggregator) tryGetByHash(
-	ctx context.Context, hash []byte, reader arbstate.DataAvailabilityReader,
+	ctx context.Context, hash common.Hash, reader arbstate.DataAvailabilityReader,
 ) ([]byte, error) {
 	stat := readerStatMessage{reader: reader}
 	stat.success = false
@@ -245,7 +245,7 @@ func (a *SimpleDASReaderAggregator) tryGetByHash(
 	start := time.Now()
 	result, err := reader.GetByHash(ctx, hash)
 	if err == nil {
-		if bytes.Equal(dastree.Hash(result), hash) {
+		if dastree.Hash(result) == hash {
 			stat.success = true
 		} else {
 			err = fmt.Errorf("SimpleDASReaderAggregator got result from reader(%v) not matching hash", reader)
diff --git a/das/storage_service.go b/das/storage_service.go
index 045f11ae38..d2732dfb98 100644
--- a/das/storage_service.go
+++ b/das/storage_service.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/offchainlabs/nitro/arbstate"
 )
@@ -24,13 +25,17 @@ type StorageService interface {
 	HealthCheck(ctx context.Context) error
 }
 
-func EncodeStorageServiceKey(b []byte) string {
-	return hexutil.Encode(b)[2:]
+func EncodeStorageServiceKey(key common.Hash) string {
+	return key.Hex()[2:]
 }
 
-func DecodeStorageServiceKey(input string) ([]byte, error) {
-	if strings.HasPrefix(input, "0x") {
-		return hexutil.Decode(input)
+func DecodeStorageServiceKey(input string) (common.Hash, error) {
+	if !strings.HasPrefix(input, "0x") {
+		input = "0x" + input
 	}
-	return hexutil.Decode("0x" + input)
+	key, err := hexutil.Decode(input)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return common.BytesToHash(key), nil
}
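
EncodeStorageServiceKey and DecodeStorageServiceKey are now inverses over common.Hash: encoding drops the "0x" prefix, and decoding tolerates both prefixed and bare input. A quick round-trip sketch under the new signatures:

    key := dastree.Hash([]byte("some batch"))
    s := EncodeStorageServiceKey(key)       // 64 hex characters, no "0x"
    back, err := DecodeStorageServiceKey(s) // "0x"-prefixed input also accepted
    // err == nil && back == key for any well-formed 32-byte key
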
diff --git a/das/store_signing.go b/das/store_signing.go
index 5cf1e5b493..aa4ab6717d 100644
--- a/das/store_signing.go
+++ b/das/store_signing.go
@@ -42,7 +42,7 @@ func DasRecoverSigner(data []byte, timeout uint64, sig []byte) (common.Address,
 func dasStoreHash(data []byte, timeout uint64) []byte {
 	var buf8 [8]byte
 	binary.BigEndian.PutUint64(buf8[:], timeout)
-	return dastree.Hash(uniquifyingPrefix, buf8[:], data)
+	return dastree.HashBytes(uniquifyingPrefix, buf8[:], data)
 }
 
 type StoreSigningDAS struct {
diff --git a/das/sync_from_l1_chain.go b/das/sync_from_l1_chain.go
index c8bea1cff9..b71efcef4d 100644
--- a/das/sync_from_l1_chain.go
+++ b/das/sync_from_l1_chain.go
@@ -105,7 +105,7 @@ func SyncStorageServiceFromChain(
 			return err
 		}
 		for hash, contents := range preimages {
-			_, err := syncTo.GetByHash(ctx, hash.Bytes())
+			_, err := syncTo.GetByHash(ctx, hash)
 			if errors.Is(err, ErrNotFound) {
 				if err := syncTo.Put(ctx, contents, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), expirationTime)); err != nil {
 					return err
diff --git a/das/timeout_wrapper.go b/das/timeout_wrapper.go
index 37297925c6..031ff6330f 100644
--- a/das/timeout_wrapper.go
+++ b/das/timeout_wrapper.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/offchainlabs/nitro/arbstate"
 )
 
@@ -23,7 +24,7 @@ func NewTimeoutWrapper(dataAvailabilityService DataAvailabilityService, t time.D
 	}
 }
 
-func (w *TimeoutWrapper) GetByHash(ctx context.Context, hash []byte) ([]byte, error) {
+func (w *TimeoutWrapper) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
 	deadlineCtx, cancel := context.WithDeadline(ctx, time.Now().Add(w.t))
 	// For Retrieve we want fast cancellation of all goroutines started by
 	// the aggregator as soon as one returns.
diff --git a/util/pretty/pretty_printing.go b/util/pretty/pretty_printing.go
index a0df13ea36..4d22459693 100644
--- a/util/pretty/pretty_printing.go
+++ b/util/pretty/pretty_printing.go
@@ -3,7 +3,11 @@
 
 package pretty
 
-import "fmt"
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+)
 
 func FirstFewBytes(b []byte) string {
 	if len(b) < 9 {
@@ -13,6 +17,10 @@ func FirstFewBytes(b []byte) string {
 	}
 }
 
+func PrettyHash(hash common.Hash) string {
+	return FirstFewBytes(hash.Bytes())
+}
+
 func FirstFewChars(s string) string {
 	if len(s) < 9 {
 		return fmt.Sprintf("\"%s\"", s)

From c86340d36cdd96aba4a29d3b1cd55f97a07a377a Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Thu, 30 Jun 2022 17:39:21 -0500
Subject: [PATCH 03/23] remove copies

---
 arbstate/das_reader.go      | 6 +++---
 das/aggregator.go           | 4 +---
 das/sign_after_store_das.go | 4 +---
 3 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go
index b75388689c..0540c898d6 100644
--- a/arbstate/das_reader.go
+++ b/arbstate/das_reader.go
@@ -169,12 +169,12 @@ func (keyset *DataAvailabilityKeyset) Serialize(wr io.Writer) error {
 	return nil
 }
 
-func (keyset *DataAvailabilityKeyset) Hash() ([]byte, error) {
+func (keyset *DataAvailabilityKeyset) Hash() (common.Hash, error) {
 	wr := bytes.NewBuffer([]byte{})
 	if err := keyset.Serialize(wr); err != nil {
-		return nil, err
+		return common.Hash{}, err
 	}
-	return crypto.Keccak256(wr.Bytes()), nil
+	return crypto.Keccak256Hash(wr.Bytes()), nil
 }
 
 func DeserializeKeyset(rd io.Reader) (*DataAvailabilityKeyset, error) {
diff --git a/das/aggregator.go b/das/aggregator.go
index 13d6bebfe3..87059a1b93 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -137,12 +137,10 @@ func NewAggregatorWithSeqInboxCaller(
 	if err := keyset.Serialize(ksBuf); err != nil {
 		return nil, err
 	}
-	keysetHashBuf, err := keyset.Hash()
+	keysetHash, err := keyset.Hash()
 	if err != nil {
 		return nil, err
 	}
-	var keysetHash [32]byte
-	copy(keysetHash[:], keysetHashBuf)
 	if config.DumpKeyset {
 		fmt.Printf("Keyset: %s\n", hexutil.Encode(ksBuf.Bytes()))
 		fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:]))
diff --git a/das/sign_after_store_das.go b/das/sign_after_store_das.go
index 66a66b84d4..006d66e580 100644
--- a/das/sign_after_store_das.go
+++ b/das/sign_after_store_das.go
@@ -118,12 +118,10 @@ func NewSignAfterStoreDASWithSeqInboxCaller(
 	if err := keyset.Serialize(ksBuf); err != nil {
 		return nil, err
 	}
-	ksHashBuf, err := keyset.Hash()
+	ksHash, err := keyset.Hash()
 	if err != nil {
 		return nil, err
 	}
-	var ksHash [32]byte
-	copy(ksHash[:], ksHashBuf)
 
 	var bpVerifier *BatchPosterVerifier
 	if seqInboxCaller != nil {
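
With Hash() returning a common.Hash, the keyset hash flows to its uses as a plain value, and the deleted [32]byte temporaries and copy() calls above fall away. A sketch of the pattern the two call sites now share (variable names hypothetical):

    ksHash, err := keyset.Hash() // a common.Hash, or an error if serialization fails
    if err != nil {
        return nil, err
    }
    cert.KeysetHash = ksHash     // direct value assignment, no copy(dst[:], src)
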
From f7d9d242acbac9c29d3b84f1fa6067cd7c8f17c0 Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Thu, 30 Jun 2022 22:18:57 -0500
Subject: [PATCH 04/23] version field

---
 arbstate/das_reader.go      | 24 ++++++++++++++-
 arbstate/inbox.go           | 25 ++++++++++++----
 cmd/replay/main.go          |  3 +-
 das/aggregator.go           |  9 ++++--
 das/das.go                  | 53 +++++----------------------
 das/dasrpc/dasRpcClient.go  |  1 +
 das/dasrpc/dasRpcServer.go  |  2 ++
 das/dastree/dastree.go      | 56 +++++++++++++++++++++++++++----
 das/dastree/dastree_test.go | 59 +++++++++++++++++++++++++++++++
 das/sign_after_store_das.go |  3 +-
 das/util.go                 |  5 ++--
 11 files changed, 177 insertions(+), 63 deletions(-)
 create mode 100644 das/dastree/dastree_test.go

diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go
index 0540c898d6..08bc130f5b 100644
--- a/arbstate/das_reader.go
+++ b/arbstate/das_reader.go
@@ -31,6 +31,10 @@ var ErrHashMismatch = errors.New("Result does not match expected hash")
 // which will retrieve the full batch data.
 const DASMessageHeaderFlag byte = 0x80
 
+// Indicates that this DAS certificate data employs the new merkelization strategy.
+// Ignored when DASMessageHeaderFlag is not set.
+const TreeDASMessageHeaderFlag byte = 0x08
+
 // Indicates that this message was authenticated by L1. Currently unused.
 const L1AuthenticatedMessageHeaderFlag byte = 0x40
 
@@ -44,6 +48,10 @@ func IsDASMessageHeaderByte(header byte) bool {
 	return (DASMessageHeaderFlag & header) > 0
 }
 
+func IsTreeDASMessageHeaderByte(header byte) bool {
+	return (TreeDASMessageHeaderFlag & header) > 0
+}
+
 func IsZeroheavyEncodedHeaderByte(header byte) bool {
 	return (ZeroheavyMessageHeaderFlag & header) > 0
 }
@@ -58,6 +66,7 @@ type DataAvailabilityCertificate struct {
 	Timeout     uint64
 	SignersMask uint64
 	Sig         blsSignatures.Signature
+	Version     uint8
 }
 
 func DeserializeDASCertFrom(rd io.Reader) (c *DataAvailabilityCertificate, err error) {
@@ -89,6 +98,15 @@ func DeserializeDASCertFrom(rd io.Reader) (c *DataAvailabilityCertificate, err e
 	}
 	c.Timeout = binary.BigEndian.Uint64(timeoutBuf[:])
 
+	if IsTreeDASMessageHeaderByte(header) {
+		var versionBuf [1]byte
+		_, err = io.ReadFull(r, versionBuf[:])
+		if err != nil {
+			return nil, err
+		}
+		c.Version = versionBuf[0]
+	}
+
 	var signersMaskBuf [8]byte
 	_, err = io.ReadFull(r, signersMaskBuf[:])
 	if err != nil {
@@ -110,13 +128,17 @@ func DeserializeDASCertFrom(rd io.Reader) (c *DataAvailabilityCertificate, err e
 }
 
 func (c *DataAvailabilityCertificate) SerializeSignableFields() []byte {
-	buf := make([]byte, 0, 32+8)
+	buf := make([]byte, 0, 32+9)
 	buf = append(buf, c.DataHash[:]...)
 
 	var intData [8]byte
 	binary.BigEndian.PutUint64(intData[:], c.Timeout)
 	buf = append(buf, intData[:]...)
 
+	if c.Version != 0 {
+		buf = append(buf, c.Version)
+	}
+
 	return buf
 }
 
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index 0c7af5b9ca..2e00e10dd6 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -13,6 +13,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 
+	"github.com/offchainlabs/nitro/das/dastree"
 	"github.com/offchainlabs/nitro/zeroheavy"
 
 	"github.com/ethereum/go-ethereum/log"
@@ -139,7 +140,7 @@ func RecoverPayloadFromDasBatch(
 		return nil, nil
 	}
 	keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash)
-	if err == nil && !bytes.Equal(cert.KeysetHash[:], crypto.Keccak256(keysetPreimage)) {
+	if err == nil && cert.KeysetHash != crypto.Keccak256Hash(keysetPreimage) {
 		err = ErrHashMismatch
 	}
 	if err != nil {
@@ -147,7 +148,7 @@ func RecoverPayloadFromDasBatch(
 		return nil, err
 	}
 	if preimages != nil {
-		preimages[common.BytesToHash(cert.KeysetHash[:])] = keysetPreimage
+		preimages[cert.KeysetHash] = keysetPreimage
 	}
 	keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage))
 	if err != nil {
@@ -164,8 +165,14 @@ func RecoverPayloadFromDasBatch(
 		log.Error("Data availability cert expires too soon", "err", "")
 		return nil, nil
 	}
-	payload, err := dasReader.GetByHash(ctx, cert.DataHash)
-	if err == nil && !bytes.Equal(crypto.Keccak256(payload), cert.DataHash[:]) {
+
+	newDataHash := cert.DataHash
+	if cert.Version == 0 {
+		newDataHash = crypto.Keccak256Hash(cert.DataHash[:])
+	}
+
+	payload, err := dasReader.GetByHash(ctx, newDataHash)
+	if err == nil && crypto.Keccak256Hash(payload) != cert.DataHash {
 		err = ErrHashMismatch
 	}
 	if err != nil {
@@ -173,7 +180,15 @@ func RecoverPayloadFromDasBatch(
 		return nil, err
 	}
 	if preimages != nil {
-		preimages[common.BytesToHash(cert.DataHash[:])] = payload
+		if cert.Version == 0 {
+			preimages[cert.DataHash] = payload
+			preimages[newDataHash] = cert.DataHash[:]
+		} else {
+			record := func(key common.Hash, value []byte) {
+				preimages[key] = value
+			}
+			dastree.RecordHash(record, payload)
+		}
 	}
 
 	return payload, nil
diff --git a/cmd/replay/main.go b/cmd/replay/main.go
index 82ad46516e..906d119748 100644
--- a/cmd/replay/main.go
+++ b/cmd/replay/main.go
@@ -19,6 +19,7 @@ import (
 	"github.com/offchainlabs/nitro/arbos/arbosState"
 	"github.com/offchainlabs/nitro/arbos/burn"
 	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/das/dastree"
 	"github.com/offchainlabs/nitro/wavmio"
 )
 
@@ -86,7 +87,7 @@ type PreimageDASReader struct {
 }
 
 func (dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
-	return wavmio.ResolvePreImage(hash), nil
+	return dastree.Content(hash, wavmio.ResolvePreImage)
 }
 
 func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error {
diff --git a/das/aggregator.go b/das/aggregator.go
index 87059a1b93..bbfb58bded 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -255,7 +255,9 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64,
 				return
 			}
 
-			verified, err := blsSignatures.VerifySignature(cert.Sig, serializeSignableFields(cert), d.pubKey)
+			verified, err := blsSignatures.VerifySignature(
+				cert.Sig, cert.SerializeSignableFields(), d.pubKey,
+			)
 			if err != nil {
 				responses <- storeResponse{d, nil, err}
 				return
@@ -315,7 +317,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64,
 	aggCert.Timeout = timeout
 	aggCert.KeysetHash = a.keysetHash
 
-	verified, err := blsSignatures.VerifySignature(aggCert.Sig, serializeSignableFields(&aggCert), aggPubKey)
+	// Rachel TODO: count up requiredServicesForStore number on each version
+	aggCert.Version = 1
+
+	verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey)
 	if err != nil {
 		return nil, err
 	}
diff --git a/das/das.go b/das/das.go
index 743fd11491..29d680cf66 100644
--- a/das/das.go
+++ b/das/das.go
@@ -55,37 +55,6 @@ var DefaultDataAvailabilityConfig = DataAvailabilityConfig{
 	RestfulClientAggregatorConfig: DefaultRestfulClientAggregatorConfig,
 }
 
-/* TODO put these checks somewhere
-func (c *DataAvailabilityConfig) Mode() (DataAvailabilityMode, error) {
-	if c.ModeImpl == "" {
-		return 0, errors.New("--data-availability.mode missing")
-	}
-
-	if c.ModeImpl == OnchainDataAvailabilityString {
-		return OnchainDataAvailability, nil
-	}
-
-	if c.ModeImpl == DASDataAvailabilityString {
-		if c.DASConfig.LocalConfig.DataDir == "" || (c.DASConfig.KeyDir == "" && c.DASConfig.PrivKey == "") {
-			flag.Usage()
-			return 0, errors.New("--data-availability.das.local.data-dir and --data-availability.das.key-dir must be specified if mode is set to das")
-		}
-		return DASDataAvailability, nil
-	}
-
-	if c.ModeImpl == AggregatorDataAvailabilityString {
-		if reflect.DeepEqual(c.AggregatorConfig, DefaultAggregatorConfig) {
-			flag.Usage()
-			return 0, errors.New("--data-availability.aggregator.X config options must be specified if mode is set to aggregator")
-		}
-		return AggregatorDataAvailability, nil
-	}
-
-	flag.Usage()
-	return 0, errors.New("--data-availability.mode " + c.ModeImpl + " not recognized")
-}
-*/
-
 func OptionalAddressFromString(s string) (*common.Address, error) {
 	if s == "none" {
 		return nil, nil
@@ -125,25 +94,17 @@ func DataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "L1 address of SequencerInbox contract")
 }
 
-func serializeSignableFields(c *arbstate.DataAvailabilityCertificate) []byte {
-	buf := make([]byte, 0, 32+8)
-	buf = append(buf, c.DataHash[:]...)
-
-	var intData [8]byte
-	binary.BigEndian.PutUint64(intData[:], c.Timeout)
-	buf = append(buf, intData[:]...)
-
-	return buf
-}
-
 func Serialize(c *arbstate.DataAvailabilityCertificate) []byte {
-	buf := make([]byte, 0)
-	buf = append(buf, arbstate.DASMessageHeaderFlag)
+	flags := arbstate.DASMessageHeaderFlag
+	if c.Version != 0 {
+		flags |= arbstate.TreeDASMessageHeaderFlag
+	}
 
+	buf := make([]byte, 0)
+	buf = append(buf, flags)
 	buf = append(buf, c.KeysetHash[:]...)
-
-	buf = append(buf, serializeSignableFields(c)...)
+	buf = append(buf, c.SerializeSignableFields()...)
 
 	var intData [8]byte
 	binary.BigEndian.PutUint64(intData[:], c.SignersMask)
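
For reference, the wire format these two functions now produce, read straight off the appends above (offsets assume a version-1 certificate, where the tree flag and version byte are present):

    // [0]      flags: DASMessageHeaderFlag | TreeDASMessageHeaderFlag == 0x88
    // [1:33]   keyset hash
    // [33:65]  data hash (the dastree root)
    // [65:73]  timeout, big-endian uint64
    // [73]     version byte (serialized only when Version != 0)
    // [74:82]  signers mask, big-endian uint64
    // [82:]    BLS signature bytes

A version-0 certificate omits the tree flag and the version byte, which keeps pre-upgrade messages parseable.
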
diff --git a/das/dasrpc/dasRpcClient.go b/das/dasrpc/dasRpcClient.go
index 080c420b75..9f93fd2119 100644
--- a/das/dasrpc/dasRpcClient.go
+++ b/das/dasrpc/dasRpcClient.go
@@ -65,6 +65,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
 		SignersMask: uint64(ret.SignersMask),
 		Sig:         respSig,
 		KeysetHash:  common.BytesToHash(ret.KeysetHash),
+		Version:     byte(ret.Version),
 	}, nil
 }
 
diff --git a/das/dasrpc/dasRpcServer.go b/das/dasrpc/dasRpcServer.go
index c011a081c3..a544120e4a 100644
--- a/das/dasrpc/dasRpcServer.go
+++ b/das/dasrpc/dasRpcServer.go
@@ -63,6 +63,7 @@ type StoreResult struct {
 	SignersMask hexutil.Uint64 `json:"signersMask,omitempty"`
 	KeysetHash  hexutil.Bytes  `json:"keysetHash,omitempty"`
 	Sig         hexutil.Bytes  `json:"sig,omitempty"`
+	Version     hexutil.Uint64 `json:"version,omitempty"`
 }
 
 func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) {
@@ -78,6 +79,7 @@ func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, time
 		Timeout:     hexutil.Uint64(cert.Timeout),
 		SignersMask: hexutil.Uint64(cert.SignersMask),
 		Sig:         blsSignatures.SignatureToBytes(cert.Sig),
+		Version:     hexutil.Uint64(cert.Version),
 	}, nil
 }
 
diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go
index b61d2119b3..20dab46870 100644
--- a/das/dastree/dastree.go
+++ b/das/dastree/dastree.go
@@ -4,6 +4,8 @@
 package dastree
 
 import (
+	"fmt"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/offchainlabs/nitro/util/arbmath"
@@ -13,7 +15,7 @@ const binSize = 64 * 1024 // 64 kB
 
 type bytes32 = common.Hash
 
-func Hash(preimage ...[]byte) bytes32 {
+func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 	// Algorithm
 	//  1. split the preimage into 64kB bins and hash them to produce the tree's leaves
 	//  2. repeatedly hash pairs over and over, bubbling up any odd ones out, to form the root
@@ -23,21 +25,28 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 	//            r     <=> hash(hash(0, 1), 2)    step 2
 	//           / \
 	//          *   2   <=> hash(0, 1), 2          step 1
 	//         / \
 	//        0   1     <=> 0, 1, 2                step 0
+	//
+	//  Intermediate hashes like '*' from above may be recorded via the `record` closure
+	//
 	unrolled := []byte{}
 	for _, slice := range preimage {
 		unrolled = append(unrolled, slice...)
 	}
 	if len(unrolled) == 0 {
-		return crypto.Keccak256Hash([]byte{})
+		return crypto.Keccak256Hash(crypto.Keccak256([]byte{}))
 	}
 
 	length := int64(len(unrolled))
 	leaves := []bytes32{}
 	for bin := int64(0); bin < length; bin += binSize {
 		end := arbmath.MinInt(bin+binSize, length)
-		keccak := crypto.Keccak256Hash(unrolled[bin:end])
-		leaves = append(leaves, keccak)
+		content := unrolled[bin:end]
+		innerKeccak := crypto.Keccak256Hash(content)
+		outerKeccak := crypto.Keccak256Hash(innerKeccak.Bytes())
+		record(outerKeccak, innerKeccak.Bytes())
+		record(innerKeccak, content)
+		leaves = append(leaves, outerKeccak)
 	}
 
 	layer := leaves
@@ -46,7 +55,11 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 		after := prior/2 + prior%2
 		paired := make([]bytes32, after)
 		for i := 0; i < prior-1; i += 2 {
-			paired[i/2] = crypto.Keccak256Hash(layer[i][:], layer[i+1][:])
+			leftChild := layer[i].Bytes()
+			rightChild := layer[i+1].Bytes()
+			parent := crypto.Keccak256Hash(leftChild, rightChild)
+			record(parent, append(leftChild, rightChild...))
+			paired[i/2] = parent
 		}
 		if prior%2 == 1 {
 			paired[after-1] = layer[prior-1]
@@ -56,6 +69,39 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 		}
 		layer = paired
 	}
 	return layer[0]
 }
 
+func Hash(preimage ...[]byte) bytes32 {
+	// Merkelizes without recording anything. All but the replay binary's DAS will call this
+	return RecordHash(func(bytes32, []byte) {}, preimage...)
+}
+
 func HashBytes(preimage ...[]byte) []byte {
 	return Hash(preimage...).Bytes()
 }
+
+func Content(root common.Hash, oracle func(common.Hash) []byte) ([]byte, error) {
+	leaves := []common.Hash{}
+	stack := []common.Hash{root}
+
+	for len(stack) > 0 {
+		node := stack[len(stack)-1]
+		under := oracle(node)
+		stack = stack[:len(stack)-1]
+
+		switch len(under) {
+		case 32:
+			leaves = append(leaves, common.BytesToHash(under))
+		case 64:
+			prior := common.BytesToHash(under[:32]) // we want to expand leftward,
+			after := common.BytesToHash(under[32:]) // so we reverse their order
+			stack = append(stack, after, prior)
+		default:
+			return nil, fmt.Errorf("failed to resolve preimage %v", node)
+		}
+	}
+
+	preimage := []byte{}
+	for _, leaf := range leaves {
+		preimage = append(preimage, oracle(leaf)...)
+	}
+	return preimage, nil
+}
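
Taken together, RecordHash and Content form a round trip: one merkelizes while streaming every preimage to a closure, the other expands a root back into the original bytes by querying an oracle. A minimal sketch of that loop, mirroring how the replay binary's PreimageDASReader consumes these trees (payload here is any []byte):

    preimages := make(map[common.Hash][]byte)
    record := func(key common.Hash, value []byte) { preimages[key] = value }

    root := dastree.RecordHash(record, payload) // hash + save every node's preimage

    oracle := func(key common.Hash) []byte { return preimages[key] }
    data, err := dastree.Content(root, oracle)  // walk root -> leaves -> bins
    // on success, bytes.Equal(data, payload) holds
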
diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go
new file mode 100644
index 0000000000..b6ec69e15a
--- /dev/null
+++ b/das/dastree/dastree_test.go
@@ -0,0 +1,59 @@
+// Copyright 2022, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package dastree
+
+import (
+	"bytes"
+	"math/rand"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/offchainlabs/nitro/util/pretty"
+	"github.com/offchainlabs/nitro/util/testhelpers"
+)
+
+func TestDASTree(t *testing.T) {
+	store := make(map[bytes32][]byte)
+	tests := [][]byte{{}, {0x32}, crypto.Keccak256(), crypto.Keccak256([]byte{0x32})}
+	for i := 0; i < 8; i++ {
+		large := make([]byte, rand.Intn(8*binSize))
+		tests = append(tests, large)
+	}
+
+	record := func(key bytes32, value []byte) {
+		store[key] = value
+	}
+	oracle := func(key bytes32) []byte {
+		preimage, ok := store[key]
+		if !ok {
+			t.Error("no preimage for key", key)
+			return []byte{}
+		}
+		return preimage
+	}
+
+	for _, test := range tests {
+		hash := RecordHash(record, test)
+		store[hash] = test
+	}
+
+	for key, value := range store {
+		preimage, err := Content(key, oracle)
+		Require(t, err, key)
+
+		if !bytes.Equal(preimage, value) {
+			Fail(t, "incorrect preimage", pretty.FirstFewBytes(preimage), pretty.FirstFewBytes(value))
+		}
+	}
+}
+
+func Require(t *testing.T, err error, printables ...interface{}) {
+	t.Helper()
+	testhelpers.RequireImpl(t, err, printables...)
+}
+
+func Fail(t *testing.T, printables ...interface{}) {
+	t.Helper()
+	testhelpers.FailImpl(t, printables...)
+}
diff --git a/das/sign_after_store_das.go b/das/sign_after_store_das.go
index 006d66e580..d67e66b9c1 100644
--- a/das/sign_after_store_das.go
+++ b/das/sign_after_store_das.go
@@ -157,10 +157,11 @@ func (d *SignAfterStoreDAS) Store(
 	}
 
 	c = &arbstate.DataAvailabilityCertificate{
+		Timeout:  timeout,
 		DataHash: dastree.Hash(message),
+		Version:  1,
 	}
 
-	c.Timeout = timeout
 	c.SignersMask = 1 // The aggregator will override this if we're part of a committee.
 
 	fields := c.SerializeSignableFields()
diff --git a/das/util.go b/das/util.go
index fac15389cf..7142bf2d62 100644
--- a/das/util.go
+++ b/das/util.go
@@ -11,8 +11,9 @@ import (
 	"github.com/offchainlabs/nitro/util/pretty"
 )
 
-func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader) {
+func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader, more ...interface{}) {
 	log.Trace(
-		store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", reader,
+		store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0),
+		"this", reader, more,
 	)
 }

From fa05e22add7e6417c7c9ce96e72a6dd56a487000 Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Fri, 1 Jul 2022 09:14:28 -0500
Subject: [PATCH 05/23] fix test

---
 das/dastree/dastree.go      | 10 +++++++---
 das/dastree/dastree_test.go | 13 ++++++++-----
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go
index 20dab46870..0bf57fb2b5 100644
--- a/das/dastree/dastree.go
+++ b/das/dastree/dastree.go
@@ -34,7 +34,11 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 		unrolled = append(unrolled, slice...)
 	}
 	if len(unrolled) == 0 {
-		return crypto.Keccak256Hash(crypto.Keccak256([]byte{}))
+		innerKeccak := crypto.Keccak256Hash([]byte{})
+		outerKeccak := crypto.Keccak256Hash(innerKeccak.Bytes())
+		record(outerKeccak, innerKeccak.Bytes())
+		record(innerKeccak, []byte{})
+		return outerKeccak
 	}
 
 	length := int64(len(unrolled))
@@ -84,8 +88,8 @@ func Content(root common.Hash, oracle func(common.Hash) []byte) ([]byte, error)
 
 	for len(stack) > 0 {
 		node := stack[len(stack)-1]
-		under := oracle(node)
 		stack = stack[:len(stack)-1]
+		under := oracle(node)
 
 		switch len(under) {
 		case 32:
@@ -95,7 +99,7 @@ func Content(root common.Hash, oracle func(common.Hash) []byte) ([]byte, error)
 			after := common.BytesToHash(under[32:]) // so we reverse their order
 			stack = append(stack, after, prior)
 		default:
-			return nil, fmt.Errorf("failed to resolve preimage %v", node)
+			return nil, fmt.Errorf("failed to resolve preimage %v %v", len(under), node)
 		}
 	}
 
diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go
index b6ec69e15a..656ce29fc4 100644
--- a/das/dastree/dastree_test.go
+++ b/das/dastree/dastree_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/offchainlabs/nitro/util/colors"
 	"github.com/offchainlabs/nitro/util/pretty"
 	"github.com/offchainlabs/nitro/util/testhelpers"
 )
@@ -16,29 +17,31 @@ import (
 func TestDASTree(t *testing.T) {
 	store := make(map[bytes32][]byte)
 	tests := [][]byte{{}, {0x32}, crypto.Keccak256(), crypto.Keccak256([]byte{0x32})}
-	for i := 0; i < 8; i++ {
+	for i := 0; i < 64; i++ {
 		large := make([]byte, rand.Intn(8*binSize))
 		tests = append(tests, large)
 	}
 
 	record := func(key bytes32, value []byte) {
+		colors.PrintGrey("storing ", key, " ", pretty.FirstFewBytes(value))
 		store[key] = value
 	}
 	oracle := func(key bytes32) []byte {
 		preimage, ok := store[key]
 		if !ok {
-			t.Error("no preimage for key", key)
-			return []byte{}
+			Fail(t, "no preimage for key", key)
 		}
+		colors.PrintBlue("loading ", key, " ", pretty.FirstFewBytes(preimage))
 		return preimage
 	}
 
+	hashes := map[bytes32][]byte{}
 	for _, test := range tests {
 		hash := RecordHash(record, test)
-		store[hash] = test
+		hashes[hash] = test
 	}
 
-	for key, value := range store {
+	for key, value := range hashes {
 		preimage, err := Content(key, oracle)
 		Require(t, err, key)
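
The empty-input case fixed above matters because the replay oracle must be able to expand every root it is handed, including the root of no data. After this change the degenerate tree is recorded like any other:

    inner := crypto.Keccak256Hash([]byte{})      // leaf hash of the empty bin
    outer := crypto.Keccak256Hash(inner.Bytes()) // the root that Hash() returns
    // dastree.Hash() == outer, and both (outer -> inner) and (inner -> "") are
    // recorded, so Content(outer, oracle) can walk back down to the empty payload
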
From dccfbf529e8f584cca42c55ae99470421e6d47f9 Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Fri, 1 Jul 2022 10:03:07 -0500
Subject: [PATCH 06/23] panic wrapper

---
 arbnode/node.go             |  7 +++-
 das/das.go                  |  4 ++
 das/panic_wrapper.go        | 43 ++++++++++++++++++++
 system_tests/common_test.go | 80 +++++++++++++++++++++++++++++++------
 4 files changed, 120 insertions(+), 14 deletions(-)
 create mode 100644 das/panic_wrapper.go

diff --git a/arbnode/node.go b/arbnode/node.go
index ca51e38001..dabb06b327 100644
--- a/arbnode/node.go
+++ b/arbnode/node.go
@@ -680,7 +680,12 @@ func createNodeImpl(
 		} else {
 			dataAvailabilityService = das.NewReadLimitedDataAvailabilityService(dataAvailabilityService)
 		}
-		dataAvailabilityService = das.NewTimeoutWrapper(dataAvailabilityService, config.DataAvailability.RequestTimeout)
+		dataAvailabilityService = das.NewTimeoutWrapper(
+			dataAvailabilityService, config.DataAvailability.RequestTimeout,
+		)
+		if config.DataAvailability.PanicOnError {
+			dataAvailabilityService = das.NewPanicWrapper(dataAvailabilityService)
+		}
 	} else if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee {
 		return nil, errors.New("a data availability service is required for this chain, but it was not configured")
 	}
diff --git a/das/das.go b/das/das.go
index 29d680cf66..175c4a2058 100644
--- a/das/das.go
+++ b/das/das.go
@@ -47,12 +47,15 @@ type DataAvailabilityConfig struct {
 
 	L1NodeURL             string `koanf:"l1-node-url"`
 	SequencerInboxAddress string `koanf:"sequencer-inbox-address"`
+
+	PanicOnError bool `koanf:"panic-on-error"`
 }
 
 var DefaultDataAvailabilityConfig = DataAvailabilityConfig{
 	RequestTimeout:                5 * time.Second,
 	Enable:                        false,
 	RestfulClientAggregatorConfig: DefaultRestfulClientAggregatorConfig,
+	PanicOnError:                  false,
 }
 
 func OptionalAddressFromString(s string) (*common.Address, error) {
@@ -71,6 +74,7 @@ func OptionalAddressFromString(s string) (*common.Address, error) {
 
 func DataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Bool(prefix+".enable", DefaultDataAvailabilityConfig.Enable, "enable Anytrust Data Availability mode")
+	f.Bool(prefix+".panic-on-error", DefaultDataAvailabilityConfig.PanicOnError, "whether the Data Availability Service should fail fast on errors (not recommended)")
 	f.Duration(prefix+".request-timeout", DefaultDataAvailabilityConfig.RequestTimeout, "Data Availability Service request timeout duration")
diff --git a/das/panic_wrapper.go b/das/panic_wrapper.go
new file mode 100644
index 0000000000..c7376469a0
--- /dev/null
+++ b/das/panic_wrapper.go
@@ -0,0 +1,43 @@
+// Copyright 2022, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package das
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/offchainlabs/nitro/arbstate"
+)
+
+type PanicWrapper struct {
+	DataAvailabilityService
+}
+
+func NewPanicWrapper(dataAvailabilityService DataAvailabilityService) DataAvailabilityService {
+	return &PanicWrapper{
+		DataAvailabilityService: dataAvailabilityService,
+	}
+}
+
+func (w *PanicWrapper) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
+	data, err := w.DataAvailabilityService.GetByHash(ctx, hash)
+	if err != nil {
+		panic(fmt.Sprintf("panic wrapper GetByHash: %v", err))
+	}
+	return data, nil
+}
+
+func (w *PanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) {
+	cert, err := w.DataAvailabilityService.Store(ctx, message, timeout, sig)
+	if err != nil {
+		panic(fmt.Sprintf("panic wrapper Store: %v", err))
+	}
+	return cert, nil
+}
+
+func (w *PanicWrapper) String() string {
+	return fmt.Sprintf("PanicWrapper{%v}", w.DataAvailabilityService)
+}
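
A note on composition: node.go applies the panic wrapper outermost, so a timeout or failed read escalates to a crash rather than surfacing as an error the caller might swallow. A hypothetical assembly following that wiring (variable and config names illustrative):

    service := das.NewTimeoutWrapper(innerService, config.RequestTimeout)
    if config.PanicOnError {
        service = das.NewPanicWrapper(service) // fail fast; not recommended outside tests
    }
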
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 03b92c6464..c083990918 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -58,7 +58,9 @@ func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client,
 	}
 }
 
-func TransferBalance(t *testing.T, from, to string, amount *big.Int, l2info info, client client, ctx context.Context) (*types.Transaction, *types.Receipt) {
+func TransferBalance(
+	t *testing.T, from, to string, amount *big.Int, l2info info, client client, ctx context.Context,
+) (*types.Transaction, *types.Receipt) {
 	tx := l2info.PrepareTx(from, to, l2info.TransferGas, amount, nil)
 	err := client.SendTransaction(ctx, tx)
 	Require(t, err)
@@ -67,7 +69,14 @@ func TransferBalance(t *testing.T, from, to string, amount *big.Int, l2info info
 	return tx, res
 }
 
-func SendSignedTxViaL1(t *testing.T, ctx context.Context, l1info *BlockchainTestInfo, l1client arbutil.L1Interface, l2client arbutil.L1Interface, delayedTx *types.Transaction) *types.Receipt {
+func SendSignedTxViaL1(
+	t *testing.T,
+	ctx context.Context,
+	l1info *BlockchainTestInfo,
+	l1client arbutil.L1Interface,
+	l2client arbutil.L1Interface,
+	delayedTx *types.Transaction,
+) *types.Receipt {
 	delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client)
 	Require(t, err)
 	usertxopts := l1info.GetDefaultTransactOpts("User", ctx)
@@ -149,7 +158,9 @@ func CreateTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client,
 	return l1info, l1Client, l1backend, stack
 }
 
-func DeployOnTestL1(t *testing.T, ctx context.Context, l1info info, l1client client, chainId *big.Int) *arbnode.RollupAddresses {
+func DeployOnTestL1(
+	t *testing.T, ctx context.Context, l1info info, l1client client, chainId *big.Int,
+) *arbnode.RollupAddresses {
 	l1info.GenerateAccount("RollupOwner")
 	l1info.GenerateAccount("Sequencer")
 	l1info.GenerateAccount("User")
@@ -179,7 +190,9 @@ func DeployOnTestL1(t *testing.T, ctx context.Context, l1info info, l1client cli
 	return addresses
 }
 
-func createL2BlockChain(t *testing.T, l2info *BlockchainTestInfo, chainConfig *params.ChainConfig) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) {
+func createL2BlockChain(
+	t *testing.T, l2info *BlockchainTestInfo, chainConfig *params.ChainConfig,
+) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) {
 	if l2info == nil {
 		l2info = NewArbTestInfo(t, chainConfig.ChainID)
 	}
@@ -207,12 +220,28 @@ func ClientForArbBackend(t *testing.T, backend *arbitrum.Backend) *ethclient.Cli
 }
 
 // Create and deploy L1 and arbnode for L2
-func CreateTestNodeOnL1(t *testing.T, ctx context.Context, isSequencer bool) (l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) {
+func CreateTestNodeOnL1(
+	t *testing.T,
+	ctx context.Context,
+	isSequencer bool,
+) (
+	l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info,
+	l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node,
+) {
 	conf := arbnode.ConfigDefaultL1Test()
 	return CreateTestNodeOnL1WithConfig(t, ctx, isSequencer, conf, params.ArbitrumDevTestChainConfig())
 }
 
-func CreateTestNodeOnL1WithConfig(t *testing.T, ctx context.Context, isSequencer bool, nodeConfig *arbnode.Config, chainConfig *params.ChainConfig) (l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) {
+func CreateTestNodeOnL1WithConfig(
+	t *testing.T,
+	ctx context.Context,
+	isSequencer bool,
+	nodeConfig *arbnode.Config,
+	chainConfig *params.ChainConfig,
+) (
+	l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info,
+	l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node,
+) {
 	l1info, l1client, l1backend, l1stack = CreateTestL1BlockChain(t, nil)
 	l2info, l2stack, l2chainDb, l2arbDb, l2blockchain := createL2BlockChain(t, nil, chainConfig)
 	addresses := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig.ChainID)
@@ -242,7 +271,9 @@ func CreateTestL2(t *testing.T, ctx context.Context) (*BlockchainTestInfo, *arbn
 	return CreateTestL2WithConfig(t, ctx, nil, arbnode.ConfigDefaultL2Test(), true)
 }
 
-func CreateTestL2WithConfig(t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, takeOwnership bool) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) {
+func CreateTestL2WithConfig(
+	t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, takeOwnership bool,
+) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) {
 	l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, params.ArbitrumDevTestChainConfig())
 	node, err := arbnode.CreateNode(ctx, stack, chainDb, arbDb, nodeConfig, blockchain, nil, nil, nil, nil)
 	Require(t, err)
@@ -281,17 +312,31 @@ func Fail(t *testing.T, printables ...interface{}) {
 	testhelpers.FailImpl(t, printables...)
 }
 
-func Create2ndNode(t *testing.T, ctx context.Context, first *arbnode.Node, l1stack *node.Node, l2InitData *statetransfer.ArbosInitializationInfo, blockValidator bool) (*ethclient.Client, *arbnode.Node) {
+func Create2ndNode(
+	t *testing.T,
+	ctx context.Context,
+	first *arbnode.Node,
+	l1stack *node.Node,
+	l2InitData *statetransfer.ArbosInitializationInfo,
+	blockValidator bool,
+) (*ethclient.Client, *arbnode.Node) {
 	nodeConf := arbnode.ConfigDefaultL1Test()
 	nodeConf.BatchPoster.Enable = false
 	nodeConf.BlockValidator.Enable = blockValidator
 	return Create2ndNodeWithConfig(t, ctx, first, l1stack, l2InitData, nodeConf)
 }
 
-func Create2ndNodeWithConfig(t *testing.T, ctx context.Context, first *arbnode.Node, l1stack *node.Node, l2InitData *statetransfer.ArbosInitializationInfo, nodeConfig *arbnode.Config) (*ethclient.Client, *arbnode.Node) {
+func Create2ndNodeWithConfig(
+	t *testing.T,
+	ctx context.Context,
+	first *arbnode.Node,
+	l1stack *node.Node,
+	l2InitData *statetransfer.ArbosInitializationInfo,
+	nodeConfig *arbnode.Config,
+) (*ethclient.Client, *arbnode.Node) {
 	l1rpcClient, err := l1stack.Attach()
 	if err != nil {
-		t.Fatal(err)
+		Fail(t, err)
 	}
 	l1client := ethclient.NewClient(l1rpcClient)
 	l2stack, err := arbnode.CreateDefaultStack()
@@ -319,7 +364,13 @@ func GetBalance(t *testing.T, ctx context.Context, client *ethclient.Client, acc
 	return balance
 }
 
-func authorizeDASKeyset(t *testing.T, ctx context.Context, dasSignerKey *blsSignatures.PublicKey, l1info info, l1client arbutil.L1Interface) {
+func authorizeDASKeyset(
+	t *testing.T,
+	ctx context.Context,
+	dasSignerKey *blsSignatures.PublicKey,
+	l1info info,
+	l1client arbutil.L1Interface,
+) {
 	if dasSignerKey == nil {
 		return
 	}
@@ -340,7 +391,9 @@ func authorizeDASKeyset(t *testing.T, ctx context.Context, dasSignerKey *blsSign
 	Require(t, err)
 }
 
-func setupConfigWithDAS(t *testing.T, dasModeString string) (*params.ChainConfig, *arbnode.Config, string, *blsSignatures.PublicKey) {
+func setupConfigWithDAS(
+	t *testing.T, dasModeString string,
+) (*params.ChainConfig, *arbnode.Config, string, *blsSignatures.PublicKey) {
 	l1NodeConfigA := arbnode.ConfigDefaultL1Test()
 	chainConfig := params.ArbitrumDevTestChainConfig()
 	var dbPath string
@@ -376,7 +429,8 @@ func setupConfigWithDAS(t *testing.T, dasModeString string) (*params.ChainConfig
 			Enable:  enableDbStorage,
 			DataDir: dbPath,
 		},
-		L1NodeURL: "none",
+		L1NodeURL:    "none",
+		PanicOnError: true,
 	}
 
 	l1NodeConfigA.DataAvailability = dasConfig
From a5a780430c84acff2b94ef4e4f31891292a4e28b Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Thu, 7 Jul 2022 18:48:48 -0500
Subject: [PATCH 07/23] fix keysets

---
 arbstate/das_reader.go                  |  9 ++--
 arbstate/inbox.go                       | 41 ++++++++++------
 contracts/src/bridge/SequencerInbox.sol |  3 +-
 das/chain_fetch_das.go                  |  3 +-
 das/dastree/dastree.go                  | 64 ++++++++++++++++---------
 das/dastree/dastree_test.go             |  8 +++-
 util/arbmath/math.go                    | 15 ++++++
 7 files changed, 98 insertions(+), 45 deletions(-)

diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go
index 08bc130f5b..8c002a362f 100644
--- a/arbstate/das_reader.go
+++ b/arbstate/das_reader.go
@@ -13,10 +13,10 @@ import (
 	"io"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
 
 	"github.com/offchainlabs/nitro/arbos/util"
 	"github.com/offchainlabs/nitro/blsSignatures"
+	"github.com/offchainlabs/nitro/das/dastree"
 )
 
 type DataAvailabilityReader interface {
@@ -150,7 +150,7 @@ func (cert *DataAvailabilityCertificate) RecoverKeyset(
 	if err != nil {
 		return nil, err
 	}
-	if !bytes.Equal(crypto.Keccak256(keysetBytes), cert.KeysetHash[:]) {
+	if dastree.Hash(keysetBytes) != cert.KeysetHash {
 		return nil, errors.New("keyset hash does not match cert")
 	}
 	return DeserializeKeyset(bytes.NewReader(keysetBytes))
@@ -196,7 +196,10 @@ func (keyset *DataAvailabilityKeyset) Hash() (common.Hash, error) {
 	if err := keyset.Serialize(wr); err != nil {
 		return common.Hash{}, err
 	}
-	return crypto.Keccak256Hash(wr.Bytes()), nil
+	if wr.Len() > dastree.BinSize {
+		return common.Hash{}, errors.New("keyset too large")
+	}
+	return dastree.Hash(wr.Bytes()), nil
 }
 
 func DeserializeKeyset(rd io.Reader) (*DataAvailabilityKeyset, error) {
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index 2e00e10dd6..f3383d316a 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -139,8 +139,13 @@ func RecoverPayloadFromDasBatch(
 		log.Error("Failed to deserialize DAS message", "err", err)
 		return nil, nil
 	}
+
+	recordPreimage := func(key common.Hash, value []byte) {
+		preimages[key] = value
+	}
+
 	keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash)
-	if err == nil && cert.KeysetHash != crypto.Keccak256Hash(keysetPreimage) {
+	if err == nil && cert.KeysetHash != dastree.Hash(keysetPreimage) {
 		err = ErrHashMismatch
 	}
 	if err != nil {
@@ -148,8 +153,9 @@ func RecoverPayloadFromDasBatch(
 		return nil, err
 	}
 	if preimages != nil {
-		preimages[cert.KeysetHash] = keysetPreimage
+		dastree.RecordHash(recordPreimage, keysetPreimage)
 	}
+
 	keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage))
 	if err != nil {
 		log.Error("Couldn't deserialize keyset", "err", err)
@@ -160,34 +166,39 @@ func RecoverPayloadFromDasBatch(
 		log.Error("Bad signature on DAS batch", "err", err)
 		return nil, nil
 	}
+
 	maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16])
 	if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert {
 		log.Error("Data availability cert expires too soon", "err", "")
 		return nil, nil
 	}
 
-	newDataHash := cert.DataHash
-	if cert.Version == 0 {
-		newDataHash = crypto.Keccak256Hash(cert.DataHash[:])
+	dataHash := cert.DataHash
+	treeHash := dataHash
+	version := cert.Version
+	if version == 0 {
+		treeHash = dastree.FlatHashToTreeHash(dataHash)
 	}
 
-	payload, err := dasReader.GetByHash(ctx, newDataHash)
-	if err == nil && crypto.Keccak256Hash(payload) != cert.DataHash {
-		err = ErrHashMismatch
-	}
+	payload, err := dasReader.GetByHash(ctx, treeHash)
 	if err != nil {
 		log.Error("Couldn't fetch DAS batch contents", "err", err)
 		return nil, err
 	}
+
+	switch {
+	case version == 0 && crypto.Keccak256Hash(payload) != dataHash:
+		fallthrough
+	case version == 1 && dastree.Hash(payload) != dataHash:
+		log.Error("Couldn't fetch DAS batch contents", "err", ErrHashMismatch, "version", version)
+		return nil, err
+	}
+
 	if preimages != nil {
 		if cert.Version == 0 {
-			preimages[cert.DataHash] = payload
-			preimages[newDataHash] = cert.DataHash[:]
+			preimages[dataHash] = payload
+			preimages[treeHash] = dataHash[:]
 		} else {
-			record := func(key common.Hash, value []byte) {
-				preimages[key] = value
-			}
-			dastree.RecordHash(record, payload)
+			dastree.RecordHash(recordPreimage, payload)
 		}
 	}
 
diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol
index dac329be22..5bd201e0b1 100644
--- a/contracts/src/bridge/SequencerInbox.sol
+++ b/contracts/src/bridge/SequencerInbox.sol
@@ -336,7 +336,8 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox
      * @param keysetBytes bytes of the serialized keyset
      */
    function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner {
-        bytes32 ksHash = keccak256(keysetBytes);
+        bytes32 ksHash = keccak256(bytes.concat(keccak256(keysetBytes)));
+
        if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash);
        dasKeySetInfo[ksHash] = DasKeySetInfo({
            isValidKeyset: true,
diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go
index 7aa2fd8cf3..6105a73a16 100644
--- a/das/chain_fetch_das.go
+++ b/das/chain_fetch_das.go
@@ -13,7 +13,6 @@ import (
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/das/dastree"
@@ -138,7 +137,7 @@ func chainFetchGetByHash(
 		return nil, err
 	}
 	for iter.Next() {
-		if hash == crypto.Keccak256Hash(iter.Event.KeysetBytes) {
+		if hash == dastree.Hash(iter.Event.KeysetBytes) {
 			cache.put(hash, iter.Event.KeysetBytes)
 			return iter.Event.KeysetBytes, nil
 		}
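
Why the Solidity side now computes keccak256(bytes.concat(keccak256(keysetBytes))): keysets are capped at one bin (the BinSize check above), so their dastree is a single leaf and the root degenerates to the outer keccak of the inner keccak. The L1 contract and dastree.Hash therefore agree without the contract merkelizing anything; a reader's sanity check:

    // for any keysetBytes with len(keysetBytes) <= dastree.BinSize:
    inner := crypto.Keccak256Hash(keysetBytes)
    root := crypto.Keccak256Hash(inner.Bytes())
    // root == dastree.Hash(keysetBytes), matching the contract's computation
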
diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go
index 0bf57fb2b5..3949700abf 100644
--- a/das/dastree/dastree.go
+++ b/das/dastree/dastree.go
@@ -11,20 +11,25 @@ import (
 	"github.com/offchainlabs/nitro/util/arbmath"
 )
 
-const binSize = 64 * 1024 // 64 kB
+const BinSize = 64 * 1024 // 64 kB
 
 type bytes32 = common.Hash
 
+type node struct {
+	hash bytes32
+	size uint32
+}
+
 func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 	// Algorithm
 	//  1. split the preimage into 64kB bins and hash them to produce the tree's leaves
-	//  2. repeatedly hash pairs over and over, bubbling up any odd ones out, to form the root
+	//  2. repeatedly hash pairs and their combined length, bubbling up any odd ones out, to form the root
 	//
-	//            r     <=> hash(hash(0, 1), 2)              step 2
+	//            r     <=> hash(hash(0, 1), 2, len(0:2))    step 2
 	//           / \
-	//          *   2   <=> hash(0, 1), 2                    step 1
+	//          *   2   <=> hash(0, 1, len(0:1)), 2          step 1
 	//         / \
 	//        0   1     <=> 0, 1, 2                          step 0
 	//
 	//  Intermediate hashes like '*' from above may be recorded via the `record` closure
 	//
@@ -41,28 +46,35 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 		return outerKeccak
 	}
 
-	length := int64(len(unrolled))
-	leaves := []bytes32{}
-	for bin := int64(0); bin < length; bin += binSize {
-		end := arbmath.MinInt(bin+binSize, length)
+	length := uint32(len(unrolled))
+	leaves := []node{}
+	for bin := uint32(0); bin < length; bin += BinSize {
+		end := arbmath.MinUint32(bin+BinSize, length)
 		content := unrolled[bin:end]
 		innerKeccak := crypto.Keccak256Hash(content)
 		outerKeccak := crypto.Keccak256Hash(innerKeccak.Bytes())
 		record(outerKeccak, innerKeccak.Bytes())
 		record(innerKeccak, content)
-		leaves = append(leaves, outerKeccak)
+		leaves = append(leaves, node{outerKeccak, end - bin})
 	}
 
 	layer := leaves
 	for len(layer) > 1 {
 		prior := len(layer)
 		after := prior/2 + prior%2
-		paired := make([]bytes32, after)
+		paired := make([]node, after)
 		for i := 0; i < prior-1; i += 2 {
-			leftChild := layer[i].Bytes()
-			rightChild := layer[i+1].Bytes()
-			parent := crypto.Keccak256Hash(leftChild, rightChild)
-			record(parent, append(leftChild, rightChild...))
+			firstHash := layer[i].hash.Bytes()
+			otherHash := layer[i+1].hash.Bytes()
+			sizeUnder := layer[i].size + layer[i+1].size
+			dataUnder := firstHash
+			dataUnder = append(dataUnder, otherHash...)
+			dataUnder = append(dataUnder, arbmath.Uint32ToBytes(sizeUnder)...)
+			parent := node{
+				crypto.Keccak256Hash(dataUnder),
+				sizeUnder,
+			}
+			record(parent.hash, dataUnder)
 			paired[i/2] = parent
 		}
 		if prior%2 == 1 {
 			paired[after-1] = layer[prior-1]
@@ -70,7 +82,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
 		}
 		layer = paired
 	}
-	return layer[0]
+	return layer[0].hash
 }
 
 func Hash(preimage ...[]byte) bytes32 {
@@ -82,9 +94,15 @@ func HashBytes(preimage ...[]byte) []byte {
 	return Hash(preimage...).Bytes()
 }
 
-func Content(root common.Hash, oracle func(common.Hash) []byte) ([]byte, error) {
-	leaves := []common.Hash{}
-	stack := []common.Hash{root}
+func FlatHashToTreeHash(flat bytes32) bytes32 {
+	// Forms a degenerate dastree that's just a single leaf
+	// note: the inner preimage may be arbitrarily larger than the 64 kB standard
+	return crypto.Keccak256Hash(flat[:])
+}
+
+func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) {
+	leaves := []bytes32{}
+	stack := []bytes32{root}
 
 	for len(stack) > 0 {
 		node := stack[len(stack)-1]
@@ -94,9 +112,9 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) {
 		switch len(under) {
 		case 32:
 			leaves = append(leaves, common.BytesToHash(under))
-		case 64:
-			prior := common.BytesToHash(under[:32]) // we want to expand leftward,
-			after := common.BytesToHash(under[32:]) // so we reverse their order
+		case 68:
+			prior := common.BytesToHash(under[:32])   // we want to expand leftward,
+			after := common.BytesToHash(under[32:64]) // so we reverse their order
 			stack = append(stack, after, prior)
 		default:
 			return nil, fmt.Errorf("failed to resolve preimage %v %v", len(under), node)
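
Two consequences of the structure above. Internal nodes now hash 68-byte preimages (left hash, right hash, and a big-endian uint32 of the bytes underneath), which is how Content can tell a 32-byte leaf pointer from an internal node by length alone. And FlatHashToTreeHash is what keeps pre-tree (version-0) certificates readable: the old flat keccak becomes a single leaf's inner hash, so stores re-key old blobs under its keccak. A sketch of the version-0 lookup path, following the inbox.go change above:

    treeHash := dastree.FlatHashToTreeHash(cert.DataHash) // keccak256(flat hash)
    payload, err := dasReader.GetByHash(ctx, treeHash)
    // integrity is still checked against the flat hash:
    // crypto.Keccak256Hash(payload) == cert.DataHash
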
-18,19 +18,25 @@ func TestDASTree(t *testing.T) { store := make(map[bytes32][]byte) tests := [][]byte{{}, {0x32}, crypto.Keccak256(), crypto.Keccak256([]byte{0x32})} for i := 0; i < 64; i++ { - large := make([]byte, rand.Intn(8*binSize)) + large := make([]byte, rand.Intn(12*BinSize)) tests = append(tests, large) } record := func(key bytes32, value []byte) { colors.PrintGrey("storing ", key, " ", pretty.FirstFewBytes(value)) store[key] = value + if crypto.Keccak256Hash(value) != key { + Fail(t, "key not the hash of value") + } } oracle := func(key bytes32) []byte { preimage, ok := store[key] if !ok { Fail(t, "no preimage for key", key) } + if crypto.Keccak256Hash(preimage) != key { + Fail(t, "key not the hash of preimage") + } colors.PrintBlue("loading ", key, " ", pretty.FirstFewBytes(preimage)) return preimage } diff --git a/util/arbmath/math.go b/util/arbmath/math.go index ec0f00f6d5..402574a85f 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -36,6 +36,14 @@ func MinUint(value, ceiling uint64) uint64 { return value } +// the minimum of two 32-bit uints +func MinUint32(value, ceiling uint32) uint32 { + if value > ceiling { + return ceiling + } + return value +} + // the maximum of two ints func MaxInt(value, floor int64) int64 { if value < floor { @@ -291,6 +299,13 @@ func UintToBytes(value uint64) []byte { return result } +// casts a uint32 to its big-endian representation +func Uint32ToBytes(value uint32) []byte { + result := make([]byte, 4) + binary.BigEndian.PutUint32(result, value) + return result +} + // Return the Maclaurin series approximation of e^x, where x is denominated in basis points. // This quartic polynomial will underestimate e^x by about 5% as x approaches 20000 bips. func ApproxExpBasisPoints(value Bips) Bips { From 24a3f7e68d2ed53f622421b0bd8fff735fcde946 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Fri, 8 Jul 2022 12:31:01 -0500 Subject: [PATCH 08/23] directory upgrade script --- cmd/one-time-das-upgrade/upgrade.go | 78 +++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 cmd/one-time-das-upgrade/upgrade.go diff --git a/cmd/one-time-das-upgrade/upgrade.go b/cmd/one-time-das-upgrade/upgrade.go new file mode 100644 index 0000000000..1c68e6b67a --- /dev/null +++ b/cmd/one-time-das-upgrade/upgrade.go @@ -0,0 +1,78 @@ +// Copyright 2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package main + +import ( + "encoding/hex" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/colors" +) + +func main() { + args := os.Args + if len(args) < 2 { + panic("Usage: upgrade ") + } + + path := filepath.FromSlash(args[1]) + info, err := os.Stat(path) + if err != nil { + panic(fmt.Sprintf("failed to open directory: %v\n%v", path, err)) + } + if !info.IsDir() { + panic(fmt.Sprintf("path %v is not a directory", path)) + } + + println("upgrading das files in directory", path) + + renames := make(map[string]string) + + filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + if err != nil { + colors.PrintRed("skipping ", path, err) + return nil + } + if info.IsDir() { + return nil + } + stem := filepath.Dir(path) + "/" + name := info.Name() + if name[:2] == "0x" { + name = name[2:] + } + + hashbytes, err := hex.DecodeString(name) + if err != nil || len(hashbytes) != 32 { + panic(fmt.Sprintf("filename %v isn't a hash", path)) + } + hash := *(*common.Hash)(hashbytes) + tree := dastree.FlatHashToTreeHash(hash) + + contents, err := os.ReadFile(path) + if err != nil { + panic(fmt.Sprintf("failed to read file %v %v", path, err)) + } + if crypto.Keccak256Hash(contents) != hash { + panic(fmt.Sprintf("file hash %v does not match its contents", path)) + } + + renames[path] = stem + tree.Hex() + return nil + }) + + for name, rename := range renames { + println(name, colors.Grey, "=>", colors.Clear, rename) + err := os.Rename(name, rename) + if err != nil { + panic("failed to mv file") + } + } +} From 07e2c902aee83d9c890d782c7f2988f2dced4fc8 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Fri, 8 Jul 2022 12:34:44 -0500 Subject: [PATCH 09/23] check walk result --- cmd/one-time-das-upgrade/upgrade.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/one-time-das-upgrade/upgrade.go b/cmd/one-time-das-upgrade/upgrade.go index 1c68e6b67a..05a027a4cc 100644 --- a/cmd/one-time-das-upgrade/upgrade.go +++ b/cmd/one-time-das-upgrade/upgrade.go @@ -35,7 +35,7 @@ func main() { renames := make(map[string]string) - filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + err := filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { if err != nil { colors.PrintRed("skipping ", path, err) return nil @@ -67,6 +67,9 @@ func main() { renames[path] = stem + tree.Hex() return nil }) + if err != nil { + panic(err) + } for name, rename := range renames { println(name, colors.Grey, "=>", colors.Clear, rename) From 6d7a84dba098d99661a7246235a5f1937ffba048 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Fri, 8 Jul 2022 12:35:06 -0500 Subject: [PATCH 10/23] fix bug --- cmd/one-time-das-upgrade/upgrade.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/one-time-das-upgrade/upgrade.go b/cmd/one-time-das-upgrade/upgrade.go index 05a027a4cc..7757c25939 100644 --- a/cmd/one-time-das-upgrade/upgrade.go +++ b/cmd/one-time-das-upgrade/upgrade.go @@ -35,7 +35,7 @@ func main() { renames := make(map[string]string) - err := filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + err = filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { if err != nil { colors.PrintRed("skipping ", path, err) return nil From 
b53b6638d7c0981df06c1fb899403f65084a2983 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Fri, 8 Jul 2022 13:54:15 -0500 Subject: [PATCH 11/23] remove TODO --- das/aggregator.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/das/aggregator.go b/das/aggregator.go index 10ba0494bd..324a66fd05 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -315,8 +315,6 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, aggCert.DataHash = expectedHash aggCert.Timeout = timeout aggCert.KeysetHash = a.keysetHash - - // Rachel TODO: count up requiredServicesForStore number on each version aggCert.Version = 1 verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey) From 897da98b2c4149a0a050b545e4946302ef8674cb Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Fri, 8 Jul 2022 19:25:07 -0500 Subject: [PATCH 12/23] address review comments --- arbstate/inbox.go | 3 ++ das/dastree/dastree.go | 71 +++++++++++++++++++++++++++++++------ das/dastree/dastree_test.go | 5 ++- das/sign_after_store_das.go | 9 +++-- util/arbmath/math.go | 9 +++++ 5 files changed, 80 insertions(+), 17 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index f3383d316a..f9321fc3ea 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -191,6 +191,9 @@ func RecoverPayloadFromDasBatch( case version == 1 && dastree.Hash(payload) != dataHash: log.Error("Couldn't fetch DAS batch contents", "err", ErrHashMismatch, "version", version) return nil, err + case version >= 2: + log.Error("Committee signed unsuported certificate format", "version", version, "payload", payload) + panic("node software out of date") } if preimages != nil { diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index 3949700abf..0373fd5c33 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -4,6 +4,7 @@ package dastree import ( + "encoding/binary" "fmt" "github.com/ethereum/go-ethereum/common" @@ -96,34 +97,82 @@ func HashBytes(preimage ...[]byte) []byte { func FlatHashToTreeHash(flat bytes32) bytes32 { // Forms a degenerate dastree that's just a single leaf - // note: the inner preimage may be arbitrarily larger than the 64 kB standard + // note: the inner preimage may be larger than the 64 kB standard return crypto.Keccak256Hash(flat[:]) } func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { - leaves := []bytes32{} - stack := []bytes32{root} + // Reverses hashes to reveal the full preimage under the root using the preimage oracle. + // This function also checks that the size-data is consistent and that the hash is canonical. + // + // Notes + // 1. Because we accept degenerate dastrees, we can't check that single-leaf trees are canonical. + // 2. For any canonical dastree, there exists a degenerate single-leaf equivalent that we accept. + // 3. 
Only the committee can produce trees unwrapped by this function + // + + total := uint32(0) + upper := oracle(root) + switch len(upper) { + case 32: + return oracle(common.BytesToHash(upper)), nil + case 68: + total = binary.BigEndian.Uint32(upper[64:]) + default: + return nil, fmt.Errorf("invalid root with preimage of size %v: %v %v", len(upper), root, upper) + } + + leaves := []node{} + stack := []node{{hash: root, size: total}} for len(stack) > 0 { - node := stack[len(stack)-1] + place := stack[len(stack)-1] stack = stack[:len(stack)-1] - under := oracle(node) + under := oracle(place.hash) switch len(under) { case 32: - leaves = append(leaves, common.BytesToHash(under)) + leaf := node{ + hash: common.BytesToHash(under), + size: place.size, + } + leaves = append(leaves, leaf) case 68: - prior := common.BytesToHash(under[:32]) // we want to expand leftward, - after := common.BytesToHash(under[32:64]) // so we reverse their order + count := binary.BigEndian.Uint32(under[64:]) + power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) + + if place.size != count { + return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, under) + } + + prior := node{ + hash: common.BytesToHash(under[:32]), + size: power / 2, + } + after := node{ + hash: common.BytesToHash(under[32:64]), + size: count - power/2, + } + + // we want to expand leftward so we reverse their order stack = append(stack, after, prior) default: - return nil, fmt.Errorf("failed to resolve preimage %v %v", len(under), node) + return nil, fmt.Errorf("failed to resolve preimage %v %v", place.hash, under) } } preimage := []byte{} - for _, leaf := range leaves { - preimage = append(preimage, oracle(leaf)...) + for i, leaf := range leaves { + bin := oracle(leaf.hash) + if len(bin) != int(leaf.size) { + return nil, fmt.Errorf("leaf %v has an incorrectly sized bin: %v vs %v", i, len(bin), leaf.size) + } + preimage = append(preimage, bin...) + } + + // Check the hash matches. Given the size data this shouldn't be possible but we'll check anyway + if Hash(preimage) != root { + return nil, fmt.Errorf("preimage not canonically hashed") } return preimage, nil } diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index aa263cae6e..335b2e3120 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -16,7 +16,10 @@ import ( func TestDASTree(t *testing.T) { store := make(map[bytes32][]byte) - tests := [][]byte{{}, {0x32}, crypto.Keccak256(), crypto.Keccak256([]byte{0x32})} + tests := [][]byte{ + {}, {0x32}, crypto.Keccak256(), + make([]byte, BinSize), make([]byte, BinSize+1), make([]byte, 4*BinSize), + } for i := 0; i < 64; i++ { large := make([]byte, rand.Intn(12*BinSize)) tests = append(tests, large) diff --git a/das/sign_after_store_das.go b/das/sign_after_store_das.go index 2e4fdb52b8..fb48f44bd6 100644 --- a/das/sign_after_store_das.go +++ b/das/sign_after_store_das.go @@ -156,13 +156,12 @@ func (d *SignAfterStoreDAS) Store( } c = &arbstate.DataAvailabilityCertificate{ - Timeout: timeout, - DataHash: dastree.Hash(message), - Version: 1, + Timeout: timeout, + DataHash: dastree.Hash(message), + Version: 1, + SignersMask: 1, // The aggregator will override this if we're part of a committee. } - c.SignersMask = 1 // The aggregator will override this if we're part of a committee. 
- fields := c.SerializeSignableFields() c.Sig, err = blsSignatures.SignMessage(*d.privKey, fields) if err != nil { diff --git a/util/arbmath/math.go b/util/arbmath/math.go index 402574a85f..13ec39bd8f 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -15,6 +15,15 @@ func NextPowerOf2(value uint64) uint64 { return 1 << Log2ceil(value) } +// the smallest power of two no less than the input +func NextOrCurrentPowerOf2(value uint64) uint64 { + power := NextPowerOf2(value) + if power == 2*value { + power /= 2 + } + return power +} + // the log2 of the int, rounded up func Log2ceil(value uint64) uint64 { return uint64(64 - bits.LeadingZeros64(value)) From 43016fafc1fa664e1644322be78e25b0431a5b84 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Fri, 8 Jul 2022 20:24:41 -0500 Subject: [PATCH 13/23] add dastree to dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index cb7ce7eb02..70609b6128 100644 --- a/Dockerfile +++ b/Dockerfile @@ -54,6 +54,7 @@ COPY ./arbos ./arbos COPY ./arbstate ./arbstate COPY ./blsSignatures ./blsSignatures COPY ./cmd/replay ./cmd/replay +COPY ./das/dastree ./das/dastree COPY ./precompiles ./precompiles COPY ./statetransfer ./statetransfer COPY ./util ./util
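The 68 byte node preimage at the heart of this series is easy to lose in the diffs: two child hashes followed by a big-endian uint32 of the byte count underneath. Patch 12's Content uses that count to re-derive each left/right split, relying on the left subtree of any node always holding a full power of two of bytes. Below is a self-contained sketch of both directions as they stand at this point in the series, before the type bytes arrive; only go-ethereum's crypto package is assumed, and NextOrCurrentPowerOf2 is re-implemented locally rather than imported from arbmath.

package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"

    "github.com/ethereum/go-ethereum/crypto"
)

// the smallest power of two no less than the input (mirrors arbmath's helper)
func nextOrCurrentPowerOf2(value uint64) uint64 {
    power := uint64(1) << bits.Len64(value) // 1 << Log2ceil(value)
    if power == 2*value {
        power /= 2 // value was already a power of two
    }
    return power
}

func main() {
    // a parent committing to two children and the 64*1024 + 300 bytes below them
    left := crypto.Keccak256Hash([]byte("stand-in for a full 64 kB bin"))
    right := crypto.Keccak256Hash([]byte("stand-in for a 300 byte bin"))
    sizeUnder := uint32(64*1024 + 300)

    // left hash || right hash || big-endian size: exactly 68 bytes
    preimage := make([]byte, 0, 68)
    preimage = append(preimage, left.Bytes()...)
    preimage = append(preimage, right.Bytes()...)
    var size [4]byte
    binary.BigEndian.PutUint32(size[:], sizeUnder)
    preimage = append(preimage, size[:]...)
    parent := crypto.Keccak256Hash(preimage)

    // walking back down, a reader recovers the split from the count alone:
    // the left subtree is full, so it holds half the enclosing power of two
    count := binary.BigEndian.Uint32(preimage[64:])
    power := nextOrCurrentPowerOf2(uint64(count))
    fmt.Printf("node %v: %v bytes under, left %v, right %v\n",
        parent, count, power/2, uint64(count)-power/2)
}

Tagging sizes this way lets a reader reject truncated or padded trees during the walk itself, rather than only after reassembling and rehashing the whole preimage.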
From c09c2752d678af7bbbcdce29c1c0fb86d52f5121 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Sat, 9 Jul 2022 09:25:39 -0500 Subject: [PATCH 14/23] update comment --- das/dastree/dastree.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index 0373fd5c33..e6eb59da53 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -170,7 +170,7 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { preimage = append(preimage, bin...) } - // Check the hash matches. Given the size data this shouldn't be possible but we'll check anyway + // Check the hash matches. Given the size data this should never fail but we'll check anyway if Hash(preimage) != root { From 87d201e309680f3cc3e644900deb1c186390f228 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Mon, 11 Jul 2022 13:24:03 -0500 Subject: [PATCH 15/23] use 0x only when it would be consistent --- cmd/one-time-das-upgrade/upgrade.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/one-time-das-upgrade/upgrade.go b/cmd/one-time-das-upgrade/upgrade.go index 7757c25939..daee01e51e 100644 --- a/cmd/one-time-das-upgrade/upgrade.go +++ b/cmd/one-time-das-upgrade/upgrade.go @@ -45,8 +45,10 @@ func main() { } stem := filepath.Dir(path) + "/" name := info.Name() + zero := false if name[:2] == "0x" { name = name[2:] + zero = true } hashbytes, err := hex.DecodeString(name) @@ -64,7 +66,11 @@ func main() { panic(fmt.Sprintf("file hash %v does not match its contents", path)) } - renames[path] = stem + tree.Hex() + newName := tree.Hex() + if !zero { + newName = newName[2:] + } + renames[path] = stem + newName return nil }) if err != nil { From 0a656fd07fe9c89249a52ce01a9de8b813796765 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Tue, 19 Jul 2022 10:58:31 -0500 Subject: [PATCH 16/23] make upgrade possible in 2 phases --- arbstate/inbox.go | 52 +++++++++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 18 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index bfb1ca9f47..5881153c4f 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -139,14 +139,32 @@ func RecoverPayloadFromDasBatch( log.Error("Failed to deserialize DAS message", "err", err) return nil, nil } + version := cert.Version + checkPreimage := func(hash common.Hash, preimage []byte, message string) error { + switch { + case version == 0 && crypto.Keccak256Hash(preimage) != hash: + fallthrough + case version == 1 && dastree.Hash(preimage) != hash: + log.Error(message, "err", ErrHashMismatch, "version", version) + return ErrHashMismatch + case version >= 2: + log.Error( + "Committee signed unsupported certificate format", + "version", version, "hash", hash, "payload", preimage, + ) + panic("node software out of date") + } + return nil + } recordPreimage := func(key common.Hash, value []byte) { preimages[key] = value } keysetPreimage, err := dasReader.GetByHash(ctx, cert.KeysetHash) - if err == nil && cert.KeysetHash != dastree.Hash(keysetPreimage) { - err = ErrHashMismatch + keysetHash := cert.KeysetHash + if err == nil { + err = checkPreimage(keysetHash, keysetPreimage, "Keyset hash mismatch") } if err != nil { log.Error("Couldn't get keyset", "err", err) @@ -175,31 +193,29 @@ func RecoverPayloadFromDasBatch( dataHash := cert.DataHash treeHash := dataHash - version := cert.Version - if version == 0 { - treeHash = dastree.FlatHashToTreeHash(dataHash) - } + // + // TODO: add back after committee upgrade + // if version == 0 { + // treeHash = dastree.FlatHashToTreeHash(dataHash) + // } + // payload, err := dasReader.GetByHash(ctx, treeHash) + if err == nil { + err = checkPreimage(dataHash, payload, "batch hash mismatch") + } if err != nil { log.Error("Couldn't fetch DAS batch contents", "err", err) return nil, err } - switch { - case version == 0 && crypto.Keccak256Hash(payload) != dataHash: - fallthrough - case version == 1 && dastree.Hash(payload) != dataHash: - log.Error("Couldn't fetch DAS batch contents", "err", ErrHashMismatch, "version", version) - return nil, err - case version >= 2: - log.Error("Committee signed
unsuported certificate format", "version", version, "payload", payload) - panic("node software out of date") - } if preimages != nil { - if cert.Version == 0 { + if version == 0 { preimages[dataHash] = payload - preimages[treeHash] = dataHash[:] + // + // TODO: add back after committee upgrade + // preimages[treeHash] = dataHash[:] + // } else { dastree.RecordHash(recordPreimage, payload) } From e217a581662aa15158b11b01f7df0015234730d7 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Tue, 19 Jul 2022 20:18:47 -0500 Subject: [PATCH 17/23] fe ff scheme --- das/dastree/dastree.go | 80 ++++++++++++++++++++----------------- das/dastree/dastree_test.go | 4 +- 2 files changed, 45 insertions(+), 39 deletions(-) diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index e6eb59da53..b0bdc3cc3c 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -10,9 +10,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/colors" ) const BinSize = 64 * 1024 // 64 kB +const LeafByte = 0xfe +const NodeByte = 0xff type bytes32 = common.Hash @@ -40,23 +43,20 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { unrolled = append(unrolled, slice...) } if len(unrolled) == 0 { - innerKeccak := crypto.Keccak256Hash([]byte{}) - outerKeccak := crypto.Keccak256Hash(innerKeccak.Bytes()) - record(outerKeccak, innerKeccak.Bytes()) - record(innerKeccak, []byte{}) - return outerKeccak + single := []byte{LeafByte} + keccak := crypto.Keccak256Hash(single) + record(keccak, single) + return keccak } length := uint32(len(unrolled)) leaves := []node{} for bin := uint32(0); bin < length; bin += BinSize { end := arbmath.MinUint32(bin+BinSize, length) - content := unrolled[bin:end] - innerKeccak := crypto.Keccak256Hash(content) - outerKeccak := crypto.Keccak256Hash(innerKeccak.Bytes()) - record(outerKeccak, innerKeccak.Bytes()) - record(innerKeccak, content) - leaves = append(leaves, node{outerKeccak, end - bin}) + single := append([]byte{LeafByte}, unrolled[bin:end]...) + keccak := crypto.Keccak256Hash(single) + record(keccak, single) + leaves = append(leaves, node{keccak, end - bin}) } layer := leaves @@ -68,7 +68,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { firstHash := layer[i].hash.Bytes() otherHash := layer[i+1].hash.Bytes() sizeUnder := layer[i].size + layer[i+1].size - dataUnder := firstHash + dataUnder := append([]byte{NodeByte}, firstHash...) dataUnder = append(dataUnder, otherHash...) dataUnder = append(dataUnder, arbmath.Uint32ToBytes(sizeUnder)...) parent := node{ @@ -83,6 +83,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { } layer = paired } + return layer[0].hash } @@ -101,6 +102,11 @@ func FlatHashToTreeHash(flat bytes32) bytes32 { return crypto.Keccak256Hash(flat[:]) } +func ValidHash(hash bytes32, preimage []byte) bool { + // TODO: remove keccak after committee upgrade + return hash == Hash(preimage) || hash == crypto.Keccak256Hash(preimage) +} + func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { // Reverses hashes to reveal the full preimage under the root using the preimage oracle. // This function also checks that the size-data is consistent and that the hash is canonical. 
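Before the Content half of this diff, the encoding this commit settles on deserves spelling out: a leaf hashes to keccak of 0xfe followed by its bin, while an internal node hashes to keccak of 0xff followed by both child hashes and the size word, giving 69 byte node preimages. The sketch below shows both shapes. It is illustrative only, with local stand-in names rather than the package's API, and the prefixes are reworked again over the next few commits, first dropped in favor of flipping the root's first bit and then restored with double hashing.

package main

import (
    "encoding/binary"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
)

const (
    leafByte = byte(0xfe) // tags a leaf's preimage
    nodeByte = byte(0xff) // tags an internal node's preimage
)

// leaf = keccak(0xfe || bin contents): a bin that happens to be 69 bytes
// long can never collide with a real node thanks to the type byte
func leafHash(content []byte) common.Hash {
    return crypto.Keccak256Hash(append([]byte{leafByte}, content...))
}

// node = keccak(0xff || left || right || size under): 69 bytes of preimage
func nodeHash(left, right common.Hash, sizeUnder uint32) common.Hash {
    preimage := append([]byte{nodeByte}, left.Bytes()...)
    preimage = append(preimage, right.Bytes()...)
    var size [4]byte
    binary.BigEndian.PutUint32(size[:], sizeUnder)
    return crypto.Keccak256Hash(append(preimage, size[:]...))
}

func main() {
    first, other := []byte("bin 0"), []byte("bin 1")
    root := nodeHash(leafHash(first), leafHash(other), uint32(len(first)+len(other)))
    fmt.Println("domain-separated root:", root)
}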
@@ -113,32 +119,41 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { total := uint32(0) upper := oracle(root) - switch len(upper) { - case 32: - return oracle(common.BytesToHash(upper)), nil - case 68: - total = binary.BigEndian.Uint32(upper[64:]) + switch { + case len(upper) > 0 && upper[0] == LeafByte: + return upper[1:], nil + case len(upper) == 69 && upper[0] == NodeByte: + total = binary.BigEndian.Uint32(upper[65:]) default: return nil, fmt.Errorf("invalid root with preimage of size %v: %v %v", len(upper), root, upper) } - leaves := []node{} stack := []node{{hash: root, size: total}} + preimage := []byte{} for len(stack) > 0 { place := stack[len(stack)-1] stack = stack[:len(stack)-1] + + colors.PrintYellow("here ", place.hash, place.size) + under := oracle(place.hash) - switch len(under) { - case 32: - leaf := node{ - hash: common.BytesToHash(under), - size: place.size, + if len(under) == 0 || (under[0] == NodeByte && len(under) != 69) { + return nil, fmt.Errorf("invalid node for hash %v: %v", place.hash, under) + } + + kind := under[0] + content := under[1:] + + switch kind { + case LeafByte: + if len(content) != int(place.size) { + return nil, fmt.Errorf("leaf has a badly sized bin: %v vs %v", len(under), place.size) } - leaves = append(leaves, leaf) - case 68: - count := binary.BigEndian.Uint32(under[64:]) + preimage = append(preimage, content...) + case NodeByte: + count := binary.BigEndian.Uint32(content[64:]) power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) if place.size != count { @@ -146,11 +161,11 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { } prior := node{ - hash: common.BytesToHash(under[:32]), + hash: common.BytesToHash(content[:32]), size: power / 2, } after := node{ - hash: common.BytesToHash(under[32:64]), + hash: common.BytesToHash(content[32:64]), size: count - power/2, } @@ -161,15 +176,6 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { } } - preimage := []byte{} - for i, leaf := range leaves { - bin := oracle(leaf.hash) - if len(bin) != int(leaf.size) { - return nil, fmt.Errorf("leaf %v has an incorrectly sized bin: %v vs %v", i, len(bin), leaf.size) - } - preimage = append(preimage, bin...) - } - // Check the hash matches. 
Given the size data this should never fail but we'll check anyway if Hash(preimage) != root { return nil, fmt.Errorf("preimage not canonically hashed") diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index 335b2e3120..0dc8e9d763 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -26,7 +26,7 @@ func TestDASTree(t *testing.T) { } record := func(key bytes32, value []byte) { - colors.PrintGrey("storing ", key, " ", pretty.FirstFewBytes(value)) + colors.PrintGrey("storing ", key, " ", pretty.PrettyBytes(value)) store[key] = value if crypto.Keccak256Hash(value) != key { Fail(t, "key not the hash of value") @@ -40,7 +40,7 @@ func TestDASTree(t *testing.T) { if crypto.Keccak256Hash(preimage) != key { Fail(t, "key not the hash of preimage") } - colors.PrintBlue("loading ", key, " ", pretty.FirstFewBytes(preimage)) + colors.PrintBlue("loading ", key, " ", pretty.PrettyBytes(preimage)) return preimage } From 54c7daa7a95201f5f1519b6b6b9899ca59780e81 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 20 Jul 2022 16:12:39 -0500 Subject: [PATCH 18/23] relax hash-kind checks and invert roots --- arbstate/das_reader.go | 2 +- cmd/datool/datool.go | 2 +- contracts/src/bridge/SequencerInbox.sol | 3 +- das/aggregator.go | 2 +- das/chain_fetch_das.go | 4 +- das/dasrpc/dasRpcClient.go | 2 +- das/dastree/dastree.go | 88 +++++++++++++------------ das/fallback_storage_service.go | 2 +- das/restful_client.go | 2 +- das/simple_das_reader_aggregator.go | 2 +- util/arbmath/bits.go | 37 +++++++++++ util/arbmath/math.go | 20 ------ util/pretty/pretty_printing.go | 8 +++ 13 files changed, 101 insertions(+), 73 deletions(-) create mode 100644 util/arbmath/bits.go diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 8c002a362f..081f820c73 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -150,7 +150,7 @@ func (cert *DataAvailabilityCertificate) RecoverKeyset( if err != nil { return nil, err } - if dastree.Hash(keysetBytes) != cert.KeysetHash { + if !dastree.ValidHash(cert.KeysetHash, keysetBytes) { return nil, errors.New("keyset hash does not match cert") } return DeserializeKeyset(bytes.NewReader(keysetBytes)) diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 888f53eb8f..092f1ca941 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -175,7 +175,7 @@ func startRPCClientGetByHash(args []string) error { } ctx := context.Background() - message, err := client.GetByHash(ctx, common.BytesToHash((decodedHash))) + message, err := client.GetByHash(ctx, common.BytesToHash(decodedHash)) if err != nil { return err } diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol index f794d4ee4e..2cdaa122a4 100644 --- a/contracts/src/bridge/SequencerInbox.sol +++ b/contracts/src/bridge/SequencerInbox.sol @@ -353,7 +353,8 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox * @param keysetBytes bytes of the serialized keyset */ function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner { - bytes32 ksHash = keccak256(bytes.concat(keccak256(keysetBytes))); + uint256 ksWord = uint256(keccak256(bytes.concat(keccak256(keysetBytes)))); + bytes32 ksHash = bytes32(ksWord ^ (1 << 255)); if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); dasKeySetInfo[ksHash] = DasKeySetInfo({ diff --git a/das/aggregator.go b/das/aggregator.go index 324a66fd05..2cac67ebcc 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -177,7 
+177,7 @@ func (a *Aggregator) GetByHash(ctx context.Context, hash common.Hash) ([]byte, e errorChan <- err return } - if dastree.Hash(blob) == hash { + if dastree.ValidHash(hash, blob) { blobChan <- blob } else { errorChan <- fmt.Errorf("DAS (mask %X) returned data that doesn't match requested hash!", d.signersMask) diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index 6105a73a16..643c98fa5d 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -112,7 +112,7 @@ func chainFetchGetByHash( // try to fetch from the inner DAS innerRes, err := daReader.GetByHash(ctx, hash) - if err == nil && hash == dastree.Hash(innerRes) { + if err == nil && dastree.ValidHash(hash, innerRes) { return innerRes, nil } @@ -137,7 +137,7 @@ func chainFetchGetByHash( return nil, err } for iter.Next() { - if hash == dastree.Hash(iter.Event.KeysetBytes) { + if dastree.ValidHash(hash, iter.Event.KeysetBytes) { cache.put(hash, iter.Event.KeysetBytes) return iter.Event.KeysetBytes, nil } diff --git a/das/dasrpc/dasRpcClient.go b/das/dasrpc/dasRpcClient.go index 9f93fd2119..5697a743c5 100644 --- a/das/dasrpc/dasRpcClient.go +++ b/das/dasrpc/dasRpcClient.go @@ -43,7 +43,7 @@ func (c *DASRPCClient) GetByHash(ctx context.Context, hash common.Hash) ([]byte, if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hexutil.Bytes(hash[:])); err != nil { return nil, err } - if hash != dastree.Hash(ret) { // check hash because RPC server might be untrusted + if !dastree.ValidHash(hash, ret) { // check hash because RPC server might be untrusted return nil, arbstate.ErrHashMismatch } return ret, nil diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index b0bdc3cc3c..a113f6a7b5 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -10,11 +10,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/colors" ) const BinSize = 64 * 1024 // 64 kB -const LeafByte = 0xfe const NodeByte = 0xff type bytes32 = common.Hash @@ -38,25 +36,30 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { // Intermediate hashes like '*' from above may be recorded via the `record` closure // + keccord := func(value []byte) bytes32 { + hash := crypto.Keccak256Hash(value) + record(hash, value) + return hash + } + unrolled := []byte{} for _, slice := range preimage { unrolled = append(unrolled, slice...) } if len(unrolled) == 0 { - single := []byte{LeafByte} - keccak := crypto.Keccak256Hash(single) - record(keccak, single) - return keccak + innerKeccak := keccord([]byte{}) + outerKeccak := keccord(innerKeccak.Bytes()) + return arbmath.FlipBit(outerKeccak, 0) } length := uint32(len(unrolled)) leaves := []node{} for bin := uint32(0); bin < length; bin += BinSize { end := arbmath.MinUint32(bin+BinSize, length) - single := append([]byte{LeafByte}, unrolled[bin:end]...) - keccak := crypto.Keccak256Hash(single) - record(keccak, single) - leaves = append(leaves, node{keccak, end - bin}) + content := unrolled[bin:end] + innerKeccak := keccord(content) + outerKeccak := keccord(innerKeccak.Bytes()) + leaves = append(leaves, node{outerKeccak, end - bin}) } layer := leaves @@ -68,14 +71,13 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { firstHash := layer[i].hash.Bytes() otherHash := layer[i+1].hash.Bytes() sizeUnder := layer[i].size + layer[i+1].size - dataUnder := append([]byte{NodeByte}, firstHash...) 
+ dataUnder := firstHash dataUnder = append(dataUnder, otherHash...) dataUnder = append(dataUnder, arbmath.Uint32ToBytes(sizeUnder)...) parent := node{ - crypto.Keccak256Hash(dataUnder), + keccord(dataUnder), sizeUnder, } - record(parent.hash, dataUnder) paired[i/2] = parent } if prior%2 == 1 { @@ -83,8 +85,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { } layer = paired } - - return layer[0].hash + return arbmath.FlipBit(layer[0].hash, 0) } func Hash(preimage ...[]byte) bytes32 { @@ -99,7 +100,7 @@ func HashBytes(preimage ...[]byte) []byte { func FlatHashToTreeHash(flat bytes32) bytes32 { // Forms a degenerate dastree that's just a single leaf // note: the inner preimage may be larger than the 64 kB standard - return crypto.Keccak256Hash(flat[:]) + return arbmath.FlipBit(crypto.Keccak256Hash(flat[:]), 0) } func ValidHash(hash bytes32, preimage []byte) bool { @@ -117,43 +118,35 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { // 3. Only the committee can produce trees unwrapped by this function // + start := arbmath.FlipBit(root, 0) total := uint32(0) - upper := oracle(root) - switch { - case len(upper) > 0 && upper[0] == LeafByte: - return upper[1:], nil - case len(upper) == 69 && upper[0] == NodeByte: - total = binary.BigEndian.Uint32(upper[65:]) + upper := oracle(start) + switch len(upper) { + case 32: + return oracle(common.BytesToHash(upper)), nil + case 68: + total = binary.BigEndian.Uint32(upper[64:]) default: return nil, fmt.Errorf("invalid root with preimage of size %v: %v %v", len(upper), root, upper) } - stack := []node{{hash: root, size: total}} - preimage := []byte{} + leaves := []node{} + stack := []node{{hash: start, size: total}} for len(stack) > 0 { place := stack[len(stack)-1] stack = stack[:len(stack)-1] - - colors.PrintYellow("here ", place.hash, place.size) - under := oracle(place.hash) - if len(under) == 0 || (under[0] == NodeByte && len(under) != 69) { - return nil, fmt.Errorf("invalid node for hash %v: %v", place.hash, under) - } - - kind := under[0] - content := under[1:] - - switch kind { - case LeafByte: - if len(content) != int(place.size) { - return nil, fmt.Errorf("leaf has a badly sized bin: %v vs %v", len(under), place.size) + switch len(under) { + case 32: + leaf := node{ + hash: common.BytesToHash(under), + size: place.size, } - preimage = append(preimage, content...) - case NodeByte: - count := binary.BigEndian.Uint32(content[64:]) + leaves = append(leaves, leaf) + case 68: + count := binary.BigEndian.Uint32(under[64:]) power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) if place.size != count { @@ -161,11 +154,11 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { } prior := node{ - hash: common.BytesToHash(content[:32]), + hash: common.BytesToHash(under[:32]), size: power / 2, } after := node{ - hash: common.BytesToHash(content[32:64]), + hash: common.BytesToHash(under[32:64]), size: count - power/2, } @@ -176,6 +169,15 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { } } + preimage := []byte{} + for i, leaf := range leaves { + bin := oracle(leaf.hash) + if len(bin) != int(leaf.size) { + return nil, fmt.Errorf("leaf %v has an incorrectly sized bin: %v vs %v", i, len(bin), leaf.size) + } + preimage = append(preimage, bin...) + } + // Check the hash matches. 
Given the size data this should never fail but we'll check anyway if Hash(preimage) != root { return nil, fmt.Errorf("preimage not canonically hashed") diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index 9ab892d436..923cbe9b35 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -80,7 +80,7 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key common.Hash) if err != nil { return nil, err } - if key == dastree.Hash(data) { + if dastree.ValidHash(key, data) { putErr := f.StorageService.Put( ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds), ) diff --git a/das/restful_client.go b/das/restful_client.go index 8d00f82892..70fa09bbdb 100644 --- a/das/restful_client.go +++ b/das/restful_client.go @@ -64,7 +64,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash common.Hash) ([]b if err != nil { return nil, err } - if hash != dastree.Hash(decodedBytes) { + if !dastree.ValidHash(hash, decodedBytes) { return nil, arbstate.ErrHashMismatch } diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index e29f2de294..f63a7e085e 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -251,7 +251,7 @@ func (a *SimpleDASReaderAggregator) tryGetByHash( start := time.Now() result, err := reader.GetByHash(ctx, hash) if err == nil { - if dastree.Hash(result) == hash { + if dastree.ValidHash(hash, result) { stat.success = true } else { err = fmt.Errorf("SimpleDASReaderAggregator got result from reader(%v) not matching hash", reader) diff --git a/util/arbmath/bits.go b/util/arbmath/bits.go new file mode 100644 index 0000000000..d022455804 --- /dev/null +++ b/util/arbmath/bits.go @@ -0,0 +1,37 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbmath + +import ( + "encoding/binary" + + "github.com/ethereum/go-ethereum/common" +) + +type bytes32 = common.Hash + +// flips the nth bit in an ethereum word, starting from the left +func FlipBit(data bytes32, bit byte) bytes32 { + data[bit/8] ^= 1 << (7 - bit%8) + return data +} + +// the number of eth-words needed to store n bytes +func WordsForBytes(nbytes uint64) uint64 { + return (nbytes + 31) / 32 +} + +// casts a uint64 to its big-endian representation +func UintToBytes(value uint64) []byte { + result := make([]byte, 8) + binary.BigEndian.PutUint64(result, value) + return result +} + +// casts a uint32 to its big-endian representation +func Uint32ToBytes(value uint32) []byte { + result := make([]byte, 4) + binary.BigEndian.PutUint32(result, value) + return result +} diff --git a/util/arbmath/math.go b/util/arbmath/math.go index 13ec39bd8f..9b296534ec 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -4,7 +4,6 @@ package arbmath import ( - "encoding/binary" "math" "math/big" "math/bits" @@ -296,25 +295,6 @@ func SaturatingCastToUint(value *big.Int) uint64 { return value.Uint64() } -// the number of eth-words needed to store n bytes -func WordsForBytes(nbytes uint64) uint64 { - return (nbytes + 31) / 32 -} - -// casts a uint64 to its big-endian representation -func UintToBytes(value uint64) []byte { - result := make([]byte, 8) - binary.BigEndian.PutUint64(result, value) - return result -} - -// casts a uint32 to its big-endian representation -func Uint32ToBytes(value uint32) []byte { - result := make([]byte, 4) - binary.BigEndian.PutUint32(result, value) - return result -} - // Return the Maclaurin series approximation of e^x, where x is denominated in basis points. // This quartic polynomial will underestimate e^x by about 5% as x approaches 20000 bips. 
func ApproxExpBasisPoints(value Bips) Bips { diff --git a/util/pretty/pretty_printing.go b/util/pretty/pretty_printing.go index 4d22459693..72a8690eb7 100644 --- a/util/pretty/pretty_printing.go +++ b/util/pretty/pretty_printing.go @@ -17,6 +17,14 @@ func FirstFewBytes(b []byte) string { } } +func PrettyBytes(b []byte) string { + hex := common.Bytes2Hex(b) + if len(hex) > 24 { + return fmt.Sprintf("%v...", hex[:24]) + } + return hex +} + func PrettyHash(hash common.Hash) string { return FirstFewBytes(hash.Bytes()) } From 2dbbcb3020526e38f3ac5d1fd228cfd68973b5a9 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 20 Jul 2022 18:54:16 -0500 Subject: [PATCH 19/23] double hash and uniquify --- contracts/src/bridge/SequencerInbox.sol | 2 +- das/dastree/dastree.go | 74 +++++++++++++++---------- das/dastree/dastree_test.go | 1 + util/arbmath/bits.go | 9 +++ 4 files changed, 55 insertions(+), 31 deletions(-) diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol index 2cdaa122a4..ab493a1b20 100644 --- a/contracts/src/bridge/SequencerInbox.sol +++ b/contracts/src/bridge/SequencerInbox.sol @@ -353,7 +353,7 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox * @param keysetBytes bytes of the serialized keyset */ function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner { - uint256 ksWord = uint256(keccak256(bytes.concat(keccak256(keysetBytes)))); + uint256 ksWord = uint256(keccak256(bytes.concat(hex"fe", keccak256(keysetBytes)))); bytes32 ksHash = bytes32(ksWord ^ (1 << 255)); if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index a113f6a7b5..5ae2d4c04a 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -13,7 +13,8 @@ import ( ) const BinSize = 64 * 1024 // 64 kB -const NodeByte = 0xff +const NodeByte = byte(0xff) +const LeafByte = byte(0xfe) type bytes32 = common.Hash @@ -24,15 +25,21 @@ type node struct { func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { // Algorithm - // 1. split the preimage into 64kB bins and hash them to produce the tree's leaves + // 1. split the preimage into 64kB bins and double hash them to produce the tree's leaves // 2. repeatedly hash pairs and their combined length, bubbling up any odd-one's out, to form the root + // 3. invert the first bit of the root hash // - // r <=> hash(hash(0, 1), 2, len(0:2)) step 2 + // r' <=> invert(H(0xff, H(0xff, 0, 1, L(0:1)), 2, L(0:2))) step 4 + // | + // r <=> H(0xff, H(0xff, 0, 1, L(0:1)), 2, L(0:2)) step 3 // / \ - // * 2 <=> hash(0, 1, len(0:1)), 2 step 1 + // * 2 <=> H(0xff, 0, 1, L(0:1)), 2 step 2 // / \ - // 0 1 <=> 0, 1, 2 step 0 + // 0 1 <=> 0, 1, 2 step 1 // + // 0 1 2 <=> leaf n = H(0xfe, H(bin n)) step 0 + // + // Where H is keccak and L is the length // Intermediate hashes like '*' from above may be recorded via the `record` closure // @@ -41,25 +48,21 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { record(hash, value) return hash } - - unrolled := []byte{} - for _, slice := range preimage { - unrolled = append(unrolled, slice...) + prepend := func(before byte, slice []byte) []byte { + return append([]byte{before}, slice...) } + + unrolled := arbmath.ConcatByteSlices(preimage...) 
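+	// even an empty preimage gets a canonical root below: keccak256 of nothing, wrapped as a 0xfe-prefixed leaf and hashed again, with the root's first bit then flipped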
if len(unrolled) == 0 { - innerKeccak := keccord([]byte{}) - outerKeccak := keccord(innerKeccak.Bytes()) - return arbmath.FlipBit(outerKeccak, 0) + return arbmath.FlipBit(keccord(prepend(LeafByte, keccord([]byte{}).Bytes())), 0) } length := uint32(len(unrolled)) leaves := []node{} for bin := uint32(0); bin < length; bin += BinSize { end := arbmath.MinUint32(bin+BinSize, length) - content := unrolled[bin:end] - innerKeccak := keccord(content) - outerKeccak := keccord(innerKeccak.Bytes()) - leaves = append(leaves, node{outerKeccak, end - bin}) + hash := keccord(prepend(LeafByte, keccord(unrolled[bin:end]).Bytes())) + leaves = append(leaves, node{hash, end - bin}) } layer := leaves @@ -75,7 +78,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { dataUnder = append(dataUnder, otherHash...) dataUnder = append(dataUnder, arbmath.Uint32ToBytes(sizeUnder)...) parent := node{ - keccord(dataUnder), + keccord(prepend(NodeByte, dataUnder)), sizeUnder, } paired[i/2] = parent @@ -100,7 +103,7 @@ func HashBytes(preimage ...[]byte) []byte { func FlatHashToTreeHash(flat bytes32) bytes32 { // Forms a degenerate dastree that's just a single leaf // note: the inner preimage may be larger than the 64 kB standard - return arbmath.FlipBit(crypto.Keccak256Hash(flat[:]), 0) + return arbmath.FlipBit(crypto.Keccak256Hash(append([]byte{LeafByte}, flat[:]...)), 0) } func ValidHash(hash bytes32, preimage []byte) bool { @@ -121,11 +124,11 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { start := arbmath.FlipBit(root, 0) total := uint32(0) upper := oracle(start) - switch len(upper) { - case 32: - return oracle(common.BytesToHash(upper)), nil - case 68: - total = binary.BigEndian.Uint32(upper[64:]) + switch { + case len(upper) == 33 && upper[0] == LeafByte: + return oracle(common.BytesToHash(upper[1:])), nil + case len(upper) == 69 && upper[0] == NodeByte: + total = binary.BigEndian.Uint32(upper[65:]) default: return nil, fmt.Errorf("invalid root with preimage of size %v: %v %v", len(upper), root, upper) } @@ -138,15 +141,26 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { stack = stack[:len(stack)-1] under := oracle(place.hash) - switch len(under) { - case 32: + if len(under) == 0 { + return nil, fmt.Errorf("invalid node for hash %v", place.hash) + } + + kind := under[0] + content := under[1:] + + if (kind == LeafByte && len(under) != 33) || (kind == NodeByte && len(under) != 69) { + return nil, fmt.Errorf("invalid node for hash %v: %v", place.hash, under) + } + + switch kind { + case LeafByte: leaf := node{ - hash: common.BytesToHash(under), + hash: common.BytesToHash(content), size: place.size, } leaves = append(leaves, leaf) - case 68: - count := binary.BigEndian.Uint32(under[64:]) + case NodeByte: + count := binary.BigEndian.Uint32(content[64:]) power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) if place.size != count { @@ -154,11 +168,11 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { } prior := node{ - hash: common.BytesToHash(under[:32]), + hash: common.BytesToHash(content[:32]), size: power / 2, } after := node{ - hash: common.BytesToHash(under[32:64]), + hash: common.BytesToHash(content[32:64]), size: count - power/2, } diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index 0dc8e9d763..a5e7ba6827 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -51,6 +51,7 @@ func TestDASTree(t *testing.T) { } for key, value := range hashes { + 
colors.PrintMint("testing ", key) preimage, err := Content(key, oracle) Require(t, err, key) diff --git a/util/arbmath/bits.go b/util/arbmath/bits.go index d022455804..7fb39a4e95 100644 --- a/util/arbmath/bits.go +++ b/util/arbmath/bits.go @@ -17,6 +17,15 @@ func FlipBit(data bytes32, bit byte) bytes32 { return data } +// unrolls a series of slices into a singular, concatenated slice +func ConcatByteSlices(slices ...[]byte) []byte { + unrolled := []byte{} + for _, slice := range slices { + unrolled = append(unrolled, slice...) + } + return unrolled +} + // the number of eth-words needed to store n bytes func WordsForBytes(nbytes uint64) uint64 { return (nbytes + 31) / 32 From 41263c0071558f17b3c77b272ebca661c21d7821 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 20 Jul 2022 19:19:51 -0500 Subject: [PATCH 20/23] make oldstyle hashes succeed --- das/dastree/dastree.go | 66 ++++++++++++++++++++++--------------- das/dastree/dastree_test.go | 2 +- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index 5ae2d4c04a..c8beee4a44 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -107,8 +107,14 @@ func FlatHashToTreeHash(flat bytes32) bytes32 { } func ValidHash(hash bytes32, preimage []byte) bool { - // TODO: remove keccak after committee upgrade - return hash == Hash(preimage) || hash == crypto.Keccak256Hash(preimage) + if hash == Hash(preimage) { + return true + } + if len(preimage) > 0 { + kind := preimage[0] + return kind != NodeByte && kind != LeafByte && hash == crypto.Keccak256Hash(preimage) + } + return false } func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { @@ -121,16 +127,32 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { // 3. 
Only the committee can produce trees unwrapped by this function // + unpeal := func(hash bytes32) (byte, []byte, error) { + data := oracle(hash) + size := len(data) + if size == 0 { + return 0, nil, fmt.Errorf("invalid node %v", hash) + } + kind := data[0] + if (kind == LeafByte && size != 33) || (kind == NodeByte && size != 69) { + return 0, nil, fmt.Errorf("invalid node for hash %v: %v", hash, data) + } + return kind, data[1:], nil + } + start := arbmath.FlipBit(root, 0) total := uint32(0) - upper := oracle(start) - switch { - case len(upper) == 33 && upper[0] == LeafByte: - return oracle(common.BytesToHash(upper[1:])), nil - case len(upper) == 69 && upper[0] == NodeByte: - total = binary.BigEndian.Uint32(upper[65:]) + kind, upper, err := unpeal(start) + if err != nil { + return nil, err + } + switch kind { + case LeafByte: + return oracle(common.BytesToHash(upper)), nil + case NodeByte: + total = binary.BigEndian.Uint32(upper[64:]) default: - return nil, fmt.Errorf("invalid root with preimage of size %v: %v %v", len(upper), root, upper) + return oracle(root), nil // accept old-style hashes } leaves := []node{} @@ -139,47 +161,39 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { for len(stack) > 0 { place := stack[len(stack)-1] stack = stack[:len(stack)-1] - under := oracle(place.hash) - - if len(under) == 0 { - return nil, fmt.Errorf("invalid node for hash %v", place.hash) - } - - kind := under[0] - content := under[1:] - - if (kind == LeafByte && len(under) != 33) || (kind == NodeByte && len(under) != 69) { - return nil, fmt.Errorf("invalid node for hash %v: %v", place.hash, under) + kind, data, err := unpeal(place.hash) + if err != nil { + return nil, err } switch kind { case LeafByte: leaf := node{ - hash: common.BytesToHash(content), + hash: common.BytesToHash(data), size: place.size, } leaves = append(leaves, leaf) case NodeByte: - count := binary.BigEndian.Uint32(content[64:]) + count := binary.BigEndian.Uint32(data[64:]) power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) if place.size != count { - return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, under) + return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, data) } prior := node{ - hash: common.BytesToHash(content[:32]), + hash: common.BytesToHash(data[:32]), size: power / 2, } after := node{ - hash: common.BytesToHash(content[32:64]), + hash: common.BytesToHash(data[32:64]), size: count - power/2, } // we want to expand leftward so we reverse their order stack = append(stack, after, prior) default: - return nil, fmt.Errorf("failed to resolve preimage %v %v", place.hash, under) + return nil, fmt.Errorf("failed to resolve preimage %v %v", place.hash, data) } } diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index a5e7ba6827..d03560a4e0 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -55,7 +55,7 @@ func TestDASTree(t *testing.T) { preimage, err := Content(key, oracle) Require(t, err, key) - if !bytes.Equal(preimage, value) { + if !bytes.Equal(preimage, value) || !ValidHash(key, preimage) { Fail(t, "incorrect preimage", pretty.FirstFewBytes(preimage), pretty.FirstFewBytes(value)) } } From 20ffcf5bd19077af131bd501934069c4ff5946b2 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 21 Jul 2022 19:33:53 -0500 Subject: [PATCH 21/23] remove bitflip --- contracts/src/bridge/SequencerInbox.sol | 3 +-- das/dastree/dastree.go | 22 +++++++++------------- util/arbmath/bits.go | 10 ---------- 3 
files changed, 10 insertions(+), 25 deletions(-) diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol index ab493a1b20..ccc536336a 100644 --- a/contracts/src/bridge/SequencerInbox.sol +++ b/contracts/src/bridge/SequencerInbox.sol @@ -353,8 +353,7 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox * @param keysetBytes bytes of the serialized keyset */ function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner { - uint256 ksWord = uint256(keccak256(bytes.concat(hex"fe", keccak256(keysetBytes)))); - bytes32 ksHash = bytes32(ksWord ^ (1 << 255)); + bytes32 ksHash = bytes32(keccak256(bytes.concat(hex"fe", keccak256(keysetBytes)))); if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); dasKeySetInfo[ksHash] = DasKeySetInfo({ diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index c8beee4a44..826a032f2c 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -27,17 +27,14 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { // Algorithm // 1. split the preimage into 64kB bins and double hash them to produce the tree's leaves // 2. repeatedly hash pairs and their combined length, bubbling up any odd-one's out, to form the root - // 3. invert the first bit of the root hash // - // r' <=> invert(H(0xff, H(0xff, 0, 1, L(0:1)), 2, L(0:2))) step 4 - // | - // r <=> H(0xff, H(0xff, 0, 1, L(0:1)), 2, L(0:2)) step 3 + // r <=> H(0xff, H(0xff, 0, 1, L(0:1)), 2, L(0:2)) step 3 // / \ - // * 2 <=> H(0xff, 0, 1, L(0:1)), 2 step 2 + // * 2 <=> H(0xff, 0, 1, L(0:1)), 2 step 2 // / \ - // 0 1 <=> 0, 1, 2 step 1 + // 0 1 <=> 0, 1, 2 step 1 // - // 0 1 2 <=> leaf n = H(0xfe, H(bin n)) step 0 + // 0 1 2 <=> leaf n = H(0xfe, H(bin n)) step 0 // // Where H is keccak and L is the length // Intermediate hashes like '*' from above may be recorded via the `record` closure @@ -54,7 +51,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { unrolled := arbmath.ConcatByteSlices(preimage...) 
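+	// the empty preimage still collapses to a single 0xfe-prefixed leaf below, just without the former root bitflip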
if len(unrolled) == 0 { - return arbmath.FlipBit(keccord(prepend(LeafByte, keccord([]byte{}).Bytes())), 0) + return keccord(prepend(LeafByte, keccord([]byte{}).Bytes())) } length := uint32(len(unrolled)) @@ -88,7 +85,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { } layer = paired } - return arbmath.FlipBit(layer[0].hash, 0) + return layer[0].hash } func Hash(preimage ...[]byte) bytes32 { @@ -103,7 +100,7 @@ func HashBytes(preimage ...[]byte) []byte { func FlatHashToTreeHash(flat bytes32) bytes32 { // Forms a degenerate dastree that's just a single leaf // note: the inner preimage may be larger than the 64 kB standard - return arbmath.FlipBit(crypto.Keccak256Hash(append([]byte{LeafByte}, flat[:]...)), 0) + return crypto.Keccak256Hash(append([]byte{LeafByte}, flat[:]...)) } func ValidHash(hash bytes32, preimage []byte) bool { @@ -140,9 +137,8 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { return kind, data[1:], nil } - start := arbmath.FlipBit(root, 0) total := uint32(0) - kind, upper, err := unpeal(start) + kind, upper, err := unpeal(root) if err != nil { return nil, err } @@ -156,7 +152,7 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { } leaves := []node{} - stack := []node{{hash: start, size: total}} + stack := []node{{hash: root, size: total}} for len(stack) > 0 { place := stack[len(stack)-1] diff --git a/util/arbmath/bits.go b/util/arbmath/bits.go index 7fb39a4e95..d3d14bda91 100644 --- a/util/arbmath/bits.go +++ b/util/arbmath/bits.go @@ -5,18 +5,8 @@ package arbmath import ( "encoding/binary" - - "github.com/ethereum/go-ethereum/common" ) -type bytes32 = common.Hash - -// flips the nth bit in an ethereum word, starting from the left -func FlipBit(data bytes32, bit byte) bytes32 { - data[bit/8] ^= 1 << (7 - bit%8) - return data -} - // unrolls a series of slices into a singular, concatenated slice func ConcatByteSlices(slices ...[]byte) []byte { unrolled := []byte{} From 3b1352bd4c4dd39a5b1a2deb18c998ff5426c267 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 21 Jul 2022 20:18:55 -0500 Subject: [PATCH 22/23] remove todo comments --- arbstate/inbox.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 5881153c4f..cfff184cb5 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -192,15 +192,7 @@ func RecoverPayloadFromDasBatch( } dataHash := cert.DataHash - treeHash := dataHash - // - // TODO: add back after committee upgrade - // if version == 0 { - // treeHash = dastree.FlatHashToTreeHash(dataHash) - // } - // - - payload, err := dasReader.GetByHash(ctx, treeHash) + payload, err := dasReader.GetByHash(ctx, dataHash) if err == nil { err = checkPreimage(dataHash, payload, "batch hash mismatch") } @@ -212,10 +204,6 @@ func RecoverPayloadFromDasBatch( if preimages != nil { if version == 0 { preimages[dataHash] = payload - // - // TODO: add back after committee upgrade - // preimages[treeHash] = dataHash[:] - // } else { dastree.RecordHash(recordPreimage, payload) } From 9db3f95919a3d3f416e449e55f2d78167b2c5a22 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 21 Jul 2022 21:25:55 -0500 Subject: [PATCH 23/23] address review comments --- contracts/src/bridge/SequencerInbox.sol | 1 + das/dasrpc/dasRpcClient.go | 2 +- das/dastree/dastree.go | 10 +++++----- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/contracts/src/bridge/SequencerInbox.sol b/contracts/src/bridge/SequencerInbox.sol index 
ccc536336a..cc1de7e794 100644 --- a/contracts/src/bridge/SequencerInbox.sol +++ b/contracts/src/bridge/SequencerInbox.sol @@ -354,6 +354,7 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox */ function setValidKeyset(bytes calldata keysetBytes) external override onlyRollupOwner { bytes32 ksHash = bytes32(keccak256(bytes.concat(hex"fe", keccak256(keysetBytes)))); + require(keysetBytes.length < 64 * 1024, "keyset is too large"); if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); dasKeySetInfo[ksHash] = DasKeySetInfo({ diff --git a/das/dasrpc/dasRpcClient.go b/das/dasrpc/dasRpcClient.go index 5697a743c5..334f97d9ea 100644 --- a/das/dasrpc/dasRpcClient.go +++ b/das/dasrpc/dasRpcClient.go @@ -40,7 +40,7 @@ func (c *DASRPCClient) GetByHash(ctx context.Context, hash common.Hash) ([]byte, return nil, fmt.Errorf("Hash must be 32 bytes long, was %d", len(hash)) } var ret hexutil.Bytes - if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hexutil.Bytes(hash[:])); err != nil { + if err := c.clnt.CallContext(ctx, &ret, "das_getByHash", hash); err != nil { return nil, err } if !dastree.ValidHash(hash, ret) { // check hash because RPC server might be untrusted diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index 826a032f2c..dfafb48b21 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -71,9 +71,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { firstHash := layer[i].hash.Bytes() otherHash := layer[i+1].hash.Bytes() sizeUnder := layer[i].size + layer[i+1].size - dataUnder := firstHash - dataUnder = append(dataUnder, otherHash...) - dataUnder = append(dataUnder, arbmath.Uint32ToBytes(sizeUnder)...) + dataUnder := arbmath.ConcatByteSlices(firstHash, otherHash, arbmath.Uint32ToBytes(sizeUnder)) parent := node{ keccord(prepend(NodeByte, dataUnder)), sizeUnder, @@ -89,7 +87,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { } func Hash(preimage ...[]byte) bytes32 { - // Merkelizes without recording anything. All but the replay binary's DAS will call this + // Merkelizes without recording anything. All but the validator's DAS will call this return RecordHash(func(bytes32, []byte) {}, preimage...) } @@ -121,7 +119,9 @@ func Content(root bytes32, oracle func(bytes32) []byte) ([]byte, error) { // Notes // 1. Because we accept degenerate dastrees, we can't check that single-leaf trees are canonical. // 2. For any canonical dastree, there exists a degenerate single-leaf equivalent that we accept. - // 3. Only the committee can produce trees unwrapped by this function + // 3. We also accept old-style flat hashes + // 4. Only the committee can produce trees unwrapped by this function + // 5. Only the replay binary calls this // unpeal := func(hash bytes32) (byte, []byte, error) {
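Taken end to end, the series leaves dastree with a small surface: RecordHash merkleizes while handing every intermediate preimage to a recording closure, Content walks the tree back out of a preimage oracle, and ValidHash tolerates legacy flat hashes during the upgrade window. The sketch below rounds off the series with that round trip, mirroring what dastree_test.go exercises; import paths are the nitro repo's own, and error handling is pared down.

package main

import (
    "bytes"
    "math/rand"

    "github.com/ethereum/go-ethereum/common"
    "github.com/offchainlabs/nitro/das/dastree"
)

func main() {
    // a random preimage a few bins long, so the tree has real internal nodes
    data := make([]byte, 3*dastree.BinSize+42)
    rand.Read(data)

    // merkleize, recording every preimage into a store as the DAS would
    store := make(map[common.Hash][]byte)
    record := func(key common.Hash, value []byte) { store[key] = value }
    root := dastree.RecordHash(record, data)

    // the root unwraps using only the store as an oracle
    oracle := func(key common.Hash) []byte { return store[key] }
    preimage, err := dastree.Content(root, oracle)
    if err != nil || !bytes.Equal(preimage, data) {
        panic("round trip failed")
    }

    // ValidHash accepts the tree hash (and old-style flat keccak hashes too)
    if !dastree.ValidHash(root, data) {
        panic("hash should validate")
    }
}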