From 2532364eb76bf649fda5df7fdd31cc7be83694e7 Mon Sep 17 00:00:00 2001 From: junha-ahn Date: Mon, 8 Jul 2024 00:30:05 +0900 Subject: [PATCH 01/36] add Pebble DB --- blockchain/bench_test.go | 2 +- blockchain/state_migration_test.go | 2 +- build/packaging/linux/bin/kcnd | 4 + build/packaging/linux/bin/kend | 4 + build/packaging/linux/bin/kpnd | 4 + build/packaging/linux/bin/kscnd | 4 + build/packaging/linux/bin/ksend | 4 + build/packaging/linux/bin/kspnd | 4 + build/packaging/linux/conf/kcnd.conf | 1 + build/packaging/linux/conf/kcnd_kairos.conf | 1 + build/packaging/linux/conf/kpnd.conf | 1 + build/packaging/linux/conf/kpnd_kairos.conf | 1 + build/packaging/linux/conf/kspnd.conf | 1 + build/packaging/windows/conf/kcn-conf.cmd | 1 + build/packaging/windows/conf/kpn-conf.cmd | 1 + build/packaging/windows/start-kcn.bat | 4 + build/packaging/windows/start-ken.bat | 4 + build/packaging/windows/start-kpn.bat | 4 + build/rpm/etc/init.d/kcnd | 4 + build/rpm/etc/init.d/kend | 4 + build/rpm/etc/init.d/kpnd | 4 + build/rpm/etc/init.d/kscnd | 4 + build/rpm/etc/init.d/ksend | 4 + build/rpm/etc/init.d/kspnd | 4 + build/rpm/etc/kcnd/conf/kcnd.conf | 1 + build/rpm/etc/kcnd/conf/kcnd_kairos.conf | 1 + build/rpm/etc/kpnd/conf/kpnd.conf | 1 + build/rpm/etc/kpnd/conf/kpnd_kairos.conf | 1 + build/rpm/etc/kspnd/conf/kspnd.conf | 1 + cmd/utils/config.go | 2 + cmd/utils/config_test.go | 1 + cmd/utils/flaggroup.go | 1 + cmd/utils/flags.go | 16 + cmd/utils/nodecmd/chaincmd.go | 3 +- cmd/utils/nodecmd/flags_test.go | 7 + cmd/utils/nodecmd/migrationcmd.go | 4 + cmd/utils/nodecmd/snapshot.go | 2 + cmd/utils/nodecmd/testdata/test-config.yaml | 2 + cmd/utils/nodeflags.go | 3 + go.mod | 37 +- go.sum | 225 ++++++- node/cn/backend.go | 3 +- node/cn/config.go | 2 + node/sc/config.go | 1 + node/service_test.go | 2 + storage/database/child_chain_data_test.go | 12 +- storage/database/db_manager.go | 5 + storage/database/db_manager_test.go | 10 + storage/database/interface.go | 3 +- 
storage/database/pebbledb_database.go | 645 ++++++++++++++++++++ storage/database/sharded_database.go | 2 + tests/db_write_and_read_test.go | 2 + tests/kaia_test_blockchain_test.go | 2 +- work/worker.go | 1 - 54 files changed, 1013 insertions(+), 56 deletions(-) create mode 100644 storage/database/pebbledb_database.go diff --git a/blockchain/bench_test.go b/blockchain/bench_test.go index 64ffb0672..fbc8d0903 100644 --- a/blockchain/bench_test.go +++ b/blockchain/bench_test.go @@ -374,7 +374,7 @@ func genDBManagerForTest(dir string, dbType database.DBType) database.DBManager db := database.NewMemoryDBManager() return db } else { - dbc := &database.DBConfig{Dir: dir, DBType: dbType, LevelDBCacheSize: 128, OpenFilesLimit: 128} + dbc := &database.DBConfig{Dir: dir, DBType: dbType, LevelDBCacheSize: 128, PebbleDBCacheSize: 128, OpenFilesLimit: 128} return database.NewDBManager(dbc) } } diff --git a/blockchain/state_migration_test.go b/blockchain/state_migration_test.go index 8cb72bb74..a54445224 100644 --- a/blockchain/state_migration_test.go +++ b/blockchain/state_migration_test.go @@ -37,7 +37,7 @@ func createLocalTestDB(t *testing.T) (string, database.DBManager) { if err != nil { t.Fatalf("failed to create a database: %v", err) } - dbc := &database.DBConfig{Dir: dir, DBType: database.LevelDB, LevelDBCacheSize: 128, OpenFilesLimit: 128} + dbc := &database.DBConfig{Dir: dir, DBType: database.LevelDB, LevelDBCacheSize: 128, PebbleDBCacheSize: 128, OpenFilesLimit: 128} db := database.NewDBManager(dbc) return dir, db } diff --git a/build/packaging/linux/bin/kcnd b/build/packaging/linux/bin/kcnd index 93496e7e4..6c4d34ca4 100755 --- a/build/packaging/linux/bin/kcnd +++ b/build/packaging/linux/bin/kcnd @@ -251,6 +251,10 @@ __check_option() { OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi + if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" + fi + if [[ ! 
-z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/packaging/linux/bin/kend b/build/packaging/linux/bin/kend index 2ad8ff43f..762966f53 100755 --- a/build/packaging/linux/bin/kend +++ b/build/packaging/linux/bin/kend @@ -267,6 +267,10 @@ __check_option() { OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi + if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" + fi + if [[ ! -z $SC_MAIN_BRIDGE ]] && [[ $SC_MAIN_BRIDGE -eq 1 ]]; then OPTIONS="$OPTIONS --mainbridge --mainbridgeport $SC_MAIN_BRIDGE_PORT" if [[ ! -z $SC_MAIN_BRIDGE_INDEXING ]] && [[ $SC_MAIN_BRIDGE_INDEXING -eq 1 ]]; then diff --git a/build/packaging/linux/bin/kpnd b/build/packaging/linux/bin/kpnd index 7415bf7b8..46a819bc4 100755 --- a/build/packaging/linux/bin/kpnd +++ b/build/packaging/linux/bin/kpnd @@ -252,6 +252,10 @@ __check_option() { OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi + if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" + fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/packaging/linux/bin/kscnd b/build/packaging/linux/bin/kscnd index de4d4e3ad..6d4428a29 100755 --- a/build/packaging/linux/bin/kscnd +++ b/build/packaging/linux/bin/kscnd @@ -233,6 +233,10 @@ __check_option() { OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi + if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" + fi + if [[ ! -z $SCSIGNER ]] && [[ $SCSIGNER != "" ]]; then OPTIONS="$OPTIONS --scsigner $SCSIGNER --unlock $SCSIGNER" fi diff --git a/build/packaging/linux/bin/ksend b/build/packaging/linux/bin/ksend index 3948daf2a..35a415707 100755 --- a/build/packaging/linux/bin/ksend +++ b/build/packaging/linux/bin/ksend @@ -258,6 +258,10 @@ __check_option() { OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi + if [ ! 
-z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" + fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/packaging/linux/bin/kspnd b/build/packaging/linux/bin/kspnd index d1c427697..907b10af5 100755 --- a/build/packaging/linux/bin/kspnd +++ b/build/packaging/linux/bin/kspnd @@ -243,6 +243,10 @@ __check_option() { OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi + if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" + fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/packaging/linux/conf/kcnd.conf b/build/packaging/linux/conf/kcnd.conf index 5d389fb8b..a2050ff56 100644 --- a/build/packaging/linux/conf/kcnd.conf +++ b/build/packaging/linux/conf/kcnd.conf @@ -12,6 +12,7 @@ SYNCMODE="full" VERBOSITY=3 MAXCONNECTIONS=100 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 REWARDBASE="0x0" # txpool options setting diff --git a/build/packaging/linux/conf/kcnd_kairos.conf b/build/packaging/linux/conf/kcnd_kairos.conf index 834a0e6ac..913d1a429 100644 --- a/build/packaging/linux/conf/kcnd_kairos.conf +++ b/build/packaging/linux/conf/kcnd_kairos.conf @@ -12,6 +12,7 @@ SYNCMODE="full" VERBOSITY=3 MAXCONNECTIONS=100 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 REWARDBASE="0x0" # txpool options setting diff --git a/build/packaging/linux/conf/kpnd.conf b/build/packaging/linux/conf/kpnd.conf index fe077aab2..0cbece642 100644 --- a/build/packaging/linux/conf/kpnd.conf +++ b/build/packaging/linux/conf/kpnd.conf @@ -12,6 +12,7 @@ SYNCMODE=full VERBOSITY=3 MAXCONNECTIONS=200 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 # txpool options setting TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/build/packaging/linux/conf/kpnd_kairos.conf b/build/packaging/linux/conf/kpnd_kairos.conf index ce3115638..2b3463ff9 100644 --- a/build/packaging/linux/conf/kpnd_kairos.conf +++ 
b/build/packaging/linux/conf/kpnd_kairos.conf @@ -12,6 +12,7 @@ SYNCMODE=full VERBOSITY=3 MAXCONNECTIONS=200 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 # txpool options setting TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/build/packaging/linux/conf/kspnd.conf b/build/packaging/linux/conf/kspnd.conf index 33e1c4262..e7b836410 100644 --- a/build/packaging/linux/conf/kspnd.conf +++ b/build/packaging/linux/conf/kspnd.conf @@ -10,6 +10,7 @@ SYNCMODE=full VERBOSITY=3 MAXCONNECTIONS=200 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 # txpool options setting TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/build/packaging/windows/conf/kcn-conf.cmd b/build/packaging/windows/conf/kcn-conf.cmd index 7cf2b0479..c7eea7967 100644 --- a/build/packaging/windows/conf/kcn-conf.cmd +++ b/build/packaging/windows/conf/kcn-conf.cmd @@ -9,6 +9,7 @@ set SYNCMODE="full" set VERBOSITY=3 set MAXCONNECTIONS=100 :: set LDBCACHESIZE=10240 +:: set PDBCACHESIZE=10240 set REWARDBASE="0x0" REM txpool options setting diff --git a/build/packaging/windows/conf/kpn-conf.cmd b/build/packaging/windows/conf/kpn-conf.cmd index 627682ad5..c195cd4de 100644 --- a/build/packaging/windows/conf/kpn-conf.cmd +++ b/build/packaging/windows/conf/kpn-conf.cmd @@ -9,6 +9,7 @@ set SYNCMODE="full" set VERBOSITY=3 set MAXCONNECTIONS=200 :: set LDBCACHESIZE=10240 +:: set PDBCACHESIZE=10240 REM txpool options setting set TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/build/packaging/windows/start-kcn.bat b/build/packaging/windows/start-kcn.bat index d079b87e2..68ae3b2e7 100644 --- a/build/packaging/windows/start-kcn.bat +++ b/build/packaging/windows/start-kcn.bat @@ -73,6 +73,10 @@ IF DEFINED LDBCACHESIZE ( set OPTIONS=%OPTIONS% --db.leveldb.cache-size %LDBCACHESIZE% ) +IF DEFINED PDBCACHESIZE ( + set OPTIONS=%OPTIONS% --db.pebbledb.cache-size %PDBCACHESIZE% +) + IF DEFINED REWARDBASE ( set OPTIONS=%OPTIONS% --rewardbase %REWARDBASE% ) diff --git a/build/packaging/windows/start-ken.bat b/build/packaging/windows/start-ken.bat index 
128390945..f94af159b 100644 --- a/build/packaging/windows/start-ken.bat +++ b/build/packaging/windows/start-ken.bat @@ -69,6 +69,10 @@ IF DEFINED LDBCACHESIZE ( set OPTIONS=%OPTIONS% --db.leveldb.cache-size %LDBCACHESIZE% ) +IF DEFINED PDBCACHESIZE ( + set OPTIONS=%OPTIONS% --db.pebbledb.cache-size %PDBCACHESIZE% +) + IF DEFINED RPC_ENABLE ( IF %RPC_ENABLE%==1 ( set OPTIONS=%OPTIONS% --rpc --rpcapi %RPC_API% --rpcport %RPC_PORT% --rpcaddr %RPC_ADDR% --rpccorsdomain ^ diff --git a/build/packaging/windows/start-kpn.bat b/build/packaging/windows/start-kpn.bat index f2feecf61..15e150fca 100644 --- a/build/packaging/windows/start-kpn.bat +++ b/build/packaging/windows/start-kpn.bat @@ -73,6 +73,10 @@ IF DEFINED LDBCACHESIZE ( set OPTIONS=%OPTIONS% --db.leveldb.cache-size %LDBCACHESIZE% ) +IF DEFINED PDBCACHESIZE ( + set OPTIONS=%OPTIONS% --db.pebbledb.cache-size %PDBCACHESIZE% +) + IF DEFINED RPC_ENABLE ( IF %RPC_ENABLE%==1 ( set OPTIONS=%OPTIONS% --rpc --rpcapi %RPC_API% --rpcport %RPC_PORT% --rpcaddr %RPC_ADDR% --rpccorsdomain ^ diff --git a/build/rpm/etc/init.d/kcnd b/build/rpm/etc/init.d/kcnd index 00602aea1..1917ed6ac 100755 --- a/build/rpm/etc/init.d/kcnd +++ b/build/rpm/etc/init.d/kcnd @@ -234,6 +234,10 @@ if [ ! -z $LDBCACHESIZE ]; then OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi +if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" +fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/rpm/etc/init.d/kend b/build/rpm/etc/init.d/kend index b7751df40..f82f59996 100755 --- a/build/rpm/etc/init.d/kend +++ b/build/rpm/etc/init.d/kend @@ -249,6 +249,10 @@ if [ ! -z $LDBCACHESIZE ]; then OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi +if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" +fi + if [[ ! 
-z $SC_MAIN_BRIDGE ]] && [[ $SC_MAIN_BRIDGE -eq 1 ]]; then OPTIONS="$OPTIONS --mainbridge --mainbridgeport $SC_MAIN_BRIDGE_PORT" if [[ ! -z $SC_MAIN_BRIDGE_INDEXING ]] && [[ $SC_MAIN_BRIDGE_INDEXING -eq 1 ]]; then diff --git a/build/rpm/etc/init.d/kpnd b/build/rpm/etc/init.d/kpnd index fd71ba4e6..40f64024a 100755 --- a/build/rpm/etc/init.d/kpnd +++ b/build/rpm/etc/init.d/kpnd @@ -234,6 +234,10 @@ if [ ! -z $LDBCACHESIZE ]; then OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi +if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" +fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/rpm/etc/init.d/kscnd b/build/rpm/etc/init.d/kscnd index f1d06af1c..ef9866e29 100755 --- a/build/rpm/etc/init.d/kscnd +++ b/build/rpm/etc/init.d/kscnd @@ -215,6 +215,10 @@ if [ ! -z $LDBCACHESIZE ]; then OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi +if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" +fi + if [[ ! -z $SCSIGNER ]] && [[ $SCSIGNER != "" ]]; then OPTIONS="$OPTIONS --scsigner $SCSIGNER --unlock $SCSIGNER" fi diff --git a/build/rpm/etc/init.d/ksend b/build/rpm/etc/init.d/ksend index f90491733..dac3ded72 100755 --- a/build/rpm/etc/init.d/ksend +++ b/build/rpm/etc/init.d/ksend @@ -240,6 +240,10 @@ if [ ! -z $LDBCACHESIZE ]; then OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi +if [ ! -z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" +fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/rpm/etc/init.d/kspnd b/build/rpm/etc/init.d/kspnd index c7a52bb85..0d4d78035 100755 --- a/build/rpm/etc/init.d/kspnd +++ b/build/rpm/etc/init.d/kspnd @@ -225,6 +225,10 @@ if [ ! -z $LDBCACHESIZE ]; then OPTIONS="$OPTIONS --db.leveldb.cache-size $LDBCACHESIZE" fi +if [ ! 
-z $PDBCACHESIZE ]; then + OPTIONS="$OPTIONS --db.pebbledb.cache-size $PDBCACHESIZE" +fi + if [[ ! -z $NO_DISCOVER ]] && [[ $NO_DISCOVER -eq 1 ]]; then OPTIONS="$OPTIONS --nodiscover" fi diff --git a/build/rpm/etc/kcnd/conf/kcnd.conf b/build/rpm/etc/kcnd/conf/kcnd.conf index e2caf07d1..45b99086c 100644 --- a/build/rpm/etc/kcnd/conf/kcnd.conf +++ b/build/rpm/etc/kcnd/conf/kcnd.conf @@ -12,6 +12,7 @@ SYNCMODE="full" VERBOSITY=3 MAXCONNECTIONS=100 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 REWARDBASE="0x0" # txpool options setting diff --git a/build/rpm/etc/kcnd/conf/kcnd_kairos.conf b/build/rpm/etc/kcnd/conf/kcnd_kairos.conf index 27eed2679..0a028d337 100644 --- a/build/rpm/etc/kcnd/conf/kcnd_kairos.conf +++ b/build/rpm/etc/kcnd/conf/kcnd_kairos.conf @@ -12,6 +12,7 @@ SYNCMODE="full" VERBOSITY=3 MAXCONNECTIONS=100 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 REWARDBASE="0x0" # txpool options setting diff --git a/build/rpm/etc/kpnd/conf/kpnd.conf b/build/rpm/etc/kpnd/conf/kpnd.conf index 5ee79d1f6..cf22f62dc 100644 --- a/build/rpm/etc/kpnd/conf/kpnd.conf +++ b/build/rpm/etc/kpnd/conf/kpnd.conf @@ -12,6 +12,7 @@ SYNCMODE=full VERBOSITY=3 MAXCONNECTIONS=200 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 # txpool options setting TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/build/rpm/etc/kpnd/conf/kpnd_kairos.conf b/build/rpm/etc/kpnd/conf/kpnd_kairos.conf index 3b77e3b96..2ba98d426 100644 --- a/build/rpm/etc/kpnd/conf/kpnd_kairos.conf +++ b/build/rpm/etc/kpnd/conf/kpnd_kairos.conf @@ -12,6 +12,7 @@ SYNCMODE=full VERBOSITY=3 MAXCONNECTIONS=200 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 # txpool options setting TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/build/rpm/etc/kspnd/conf/kspnd.conf b/build/rpm/etc/kspnd/conf/kspnd.conf index af6d417c6..ee6c367f1 100644 --- a/build/rpm/etc/kspnd/conf/kspnd.conf +++ b/build/rpm/etc/kspnd/conf/kspnd.conf @@ -10,6 +10,7 @@ SYNCMODE=full VERBOSITY=3 MAXCONNECTIONS=200 # LDBCACHESIZE=10240 +# PDBCACHESIZE=10240 # txpool options setting 
TXPOOL_EXEC_SLOTS_ALL=8192 diff --git a/cmd/utils/config.go b/cmd/utils/config.go index 2ea31c949..e54f16194 100644 --- a/cmd/utils/config.go +++ b/cmd/utils/config.go @@ -563,6 +563,8 @@ func (kCfg *KaiaConfig) SetKaiaConfig(ctx *cli.Context, stack *node.Node) { cfg.EnableDBPerfMetrics = !ctx.Bool(DBNoPerformanceMetricsFlag.Name) cfg.LevelDBCacheSize = ctx.Int(LevelDBCacheSizeFlag.Name) + cfg.PebbleDBCacheSize = ctx.Int(PebbleDBCacheSizeFlag.Name) + cfg.RocksDBConfig.Secondary = ctx.Bool(RocksDBSecondaryFlag.Name) cfg.RocksDBConfig.MaxOpenFiles = ctx.Int(RocksDBMaxOpenFilesFlag.Name) if cfg.RocksDBConfig.Secondary { diff --git a/cmd/utils/config_test.go b/cmd/utils/config_test.go index 68a1d3121..edd5a6ae2 100644 --- a/cmd/utils/config_test.go +++ b/cmd/utils/config_test.go @@ -82,6 +82,7 @@ func TestLoadYaml(t *testing.T) { "db.dynamo.write-capacity": true, // TODO-check after bugfix "db.dynamo.read-only": true, "db.leveldb.cache-size": true, + "db.pebbledb.cache-size": true, "db.no-parallel-write": true, "db.rocksdb.secondary": true, "db.rocksdb.cache-size": true, diff --git a/cmd/utils/flaggroup.go b/cmd/utils/flaggroup.go index 78f5fc9bb..ba8e0c377 100644 --- a/cmd/utils/flaggroup.go +++ b/cmd/utils/flaggroup.go @@ -91,6 +91,7 @@ var FlagGroups = []FlagGroup{ Name: "DATABASE", Flags: []cli.Flag{ LevelDBCacheSizeFlag, + PebbleDBCacheSizeFlag, SingleDBFlag, NumStateTrieShardsFlag, LevelDBCompressionTypeFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 2ef84e9e8..8e5eb886d 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -352,6 +352,14 @@ var ( EnvVars: []string{"KLAYTN_DB_LEVELDB_NO_BUFFER_POOL", "KAIA_DB_LEVELDB_NO_BUFFER_POOL"}, Category: "DATABASE", } + PebbleDBCacheSizeFlag = &cli.IntFlag{ + Name: "db.pebbledb.cache-size", + Usage: "Size of in-memory cache in Pebble (MiB)", + Value: 768, + Aliases: []string{"migration.src.db.pebbledb.cache-size"}, + EnvVars: []string{"KLAYTN_DB_PEBBLE_CACHE_SIZE", "KAIA_DB_PEBBLE_CACHE_SIZE"}, + 
Category: "DATABASE", + } RocksDBSecondaryFlag = &cli.BoolFlag{ Name: "db.rocksdb.secondary", Usage: "Enable rocksdb secondary mode (read-only and catch-up with primary node dynamically)", @@ -1822,6 +1830,14 @@ var ( EnvVars: []string{"KLAYTN_DB_DST_LEVELDB_COMPRESSION", "KAIA_DB_DST_LEVELDB_COMPRESSION"}, Category: "DATABASE MIGRATION", } + DstPebbleDBCacheSizeFlag = &cli.IntFlag{ + Name: "db.dst.pebbledb.cache-size", + Usage: "Size of in-memory cache in PebbleDB (MiB)", + Value: 768, + Aliases: []string{"migration.dst.db.pebbledb.cache-size"}, + EnvVars: []string{"KLAYTN_DB_DST_PEBBLEDB_CACHE_SIZE", "KAIA_DB_DST_PEBBLEDB_CACHE_SIZE"}, + Category: "DATABASE MIGRATION", + } DstNumStateTrieShardsFlag = &cli.UintFlag{ Name: "db.dst.num-statetrie-shards", Usage: "Number of internal shards of state trie DB shards. Should be power of 2", diff --git a/cmd/utils/nodecmd/chaincmd.go b/cmd/utils/nodecmd/chaincmd.go index fdba72dba..391d72d39 100644 --- a/cmd/utils/nodecmd/chaincmd.go +++ b/cmd/utils/nodecmd/chaincmd.go @@ -182,7 +182,8 @@ func initGenesis(ctx *cli.Context) error { dbc := &database.DBConfig{ Dir: name, DBType: dbtype, ParallelDBWrite: parallelDBWrite, SingleDB: singleDB, NumStateTrieShards: numStateTrieShards, - LevelDBCacheSize: 0, OpenFilesLimit: 0, DynamoDBConfig: dynamoDBConfig, RocksDBConfig: rocksDBConfig, + LevelDBCacheSize: 0, PebbleDBCacheSize: 0, OpenFilesLimit: 0, + DynamoDBConfig: dynamoDBConfig, RocksDBConfig: rocksDBConfig, } chainDB := stack.OpenDatabase(dbc) diff --git a/cmd/utils/nodecmd/flags_test.go b/cmd/utils/nodecmd/flags_test.go index 559c3e216..9296c018c 100644 --- a/cmd/utils/nodecmd/flags_test.go +++ b/cmd/utils/nodecmd/flags_test.go @@ -226,6 +226,13 @@ var flagsWithValues = []struct { flag: "--db.leveldb.no-buffer-pool", flagType: FlagTypeBoolean, }, + { + flag: "--db.pebbledb.cache-size", + flagType: FlagTypeArgument, + values: []string{"768"}, + wrongValues: commonTwoErrors, + errors: []int{ErrorInvalidValue, 
ErrorInvalidValue}, + }, { flag: "--db.no-parallel-write", flagType: FlagTypeBoolean, diff --git a/cmd/utils/nodecmd/migrationcmd.go b/cmd/utils/nodecmd/migrationcmd.go index 6b5deecc5..6c83f953f 100644 --- a/cmd/utils/nodecmd/migrationcmd.go +++ b/cmd/utils/nodecmd/migrationcmd.go @@ -105,6 +105,8 @@ func createDBConfigForMigration(ctx *cli.Context) (*database.DBConfig, *database LevelDBCompression: database.LevelDBCompressionType(ctx.Int(utils.LevelDBCompressionTypeFlag.Name)), EnableDBPerfMetrics: !ctx.Bool(utils.DBNoPerformanceMetricsFlag.Name), + PebbleDBCacheSize: ctx.Int(utils.PebbleDBCacheSizeFlag.Name), + DynamoDBConfig: &database.DynamoDBConfig{ TableName: ctx.String(utils.DynamoDBTableNameFlag.Name), Region: ctx.String(utils.DynamoDBRegionFlag.Name), @@ -142,6 +144,8 @@ func createDBConfigForMigration(ctx *cli.Context) (*database.DBConfig, *database LevelDBCompression: database.LevelDBCompressionType(ctx.Int(utils.DstLevelDBCompressionTypeFlag.Name)), EnableDBPerfMetrics: !ctx.Bool(utils.DBNoPerformanceMetricsFlag.Name), + PebbleDBCacheSize: ctx.Int(utils.DstPebbleDBCacheSizeFlag.Name), + DynamoDBConfig: &database.DynamoDBConfig{ TableName: ctx.String(utils.DstDynamoDBTableNameFlag.Name), Region: ctx.String(utils.DstDynamoDBRegionFlag.Name), diff --git a/cmd/utils/nodecmd/snapshot.go b/cmd/utils/nodecmd/snapshot.go index 513a2a733..8bc6a4190 100644 --- a/cmd/utils/nodecmd/snapshot.go +++ b/cmd/utils/nodecmd/snapshot.go @@ -107,6 +107,8 @@ func getConfig(ctx *cli.Context) *database.DBConfig { LevelDBCompression: database.LevelDBCompressionType(ctx.Int(utils.LevelDBCompressionTypeFlag.Name)), EnableDBPerfMetrics: !ctx.Bool(utils.DBNoPerformanceMetricsFlag.Name), + PebbleDBCacheSize: ctx.Int(utils.PebbleDBCacheSizeFlag.Name), + DynamoDBConfig: &database.DynamoDBConfig{ TableName: ctx.String(utils.DynamoDBTableNameFlag.Name), Region: ctx.String(utils.DynamoDBRegionFlag.Name), diff --git a/cmd/utils/nodecmd/testdata/test-config.yaml 
b/cmd/utils/nodecmd/testdata/test-config.yaml index 870e752bf..15bbf2879 100644 --- a/cmd/utils/nodecmd/testdata/test-config.yaml +++ b/cmd/utils/nodecmd/testdata/test-config.yaml @@ -87,6 +87,8 @@ db: compression: 0 no-buffer-pool: false cache-size: 768 + pebbledb: + cache-size: 768 dynamo: table-name: "" region: ap-northeast-2 diff --git a/cmd/utils/nodeflags.go b/cmd/utils/nodeflags.go index c04b9b787..2ce286978 100644 --- a/cmd/utils/nodeflags.go +++ b/cmd/utils/nodeflags.go @@ -214,6 +214,7 @@ var CommonNodeFlags = []cli.Flag{ altsrc.NewInt64Flag(DynamoDBWriteCapacityFlag), altsrc.NewBoolFlag(DynamoDBReadOnlyFlag), altsrc.NewIntFlag(LevelDBCacheSizeFlag), + altsrc.NewIntFlag(PebbleDBCacheSizeFlag), altsrc.NewBoolFlag(NoParallelDBWriteFlag), altsrc.NewBoolFlag(SenderTxHashIndexingFlag), altsrc.NewIntFlag(TrieMemoryCacheSizeFlag), @@ -514,6 +515,7 @@ var DBMigrationSrcFlags = []cli.Flag{ altsrc.NewPathFlag(DataDirFlag), altsrc.NewBoolFlag(SingleDBFlag), altsrc.NewIntFlag(LevelDBCacheSizeFlag), + altsrc.NewIntFlag(PebbleDBCacheSizeFlag), altsrc.NewUintFlag(NumStateTrieShardsFlag), altsrc.NewStringFlag(DynamoDBTableNameFlag), altsrc.NewStringFlag(DynamoDBRegionFlag), @@ -538,6 +540,7 @@ var DBMigrationDstFlags = []cli.Flag{ altsrc.NewPathFlag(DstDataDirFlag), altsrc.NewBoolFlag(DstSingleDBFlag), altsrc.NewIntFlag(DstLevelDBCacheSizeFlag), + altsrc.NewIntFlag(DstPebbleDBCacheSizeFlag), altsrc.NewIntFlag(DstLevelDBCompressionTypeFlag), altsrc.NewUintFlag(DstNumStateTrieShardsFlag), altsrc.NewStringFlag(DstDynamoDBTableNameFlag), diff --git a/go.mod b/go.mod index 0ab353740..55880dd99 100644 --- a/go.mod +++ b/go.mod @@ -36,26 +36,26 @@ require ( github.com/jinzhu/gorm v1.9.15 github.com/julienschmidt/httprouter v1.3.0 github.com/linxGnu/grocksdb v1.7.17-0.20230425035833-f16fdbe0eb3c - github.com/mattn/go-colorable v0.1.11 - github.com/mattn/go-isatty v0.0.14 + github.com/mattn/go-colorable v0.1.13 + github.com/mattn/go-isatty v0.0.17 github.com/naoina/toml 
v0.1.2-0.20170918210437-9fafd6967416 github.com/newrelic/go-agent/v3 v3.11.0 github.com/otiai10/copy v1.0.1 github.com/pbnjay/memory v0.0.0-20190104145345-974d429e7ae4 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.1 + github.com/prometheus/client_golang v1.12.0 github.com/prometheus/prometheus v2.1.0+incompatible github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 github.com/rjeczalik/notify v0.9.3 github.com/rs/cors v1.7.0 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/urfave/cli/v2 v2.25.7 - github.com/valyala/fasthttp v1.34.0 + github.com/valyala/fasthttp v1.40.0 go.uber.org/zap v1.13.0 golang.org/x/crypto v0.21.0 golang.org/x/net v0.23.0 @@ -73,6 +73,7 @@ require ( require ( github.com/btcsuite/btcd/btcec/v2 v2.3.2 + github.com/cockroachdb/pebble v1.1.1 github.com/dop251/goja v0.0.0-20231014103939-873a1496dc8e github.com/google/uuid v1.6.0 github.com/satori/go.uuid v1.2.0 @@ -88,13 +89,19 @@ require ( github.com/DataDog/datadog-go v4.8.2+incompatible // indirect github.com/DataDog/datadog-go/v5 v5.0.2 // indirect github.com/DataDog/sketches-go v1.2.1 // indirect + github.com/DataDog/zstd v1.4.5 // indirect github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect github.com/Microsoft/go-winio v0.5.1 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect + github.com/andybalholm/brotli v1.0.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + 
github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect @@ -105,9 +112,9 @@ require ( github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e // indirect - github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.0 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect @@ -116,21 +123,21 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.15.0 // indirect + github.com/klauspost/compress v1.16.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect github.com/otiai10/mint v1.2.4 // indirect github.com/philhofer/fwd v1.1.1 // indirect 
github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect github.com/prometheus/tsdb v0.10.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -144,9 +151,9 @@ require ( go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect + golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index 741dce326..6fabd3fa2 100644 --- a/go.sum +++ b/go.sum @@ -10,14 +10,31 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= 
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= @@ -36,6 +53,8 @@ github.com/DataDog/datadog-go/v5 v5.0.2/go.mod h1:ZI9JFB4ewXbw1sBnF4sxsR2k1H3xjV github.com/DataDog/sketches-go v1.2.1 h1:qTBzWLnZ3kM2kw39ymh6rMcnN+5VULwFs++lEYUUsro= github.com/DataDog/sketches-go v1.2.1/go.mod h1:1xYmPLY1So10AwxV6MJV0J53XVH+WL9Ad1KetxVivVI= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25YnZWly5Gq1ekg6jcmWaGj/vG/MhF4aisoc= github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= @@ -62,8 +81,9 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= +github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/cascadia v1.1.0/go.mod 
h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/aristanetworks/fsnotify v1.4.2/go.mod h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks= @@ -95,6 +115,7 @@ github.com/cespare/cp v1.0.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -108,6 +129,21 @@ github.com/clevergo/websocket v1.0.0/go.mod h1:4cxGDd7ljHn+Ng5VPV2neXYBGNZRWx9JM github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e h1:0XBUw73chJ1VYSsfvcPvVT7auykAJce9FpRr10L6Qhw= github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q18lJe1rIoLUqjM+CB1zYrRg44ZqGuQSA= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce 
h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= +github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= @@ -166,7 +202,9 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= @@ -176,8 +214,6 @@ github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= -github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= -github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -189,13 +225,16 @@ github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc= -github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getsentry/sentry-go v0.27.0 
h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -217,6 +256,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype 
v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= @@ -226,20 +267,27 @@ github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -258,6 +306,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -266,9 +317,14 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -332,6 +388,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod 
h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -342,11 +399,13 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U= github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -383,13 +442,14 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= -github.com/mattn/go-colorable v0.1.11/go.mod 
h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -398,8 +458,9 @@ github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71 github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -413,6 +474,7 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -464,6 +526,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -477,24 +541,28 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod 
h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v2.1.0+incompatible h1:yvJhKKQ7RlTPcQJ9iqza3p5ixGWGx9KUwhTDuXjG3Kk= github.com/prometheus/prometheus v2.1.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= @@ -539,8 +607,8 @@ github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 
h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -548,8 +616,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -567,8 +635,8 @@ github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 
-github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4= -github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= +github.com/valyala/fasthttp v1.40.0 h1:CRq/00MfruPGFLTQKY8b+8SfdK60TxNztjRMnH0t1Yc= +github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/wealdtech/go-eth2-types/v2 v2.8.2 h1:b5aXlNBLKgjAg/Fft9VvGlqAUCQMP5LzYhlHRrr4yPg= github.com/wealdtech/go-eth2-types/v2 v2.8.2/go.mod h1:IAz9Lz1NVTaHabQa+4zjk2QDKMv8LVYo0n46M9o/TXw= @@ -585,11 +653,16 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xtaci/kcp-go v5.4.5+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 
go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -632,6 +705,9 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81 h1:6R2FC06FonbXQ8pK11/PDFY6N6LWlf9KlzibaCapmqc= golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= @@ -645,6 +721,8 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint 
v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= @@ -652,6 +730,8 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= @@ -670,17 +750,29 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -691,18 +783,21 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -724,6 +819,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -732,10 +828,22 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -745,17 +853,18 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -767,8 +876,9 @@ golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -799,7 +909,26 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= @@ -822,11 +951,21 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -841,6 +980,23 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -848,8 +1004,14 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -857,7 +1019,11 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -916,6 +1082,7 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools 
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= inet.af/netaddr v0.0.0-20220617031823-097006376321 h1:B4dC8ySKTQXasnjDTMsoCMf1sQG4WsMej0WXaHxunmU= @@ -924,5 +1091,7 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbc launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/node/cn/backend.go b/node/cn/backend.go index 9a91a8fd9..a2175a678 100644 --- a/node/cn/backend.go +++ b/node/cn/backend.go @@ -498,7 +498,8 @@ func makeExtraData(extra []byte) []byte { func CreateDB(ctx *node.ServiceContext, config *Config, name string) database.DBManager { dbc := &database.DBConfig{ Dir: name, DBType: config.DBType, ParallelDBWrite: config.ParallelDBWrite, SingleDB: config.SingleDB, NumStateTrieShards: config.NumStateTrieShards, - LevelDBCacheSize: config.LevelDBCacheSize, OpenFilesLimit: database.GetOpenFilesLimit(), LevelDBCompression: config.LevelDBCompression, + LevelDBCacheSize: config.LevelDBCacheSize, LevelDBCompression: 
config.LevelDBCompression, + PebbleDBCacheSize: config.PebbleDBCacheSize, OpenFilesLimit: database.GetOpenFilesLimit(), LevelDBBufferPool: config.LevelDBBufferPool, EnableDBPerfMetrics: config.EnableDBPerfMetrics, RocksDBConfig: &config.RocksDBConfig, DynamoDBConfig: &config.DynamoDBConfig, } return ctx.OpenDatabase(dbc) diff --git a/node/cn/config.go b/node/cn/config.go index 826d017f2..8a62d3285 100644 --- a/node/cn/config.go +++ b/node/cn/config.go @@ -49,6 +49,7 @@ func GetDefaultConfig() *Config { SyncMode: downloader.FullSync, NetworkId: params.MainnetNetworkId, LevelDBCacheSize: 768, + PebbleDBCacheSize: 768, TrieCacheSize: 512, TrieTimeout: 5 * time.Minute, TrieBlockInterval: blockchain.DefaultBlockInterval, @@ -119,6 +120,7 @@ type Config struct { LevelDBCompression database.LevelDBCompressionType LevelDBBufferPool bool LevelDBCacheSize int + PebbleDBCacheSize int DynamoDBConfig database.DynamoDBConfig RocksDBConfig database.RocksDBConfig TrieCacheSize int diff --git a/node/sc/config.go b/node/sc/config.go index ad15aea6a..cf405672f 100644 --- a/node/sc/config.go +++ b/node/sc/config.go @@ -79,6 +79,7 @@ type SCConfig struct { SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` LevelDBCacheSize int + PebbleDBCacheSize int TrieCacheSize int TrieTimeout time.Duration TrieBlockInterval uint diff --git a/node/service_test.go b/node/service_test.go index 6c3bfe569..35393b419 100644 --- a/node/service_test.go +++ b/node/service_test.go @@ -52,6 +52,7 @@ func TestContextDatabases(t *testing.T) { dbc := &database.DBConfig{ Dir: "persistent", DBType: database.LevelDB, LevelDBCacheSize: 0, OpenFilesLimit: 0, + PebbleDBCacheSize: 0, } db := ctx.OpenDatabase(dbc) db.Close() @@ -64,6 +65,7 @@ func TestContextDatabases(t *testing.T) { dbc = &database.DBConfig{ Dir: "ephemeral", DBType: database.LevelDB, LevelDBCacheSize: 0, OpenFilesLimit: 0, + PebbleDBCacheSize: 0, } db = ctx.OpenDatabase(dbc) diff --git a/storage/database/child_chain_data_test.go 
b/storage/database/child_chain_data_test.go index fa751fbc2..4e3f4044a 100644 --- a/storage/database/child_chain_data_test.go +++ b/storage/database/child_chain_data_test.go @@ -37,7 +37,7 @@ func TestChildChainData_ReadAndWrite_ChildChainTxHash(t *testing.T) { } defer os.RemoveAll(dir) - dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, OpenFilesLimit: 32} + dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, PebbleDBCacheSize: 32, OpenFilesLimit: 32} dbm := NewDBManager(dbc) defer dbm.Close() @@ -67,7 +67,7 @@ func TestLastIndexedBlockData_ReadAndWrite_AnchoredBlockNumber(t *testing.T) { } defer os.RemoveAll(dir) - dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, OpenFilesLimit: 32} + dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, PebbleDBCacheSize: 32, OpenFilesLimit: 32} dbm := NewDBManager(dbc) defer dbm.Close() @@ -93,7 +93,7 @@ func TestChildChainData_ReadAndWrite_AnchoredBlockNumber(t *testing.T) { } defer os.RemoveAll(dir) - dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, OpenFilesLimit: 32} + dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, PebbleDBCacheSize: 32, OpenFilesLimit: 32} dbm := NewDBManager(dbc) defer dbm.Close() @@ -119,7 +119,7 @@ func TestChildChainData_ReadAndWrite_ReceiptFromParentChain(t *testing.T) { } defer os.RemoveAll(dir) - dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, OpenFilesLimit: 32} + dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, PebbleDBCacheSize: 32, OpenFilesLimit: 32} dbm := NewDBManager(dbc) defer dbm.Close() @@ -151,7 +151,7 @@ func TestChildChainData_ReadAndWrite_ValueTransferTxHash(t *testing.T) { } defer os.RemoveAll(dir) - dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, OpenFilesLimit: 32} + dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, PebbleDBCacheSize: 32, OpenFilesLimit: 32} dbm := NewDBManager(dbc) defer dbm.Close() @@ -184,7 +184,7 @@ 
func TestChildChainData_ReadAndWrite_OperatorFeePayer(t *testing.T) { firstAddr := common.HexToAddress("0x1") secondAddr := common.HexToAddress("0x2") - dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, OpenFilesLimit: 32} + dbc := &DBConfig{Dir: dir, DBType: LevelDB, LevelDBCacheSize: 32, PebbleDBCacheSize: 32, OpenFilesLimit: 32} dbm := NewDBManager(dbc) defer dbm.Close() diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index c202e4a1f..5a413c594 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -409,6 +409,8 @@ func getDBEntryConfig(originalDBC *DBConfig, i DBEntryType, dbDir string) *DBCon newDBC.LevelDBCacheSize = originalDBC.LevelDBCacheSize * ratio / 100 newDBC.OpenFilesLimit = originalDBC.OpenFilesLimit * ratio / 100 + newDBC.PebbleDBCacheSize = originalDBC.PebbleDBCacheSize * ratio / 100 + // Update dir to each Database specific directory. newDBC.Dir = filepath.Join(originalDBC.Dir, dbDir) // Update dynmao table name to Database specific name. 
@@ -469,6 +471,9 @@ type DBConfig struct { LevelDBCompression LevelDBCompressionType LevelDBBufferPool bool + // PebbleDB related configurations + PebbleDBCacheSize int + // RocksDB related configurations RocksDBConfig *RocksDBConfig diff --git a/storage/database/db_manager_test.go b/storage/database/db_manager_test.go index ed139d895..0059652ee 100644 --- a/storage/database/db_manager_test.go +++ b/storage/database/db_manager_test.go @@ -52,6 +52,16 @@ var ( {DBType: LevelDB, SingleDB: true, NumStateTrieShards: 1, ParallelDBWrite: true}, {DBType: LevelDB, SingleDB: true, NumStateTrieShards: 4, ParallelDBWrite: false}, {DBType: LevelDB, SingleDB: true, NumStateTrieShards: 4, ParallelDBWrite: true}, + + {DBType: PebbleDB, SingleDB: false, NumStateTrieShards: 1, ParallelDBWrite: false}, + {DBType: PebbleDB, SingleDB: false, NumStateTrieShards: 1, ParallelDBWrite: true}, + {DBType: PebbleDB, SingleDB: false, NumStateTrieShards: 4, ParallelDBWrite: false}, + {DBType: PebbleDB, SingleDB: false, NumStateTrieShards: 4, ParallelDBWrite: true}, + + {DBType: PebbleDB, SingleDB: true, NumStateTrieShards: 1, ParallelDBWrite: false}, + {DBType: PebbleDB, SingleDB: true, NumStateTrieShards: 1, ParallelDBWrite: true}, + {DBType: PebbleDB, SingleDB: true, NumStateTrieShards: 4, ParallelDBWrite: false}, + {DBType: PebbleDB, SingleDB: true, NumStateTrieShards: 4, ParallelDBWrite: true}, } ) diff --git a/storage/database/interface.go b/storage/database/interface.go index 433904140..98fb33f5e 100644 --- a/storage/database/interface.go +++ b/storage/database/interface.go @@ -36,12 +36,13 @@ const ( MemoryDB = "MemoryDB" DynamoDB = "DynamoDBS3" ShardedDB = "ShardedDB" + PebbleDB = "PebbleDB" ) // ToValid converts DBType to a valid one. // If it is unable to convert, "" is returned. 
func (db DBType) ToValid() DBType { - validDBType := []DBType{LevelDB, RocksDB, BadgerDB, MemoryDB, DynamoDB} + validDBType := []DBType{LevelDB, RocksDB, BadgerDB, MemoryDB, DynamoDB, PebbleDB} for _, vdb := range validDBType { if strings.ToLower(string(vdb)) == strings.ToLower(string(db)) { diff --git a/storage/database/pebbledb_database.go b/storage/database/pebbledb_database.go new file mode 100644 index 000000000..3488077fc --- /dev/null +++ b/storage/database/pebbledb_database.go @@ -0,0 +1,645 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package pebble implements the key-value database layer based on pebble. +package database + +import ( + "bytes" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/kaiachain/kaia/log" + "github.com/rcrowley/go-metrics" + + "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/bloom" +) + +const ( + // minCache is the minimum amount of memory in megabytes to allocate to pebble + // read and write caching, split half and half. + minCache = 16 + + // minHandles is the minimum number of files handles to allocate to the open + // database files. 
+ minHandles = 16 + + // metricsGatheringInterval specifies the interval to retrieve pebble database + // compaction, io and pause stats to report to the user. + metricsGatheringInterval = 3 * time.Second + + // degradationWarnInterval specifies how often warning should be printed if the + // leveldb database cannot keep up with requested writes. + degradationWarnInterval = time.Minute +) + +// PebbleDB is a persistent key-value store based on the pebble storage engine. +// Apart from basic data storage functionality it also supports batch writes and +// iterating over the keyspace in binary-alphabetical order. +type pebbleDB struct { + fn string // filename for reporting + db *pebble.DB // Underlying pebble storage engine + + compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction + compReadMeter metrics.Meter // Meter for measuring the data read during compaction + compWriteMeter metrics.Meter // Meter for measuring the data written during compaction + writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction + writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction + diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database + diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read + diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written + memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction + level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 + nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level + seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt + manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated + + levelsGauge 
[]metrics.Gauge // Gauge for tracking the number of tables in levels + + quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag + quitChan chan chan error // Quit channel to stop the metrics collection before closing the database + closed bool + + log log.Logger // Contextual logger tracking the database path + + activeComp int // Current number of active compactions + compStartTime time.Time // The start time of the earliest currently-active compaction + compTime atomic.Int64 // Total time spent in compaction in ns + level0Comp atomic.Uint32 // Total number of level-zero compactions + nonLevel0Comp atomic.Uint32 // Total number of non level-zero compactions + + writeStalled atomic.Bool // Flag whether the write is stalled + writeDelayStartTime time.Time // The start time of the latest write stall + writeDelayCount atomic.Int64 // Total number of write stall counts + writeDelayTime atomic.Int64 // Total time spent in write stalls + + writeOptions *pebble.WriteOptions +} + +func (d *pebbleDB) onCompactionBegin(info pebble.CompactionInfo) { + if d.activeComp == 0 { + d.compStartTime = time.Now() + } + l0 := info.Input[0] + if l0.Level == 0 { + d.level0Comp.Add(1) + } else { + d.nonLevel0Comp.Add(1) + } + d.activeComp++ +} + +func (d *pebbleDB) onCompactionEnd(info pebble.CompactionInfo) { + if d.activeComp == 1 { + d.compTime.Add(int64(time.Since(d.compStartTime))) + } else if d.activeComp == 0 { + panic("should not happen") + } + d.activeComp-- +} + +func (d *pebbleDB) onWriteStallBegin(b pebble.WriteStallBeginInfo) { + d.writeDelayStartTime = time.Now() + d.writeDelayCount.Add(1) + d.writeStalled.Store(true) +} + +func (d *pebbleDB) onWriteStallEnd() { + d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime))) + d.writeStalled.Store(false) +} + +type panicLogger struct{} + +func (l panicLogger) Infof(format string, args ...interface{}) { + logger.Info(fmt.Sprintf(format, args...)) +} + +func (l panicLogger) Errorf(format string, args 
...interface{}) { + logger.Error(fmt.Sprintf(format, args...)) +} + +func (l panicLogger) Fatalf(format string, args ...interface{}) { + logger.Crit(fmt.Sprintf(format, args...)) +} + +// New returns a wrapped pebble DB object. The namespace is the prefix that the +// metrics reporting should use for surfacing internal stats. +func NewPebbleDB(dbc *DBConfig, file string) (*pebbleDB, error) { + // Ensure we have some minimal caching and file guarantees + ephemeral := false + readonly := false + if dbc.PebbleDBCacheSize < minCache { + dbc.PebbleDBCacheSize = minCache + } + if dbc.OpenFilesLimit < minHandles { + dbc.OpenFilesLimit = minHandles + } + + // The max memtable size is limited by the uint32 offsets stored in + // internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry. + // + // - MaxUint32 on 64-bit platforms; + // - MaxInt on 32-bit platforms. + // + // It is used when slices are limited to Uint32 on 64-bit platforms (the + // length limit for slices is naturally MaxInt on 32-bit platforms). + // + // Taken from https://github.com/cockroachdb/pebble/blob/master/internal/constants/constants.go + maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1 + + // Two memory tables is configured which is identical to leveldb, + // including a frozen memory table and another live one. + memTableLimit := 2 + memTableSize := dbc.PebbleDBCacheSize * 1024 * 1024 / 2 / memTableLimit + + // The memory table size is currently capped at maxMemTableSize-1 due to a + // known bug in the pebble where maxMemTableSize is not recognized as a + // valid size. + // + // TODO use the maxMemTableSize as the maximum table size once the issue + // in pebble is fixed. 
+ if memTableSize >= maxMemTableSize { + memTableSize = maxMemTableSize - 1 + } + db := &pebbleDB{ + fn: file, + log: logger, + quitChan: make(chan chan error), + writeOptions: &pebble.WriteOptions{Sync: ephemeral}, + } + opt := &pebble.Options{ + // Pebble has a single combined cache area and the write + // buffers are taken from this too. Assign all available + // memory allowance for cache. + Cache: pebble.NewCache(int64(dbc.PebbleDBCacheSize * 1024 * 1024)), + MaxOpenFiles: dbc.OpenFilesLimit, + + // The size of memory table(as well as the write buffer). + // Note, there may have more than two memory tables in the system. + MemTableSize: uint64(memTableSize), + + // MemTableStopWritesThreshold places a hard limit on the size + // of the existent MemTables(including the frozen one). + // Note, this must be the number of tables not the size of all memtables + // according to https://github.com/cockroachdb/pebble/blob/master/options.go#L738-L742 + // and to https://github.com/cockroachdb/pebble/blob/master/db.go#L1892-L1903. + MemTableStopWritesThreshold: memTableLimit, + + // The default compaction concurrency(1 thread), + // Here use all available CPUs for faster compaction. + MaxConcurrentCompactions: func() int { return runtime.NumCPU() }, + + // Per-level options. Options for at least one level must be specified. The + // options for the last level are used for all subsequent levels. 
+ Levels: []pebble.LevelOptions{ + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + }, + ReadOnly: readonly, + EventListener: &pebble.EventListener{ + CompactionBegin: db.onCompactionBegin, + CompactionEnd: db.onCompactionEnd, + WriteStallBegin: db.onWriteStallBegin, + WriteStallEnd: db.onWriteStallEnd, + }, + Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble + } + + // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 + // for more details. + opt.Experimental.ReadSamplingMultiplier = -1 + + // Open the db and recover any potential corruptions + innerDB, err := pebble.Open(file, opt) + if err != nil { + return nil, err + } + db.db = innerDB + + return db, nil +} + +func (d *pebbleDB) Meter(prefix string) { + // keys are referenced in leveldb_database.go + d.compTimeMeter = metrics.GetOrRegisterMeter(prefix+"compaction/time", nil) + d.compReadMeter = metrics.GetOrRegisterMeter(prefix+"compaction/read", nil) + d.compWriteMeter = metrics.GetOrRegisterMeter(prefix+"compaction/write", nil) + + d.diskSizeGauge = metrics.GetOrRegisterGauge(prefix+"disk/size", nil) + d.diskReadMeter = metrics.GetOrRegisterMeter(prefix+"disk/read", nil) + d.diskWriteMeter = metrics.GetOrRegisterMeter(prefix+"disk/write", nil) + + d.writeDelayMeter = metrics.GetOrRegisterMeter(prefix+"writedelay/duration", nil) + d.writeDelayNMeter = metrics.GetOrRegisterMeter(prefix+"writedelay/count", nil) + + d.memCompGauge = 
metrics.GetOrRegisterGauge(prefix+"compact/memory", nil) + d.level0CompGauge = metrics.GetOrRegisterGauge(prefix+"compact/level0", nil) + d.nonlevel0CompGauge = metrics.GetOrRegisterGauge(prefix+"compact/nonlevel0", nil) + d.seekCompGauge = metrics.GetOrRegisterGauge(prefix+"compact/seek", nil) + d.manualMemAllocGauge = metrics.GetOrRegisterGauge(prefix+"memory/manualalloc", nil) + + // Start up the metrics gathering and return + go d.meter(metricsGatheringInterval, prefix) +} + +func (db *pebbleDB) TryCatchUpWithPrimary() error { + return nil +} + +func (db *pebbleDB) Type() DBType { + return PebbleDB +} + +// Close stops the metrics collection, flushes any pending data to disk and closes +// all io accesses to the underlying key-value store. +func (d *pebbleDB) Close() { + d.quitLock.Lock() + defer d.quitLock.Unlock() + + // Allow double closing, simplifies things + if d.closed { + return + } + d.closed = true + if d.quitChan != nil { + errc := make(chan error) + d.quitChan <- errc + if err := <-errc; err != nil { + d.log.Error("Metrics collection failed", "err", err) + } + d.quitChan = nil + } + d.db.Close() +} + +// Has retrieves if a key is present in the key-value store. +func (d *pebbleDB) Has(key []byte) (bool, error) { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return false, pebble.ErrClosed + } + _, closer, err := d.db.Get(key) + if err == pebble.ErrNotFound { + return false, nil + } else if err != nil { + return false, err + } + closer.Close() + return true, nil +} + +// Get retrieves the given key if it's present in the key-value store. 
+func (d *pebbleDB) Get(key []byte) ([]byte, error) { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return nil, pebble.ErrClosed + } + dat, closer, err := d.db.Get(key) + if err != nil { + if err == pebble.ErrNotFound { + return nil, dataNotFoundErr + } + return nil, err + } + ret := make([]byte, len(dat)) + copy(ret, dat) + closer.Close() + return ret, nil +} + +// Put inserts the given value into the key-value store. +func (d *pebbleDB) Put(key []byte, value []byte) error { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return pebble.ErrClosed + } + return d.db.Set(key, value, d.writeOptions) +} + +// Delete removes the key from the key-value store. +func (d *pebbleDB) Delete(key []byte) error { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return pebble.ErrClosed + } + return d.db.Delete(key, nil) +} + +// NewBatch creates a write-only key-value store that buffers changes to its host +// database until a final write is called. +func (d *pebbleDB) NewBatch() Batch { + return &batch{ + b: d.db.NewBatch(), + db: d, + } +} + +func upperBound(prefix []byte) (limit []byte) { + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c == 0xff { + continue + } + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + return limit +} + +// Stat returns the internal metrics of Pebble in a text format. It's a developer +// method to read everything there is to read independent of Pebble version. +// +// The property is unused in Pebble as there's only one thing to retrieve. +func (d *pebbleDB) Stat(property string) (string, error) { + return d.db.Metrics().String(), nil +} + +// Compact flattens the underlying data store for the given key range. In essence, +// deleted and overwritten versions are discarded, and the data is rearranged to +// reduce the cost of operations needed to access them. 
+// +// A nil start is treated as a key before all keys in the data store; a nil limit +// is treated as a key after all keys in the data store. If both is nil then it +// will compact entire data store. +func (d *pebbleDB) Compact(start []byte, limit []byte) error { + // There is no special flag to represent the end of key range + // in pebble(nil in leveldb). Use an ugly hack to construct a + // large key to represent it. + // Note any prefixed database entry will be smaller than this + // flag, as for trie nodes we need the 32 byte 0xff because + // there might be a shared prefix starting with a number of + // 0xff-s, so 32 ensures than only a hash collision could touch it. + // https://github.com/cockroachdb/pebble/issues/2359#issuecomment-1443995833 + if limit == nil { + limit = bytes.Repeat([]byte{0xff}, 32) + } + return d.db.Compact(start, limit, true) // Parallelization is preferred +} + +// meter periodically retrieves internal pebble counters and reports them to +// the metrics subsystem. +func (d *pebbleDB) meter(refresh time.Duration, namespace string) { + var errc chan error + timer := time.NewTimer(refresh) + defer timer.Stop() + + // Create storage and warning log tracer for write delay. 
+ var ( + compTimes [2]int64 + compWrites [2]int64 + compReads [2]int64 + + nWrites [2]int64 + + writeDelayTimes [2]int64 + writeDelayCounts [2]int64 + lastWriteStallReport time.Time + ) + + // Iterate ad infinitum and collect the stats + for i := 1; errc == nil; i++ { + var ( + compWrite int64 + compRead int64 + nWrite int64 + + stats = d.db.Metrics() + compTime = d.compTime.Load() + writeDelayCount = d.writeDelayCount.Load() + writeDelayTime = d.writeDelayTime.Load() + nonLevel0CompCount = int64(d.nonLevel0Comp.Load()) + level0CompCount = int64(d.level0Comp.Load()) + ) + writeDelayTimes[i%2] = writeDelayTime + writeDelayCounts[i%2] = writeDelayCount + compTimes[i%2] = compTime + + for _, levelMetrics := range stats.Levels { + nWrite += int64(levelMetrics.BytesCompacted) + nWrite += int64(levelMetrics.BytesFlushed) + compWrite += int64(levelMetrics.BytesCompacted) + compRead += int64(levelMetrics.BytesRead) + } + + nWrite += int64(stats.WAL.BytesWritten) + + compWrites[i%2] = compWrite + compReads[i%2] = compRead + nWrites[i%2] = nWrite + + if d.writeDelayNMeter != nil { + d.writeDelayNMeter.Mark(writeDelayCounts[i%2] - writeDelayCounts[(i-1)%2]) + } + if d.writeDelayMeter != nil { + d.writeDelayMeter.Mark(writeDelayTimes[i%2] - writeDelayTimes[(i-1)%2]) + } + // Print a warning log if writing has been stalled for a while. The log will + // be printed per minute to avoid overwhelming users. 
+ if d.writeStalled.Load() && writeDelayCounts[i%2] == writeDelayCounts[(i-1)%2] && + time.Now().After(lastWriteStallReport.Add(degradationWarnInterval)) { + d.log.Warn("Database compacting, degraded performance") + lastWriteStallReport = time.Now() + } + if d.compTimeMeter != nil { + d.compTimeMeter.Mark(compTimes[i%2] - compTimes[(i-1)%2]) + } + if d.compReadMeter != nil { + d.compReadMeter.Mark(compReads[i%2] - compReads[(i-1)%2]) + } + if d.compWriteMeter != nil { + d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2]) + } + if d.diskSizeGauge != nil { + d.diskSizeGauge.Update(int64(stats.DiskSpaceUsage())) + } + if d.diskReadMeter != nil { + d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads + } + if d.diskWriteMeter != nil { + d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2]) + } + + // See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054 + manuallyAllocated := stats.BlockCache.Size + int64(stats.MemTable.Size) + int64(stats.MemTable.ZombieSize) + d.manualMemAllocGauge.Update(manuallyAllocated) + d.memCompGauge.Update(stats.Flush.Count) + d.nonlevel0CompGauge.Update(nonLevel0CompCount) + d.level0CompGauge.Update(level0CompCount) + d.seekCompGauge.Update(stats.Compact.ReadCount) + + for i, level := range stats.Levels { + // Append metrics for additional layers + if i >= len(d.levelsGauge) { + d.levelsGauge = append(d.levelsGauge, metrics.GetOrRegisterGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + } + d.levelsGauge[i].Update(level.NumFiles) + } + + // Sleep a bit, then repeat the stats collection + select { + case errc = <-d.quitChan: + // Quit requesting, stop hammering the database + case <-timer.C: + timer.Reset(refresh) + // Timeout, gather a new set of stats + } + } + errc <- nil +} + +// batch is a write-only batch that commits changes to its host database +// when Write is called. A batch cannot be used concurrently. 
+type batch struct { + b *pebble.Batch + db *pebbleDB + size int +} + +// Put inserts the given value into the batch for later committing. +func (b *batch) Put(key, value []byte) error { + b.b.Set(key, value, nil) + b.size += len(key) + len(value) + return nil +} + +// Delete inserts the a key removal into the batch for later committing. +func (b *batch) Delete(key []byte) error { + b.b.Delete(key, nil) + b.size += len(key) + return nil +} + +// ValueSize retrieves the amount of data queued up for writing. +func (b *batch) ValueSize() int { + return b.size +} + +// Write flushes any accumulated data to disk. +func (b *batch) Write() error { + b.db.quitLock.RLock() + defer b.db.quitLock.RUnlock() + if b.db.closed { + return pebble.ErrClosed + } + return b.b.Commit(b.db.writeOptions) +} + +// Reset resets the batch for reuse. +func (b *batch) Reset() { + b.b.Reset() + b.size = 0 +} + +func (b *batch) Release() { + // do nothing +} + +// Replay replays the batch contents. +func (b *batch) Replay(w KeyValueWriter) error { + reader := b.b.Reader() + for { + kind, k, v, ok, err := reader.Next() + if !ok || err != nil { + break + } + + if kind == pebble.InternalKeyKindSet { + w.Put(k, v) + } else if kind == pebble.InternalKeyKindDelete { + w.Delete(k) + } else { + return fmt.Errorf("unhandled operation, keytype: %v", kind) + } + } + return nil +} + +// pebbleIterator is a wrapper of underlying iterator in storage engine. +// The purpose of this structure is to implement the missing APIs. +// +// The pebble iterator is not thread-safe. +type pebbleIterator struct { + iter *pebble.Iterator + moved bool + released bool +} + +// NewIterator creates a binary-alphabetical iterator over a subset +// of database content with a particular key prefix, starting at a particular +// initial key (or after, if it does not exist). 
+func (d *pebbleDB) NewIterator(prefix []byte, start []byte) Iterator { + iter, _ := d.db.NewIter(&pebble.IterOptions{ + LowerBound: append(prefix, start...), + UpperBound: upperBound(prefix), + }) + iter.First() + return &pebbleIterator{iter: iter, moved: true, released: false} +} + +// Next moves the iterator to the next key/value pair. It returns whether the +// iterator is exhausted. +func (iter *pebbleIterator) Next() bool { + if iter.moved { + iter.moved = false + return iter.iter.Valid() + } + return iter.iter.Next() +} + +// Error returns any accumulated error. Exhausting all the key/value pairs +// is not considered to be an error. +func (iter *pebbleIterator) Error() error { + return iter.iter.Error() +} + +// Key returns the key of the current key/value pair, or nil if done. The caller +// should not modify the contents of the returned slice, and its contents may +// change on the next call to Next. +func (iter *pebbleIterator) Key() []byte { + return iter.iter.Key() +} + +// Value returns the value of the current key/value pair, or nil if done. The +// caller should not modify the contents of the returned slice, and its contents +// may change on the next call to Next. +func (iter *pebbleIterator) Value() []byte { + return iter.iter.Value() +} + +// Release releases associated resources. Release should always succeed and can +// be called multiple times without causing error. 
+func (iter *pebbleIterator) Release() { + if !iter.released { + iter.iter.Close() + iter.released = true + } +} diff --git a/storage/database/sharded_database.go b/storage/database/sharded_database.go index 183c54e2e..7eb2abd83 100644 --- a/storage/database/sharded_database.go +++ b/storage/database/sharded_database.go @@ -72,6 +72,7 @@ func newShardedDB(dbc *DBConfig, et DBEntryType, numShards uint) (*shardedDB, er shards := make([]Database, 0, numShards) sdbBatchTaskCh := make(chan sdbBatchTask, numShards*2) sdbLevelDBCacheSize := dbc.LevelDBCacheSize / int(numShards) + sdbPebbleDBCacheSize := dbc.PebbleDBCacheSize / int(numShards) sdbOpenFilesLimit := dbc.OpenFilesLimit / int(numShards) sdbRocksDBCacheSize := GetDefaultRocksDBConfig().CacheSize / uint64(numShards) sdbRocksDBMaxOpenFiles := GetDefaultRocksDBConfig().MaxOpenFiles / int(numShards) @@ -83,6 +84,7 @@ func newShardedDB(dbc *DBConfig, et DBEntryType, numShards uint) (*shardedDB, er copiedDBC := *dbc copiedDBC.Dir = path.Join(copiedDBC.Dir, strconv.Itoa(i)) copiedDBC.LevelDBCacheSize = sdbLevelDBCacheSize + copiedDBC.PebbleDBCacheSize = sdbPebbleDBCacheSize copiedDBC.OpenFilesLimit = sdbOpenFilesLimit if copiedDBC.RocksDBConfig != nil { copiedDBC.RocksDBConfig.CacheSize = sdbRocksDBCacheSize diff --git a/tests/db_write_and_read_test.go b/tests/db_write_and_read_test.go index a5e0dd834..4d6319e80 100644 --- a/tests/db_write_and_read_test.go +++ b/tests/db_write_and_read_test.go @@ -45,6 +45,8 @@ var testEntries = []testEntry{ {"MemoryDB", &database.DBConfig{DBType: database.MemoryDB, SingleDB: false, NumStateTrieShards: 4}}, {"LevelDB-Single", &database.DBConfig{DBType: database.LevelDB, SingleDB: true, LevelDBCacheSize: 128, OpenFilesLimit: 32}}, {"LevelDB", &database.DBConfig{DBType: database.LevelDB, SingleDB: false, LevelDBCacheSize: 128, OpenFilesLimit: 32, NumStateTrieShards: 4}}, + {"PebbleDB-Single", &database.DBConfig{DBType: database.PebbleDB, SingleDB: true, PebbleDBCacheSize: 128, 
OpenFilesLimit: 32}}, + {"PebbleDB", &database.DBConfig{DBType: database.PebbleDB, SingleDB: false, PebbleDBCacheSize: 128, OpenFilesLimit: 32, NumStateTrieShards: 4}}, } // TestDBManager_WriteAndRead_Functional checks basic functionality of database.DBManager interface diff --git a/tests/kaia_test_blockchain_test.go b/tests/kaia_test_blockchain_test.go index 21012b6c4..f54ea2c4e 100644 --- a/tests/kaia_test_blockchain_test.go +++ b/tests/kaia_test_blockchain_test.go @@ -431,7 +431,7 @@ func NewDatabase(dir string, dbType database.DBType) database.DBManager { return database.NewMemoryDBManager() } else { dbc := &database.DBConfig{ - Dir: dir, DBType: dbType, LevelDBCacheSize: 768, + Dir: dir, DBType: dbType, LevelDBCacheSize: 768, PebbleDBCacheSize: 768, OpenFilesLimit: 1024, SingleDB: false, NumStateTrieShards: 4, ParallelDBWrite: true, LevelDBCompression: database.AllNoCompression, LevelDBBufferPool: true, } diff --git a/work/worker.go b/work/worker.go index 28b9e8726..ff6bc7d25 100644 --- a/work/worker.go +++ b/work/worker.go @@ -498,7 +498,6 @@ func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error } func (self *worker) commitNewWork() { - self.mu.Lock() defer self.mu.Unlock() self.currentMu.Lock() From 9f6b2a00f49b429ed9807e65857f4d91b3eb0e67 Mon Sep 17 00:00:00 2001 From: Junha Date: Tue, 9 Jul 2024 01:56:50 +0000 Subject: [PATCH 02/36] refactor: by gci --- storage/database/pebbledb_database.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/storage/database/pebbledb_database.go b/storage/database/pebbledb_database.go index 3488077fc..c23600333 100644 --- a/storage/database/pebbledb_database.go +++ b/storage/database/pebbledb_database.go @@ -25,11 +25,10 @@ import ( "sync/atomic" "time" - "github.com/kaiachain/kaia/log" - "github.com/rcrowley/go-metrics" - "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" + "github.com/kaiachain/kaia/log" + "github.com/rcrowley/go-metrics" ) const ( From 
a1899e133b82b3c8f39ab0fa6592ecba61d398e4 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Wed, 17 Jul 2024 22:04:11 +0900 Subject: [PATCH 03/36] ci: Fix circleci test-rpc apt install command --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 14413519a..8ad701292 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -307,6 +307,7 @@ commands: ./set_CNonly.sh cd .. cp config_template.json config.json + apt update apt install python3.8 python3-venv -y python3.8 -m venv --without-pip venv source venv/bin/activate From cb68e91bcd134a4d15172881c5b7dc3e6bc6f0ea Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Wed, 17 Jul 2024 22:06:42 +0900 Subject: [PATCH 04/36] (revertme) try test-rpc --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8ad701292..21c9f2609 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -541,7 +541,7 @@ workflows: branches: ignore: /.*/ - test-rpc: - filters: *filter-only-version-tag + filters: *filter-version-not-release - pass-tests: requires: From 536488be09310925d4d2fb22020407b5e565525e Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Wed, 17 Jul 2024 22:19:38 +0900 Subject: [PATCH 05/36] Revert "(revertme) try test-rpc" This reverts commit cb68e91bcd134a4d15172881c5b7dc3e6bc6f0ea. 
--- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 21c9f2609..8ad701292 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -541,7 +541,7 @@ workflows: branches: ignore: /.*/ - test-rpc: - filters: *filter-version-not-release + filters: *filter-only-version-tag - pass-tests: requires: From 9cf5df76b38ad9316cd39610bc64485ddd16e2f4 Mon Sep 17 00:00:00 2001 From: Sotatek-TinnNguyen Date: Thu, 18 Jul 2024 14:53:02 +0700 Subject: [PATCH 06/36] update new repo for rpm and update test repo --- .circleci/config.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 14413519a..215366969 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -141,7 +141,7 @@ commands: for item in kcn kpn ken kscn kspn ksen kbn kgen homi; do TARGET_RPM=$(find $item-linux-x86_64/rpmbuild/RPMS/x86_64/ | awk -v pat="$item(d)?-v" '$0~pat') - aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/prod/ + aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/ aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/ done rpm-tagging-baobab: @@ -158,7 +158,7 @@ commands: command: | for item in kcn kpn ken; do TARGET_RPM=$(find $item-linux-x86_64/rpmbuild/RPMS/x86_64/ | awk -v pat="$item(d)?-kairos-v" '$0~pat') - aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/prod/ + aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/ aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/ done createrepo-update: @@ -166,9 +166,9 @@ commands: - run: name: "createrepo update" command: | - aws s3 sync s3://$FRONTEND_BUCKET/packages/rhel/7/prod/ rhel/7/prod/ - createrepo --update rhel/7/prod - aws s3 sync --delete rhel/7/prod/repodata/ s3://$FRONTEND_BUCKET/packages/rhel/7/prod/repodata/ + aws s3 sync s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/ 
rhel/7/kaia/ + createrepo --update rhel/7/kaia + aws s3 sync --delete rhel/7/kaia/repodata/ s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/repodata/ tag-verify: steps: - run: @@ -231,7 +231,7 @@ commands: command: | export version=$(go run build/rpm/main.go version) >> $BASH_ENV echo "git tag $version" - git config --global user.email "team.devops@groundx.xyz" + git config --global user.email "team.devops@kaia.io" git config --global user.name "circleci-kaia" git tag -a $version -m "$CIRCLE_STAGE" git push origin $version @@ -300,8 +300,8 @@ commands: no_output_timeout: 30m command: | make kcn - git clone https://$TEST_TOKEN@github.com/klaytn/klaytn-rpc-tester.git - cd klaytn-rpc-tester + git clone https://$TEST_TOKEN@github.com/kaiachain/kaia-rpc-tester.git + cd kaia-rpc-tester cp ../build/bin/kcn script/cn/bin/ cd script ./set_CNonly.sh @@ -368,7 +368,7 @@ jobs: name: "Run test tests" no_output_timeout: 30m command: | - git clone --depth 1 https://$TEST_TOKEN@github.com/klaytn/klaytn-tests.git tests/testdata + git clone --depth 1 https://$TEST_TOKEN@github.com/kaiachain/kaia-core-tests.git tests/testdata make test-tests test-others: @@ -415,7 +415,7 @@ jobs: command: | set -e export GOPATH=/go - git clone --depth 1 https://$TEST_TOKEN@github.com/klaytn/klaytn-tests.git tests/testdata + git clone --depth 1 https://$TEST_TOKEN@github.com/kaiachain/kaia-core-tests.git tests/testdata make cover mkdir -p /tmp/coverage_reports cp coverage_report.txt /tmp/coverage_reports/ From aafe883fb060aa4eaabca54a6555afff65ce7ced Mon Sep 17 00:00:00 2001 From: markyim-klaytn <59378580+markyim-klaytn@users.noreply.github.com> Date: Fri, 19 Jul 2024 09:28:31 +0900 Subject: [PATCH 07/36] Update email address --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 215366969..5bf21857e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -231,7 +231,7 @@ commands: command: | export version=$(go 
run build/rpm/main.go version) >> $BASH_ENV echo "git tag $version" - git config --global user.email "team.devops@kaia.io" + git config --global user.email "devops@kaia.io" git config --global user.name "circleci-kaia" git tag -a $version -m "$CIRCLE_STAGE" git push origin $version From 9b3e0982695ad87c55fbb95d5926b800166ae506 Mon Sep 17 00:00:00 2001 From: "yumiel.ko" Date: Thu, 25 Jul 2024 11:36:48 +0800 Subject: [PATCH 08/36] enable darwin-arm64 packaging --- .circleci/config.yml | 50 +++++++++++++++++++++++++++++++++++++++++++- build/package-tar.sh | 10 ++++----- 2 files changed, 54 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 14413519a..cd2e134d0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -57,6 +57,11 @@ executors: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER KAFKA_CFG_INTER_BROKER_LISTENER_NAME: PLAINTEXT + darwin-executor: # this executor is for packaging darwin binaries + working_directory: ~/go/src/github.com/kaiachain/kaia + macos: + xcode: 14.2.0 + resource_class: macos.m1.medium.gen1 rpm-executor: # this executor is for packaging rpm binaries working_directory: /go/src/github.com/kaiachain/kaia docker: @@ -70,6 +75,18 @@ executors: - image: cimg/go:1.22.1 commands: + install-darwin-dependencies: + description: Install dependencies on darwin machine + steps: + - run: + name: "install darwin dependencies" + command: | + # install awscli + brew install awscli + # install golang + curl -O https://dl.google.com/go/go1.22.1.darwin-arm64.tar.gz + mkdir $HOME/go1.22.1 + tar -C $HOME/go1.22.1 -xzf go1.22.1.darwin-arm64.tar.gz pre-build: description: "before build, set version" steps: @@ -470,7 +487,27 @@ jobs: baobab: "-b" - upload-repo: item: "kcn kpn ken" + packaging-darwin: + executor: darwin-executor + steps: + - checkout + - install-darwin-dependencies + - pre-build + - build-packaging: + os-network: 
"darwin-arm64" + - upload-repo + packaging-darwin-baobab: + executor: darwin-executor + steps: + - checkout + - install-darwin-dependencies + - pre-build + - build-packaging: + os-network: "darwin-arm64" + baobab: "-b" + - upload-repo: + item: "kcn kpn ken" rpm-tagged: executor: rpm-executor steps: @@ -593,6 +630,8 @@ workflows: - rpm-tagged-baobab - packaging-linux - packaging-linux-baobab + - packaging-darwin + - packaging-darwin-baobab filters: tags: only: /^v[0-9]+\.[0-9]+\.[0-9]/ @@ -605,6 +644,8 @@ workflows: - rpm-tagged-baobab - packaging-linux - packaging-linux-baobab + - packaging-darwin + - packaging-darwin-baobab filters: tags: only: /^v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+.*/ @@ -627,7 +668,14 @@ workflows: filters: *filter-only-version-tag requires: - pass-tests - + - packaging-darwin: + filters: *filter-only-version-tag + requires: + - pass-tests + - packaging-darwin-baobab: + filters: *filter-only-version-tag + requires: + - pass-tests - major-tagging: filters: branches: diff --git a/build/package-tar.sh b/build/package-tar.sh index ff66c8703..96e670ca4 100755 --- a/build/package-tar.sh +++ b/build/package-tar.sh @@ -9,7 +9,7 @@ set -e function printUsage { echo "Usage: ${0} [-b] " echo " -b: use Kairos configuration" - echo " : linux-386 | linux-amd64 | darwin-amd64 | windows-386 | windows-amd64" + echo " : linux-386 | linux-amd64 | darwin-arm64 | windows-386 | windows-amd64" echo " : kcn | kpn | ken | kbn | kscn | kspn | ksen | kgen | homi" echo "" echo " ${0} linux-amd64 kcn" @@ -39,8 +39,8 @@ case "$SUBCOMMAND" in PLATFORM_SUFFIX="linux-amd64" shift ;; - darwin-amd64) - PLATFORM_SUFFIX="darwin-10.10-amd64" + darwin-arm64) + PLATFORM_SUFFIX="darwin-arm64" shift ;; windows-386) @@ -52,7 +52,7 @@ case "$SUBCOMMAND" in shift ;; *) - echo "Undefined architecture for packaging. Supported architectures: linux-386, linux-amd64, darwin-amd64, windows-386, windows-amd64" + echo "Undefined architecture for packaging. 
Supported architectures: linux-386, linux-amd64, darwin-arm64, windows-386, windows-amd64" printUsage ;; esac @@ -121,7 +121,7 @@ if [ ! -z "$DAEMON" ]; then fi fi cp build/packaging/linux/bin/${TARGET}d ${PACK_NAME}/bin/ - cp $CONF_FILE ${PACK_NAME}/conf/ + cp $CONF_FILE ${PACK_NAME}/conf/ fi # Compress! From 9194a6d46dec8db664a7e0a1864a382bb3e24f7a Mon Sep 17 00:00:00 2001 From: fivecut Date: Thu, 25 Jul 2024 15:41:50 +0800 Subject: [PATCH 09/36] chore: fix some comments Signed-off-by: fivecut --- log/format.go | 2 +- node/api.go | 2 +- node/sc/api_bridge.go | 2 +- snapshot/generate.go | 4 ++-- tests/pregenerated_data_util_test.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/log/format.go b/log/format.go index 52b89a448..692018ff7 100644 --- a/log/format.go +++ b/log/format.go @@ -49,7 +49,7 @@ func PrintOrigins(print bool) { var locationEnabled uint32 // locationLength is the maxmimum path length encountered, which all logs are -// padded to to aid in alignment. +// padded to aid in alignment. 
var locationLength uint32 // fieldPadding is a global map with maximum field value lengths seen until now diff --git a/node/api.go b/node/api.go index dfdafed63..f3c15fa43 100644 --- a/node/api.go +++ b/node/api.go @@ -77,7 +77,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) { } } -// RemovePeer disconnects from a a remote node if the connection exists +// RemovePeer disconnects from a remote node if the connection exists func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) { // Make sure the server is running, fail otherwise server := api.node.Server() diff --git a/node/sc/api_bridge.go b/node/sc/api_bridge.go index 75abacb83..4e1c606e9 100644 --- a/node/sc/api_bridge.go +++ b/node/sc/api_bridge.go @@ -555,7 +555,7 @@ func addPeerInternal(server p2p.Server, url string) (*discover.Node, error) { return node, nil } -// RemovePeer disconnects from a a remote node if the connection exists +// RemovePeer disconnects from a remote node if the connection exists func (sb *SubBridgeAPI) RemovePeer(url string) (bool, error) { // Make sure the server is running, fail otherwise server := sb.subBridge.bridgeServer diff --git a/snapshot/generate.go b/snapshot/generate.go index 351c42971..fedceb5d9 100644 --- a/snapshot/generate.go +++ b/snapshot/generate.go @@ -44,13 +44,13 @@ var ( // accountCheckRange is the upper limit of the number of accounts involved in // each range check. This is a value estimated based on experience. If this // value is too large, the failure rate of range prove will increase. Otherwise - // the the value is too small, the efficiency of the state recovery will decrease. + // the value is too small, the efficiency of the state recovery will decrease. accountCheckRange = 128 // storageCheckRange is the upper limit of the number of storage slots involved // in each range check. This is a value estimated based on experience. If this // value is too large, the failure rate of range prove will increase. 
Otherwise - // the the value is too small, the efficiency of the state recovery will decrease. + // the value is too small, the efficiency of the state recovery will decrease. storageCheckRange = 1024 // errMissingTrie is returned if the target trie is missing while the generation diff --git a/tests/pregenerated_data_util_test.go b/tests/pregenerated_data_util_test.go index b0e9e1295..f5d46f1bd 100644 --- a/tests/pregenerated_data_util_test.go +++ b/tests/pregenerated_data_util_test.go @@ -327,7 +327,7 @@ func NewBCDataForPreGeneratedTest(testDataDir string, tc *preGeneratedTC) (*BCDa return nil, errors.New("numTotalSenders should be bigger numValidatorsForTest") } - // Remove test data directory if 1) exists and and 2) generating test. + // Remove test data directory if 1) exists and 2) generating test. if _, err := os.Stat(testDataDir); err == nil && tc.isGenerateTest { os.RemoveAll(testDataDir) } From e35215ec25ddff4482b1f132ce39733d5abb2aab Mon Sep 17 00:00:00 2001 From: "yumiel.ko" Date: Thu, 25 Jul 2024 18:31:30 +0800 Subject: [PATCH 10/36] fix package name of interfaces.go --- interfaces.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/interfaces.go b/interfaces.go index 1deb08091..c11a7b310 100644 --- a/interfaces.go +++ b/interfaces.go @@ -20,8 +20,8 @@ // Modified and improved for the klaytn development. // Modified and improved for the Kaia development. -// Package Kaia defines interfaces for interacting with Kaia. -package klaytn +// Package kaia defines interfaces for interacting with Kaia. 
+package kaia import ( "context" From 34f4a115ff208322aabc2e16d83980c77fd7c600 Mon Sep 17 00:00:00 2001 From: "yumiel.ko" Date: Fri, 26 Jul 2024 15:20:34 +0800 Subject: [PATCH 11/36] do not rename package kaia --- accounts/abi/bind/backend.go | 2 +- accounts/abi/bind/backends/blockchain.go | 2 +- accounts/abi/bind/backends/blockchain_test.go | 2 +- accounts/abi/bind/backends/simulated.go | 2 +- accounts/abi/bind/backends/simulated_test.go | 2 +- accounts/abi/bind/base.go | 2 +- accounts/abi/bind/base_test.go | 2 +- accounts/abi/bind/template.go | 2 +- accounts/accounts.go | 2 +- accounts/keystore/keystore_wallet.go | 2 +- api/backend.go | 2 +- api/mocks/backend_mock.go | 2 +- blockchain/system/multicall.go | 2 +- blockchain/system/rebalance.go | 2 +- client/bridge_client.go | 2 +- client/kaia_client.go | 2 +- client/kaia_client_test.go | 2 +- contracts/contracts/libs/kip13/InterfaceIdentifier.go | 2 +- contracts/contracts/service_chain/bridge/Bridge.go | 2 +- contracts/contracts/system_contracts/consensus/Kip163.go | 2 +- contracts/contracts/system_contracts/consensus/consensus.go | 2 +- contracts/contracts/system_contracts/gov/GovParam.go | 2 +- .../contracts/system_contracts/kip113/SimpleBlsRegistry.go | 2 +- contracts/contracts/system_contracts/kip149/Registry.go | 2 +- contracts/contracts/system_contracts/misc/credit.go | 2 +- .../contracts/system_contracts/multicall/MultiCallContract.go | 2 +- contracts/contracts/system_contracts/proxy/proxy.go | 2 +- contracts/contracts/system_contracts/rebalance/all.go | 2 +- contracts/contracts/testing/extbridge/ext_bridge.go | 2 +- contracts/contracts/testing/reward/all.go | 2 +- contracts/contracts/testing/sc_erc20/sc_token.go | 2 +- contracts/contracts/testing/sc_erc721/sc_nft.go | 2 +- contracts/contracts/testing/sc_erc721_no_uri/sc_nft_no_uri.go | 2 +- contracts/contracts/testing/system_contracts/all.go | 2 +- datasync/chaindatafetcher/kas/contract_caller.go | 2 +- datasync/downloader/api.go | 2 +- 
datasync/downloader/downloader.go | 2 +- datasync/downloader/downloader_fake.go | 2 +- node/cn/api_backend.go | 2 +- node/cn/backend.go | 2 +- node/cn/filters/api.go | 2 +- node/cn/filters/filter_system.go | 2 +- node/cn/filters/filter_system_test.go | 2 +- node/cn/mocks/downloader_mock.go | 2 +- node/cn/protocol.go | 2 +- node/sc/remote_backend.go | 2 +- tests/randao_fork_test.go | 2 +- 47 files changed, 47 insertions(+), 47 deletions(-) diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index de614c588..7e3ed4891 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -27,7 +27,7 @@ import ( "errors" "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" ) diff --git a/accounts/abi/bind/backends/blockchain.go b/accounts/abi/bind/backends/blockchain.go index 85a4be2d4..4b14cf2ae 100644 --- a/accounts/abi/bind/backends/blockchain.go +++ b/accounts/abi/bind/backends/blockchain.go @@ -23,7 +23,7 @@ import ( "errors" "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/state" diff --git a/accounts/abi/bind/backends/blockchain_test.go b/accounts/abi/bind/backends/blockchain_test.go index 60a7a905f..51f878b6a 100644 --- a/accounts/abi/bind/backends/blockchain_test.go +++ b/accounts/abi/bind/backends/blockchain_test.go @@ -29,7 +29,7 @@ import ( "time" "github.com/golang/mock/gomock" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/types" diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 755542806..679e3f857 100644 --- a/accounts/abi/bind/backends/simulated.go +++ 
b/accounts/abi/bind/backends/simulated.go @@ -30,7 +30,7 @@ import ( "sync" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/bloombits" diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 4068fa726..331cfdf5c 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -32,7 +32,7 @@ import ( "testing" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain" diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index a99e379f3..78dbf7550 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -30,7 +30,7 @@ import ( "strings" "sync" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go index 7ec0da3ea..8ef19759f 100644 --- a/accounts/abi/bind/base_test.go +++ b/accounts/abi/bind/base_test.go @@ -29,7 +29,7 @@ import ( "strings" "testing" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index e4aac13c4..d67e607b4 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -99,7 +99,7 @@ import ( "strings" "errors" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/common" 
diff --git a/accounts/accounts.go b/accounts/accounts.go index 1c98c51e6..27c532f28 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -25,7 +25,7 @@ package accounts import ( "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/event" diff --git a/accounts/keystore/keystore_wallet.go b/accounts/keystore/keystore_wallet.go index 136ef50a6..b4101a6c7 100644 --- a/accounts/keystore/keystore_wallet.go +++ b/accounts/keystore/keystore_wallet.go @@ -25,7 +25,7 @@ package keystore import ( "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts" "github.com/kaiachain/kaia/blockchain/types" ) diff --git a/api/backend.go b/api/backend.go index 51ef1a70d..2a4fef078 100644 --- a/api/backend.go +++ b/api/backend.go @@ -27,7 +27,7 @@ import ( "math/big" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/state" diff --git a/api/mocks/backend_mock.go b/api/mocks/backend_mock.go index 43ac63a50..3151f84e1 100644 --- a/api/mocks/backend_mock.go +++ b/api/mocks/backend_mock.go @@ -11,7 +11,7 @@ import ( time "time" gomock "github.com/golang/mock/gomock" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" accounts "github.com/kaiachain/kaia/accounts" blockchain "github.com/kaiachain/kaia/blockchain" state "github.com/kaiachain/kaia/blockchain/state" diff --git a/blockchain/system/multicall.go b/blockchain/system/multicall.go index 90d61f5ae..0ee04a9e8 100644 --- a/blockchain/system/multicall.go +++ b/blockchain/system/multicall.go @@ -22,7 +22,7 @@ import ( "context" "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi/bind/backends" "github.com/kaiachain/kaia/blockchain" 
"github.com/kaiachain/kaia/blockchain/state" diff --git a/blockchain/system/rebalance.go b/blockchain/system/rebalance.go index 203c311a2..d798763a9 100644 --- a/blockchain/system/rebalance.go +++ b/blockchain/system/rebalance.go @@ -24,7 +24,7 @@ import ( "errors" "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/accounts/abi/bind/backends" "github.com/kaiachain/kaia/blockchain" diff --git a/client/bridge_client.go b/client/bridge_client.go index 1a19f8e06..4437b322b 100644 --- a/client/bridge_client.go +++ b/client/bridge_client.go @@ -27,7 +27,7 @@ import ( "errors" "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/common/hexutil" diff --git a/client/kaia_client.go b/client/kaia_client.go index 418ed1f05..e75a28549 100644 --- a/client/kaia_client.go +++ b/client/kaia_client.go @@ -29,7 +29,7 @@ import ( "fmt" "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/api" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" diff --git a/client/kaia_client_test.go b/client/kaia_client_test.go index bf70d1a78..d18de0533 100644 --- a/client/kaia_client_test.go +++ b/client/kaia_client_test.go @@ -22,7 +22,7 @@ package client -import kaia "github.com/kaiachain/kaia" +import "github.com/kaiachain/kaia" // Verify that Client implements the Kaia interfaces. 
var ( diff --git a/contracts/contracts/libs/kip13/InterfaceIdentifier.go b/contracts/contracts/libs/kip13/InterfaceIdentifier.go index 61a1c3344..a7425ddf6 100644 --- a/contracts/contracts/libs/kip13/InterfaceIdentifier.go +++ b/contracts/contracts/libs/kip13/InterfaceIdentifier.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/service_chain/bridge/Bridge.go b/contracts/contracts/service_chain/bridge/Bridge.go index 5ee51170d..6c8b795c0 100644 --- a/contracts/contracts/service_chain/bridge/Bridge.go +++ b/contracts/contracts/service_chain/bridge/Bridge.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/consensus/Kip163.go b/contracts/contracts/system_contracts/consensus/Kip163.go index 0154d9876..9fe870ea2 100644 --- a/contracts/contracts/system_contracts/consensus/Kip163.go +++ b/contracts/contracts/system_contracts/consensus/Kip163.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/consensus/consensus.go b/contracts/contracts/system_contracts/consensus/consensus.go index ca493d5c4..9cb72137e 100644 --- a/contracts/contracts/system_contracts/consensus/consensus.go +++ b/contracts/contracts/system_contracts/consensus/consensus.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" 
"github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/gov/GovParam.go b/contracts/contracts/system_contracts/gov/GovParam.go index b8c4b0493..ed59c189f 100644 --- a/contracts/contracts/system_contracts/gov/GovParam.go +++ b/contracts/contracts/system_contracts/gov/GovParam.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/kip113/SimpleBlsRegistry.go b/contracts/contracts/system_contracts/kip113/SimpleBlsRegistry.go index 375973e31..42b02f842 100644 --- a/contracts/contracts/system_contracts/kip113/SimpleBlsRegistry.go +++ b/contracts/contracts/system_contracts/kip113/SimpleBlsRegistry.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/kip149/Registry.go b/contracts/contracts/system_contracts/kip149/Registry.go index 7ba1aa7b1..15d330407 100644 --- a/contracts/contracts/system_contracts/kip149/Registry.go +++ b/contracts/contracts/system_contracts/kip149/Registry.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/misc/credit.go b/contracts/contracts/system_contracts/misc/credit.go index fa869d0e3..27ba8fdd7 100644 --- a/contracts/contracts/system_contracts/misc/credit.go +++ 
b/contracts/contracts/system_contracts/misc/credit.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/multicall/MultiCallContract.go b/contracts/contracts/system_contracts/multicall/MultiCallContract.go index 83c4a2647..78d609af8 100644 --- a/contracts/contracts/system_contracts/multicall/MultiCallContract.go +++ b/contracts/contracts/system_contracts/multicall/MultiCallContract.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/proxy/proxy.go b/contracts/contracts/system_contracts/proxy/proxy.go index 5f4ff3ca2..28d5fd323 100644 --- a/contracts/contracts/system_contracts/proxy/proxy.go +++ b/contracts/contracts/system_contracts/proxy/proxy.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/system_contracts/rebalance/all.go b/contracts/contracts/system_contracts/rebalance/all.go index a90e9b2ff..b6db4384a 100644 --- a/contracts/contracts/system_contracts/rebalance/all.go +++ b/contracts/contracts/system_contracts/rebalance/all.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/testing/extbridge/ext_bridge.go 
b/contracts/contracts/testing/extbridge/ext_bridge.go index 75b035d0f..6c8a534c9 100644 --- a/contracts/contracts/testing/extbridge/ext_bridge.go +++ b/contracts/contracts/testing/extbridge/ext_bridge.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/testing/reward/all.go b/contracts/contracts/testing/reward/all.go index 43fa03d07..076013b04 100644 --- a/contracts/contracts/testing/reward/all.go +++ b/contracts/contracts/testing/reward/all.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/testing/sc_erc20/sc_token.go b/contracts/contracts/testing/sc_erc20/sc_token.go index 20a398c8f..57b3f5f92 100644 --- a/contracts/contracts/testing/sc_erc20/sc_token.go +++ b/contracts/contracts/testing/sc_erc20/sc_token.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/testing/sc_erc721/sc_nft.go b/contracts/contracts/testing/sc_erc721/sc_nft.go index 0ec23c4c2..caeba5345 100644 --- a/contracts/contracts/testing/sc_erc721/sc_nft.go +++ b/contracts/contracts/testing/sc_erc721/sc_nft.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/testing/sc_erc721_no_uri/sc_nft_no_uri.go 
b/contracts/contracts/testing/sc_erc721_no_uri/sc_nft_no_uri.go index b863126a3..de115b657 100644 --- a/contracts/contracts/testing/sc_erc721_no_uri/sc_nft_no_uri.go +++ b/contracts/contracts/testing/sc_erc721_no_uri/sc_nft_no_uri.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/contracts/contracts/testing/system_contracts/all.go b/contracts/contracts/testing/system_contracts/all.go index 507c365ea..582561b98 100644 --- a/contracts/contracts/testing/system_contracts/all.go +++ b/contracts/contracts/testing/system_contracts/all.go @@ -8,7 +8,7 @@ import ( "math/big" "strings" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/blockchain/types" diff --git a/datasync/chaindatafetcher/kas/contract_caller.go b/datasync/chaindatafetcher/kas/contract_caller.go index 0bc5e79de..350f472a4 100644 --- a/datasync/chaindatafetcher/kas/contract_caller.go +++ b/datasync/chaindatafetcher/kas/contract_caller.go @@ -24,7 +24,7 @@ import ( "strings" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/api" "github.com/kaiachain/kaia/blockchain" diff --git a/datasync/downloader/api.go b/datasync/downloader/api.go index c526d2f8d..2d0cc0070 100644 --- a/datasync/downloader/api.go +++ b/datasync/downloader/api.go @@ -26,7 +26,7 @@ import ( "context" "sync" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/event" "github.com/kaiachain/kaia/networks/rpc" ) diff --git a/datasync/downloader/downloader.go b/datasync/downloader/downloader.go index 8ab2cd0f9..c1d70e05c 100644 --- a/datasync/downloader/downloader.go +++ 
b/datasync/downloader/downloader.go @@ -30,7 +30,7 @@ import ( "sync/atomic" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/event" diff --git a/datasync/downloader/downloader_fake.go b/datasync/downloader/downloader_fake.go index 79f54b270..32f30ac45 100644 --- a/datasync/downloader/downloader_fake.go +++ b/datasync/downloader/downloader_fake.go @@ -21,7 +21,7 @@ package downloader import ( "math/big" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/node/cn/snap" diff --git a/node/cn/api_backend.go b/node/cn/api_backend.go index b1ad776fb..89e7afdd4 100644 --- a/node/cn/api_backend.go +++ b/node/cn/api_backend.go @@ -28,7 +28,7 @@ import ( "math/big" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/bloombits" diff --git a/node/cn/backend.go b/node/cn/backend.go index 76e9aba95..b675a8981 100644 --- a/node/cn/backend.go +++ b/node/cn/backend.go @@ -31,7 +31,7 @@ import ( "sync" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts" "github.com/kaiachain/kaia/api" "github.com/kaiachain/kaia/blockchain" diff --git a/node/cn/filters/api.go b/node/cn/filters/api.go index c4511fd2b..9267f3b65 100644 --- a/node/cn/filters/api.go +++ b/node/cn/filters/api.go @@ -31,7 +31,7 @@ import ( "sync" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/common/hexutil" diff --git a/node/cn/filters/filter_system.go b/node/cn/filters/filter_system.go index 75a26fea4..659e92b9a 100644 --- a/node/cn/filters/filter_system.go 
+++ b/node/cn/filters/filter_system.go @@ -29,7 +29,7 @@ import ( "sync" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" diff --git a/node/cn/filters/filter_system_test.go b/node/cn/filters/filter_system_test.go index b37864138..34f56e826 100644 --- a/node/cn/filters/filter_system_test.go +++ b/node/cn/filters/filter_system_test.go @@ -32,7 +32,7 @@ import ( "testing" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain" "github.com/kaiachain/kaia/blockchain/bloombits" "github.com/kaiachain/kaia/blockchain/types" diff --git a/node/cn/mocks/downloader_mock.go b/node/cn/mocks/downloader_mock.go index 4b7c02dac..2214e148d 100644 --- a/node/cn/mocks/downloader_mock.go +++ b/node/cn/mocks/downloader_mock.go @@ -9,7 +9,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" types "github.com/kaiachain/kaia/blockchain/types" common "github.com/kaiachain/kaia/common" downloader "github.com/kaiachain/kaia/datasync/downloader" diff --git a/node/cn/protocol.go b/node/cn/protocol.go index 62ea38343..dc64a78ba 100644 --- a/node/cn/protocol.go +++ b/node/cn/protocol.go @@ -28,7 +28,7 @@ import ( "math/big" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/datasync/downloader" diff --git a/node/sc/remote_backend.go b/node/sc/remote_backend.go index c4dc7a28a..8b97e016a 100644 --- a/node/sc/remote_backend.go +++ b/node/sc/remote_backend.go @@ -24,7 +24,7 @@ import ( "net" "time" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/common/hexutil" diff --git 
a/tests/randao_fork_test.go b/tests/randao_fork_test.go index 3de7fccad..b51263240 100644 --- a/tests/randao_fork_test.go +++ b/tests/randao_fork_test.go @@ -20,7 +20,7 @@ import ( "math/big" "testing" - kaia "github.com/kaiachain/kaia" + "github.com/kaiachain/kaia" "github.com/kaiachain/kaia/accounts/abi/bind" "github.com/kaiachain/kaia/accounts/abi/bind/backends" "github.com/kaiachain/kaia/blockchain" From d95e7c9f5f56d4005f1bb445eab8dab0a7365cc7 Mon Sep 17 00:00:00 2001 From: hyeonLewis Date: Tue, 30 Jul 2024 11:12:34 +0900 Subject: [PATCH 12/36] Update airdrop contract --- .../system_contracts/allocation/Airdrop.sol | 44 +++-- .../system_contracts/allocation/IAirdrop.sol | 8 +- contracts/test/Allocation/airdrop.test.ts | 171 +++++++++--------- contracts/test/common/fixtures.ts | 4 + 4 files changed, 125 insertions(+), 102 deletions(-) diff --git a/contracts/contracts/system_contracts/allocation/Airdrop.sol b/contracts/contracts/system_contracts/allocation/Airdrop.sol index 267c1be52..744f70d41 100644 --- a/contracts/contracts/system_contracts/allocation/Airdrop.sol +++ b/contracts/contracts/system_contracts/allocation/Airdrop.sol @@ -24,12 +24,6 @@ import "openzeppelin-contracts-5.0/access/Ownable.sol"; contract Airdrop is Ownable, IAirdrop { using EnumerableSet for EnumerableSet.AddressSet; - /* ========== CONSTANTS ========== */ - - uint256 public constant KAIA_UNIT = 1e18; - - uint256 public constant TOTAL_AIRDROP_AMOUNT = 80_000_000 * KAIA_UNIT; - /* ========== STATE VARIABLES ========== */ EnumerableSet.AddressSet private _beneficiaries; @@ -38,12 +32,25 @@ contract Airdrop is Ownable, IAirdrop { mapping(address => bool) public claimed; + bool public claimAllowed; + + /* ========== MODIFIER ========== */ + + modifier onlyClaimAllowed() { + require(claimAllowed, "Airdrop: claim not allowed"); + _; + } + /* ========== CONSTRUCTOR ========== */ constructor() Ownable(msg.sender) {} /* ========== OPERATOR FUNCTIONS ========== */ + function toggleClaimAllowed() 
external override onlyOwner { + claimAllowed = !claimAllowed; + } + function addClaim(address beneficiary, uint256 amount) external override onlyOwner { _addClaim(beneficiary, amount); } @@ -58,24 +65,27 @@ contract Airdrop is Ownable, IAirdrop { /* ========== PUBLIC FUNCTIONS ========== */ - function claim() external override { + receive() external payable override {} + + function claim() external override onlyClaimAllowed { _claim(msg.sender); } - function claimFor(address beneficiary) public override { + function claimFor(address beneficiary) public override onlyClaimAllowed { _claim(beneficiary); } - function claimBatch(address[] calldata beneficiary) external override { - for (uint256 i = 0; i < beneficiary.length; i++) { - _claim(beneficiary[i]); + function claimBatch(address[] calldata beneficiaries) external override onlyClaimAllowed { + for (uint256 i = 0; i < beneficiaries.length; i++) { + _claim(beneficiaries[i]); } } /* ========== INTERNAL FUNCTIONS ========== */ function _addClaim(address beneficiary, uint256 amount) internal { - require(_beneficiaries.add(beneficiary), "Airdrop: beneficiary already exists"); + // Override claim if beneficiary already exists + _beneficiaries.add(beneficiary); claims[beneficiary] = amount; } @@ -88,32 +98,32 @@ contract Airdrop is Ownable, IAirdrop { claimed[beneficiary] = true; (bool success, ) = beneficiary.call{value: _amount}(""); - require(success, "Transfer failed."); + require(success, "Airdrop: claim failed"); emit Claimed(beneficiary, _amount); } /* ========== GETTERS ========== */ - function getBeneficiaries(uint256 start, uint256 end) external override view returns (address[] memory result) { + function getBeneficiaries(uint256 start, uint256 end) external view override returns (address[] memory result) { end = end > _beneficiaries.length() ? 
_beneficiaries.length() : end; if (start >= end) { return new address[](0); } result = new address[](end - start); - for (uint256 i = start; i < end; i++) { + for (uint256 i = start; i < end; i++) { unchecked { result[i - start] = _beneficiaries.at(i); } } } - function getBeneficiaryAt(uint256 index) external override view returns (address) { + function getBeneficiaryAt(uint256 index) external view override returns (address) { return _beneficiaries.at(index); } - function getBeneficiariesLength() external override view returns (uint256) { + function getBeneficiariesLength() external view override returns (uint256) { return _beneficiaries.length(); } } diff --git a/contracts/contracts/system_contracts/allocation/IAirdrop.sol b/contracts/contracts/system_contracts/allocation/IAirdrop.sol index 01fd61133..5cbe87bd5 100644 --- a/contracts/contracts/system_contracts/allocation/IAirdrop.sol +++ b/contracts/contracts/system_contracts/allocation/IAirdrop.sol @@ -24,9 +24,7 @@ interface IAirdrop { /* ========== VIEWS ========== */ - function KAIA_UNIT() external view returns (uint256); - - function TOTAL_AIRDROP_AMOUNT() external view returns (uint256); + function claimAllowed() external view returns (bool); function claims(address) external view returns (uint256); @@ -40,6 +38,10 @@ interface IAirdrop { /* ========== MUTATIVE FUNCTIONS ========== */ + receive() external payable; + + function toggleClaimAllowed() external; + function addClaim(address beneficiary, uint256 amount) external; function addBatchClaims(address[] calldata beneficiaries, uint256[] calldata amounts) external; diff --git a/contracts/test/Allocation/airdrop.test.ts b/contracts/test/Allocation/airdrop.test.ts index de46e3175..7285a80e4 100644 --- a/contracts/test/Allocation/airdrop.test.ts +++ b/contracts/test/Allocation/airdrop.test.ts @@ -3,8 +3,8 @@ import { setBalance, } from "@nomicfoundation/hardhat-network-helpers"; import { expect } from "chai"; - import { airdropTestFixture } from 
"../common/fixtures"; +import { Airdrop__factory } from "../../typechain-types"; type UnPromisify = T extends Promise ? U : T; @@ -16,58 +16,43 @@ describe("Airdrop", function () { await setBalance(airdrop.address, totalAirdropAmount); }); - describe("Check constants", function () { - it("#totalAirdropAmount", async function () { - const { airdrop } = fixture; + describe("Set airdrop list", function () { + it("#addClaim/addBatchClaims: only owner can add claim", async function () { + const { airdrop, notClaimer, claimInfo } = fixture; - expect(await airdrop.KAIA_UNIT()).to.equal( - hre.ethers.utils.parseEther("1") - ); - expect(await airdrop.TOTAL_AIRDROP_AMOUNT()).to.equal( - hre.ethers.utils.parseEther("80000000") - ); + await expect( + airdrop.connect(notClaimer).addClaim(claimInfo[0].claimer, claimInfo[0].amount), + ).to.be.revertedWithCustomError(airdrop, "OwnableUnauthorizedAccount"); + + await expect( + airdrop.connect(notClaimer).addBatchClaims([claimInfo[0].claimer], [claimInfo[0].amount]), + ).to.be.revertedWithCustomError(airdrop, "OwnableUnauthorizedAccount"); }); - }); - describe("Set airdrop list", function () { it("#addClaim", async function () { const { airdrop, claimInfo } = fixture; await airdrop.addClaim(claimInfo[0].claimer, claimInfo[0].amount); - expect(await airdrop.claims(claimInfo[0].claimer)).to.equal( - claimInfo[0].amount - ); + expect(await airdrop.claims(claimInfo[0].claimer)).to.equal(claimInfo[0].amount); }); - it("#addClaim: failed to add duplicate beneficiary", async function () { + it("#addClaim: override existing beneficiary", async function () { const { airdrop, claimInfo } = fixture; await airdrop.addClaim(claimInfo[0].claimer, claimInfo[0].amount); - await expect( - airdrop.addClaim(claimInfo[0].claimer, claimInfo[0].amount) - ).to.be.revertedWith("Airdrop: beneficiary already exists"); - }); - it("#addClaim/addBatchClaims: only owner can add claim", async function () { - const { airdrop, notClaimer, claimInfo } = fixture; + 
expect(await airdrop.claims(claimInfo[0].claimer)).to.equal(claimInfo[0].amount); - await expect( - airdrop - .connect(notClaimer) - .addClaim(claimInfo[0].claimer, claimInfo[0].amount) - ).to.be.revertedWithCustomError(airdrop, "OwnableUnauthorizedAccount"); + await airdrop.addClaim(claimInfo[0].claimer, BigInt(claimInfo[0].amount) * 2n); - await expect( - airdrop - .connect(notClaimer) - .addBatchClaims([claimInfo[0].claimer], [claimInfo[0].amount]) - ).to.be.revertedWithCustomError(airdrop, "OwnableUnauthorizedAccount"); + expect(await airdrop.claims(claimInfo[0].claimer)).to.equal(BigInt(claimInfo[0].amount) * 2n); }); it("#addBatchClaims", async function () { const { airdrop, claimInfo } = fixture; - for (const claim of claimInfo) { - await airdrop.addClaim(claim.claimer, claim.amount); - } + await airdrop.addBatchClaims( + claimInfo.map((claim) => claim.claimer), + claimInfo.map((claim) => claim.amount), + ); for (const claim of claimInfo) { expect(await airdrop.claims(claim.claimer)).to.equal(claim.amount); @@ -78,56 +63,61 @@ describe("Airdrop", function () { this.beforeEach(async function () { const { airdrop, claimInfo } = fixture; - for (const claim of claimInfo) { - await airdrop.addClaim(claim.claimer, claim.amount); - } + await airdrop.addBatchClaims( + claimInfo.map((claim) => claim.claimer), + claimInfo.map((claim) => claim.amount), + ); + + await airdrop.toggleClaimAllowed(); + }); + it("toggleClaimAllowed: can't claim if claim not allowed", async function () { + const { airdrop, claimers } = fixture; + + await airdrop.toggleClaimAllowed(); + + expect(await airdrop.claimAllowed()).to.be.false; + + await expect(airdrop.connect(claimers[0]).claim()).to.be.revertedWith("Airdrop: claim not allowed"); + + await expect(airdrop.connect(claimers[0]).claimFor(claimers[0].address)).to.be.revertedWith( + "Airdrop: claim not allowed", + ); }); it("#claim/claimFor: can't claim if not in the list", async function () { const { airdrop, notClaimer, claimers } = 
fixture; - await expect(airdrop.connect(notClaimer).claim()).to.be.revertedWith( - "Airdrop: no claimable amount" - ); + await expect(airdrop.connect(notClaimer).claim()).to.be.revertedWith("Airdrop: no claimable amount"); - await expect( - airdrop.connect(claimers[0]).claimFor(notClaimer.address) - ).to.be.revertedWith("Airdrop: no claimable amount"); + await expect(airdrop.connect(claimers[0]).claimFor(notClaimer.address)).to.be.revertedWith( + "Airdrop: no claimable amount", + ); }); it("#claim: successfully get airdrop", async function () { const { airdrop, claimers, claimInfo } = fixture; - const beforeBalance = await hre.ethers.provider.getBalance( - claimers[0].address - ); + const beforeBalance = await hre.ethers.provider.getBalance(claimers[0].address); await expect(airdrop.connect(claimers[0]).claim()) .to.emit(airdrop, "Claimed") .withArgs(claimers[0].address, claimInfo[0].amount); - const afterBalance = await hre.ethers.provider.getBalance( - claimers[0].address - ); + const afterBalance = await hre.ethers.provider.getBalance(claimers[0].address); // 0.0001 is a transaction fee - expect(afterBalance.sub(beforeBalance)).to.be.closeTo( - claimInfo[0].amount, - hre.ethers.utils.parseEther("0.0001") - ); + expect(afterBalance.sub(beforeBalance)).to.be.closeTo(claimInfo[0].amount, hre.ethers.utils.parseEther("0.0001")); + expect(await airdrop.claimed(claimers[0].address)).to.be.true; }); it("#claimFor: successfully get airdrop", async function () { const { airdrop, notClaimer, claimers, claimInfo } = fixture; - const beforeBalance = await hre.ethers.provider.getBalance( - claimers[0].address - ); + const beforeBalance = await hre.ethers.provider.getBalance(claimers[0].address); await expect(airdrop.connect(notClaimer).claimFor(claimers[0].address)) .to.emit(airdrop, "Claimed") .withArgs(claimers[0].address, claimInfo[0].amount); - const afterBalance = await hre.ethers.provider.getBalance( - claimers[0].address - ); + const afterBalance = await 
hre.ethers.provider.getBalance(claimers[0].address); expect(afterBalance.sub(beforeBalance)).to.equal(claimInfo[0].amount); + expect(await airdrop.claimed(claimers[0].address)).to.be.true; }); it("#claim/claimFor: can't get twice", async function () { const { airdrop, notClaimer, claimers, claimInfo } = fixture; @@ -136,26 +126,20 @@ describe("Airdrop", function () { .to.emit(airdrop, "Claimed") .withArgs(claimers[0].address, claimInfo[0].amount); - await expect(airdrop.connect(claimers[0]).claim()).to.be.revertedWith( - "Airdrop: already claimed" - ); + await expect(airdrop.connect(claimers[0]).claim()).to.be.revertedWith("Airdrop: already claimed"); - await expect( - airdrop.connect(notClaimer).claimFor(claimers[0].address) - ).to.be.revertedWith("Airdrop: already claimed"); + await expect(airdrop.connect(notClaimer).claimFor(claimers[0].address)).to.be.revertedWith( + "Airdrop: already claimed", + ); }); it("#claimBatch: successfully get airdrop", async function () { const { airdrop, notClaimer, claimers, claimInfo } = fixture; const beforeBalances = await Promise.all( - claimers.map((claimer) => - hre.ethers.provider.getBalance(claimer.address) - ) + claimers.map((claimer) => hre.ethers.provider.getBalance(claimer.address)), ); - await airdrop - .connect(notClaimer) - .claimBatch(claimers.map((claimer) => claimer.address)); + await airdrop.connect(notClaimer).claimBatch(claimers.map((claimer) => claimer.address)); // const tx = await airdrop.connect(notClaimer).claimBatch(claimers.map((claimer) => claimer.address)); // console.log( @@ -164,16 +148,42 @@ describe("Airdrop", function () { // ); const afterBalances = await Promise.all( - claimers.map((claimer) => - hre.ethers.provider.getBalance(claimer.address) - ) + claimers.map((claimer) => hre.ethers.provider.getBalance(claimer.address)), ); for (let i = 0; i < claimers.length; i++) { - expect(afterBalances[i].sub(beforeBalances[i])).to.equal( - claimInfo[i].amount - ); + 
expect(afterBalances[i].sub(beforeBalances[i])).to.equal(claimInfo[i].amount); + expect(await airdrop.claimed(claimers[i].address)).to.be.true; } }); + it("#claimBatch: failed claim", async function () { + const { airdrop, notClaimer, claimInfo, noReceiverContract } = fixture; + + await airdrop.addClaim(noReceiverContract.address, claimInfo[0].amount); + + const beforeBalance = await hre.ethers.provider.getBalance(airdrop.address); + + await expect(airdrop.connect(notClaimer).claimFor(noReceiverContract.address)).to.be.revertedWith( + "Airdrop: claim failed", + ); + expect(await airdrop.claimed(noReceiverContract.address)).to.be.false; + expect(await hre.ethers.provider.getBalance(airdrop.address)).to.equal(beforeBalance); + }); + }); + describe("Divide airdrop contract", function () { + it("#register airdrop contract as claimer", async function () { + const { deployer, airdrop } = fixture; + + const newAirdrop = await new Airdrop__factory(deployer).deploy(); + + await airdrop.addClaim(newAirdrop.address, hre.ethers.utils.parseEther("100")); + + await airdrop.toggleClaimAllowed(); + + await airdrop.claimFor(newAirdrop.address); + + expect(await airdrop.claimed(newAirdrop.address)).to.be.true; + expect(await hre.ethers.provider.getBalance(newAirdrop.address)).to.equal(hre.ethers.utils.parseEther("100")); + }); }); describe("Check view functions", function () { this.beforeEach(async function () { @@ -204,10 +214,7 @@ describe("Airdrop", function () { it("#getBeneficiaries: end > length", async function () { const { airdrop, claimers } = fixture; - const beneficiaries = await airdrop.getBeneficiaries( - 0, - claimers.length + 5 - ); + const beneficiaries = await airdrop.getBeneficiaries(0, claimers.length + 5); for (let i = 0; i < claimers.length; i++) { expect(beneficiaries[i]).to.equal(claimers[i].address); } diff --git a/contracts/test/common/fixtures.ts b/contracts/test/common/fixtures.ts index be5652922..04bd3a8b1 100644 --- a/contracts/test/common/fixtures.ts 
+++ b/contracts/test/common/fixtures.ts @@ -594,6 +594,9 @@ export async function airdropTestFixture() { const airdrop = await new Airdrop__factory(deployer).deploy(); + // Just mock contract without receiver function to test claim failed case. + const noReceiverContract = await new StakingTrackerMockReceiver__factory(deployer).deploy(); + return { airdrop, deployer, @@ -601,6 +604,7 @@ export async function airdropTestFixture() { claimers, claimInfo, totalAirdropAmount, + noReceiverContract, }; } From d71c2a81992790884709a474d400627b1ca2ce48 Mon Sep 17 00:00:00 2001 From: "yumiel.ko" Date: Thu, 1 Aug 2024 19:59:00 +0800 Subject: [PATCH 13/36] db: adjust pebbledb metric --- cmd/utils/flags.go | 2 +- node/config.go | 2 +- storage/database/db_manager.go | 2 + storage/database/pebbledb_database.go | 64 ++++++++++++++++++++++++--- 4 files changed, 61 insertions(+), 9 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 8e5eb886d..756df99bd 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -96,7 +96,7 @@ var ( } DbTypeFlag = &cli.StringFlag{ Name: "dbtype", - Usage: `Blockchain storage database type ("LevelDB", "BadgerDB", "MemoryDB", "DynamoDBS3")`, + Usage: `Blockchain storage database type ("LevelDB", "BadgerDB", "MemoryDB", "DynamoDBS3", "PebbleDB")`, Value: "LevelDB", Aliases: []string{"db.type", "migration.src.dbtype"}, EnvVars: []string{"KLAYTN_DBTYPE", "KAIA_DBTYPE"}, diff --git a/node/config.go b/node/config.go index c0da3ef38..66a4707a5 100644 --- a/node/config.go +++ b/node/config.go @@ -67,7 +67,7 @@ type Config struct { // in the devp2p node identifier. 
Version string `toml:"-"` - // key-value database type [LevelDB, RocksDB, BadgerDB, MemoryDB, DynamoDB] + // key-value database type [LevelDB, RocksDB, BadgerDB, MemoryDB, DynamoDB, PebbleDB] DBType database.DBType // DataDir is the file system folder the node should use for any data storage diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index 5a413c594..69014d941 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -563,6 +563,8 @@ func newDatabase(dbc *DBConfig, entryType DBEntryType) (Database, error) { return NewLevelDB(dbc, entryType) case RocksDB: return NewRocksDB(dbc.Dir, dbc.RocksDBConfig) + case PebbleDB: + return NewPebbleDB(dbc, dbc.Dir) case BadgerDB: return NewBadgerDB(dbc.Dir) case MemoryDB: diff --git a/storage/database/pebbledb_database.go b/storage/database/pebbledb_database.go index c23600333..71a2b14a2 100644 --- a/storage/database/pebbledb_database.go +++ b/storage/database/pebbledb_database.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" "github.com/kaiachain/kaia/log" + kaiametrics "github.com/kaiachain/kaia/metrics" "github.com/rcrowley/go-metrics" ) @@ -70,7 +71,16 @@ type pebbleDB struct { seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated - levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels + levelSizesGauge []metrics.Gauge + levelTablesGauge []metrics.Gauge // Gauge for tracking the number of tables in levels + levelReadGauge []metrics.Gauge + levelWriteGauge []metrics.Gauge + // levelDurationGauge []metrics.Gauge // Not impl in pebbledb, do not try to match it with leveldb + + perfCheck bool + getTimer kaiametrics.HybridTimer + putTimer kaiametrics.HybridTimer + batchWriteTimer kaiametrics.HybridTimer quitLock sync.RWMutex // Mutex protecting 
the quit channel and the closed flag quitChan chan chan error // Quit channel to stop the metrics collection before closing the database @@ -139,7 +149,7 @@ func (l panicLogger) Fatalf(format string, args ...interface{}) { logger.Crit(fmt.Sprintf(format, args...)) } -// New returns a wrapped pebble DB object. The namespace is the prefix that the +// NewPebbleDB returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. func NewPebbleDB(dbc *DBConfig, file string) (*pebbleDB, error) { // Ensure we have some minimal caching and file guarantees @@ -183,6 +193,7 @@ func NewPebbleDB(dbc *DBConfig, file string) (*pebbleDB, error) { log: logger, quitChan: make(chan chan error), writeOptions: &pebble.WriteOptions{Sync: ephemeral}, + perfCheck: dbc.EnableDBPerfMetrics, } opt := &pebble.Options{ // Pebble has a single combined cache area and the write @@ -260,6 +271,10 @@ func (d *pebbleDB) Meter(prefix string) { d.seekCompGauge = metrics.GetOrRegisterGauge(prefix+"compact/seek", nil) d.manualMemAllocGauge = metrics.GetOrRegisterGauge(prefix+"memory/manualalloc", nil) + d.getTimer = kaiametrics.NewRegisteredHybridTimer(prefix+"get/time", nil) + d.putTimer = kaiametrics.NewRegisteredHybridTimer(prefix+"put/time", nil) + d.batchWriteTimer = kaiametrics.NewRegisteredHybridTimer(prefix+"batchwrite/time", nil) + // Start up the metrics gathering and return go d.meter(metricsGatheringInterval, prefix) } @@ -318,7 +333,24 @@ func (d *pebbleDB) Get(key []byte) ([]byte, error) { if d.closed { return nil, pebble.ErrClosed } + if d.perfCheck { + start := time.Now() + ret, err := d.get(key) + d.getTimer.Update(time.Since(start)) + return ret, err + } + + return d.get(key) +} + +func (d *pebbleDB) get(key []byte) ([]byte, error) { dat, closer, err := d.db.Get(key) + defer func() { + if err == nil { + closer.Close() + } + }() + if err != nil { if err == pebble.ErrNotFound { return nil, dataNotFoundErr @@ -327,7 
+359,6 @@ func (d *pebbleDB) Get(key []byte) ([]byte, error) { } ret := make([]byte, len(dat)) copy(ret, dat) - closer.Close() return ret, nil } @@ -338,6 +369,12 @@ func (d *pebbleDB) Put(key []byte, value []byte) error { if d.closed { return pebble.ErrClosed } + if d.perfCheck { + start := time.Now() + err := d.db.Set(key, value, d.writeOptions) + d.putTimer.Update(time.Since(start)) + return err + } return d.db.Set(key, value, d.writeOptions) } @@ -495,12 +532,19 @@ func (d *pebbleDB) meter(refresh time.Duration, namespace string) { d.level0CompGauge.Update(level0CompCount) d.seekCompGauge.Update(stats.Compact.ReadCount) + // data can exist at level 6 even when levels 1 through 5 are empty for i, level := range stats.Levels { - // Append metrics for additional layers - if i >= len(d.levelsGauge) { - d.levelsGauge = append(d.levelsGauge, metrics.GetOrRegisterGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + if i >= len(d.levelSizesGauge) { + prefix := namespace + fmt.Sprintf("level%v/", i) + d.levelTablesGauge = append(d.levelTablesGauge, metrics.GetOrRegisterGauge(prefix+"tables", nil)) + d.levelSizesGauge = append(d.levelSizesGauge, metrics.GetOrRegisterGauge(prefix+"size", nil)) + d.levelReadGauge = append(d.levelReadGauge, metrics.GetOrRegisterGauge(prefix+"read", nil)) + d.levelWriteGauge = append(d.levelWriteGauge, metrics.GetOrRegisterGauge(prefix+"write", nil)) } - d.levelsGauge[i].Update(level.NumFiles) + d.levelSizesGauge[i].Update(level.Size) + d.levelTablesGauge[i].Update(level.NumFiles) + d.levelReadGauge[i].Update(int64(level.BytesRead)) + d.levelWriteGauge[i].Update(int64(level.BytesCompacted)) } // Sleep a bit, then repeat the stats collection @@ -549,6 +593,12 @@ func (b *batch) Write() error { if b.db.closed { return pebble.ErrClosed } + if b.db.perfCheck { + start := time.Now() + err := b.b.Commit(b.db.writeOptions) + b.db.batchWriteTimer.Update(time.Since(start)) + return err + } return b.b.Commit(b.db.writeOptions) } From 
56a6bd6c7bda0973c85be3a1708515b34a694b04 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Wed, 7 Aug 2024 17:01:42 +0900 Subject: [PATCH 14/36] reward: SupplyManager returns wrapped errors --- reward/supply_manager.go | 4 ++-- reward/supply_manager_test.go | 33 +++++++++++++++++++++++++++++---- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 1da5e2bda..22183a6d4 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -50,11 +50,11 @@ var ( ) func errNoCanonicalBurn(err error) error { - return fmt.Errorf("cannot determine canonical (0x0, 0xdead) burn amount: %v", err) + return fmt.Errorf("cannot determine canonical (0x0, 0xdead) burn amount: %w", err) } func errNoRebalanceBurn(err error) error { - return fmt.Errorf("cannot determine rebalance (kip103, kip160) burn amount: %v", err) + return fmt.Errorf("cannot determine rebalance (kip103, kip160) burn amount: %w", err) } // SupplyManager tracks the total supply of native tokens. diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index 291218121..a6f178d77 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -19,6 +19,7 @@ package reward import ( + "errors" "fmt" "math/big" "sort" @@ -47,6 +48,30 @@ import ( "github.com/stretchr/testify/suite" ) +// ---------------------------------------------------------------------------- +// Tests other than SupplyTestSuite + +// Test that the missing trie node errors returned by GetTotalSupply will trigger Upstream EN retry. +func TestSupplyManagerError(t *testing.T) { + // Mimics networks/rpc/handler.go:shouldReauestUpstream() + shouldRequestUpstream := func(err error) bool { + var missingNodeError *statedb.MissingNodeError + return errors.As(err, &missingNodeError) + } + + // If the error contains MissingNodeError, it must trigger Upstream EN retry. 
+ mtn := &statedb.MissingNodeError{NodeHash: common.HexToHash("0x1234")} + assert.True(t, shouldRequestUpstream(errNoCanonicalBurn(mtn))) + assert.True(t, shouldRequestUpstream(errors.Join(errNoRebalanceBurn(errNoRebalanceMemo), errNoCanonicalBurn(mtn)))) + + // Other errors should not trigger Upstream EN retry + assert.False(t, shouldRequestUpstream(errNoAccReward)) + assert.False(t, shouldRequestUpstream(errNoRebalanceBurn(errNoRebalanceMemo))) +} + +// ---------------------------------------------------------------------------- +// SupplyTestSuite + // A test suite with the blockchain having a reward-related history similar to Mainnet. // | Block | Fork | Minting | Ratio | KIP82 | Event | // |----------- |------- |--------- |---------- |------- |---------------------------- | @@ -71,6 +96,10 @@ type SupplyTestSuite struct { sm *supplyManager } +func TestSupplyManager(t *testing.T) { + suite.Run(t, new(SupplyTestSuite)) +} + // ---------------------------------------------------------------------------- // Test cases @@ -294,10 +323,6 @@ func (s *SupplyTestSuite) waitAccReward() { } } -func TestSupplyManager(t *testing.T) { - suite.Run(t, new(SupplyTestSuite)) -} - // ---------------------------------------------------------------------------- // Setup test From ae2c6a9153e9d08acb4619f22ad5f8d8611506da Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Wed, 7 Aug 2024 17:01:58 +0900 Subject: [PATCH 15/36] api: GetTotalSupply can fall back to upstream --- api/api_public_klay.go | 10 +++++++--- networks/rpc/handler.go | 11 +++++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/api/api_public_klay.go b/api/api_public_klay.go index 207a32a60..a0920111c 100644 --- a/api/api_public_klay.go +++ b/api/api_public_klay.go @@ -117,7 +117,9 @@ type TotalSupplyResult struct { Kip160Burn *hexutil.Big `json:"kip160Burn"` // by KIP160 fork. Read from its memo. 
} -func (s *PublicKaiaAPI) GetTotalSupply(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*TotalSupplyResult, error) { +// If showPartial == nil or *showPartial == false, the regular use case, this API either delivers the full result or fails. +// If showPartial == true, the advanced and debugging use case, this API delivers full or best effort partial result. +func (s *PublicKaiaAPI) GetTotalSupply(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, showPartial *bool) (*TotalSupplyResult, error) { block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash) if err != nil { return nil, err @@ -143,11 +145,13 @@ func (s *PublicKaiaAPI) GetTotalSupply(ctx context.Context, blockNrOrHash rpc.Bl Kip103Burn: (*hexutil.Big)(ts.Kip103Burn), Kip160Burn: (*hexutil.Big)(ts.Kip160Burn), } - if err != nil { + if showPartial != nil && *showPartial && err != nil { errStr := err.Error() res.Error = &errStr + return res, nil + } else { + return res, err } - return res, nil } // Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not diff --git a/networks/rpc/handler.go b/networks/rpc/handler.go index 653182ee4..9a6955149 100644 --- a/networks/rpc/handler.go +++ b/networks/rpc/handler.go @@ -25,6 +25,7 @@ package rpc import ( "context" "encoding/json" + "errors" "fmt" "reflect" "strconv" @@ -448,12 +449,10 @@ func (h *handler) runMethod(ctx context.Context, msg *jsonrpcMessage, callb *cal // shouldRequestUpstream is a function that determines whether must be requested upstream. func shouldRequestUpstream(err error) bool { - switch err.(type) { - case *statedb.MissingNodeError: - return true - default: - return false - } + // Checks if the error contains MissingNodeError in the wrapped error chain. + // MissingNodeError is a strong evidence that the node has no state and worth dialing the upstream. 
+ var missingNodeError *statedb.MissingNodeError + return errors.As(err, &missingNodeError) } // requestUpstream is the function to request upstream archive en From 8b94c3f53241ac3fb780af84a9794de5c33b8615 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Thu, 8 Aug 2024 13:57:16 +0900 Subject: [PATCH 16/36] Set version to v1.0.2 --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 2bae31a9b..79d814d12 100644 --- a/params/version.go +++ b/params/version.go @@ -28,7 +28,7 @@ const ( ReleaseNum = 0 VersionMajor = 1 // Major version component of the current release VersionMinor = 0 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release ) // Version holds the textual version string. From 39687ed4e047cff9ca7e72efdf124404602049b5 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Fri, 9 Aug 2024 11:57:04 +0900 Subject: [PATCH 17/36] reward: SupplyManager reaccumulates from nearest persisted AccReward Fix the side effect s.chain.Config().X = Y breaks other tests --- node/cn/backend.go | 2 ++ reward/supply_manager.go | 41 +++++++++++++++++++++++++------ reward/supply_manager_test.go | 45 ++++++++++++++++++++++++++++++++--- 3 files changed, 78 insertions(+), 10 deletions(-) diff --git a/node/cn/backend.go b/node/cn/backend.go index a59df506a..47c6b14b0 100644 --- a/node/cn/backend.go +++ b/node/cn/backend.go @@ -363,6 +363,8 @@ func New(ctx *node.ServiceContext, config *Config) (*CN, error) { // NewStakingManager is called with proper non-nil parameters reward.NewStakingManager(cn.blockchain, governance, cn.chainDB) } + // Note: archive nodes might have TrieBlockInterval == 128, then SupplyManager will store checkpoints every 128 blocks. + // Still it is not a problem since SupplyManager can re-accumulate from the nearest checkpoint. 
cn.supplyManager = reward.NewSupplyManager(cn.blockchain, cn.governance, cn.chainDB, config.TrieBlockInterval) // Governance states which are not yet applied to the db remains at in-memory storage diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 22183a6d4..e8b0482d3 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -40,6 +40,7 @@ import ( var ( supplyCacheSize = 86400 // A day; Some total supply consumers might want daily supply. supplyLogInterval = uint64(102400) // Periodic total supply log. + supplyReaccLimit = uint64(1024) // Re-accumulate from the last accumulated block. zeroBurnAddress = common.HexToAddress("0x0") deadBurnAddress = common.HexToAddress("0xdead") @@ -143,9 +144,9 @@ func (sm *supplyManager) GetAccReward(num uint64) (*database.AccReward, error) { return nil, errNoAccReward } - accReward := sm.db.ReadAccReward(num) - if accReward == nil { - return nil, errNoAccReward + accReward, err := sm.getAccRewardUncached(num) + if err != nil { + return nil, err } sm.accRewardCache.Add(num, accReward.Copy()) @@ -318,7 +319,7 @@ func (sm *supplyManager) catchup() { for lastNum < headNum { logger.Info("Total supply big step catchup", "last", lastNum, "head", headNum, "minted", lastAccReward.Minted.String(), "burntFee", lastAccReward.BurntFee.String()) - accReward, err := sm.accumulateReward(lastNum, headNum, lastAccReward) + accReward, err := sm.accumulateReward(lastNum, headNum, lastAccReward, true) if err != nil { if err != errSupplyManagerQuit { logger.Error("Total supply accumulate failed", "from", lastNum, "to", headNum, "err", err) @@ -341,7 +342,7 @@ func (sm *supplyManager) catchup() { case head := <-sm.chainHeadChan: headNum = head.Block.NumberU64() - supply, err := sm.accumulateReward(lastNum, headNum, lastAccReward) + supply, err := sm.accumulateReward(lastNum, headNum, lastAccReward, true) if err != nil { if err != errSupplyManagerQuit { logger.Error("Total supply accumulate failed", "from", lastNum, 
"to", headNum, "err", err) @@ -379,9 +380,35 @@ func (sm *supplyManager) totalSupplyFromState(num uint64) (*big.Int, error) { return totalSupply, nil } +func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, error) { + accReward := sm.db.ReadAccReward(num) + if accReward != nil { + return accReward, nil + } + + // Trace back to the last stored accumulated reward. + var fromNum uint64 + var fromAcc *database.AccReward + for i := uint64(1); i < supplyReaccLimit; i++ { + accReward = sm.db.ReadAccReward(num - i) + if accReward != nil { + fromNum = num - i + fromAcc = accReward + break + } + } + if fromAcc == nil { + return nil, errNoAccReward + } + + logger.Trace("on-demand reaccumulating rewards", "from", fromNum, "to", num) + return sm.accumulateReward(fromNum, num, fromAcc, false) +} + // accumulateReward calculates the total supply from the last block to the current block. // Given supply at `from` is `fromSupply`, calculate the supply until `to`, inclusive. -func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.AccReward) (*database.AccReward, error) { +// If `write` is true, the result will be written to the database. +func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.AccReward, write bool) (*database.AccReward, error) { accReward := fromAcc.Copy() // make a copy because we're updating it in-place. for num := from + 1; num <= to; num++ { @@ -410,7 +437,7 @@ func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.Acc // Store to database, print progress log. 
sm.accRewardCache.Add(num, accReward.Copy()) - if (num % sm.checkpointInterval) == 0 { + if write && (num%sm.checkpointInterval) == 0 { sm.db.WriteAccReward(num, accReward) sm.db.WriteLastAccRewardBlockNumber(num) } diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index a6f178d77..de277760b 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -311,6 +311,44 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { assert.Nil(t, ts) } +// Test that when db.AccReward are missing, GetTotalSupply will re-accumulate from the nearest stored AccReward. +func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { + t := s.T() + s.setupHistory() + s.sm.Start() + defer s.sm.Stop() + s.waitAccReward() + + // Delete AccRewards not on the default block interval (128). + // This happens on full nodes, and archive nodes with default BlockInterval config. + // Note that archive nodes ars allowed to have BlockInterval > 1, still tries are committed every block. + for num := uint64(0); num <= 400; num++ { + if num%128 != 0 { + // Because it's for testing, we do not add db.DeleteAccReward method. + s.db.GetMiscDB().Delete(append([]byte("accReward"), common.Int64ToByteBigEndian(num)...)) + } + } + + // Still, all block data must be available. 
+ testcases := s.testcases() + for _, tc := range testcases { + s.sm.accRewardCache.Purge() + ts, err := s.sm.GetTotalSupply(tc.number) + require.NoError(t, err) + + expected := tc.expectTotalSupply + actual := ts + bigEqual(t, expected.TotalSupply, actual.TotalSupply, tc.number) + bigEqual(t, expected.TotalMinted, actual.TotalMinted, tc.number) + bigEqual(t, expected.TotalBurnt, actual.TotalBurnt, tc.number) + bigEqual(t, expected.BurntFee, actual.BurntFee, tc.number) + bigEqual(t, expected.ZeroBurn, actual.ZeroBurn, tc.number) + bigEqual(t, expected.DeadBurn, actual.DeadBurn, tc.number) + bigEqual(t, expected.Kip103Burn, actual.Kip103Burn, tc.number) + bigEqual(t, expected.Kip160Burn, actual.Kip160Burn, tc.number) + } +} + func (s *SupplyTestSuite) waitAccReward() { for i := 0; i < 1000; i++ { // wait 10 seconds until catchup complete if s.db.ReadLastAccRewardBlockNumber() >= 400 { @@ -421,8 +459,9 @@ func (s *SupplyTestSuite) SetupTest() { t := s.T() s.db = database.NewMemoryDBManager() + chainConfig := s.config.Copy() // to avoid some tests (i.e. 
PartialInfo) breaking other tests genesis := &blockchain.Genesis{ - Config: s.config, + Config: chainConfig, Timestamp: uint64(time.Now().Unix()), BlockScore: common.Big1, Alloc: blockchain.GenesisAlloc{ @@ -443,11 +482,11 @@ func (s *SupplyTestSuite) SetupTest() { TriesInMemory: 128, TrieNodeCacheConfig: statedb.GetEmptyTrieNodeCacheConfig(), } - chain, err := blockchain.NewBlockChain(s.db, cacheConfig, s.config, s.engine, vm.Config{}) + chain, err := blockchain.NewBlockChain(s.db, cacheConfig, chainConfig, s.engine, vm.Config{}) require.NoError(t, err) s.chain = chain - s.sm = NewSupplyManager(s.chain, s.gov, s.db, 1) + s.sm = NewSupplyManager(s.chain, s.gov, s.db, 1) // 1 interval for testing } func (s *SupplyTestSuite) setupHistory() { From c14013755c4f8ca6aba895dc64bd3586f406f214 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Fri, 9 Aug 2024 14:46:57 +0900 Subject: [PATCH 18/36] reward: Optimize SupplyManager.GetAccReward --- reward/supply_manager.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/reward/supply_manager.go b/reward/supply_manager.go index e8b0482d3..85301ea9a 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -389,12 +389,20 @@ func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, // Trace back to the last stored accumulated reward. var fromNum uint64 var fromAcc *database.AccReward - for i := uint64(1); i < supplyReaccLimit; i++ { - accReward = sm.db.ReadAccReward(num - i) - if accReward != nil { - fromNum = num - i - fromAcc = accReward - break + + // Fast path using checkpointInterval + if accReward := sm.db.ReadAccReward(num - num%sm.checkpointInterval); accReward != nil { + fromNum = num - num%sm.checkpointInterval + fromAcc = accReward + } else { + // Slow path in case the checkpoint has changed or checkpoint is missing. 
+ for i := uint64(1); i < supplyReaccLimit; i++ { + accReward = sm.db.ReadAccReward(num - i) + if accReward != nil { + fromNum = num - i + fromAcc = accReward + break + } } } if fromAcc == nil { @@ -408,6 +416,8 @@ func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, // accumulateReward calculates the total supply from the last block to the current block. // Given supply at `from` is `fromSupply`, calculate the supply until `to`, inclusive. // If `write` is true, the result will be written to the database. +// If `write` is false, the result will not be written to the database, +// to prevent overwriting LastAccRewardBlockNumber (essentially rollback) and to keep the disk size small (only store at checkpointInterval). func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.AccReward, write bool) (*database.AccReward, error) { accReward := fromAcc.Copy() // make a copy because we're updating it in-place. From 2ee4e688f2de3d460a77620912a02507f05ebc1f Mon Sep 17 00:00:00 2001 From: osrm <90407222+osrm@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:03:36 +0900 Subject: [PATCH 19/36] chore: fix typo voting.md "Klatyn" -> "Klaytn" "paramters" -> "parameters" "Voting contract has two category..." -> "Voting contract has two categories..." --- contracts/docs/voting.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contracts/docs/voting.md b/contracts/docs/voting.md index 7fa7283d6..7ac012b24 100644 --- a/contracts/docs/voting.md +++ b/contracts/docs/voting.md @@ -2,7 +2,7 @@ ## Klaytn governance structure -Klatyn as it is today is run by a permissioned validators called the Governance Council (GC). Each GC member run one Consensus Node (CN) to produce and validate blocks. The GC members also have authority to change Klaytn's network parameters such as gas price and block reward. Our governance contracts are trying to facilitate a transparent and stake-based voting by the GC members. 
+Klaytn as it is today is run by permissioned validators called the Governance Council (GC). Each GC member runs one Consensus Node (CN) to produce and validate blocks. The GC members also have authority to change Klaytn's network parameters such as gas price and block reward. Our governance contracts are trying to facilitate a transparent and stake-based voting by the GC members. ### Voting power @@ -38,7 +38,7 @@ The [KIP-81 as of 2022-11-09](https://github.com/klaytn/kips/blob/a1d99a58a60d0e ## Access Control -Voting contract has two category of users, secretary and voters. +Voting contract has two categories of users, secretary and voters. - The secretary is a single account stored in the `secretary` variable. This account is intended to be controlled by the Klaytn Foundation. It will serve the GC by assisting administrative works such as submitting and executing proposals. - Voters are identified by their `NodeID`. The list of voters differs per proposal, depending on the list of GC members registered in AddressBook and their staking amounts at the time of proposal submission. @@ -62,7 +62,7 @@ Each function has different access control rule. In Klaytn there exists a on-chain governance system leveraging block headers ([docs](https://docs.klaytn.foundation/content/dapp/json-rpc/api-references/governance), [explainer in Korean](https://www.youtube.com/watch?v=UPyf7B0YvI0)). Major complaint about the existing system was that it takes 1 to 2 weeks for a parameter change to take effect ([example](https://medium.com/klaytn/klaytn-gas-price-reduction-schedule-2ba158e3630d)) -Therefore the Voting contract will allow each proposal to have custom timing. The `propose()` function accepts two timing paramters `votingDelay` and `votingPeriod`, given that they fall within the `timingRule` ranges. +Therefore the Voting contract will allow each proposal to have custom timing.
The `propose()` function accepts two timing parameters `votingDelay` and `votingPeriod`, given that they fall within the `timingRule` ranges. In the default setting, both `votingDelay` and `votingPeriod` are bound between 1 and 28 days. This makes the quickest proposal execution down to 4 days (1 day Pending, 1 day Active, 2 days Queued). From 528b69cbebdad117b63be6d68dec903be485aebe Mon Sep 17 00:00:00 2001 From: osrm <90407222+osrm@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:06:53 +0900 Subject: [PATCH 20/36] chore: refine texts CnStakingV2.md correction typos and grammar --- contracts/docs/CnStakingV2.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contracts/docs/CnStakingV2.md b/contracts/docs/CnStakingV2.md index ec89e6aa6..d71d400fd 100644 --- a/contracts/docs/CnStakingV2.md +++ b/contracts/docs/CnStakingV2.md @@ -18,7 +18,7 @@ CnStakingV2 has built-in multisig facility that requires certain number of appro - `requirement`: the required number of approvals. - `isAdmin`: true for admins. -## Contract intialization +## Contract Initialization Initializing a CnStakingV2 takes multiple steps involving many entities. 1. `constructor()`, usually by the `contractValidator`, or CV. @@ -119,7 +119,7 @@ struct WithdrawalRequest { ``` A withdrawal request can be in following `WithdrawalStakingState` -- Unknown: before transfer or cancellation. Unlike `RequestState.Unknown`, `WithdrawalStakingState.Unknwon` is a meaningful enum. +- Unknown: before transfer or cancellation. Unlike `RequestState.Unknown`, `WithdrawalStakingState.Unknown` is a meaningful enum. - Transferred: successfully executed. - Canceled: canceled due to timeout or an explicit cancellation. @@ -139,7 +139,7 @@ Related functions: A Klaytn GC member uses several accounts for different purposes. Some of them are appointed via CnStakingV2 contract. 
Related functions: -- `multisig UpdateRewardAddress(addr)`: Update the `rewardAddress` and also update to the AddressBook contract. +- `multisig UpdateRewardAddress(addr)`: Update the `rewardAddress` and also update the AddressBook contract. - `multisig UpdateStakingTracker(addr)`: Update the `stakingTracker` address. CnStakingV2 contract notifies staking balance change and voter address change to the StakingTracker. - `multisig UpdateVoterAddress(addr)`: Update the `voterAddress` and also update to the StakingTracker contract. From e0538f34ab86ef545fe38f1b5e54062c256eef6a Mon Sep 17 00:00:00 2001 From: osrm <90407222+osrm@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:08:52 +0900 Subject: [PATCH 21/36] chore: fix typo PublicDelegation.md compunded -> compounded --- contracts/docs/PublicDelegation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/docs/PublicDelegation.md b/contracts/docs/PublicDelegation.md index 630111058..affe95c81 100644 --- a/contracts/docs/PublicDelegation.md +++ b/contracts/docs/PublicDelegation.md @@ -2,7 +2,7 @@ The public delegation (PD) is a non-transferable ERC-4626 based contract that allows general users to delegate and re-delegate their KAIA to a Kaia Governance Council (GC) who uses CnStakingV3MultiSig (CnSV3) and enable PD. -It mints the tokenized shares to the delegator, which is called `pdKAIA`. The `pdKAIA` is a non-transferable interest-bearing token that represents the delegator's share of the total KAIA delegated to the GC. As rewards are compunded, the exchange rate of `pdKAIA` to KAIA increases. The delegator can burn the `pdKAIA` to get the KAIA back. All the math comes from the ERC-4626 standard. +It mints the tokenized shares to the delegator, which is called `pdKAIA`. The `pdKAIA` is a non-transferable interest-bearing token that represents the delegator's share of the total KAIA delegated to the GC. As rewards are compounded, the exchange rate of `pdKAIA` to KAIA increases. 
The delegator can burn the `pdKAIA` to get the KAIA back. All the math comes from the ERC-4626 standard. Unlike usual ERC-4626 vault contracts, the reward is directly distributed to PD contract by state modification at the consensus-level. The reward will be automatically compounded to the CnSV3 contract. From 289dba2b8ed3d5c03aa2925a64dece8267a75ed2 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Mon, 12 Aug 2024 10:28:59 +0900 Subject: [PATCH 22/36] console: kaia.getTotalSupply accepts second optional param --- console/web3ext/web3ext.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/console/web3ext/web3ext.go b/console/web3ext/web3ext.go index cc38de0db..b1b540ef5 100644 --- a/console/web3ext/web3ext.go +++ b/console/web3ext/web3ext.go @@ -1161,8 +1161,8 @@ var klayMethods = [ new web3._extend.Method({ name: 'getTotalSupply', call: 'klay_getTotalSupply', - params: 1, - inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter] + params: 2, + inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, function (val) { return !!val; }] }), new web3._extend.Method({ name: 'getProof', From 0cdb8b264be0304550b42ea2caba9fb52f14b24c Mon Sep 17 00:00:00 2001 From: rubyisrust Date: Mon, 12 Aug 2024 16:53:13 +0800 Subject: [PATCH 23/36] chore: fix some function names Signed-off-by: rubyisrust --- api/debug/trace.go | 2 +- blockchain/types/account/account.go | 2 +- blockchain/types/account/account_serializer.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/api/debug/trace.go b/api/debug/trace.go index 2a9dcbe95..422bf6296 100644 --- a/api/debug/trace.go +++ b/api/debug/trace.go @@ -51,7 +51,7 @@ func (h *HandlerT) StartGoTrace(file string) error { return nil } -// StopTrace stops an ongoing trace. +// StopGoTrace stops an ongoing trace. 
func (h *HandlerT) StopGoTrace() error { h.mu.Lock() defer h.mu.Unlock() diff --git a/blockchain/types/account/account.go b/blockchain/types/account/account.go index cd1030c9a..3d02e7125 100644 --- a/blockchain/types/account/account.go +++ b/blockchain/types/account/account.go @@ -148,7 +148,7 @@ func NewAccountWithType(t AccountType) (Account, error) { return nil, ErrUndefinedAccountType } -// NewAccountWithType creates an Account object initialized with the given map. +// NewAccountWithMap creates an Account object initialized with the given map. func NewAccountWithMap(t AccountType, values map[AccountValueKeyType]interface{}) (Account, error) { switch t { case LegacyAccountType: diff --git a/blockchain/types/account/account_serializer.go b/blockchain/types/account/account_serializer.go index ec0267f92..e338bc94d 100644 --- a/blockchain/types/account/account_serializer.go +++ b/blockchain/types/account/account_serializer.go @@ -57,7 +57,7 @@ func NewAccountSerializerExt() *AccountSerializer { return &AccountSerializer{preserveExtHash: true} } -// NewAccountSerializerWithAccount creates a new AccountSerializer object with the given account. +// NewAccountSerializerExtWithAccount creates a new AccountSerializer object with the given account. 
func NewAccountSerializerExtWithAccount(a Account) *AccountSerializer { return &AccountSerializer{a.Type(), a, true} } From 2474b4621e140cff86e0e89f204c23a8f03674c4 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 13:21:51 +0900 Subject: [PATCH 24/36] database: Rename SupplyCheckpoint accessors --- api/api_public_klay.go | 2 +- reward/supply_manager.go | 28 +++++++++++----------- reward/supply_manager_test.go | 6 ++--- storage/database/db_manager.go | 36 ++++++++++++++--------------- storage/database/db_manager_test.go | 16 ++++++------- storage/database/schema.go | 9 ++++---- 6 files changed, 48 insertions(+), 49 deletions(-) diff --git a/api/api_public_klay.go b/api/api_public_klay.go index a0920111c..7e0e3f0c4 100644 --- a/api/api_public_klay.go +++ b/api/api_public_klay.go @@ -110,7 +110,7 @@ type TotalSupplyResult struct { TotalSupply *hexutil.Big `json:"totalSupply"` // The total supply of the native token. i.e. Minted - Burnt. TotalMinted *hexutil.Big `json:"totalMinted"` // Total minted amount. TotalBurnt *hexutil.Big `json:"totalBurnt"` // Total burnt amount. Sum of all burnt amounts below. - BurntFee *hexutil.Big `json:"burntFee"` // from tx fee burn. ReadAccReward(num).BurntFee. + BurntFee *hexutil.Big `json:"burntFee"` // from tx fee burn. ZeroBurn *hexutil.Big `json:"zeroBurn"` // balance of 0x0 (zero) address. DeadBurn *hexutil.Big `json:"deadBurn"` // balance of 0xdead (dead) address. Kip103Burn *hexutil.Big `json:"kip103Burn"` // by KIP103 fork. Read from its memo. diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 85301ea9a..04725666b 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -78,7 +78,7 @@ type TotalSupply struct { TotalSupply *big.Int // The total supply of the native token. i.e. Minted - Burnt. TotalMinted *big.Int // Total minted amount. TotalBurnt *big.Int // Total burnt amount. Sum of all burnt amounts below. - BurntFee *big.Int // from tx fee burn. ReadAccReward(num).BurntFee. 
+ BurntFee *big.Int // from tx fee burn. ZeroBurn *big.Int // balance of 0x0 (zero) address. DeadBurn *big.Int // balance of 0xdead (dead) address. Kip103Burn *big.Int // by KIP103 fork. Read from its memo. @@ -139,7 +139,7 @@ func (sm *supplyManager) GetAccReward(num uint64) (*database.AccReward, error) { return accReward.(*database.AccReward), nil } - lastNum := sm.db.ReadLastAccRewardBlockNumber() + lastNum := sm.db.ReadLastSupplyCheckpointNumber() if lastNum < num { // soft deleted return nil, errNoAccReward } @@ -284,12 +284,12 @@ func (sm *supplyManager) catchup() { var ( headNum = sm.chain.CurrentBlock().NumberU64() - lastNum = sm.db.ReadLastAccRewardBlockNumber() + lastNum = sm.db.ReadLastSupplyCheckpointNumber() ) - if lastNum > 0 && sm.db.ReadAccReward(lastNum) == nil { + if lastNum > 0 && sm.db.ReadSupplyCheckpoint(lastNum) == nil { logger.Error("Last accumulated reward not found. Restarting supply catchup", "last", lastNum, "head", headNum) - sm.db.WriteLastAccRewardBlockNumber(0) // soft reset to genesis + sm.db.WriteLastSupplyCheckpointNumber(0) // soft reset to genesis lastNum = 0 } @@ -301,17 +301,17 @@ func (sm *supplyManager) catchup() { logger.Error("totalSupplyFromState failed", "number", 0, "err", err) return } - sm.db.WriteAccReward(0, &database.AccReward{ + sm.db.WriteSupplyCheckpoint(0, &database.AccReward{ Minted: genesisTotalSupply, BurntFee: big.NewInt(0), }) - sm.db.WriteLastAccRewardBlockNumber(0) + sm.db.WriteLastSupplyCheckpointNumber(0) lastNum = 0 logger.Info("Stored genesis total supply", "supply", genesisTotalSupply) } - lastNum = sm.db.ReadLastAccRewardBlockNumber() - lastAccReward := sm.db.ReadAccReward(lastNum) + lastNum = sm.db.ReadLastSupplyCheckpointNumber() + lastAccReward := sm.db.ReadSupplyCheckpoint(lastNum) // Big-step catchup; accumulate until the head block as of now. // The head block can be obsolete by the time catchup finished, so the big-step can end up being a bit short. 
@@ -381,7 +381,7 @@ func (sm *supplyManager) totalSupplyFromState(num uint64) (*big.Int, error) { } func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, error) { - accReward := sm.db.ReadAccReward(num) + accReward := sm.db.ReadSupplyCheckpoint(num) if accReward != nil { return accReward, nil } @@ -391,13 +391,13 @@ func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, var fromAcc *database.AccReward // Fast path using checkpointInterval - if accReward := sm.db.ReadAccReward(num - num%sm.checkpointInterval); accReward != nil { + if accReward := sm.db.ReadSupplyCheckpoint(num - num%sm.checkpointInterval); accReward != nil { fromNum = num - num%sm.checkpointInterval fromAcc = accReward } else { // Slow path in case the checkpoint has changed or checkpoint is missing. for i := uint64(1); i < supplyReaccLimit; i++ { - accReward = sm.db.ReadAccReward(num - i) + accReward = sm.db.ReadSupplyCheckpoint(num - i) if accReward != nil { fromNum = num - i fromAcc = accReward @@ -448,8 +448,8 @@ func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.Acc // Store to database, print progress log. 
sm.accRewardCache.Add(num, accReward.Copy()) if write && (num%sm.checkpointInterval) == 0 { - sm.db.WriteAccReward(num, accReward) - sm.db.WriteLastAccRewardBlockNumber(num) + sm.db.WriteSupplyCheckpoint(num, accReward) + sm.db.WriteLastSupplyCheckpointNumber(num) } if (num % supplyLogInterval) == 0 { logger.Info("Accumulated block rewards", "number", num, "minted", accReward.Minted.String(), "burntFee", accReward.BurntFee.String()) diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index de277760b..f1faa7e68 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -304,7 +304,7 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { assert.Equal(t, expected.Kip160Burn, ts.Kip160Burn) // No AccReward - s.db.WriteLastAccRewardBlockNumber(num - 1) + s.db.WriteLastSupplyCheckpointNumber(num - 1) s.sm.accRewardCache.Purge() ts, err = s.sm.GetTotalSupply(num) assert.ErrorIs(t, err, errNoAccReward) @@ -351,12 +351,12 @@ func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { func (s *SupplyTestSuite) waitAccReward() { for i := 0; i < 1000; i++ { // wait 10 seconds until catchup complete - if s.db.ReadLastAccRewardBlockNumber() >= 400 { + if s.db.ReadLastSupplyCheckpointNumber() >= 400 { break } time.Sleep(10 * time.Millisecond) } - if s.db.ReadLastAccRewardBlockNumber() < 400 { + if s.db.ReadLastSupplyCheckpointNumber() < 400 { s.T().Fatal("Catchup not finished in time") } } diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index 69014d941..b857986df 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -300,11 +300,11 @@ type DBManager interface { HasStakingInfo(blockNum uint64) (bool, error) DeleteStakingInfo(blockNum uint64) - // Accumulated block rewards functions - ReadAccReward(blockNum uint64) *AccReward - WriteAccReward(blockNum uint64, accReward *AccReward) - ReadLastAccRewardBlockNumber() uint64 - WriteLastAccRewardBlockNumber(blockNum uint64) + // 
TotalSupply checkpoint functions + ReadSupplyCheckpoint(blockNum uint64) *AccReward + WriteSupplyCheckpoint(blockNum uint64, accReward *AccReward) + ReadLastSupplyCheckpointNumber() uint64 + WriteLastSupplyCheckpointNumber(blockNum uint64) // DB migration related function StartDBMigration(DBManager) error @@ -2875,11 +2875,11 @@ func (dbm *databaseManager) ReadGovernanceState() ([]byte, error) { return db.Get(governanceStateKey) } -// ReadAccReward retrieves the accumulated reward (minted, burntFee) up to a specific block number. -// Returns nil if the accumulated reward is not stored. -func (dbm *databaseManager) ReadAccReward(blockNum uint64) *AccReward { +// ReadSupplyCheckpoint retrieves the SupplyCheckpoint for a block number +// Returns nil if the SupplyCheckpoint is not found. +func (dbm *databaseManager) ReadSupplyCheckpoint(blockNum uint64) *AccReward { db := dbm.getDatabase(MiscDB) - data, err := db.Get(accRewardKey(blockNum)) + data, err := db.Get(supplyCheckpointKey(blockNum)) if len(data) == 0 || err != nil { return nil } @@ -2896,8 +2896,8 @@ func (dbm *databaseManager) ReadAccReward(blockNum uint64) *AccReward { } } -// WriteAccReward stores the accumulated reward (minted, burntFee) up to a specific block number. -func (dbm *databaseManager) WriteAccReward(blockNum uint64, accReward *AccReward) { +// WriteSupplyCheckpoint stores the SupplyCheckpoint for a specific block number. 
+func (dbm *databaseManager) WriteSupplyCheckpoint(blockNum uint64, accReward *AccReward) { db := dbm.getDatabase(MiscDB) stored := struct { Minted []byte @@ -2910,15 +2910,15 @@ func (dbm *databaseManager) WriteAccReward(blockNum uint64, accReward *AccReward if err != nil { logger.Crit("Failed to write accumulated reward", "err", err) } - if err := db.Put(accRewardKey(blockNum), data); err != nil { + if err := db.Put(supplyCheckpointKey(blockNum), data); err != nil { logger.Crit("Failed to write accumulated reward", "err", err) } } -// ReadLastAccRewardBlockNumber retrieves the last block number for which the accumulated reward is stored. -func (dbm *databaseManager) ReadLastAccRewardBlockNumber() uint64 { +// ReadLastSupplyCheckpointNumber retrieves the highest number for which the SupplyCheckpoint is stored. +func (dbm *databaseManager) ReadLastSupplyCheckpointNumber() uint64 { db := dbm.getDatabase(MiscDB) - data, err := db.Get(lastAccRewardBlockNumberKey) + data, err := db.Get(lastSupplyCheckpointNumberKey) if len(data) == 0 || err != nil { return 0 } else { @@ -2926,11 +2926,11 @@ func (dbm *databaseManager) ReadLastAccRewardBlockNumber() uint64 { } } -// WriteLastAccRewardBlockNumber stores the last block number for which the accumulated reward is stored. -func (dbm *databaseManager) WriteLastAccRewardBlockNumber(blockNum uint64) { +// WriteLastSupplyCheckpointNumber stores the highest number for which the SupplyCheckpoint is stored. 
+func (dbm *databaseManager) WriteLastSupplyCheckpointNumber(blockNum uint64) { db := dbm.getDatabase(MiscDB) data := common.Int64ToByteBigEndian(blockNum) - if err := db.Put(lastAccRewardBlockNumberKey, data); err != nil { + if err := db.Put(lastSupplyCheckpointNumberKey, data); err != nil { logger.Crit("Failed to write last accumulated reward block number", "err", err) } } diff --git a/storage/database/db_manager_test.go b/storage/database/db_manager_test.go index 0059652ee..212397f67 100644 --- a/storage/database/db_manager_test.go +++ b/storage/database/db_manager_test.go @@ -824,7 +824,7 @@ func TestDBManager_Governance(t *testing.T) { func TestDBManager_AccReward(t *testing.T) { for _, dbm := range dbManagers { - // AccReward + // SupplyCheckpoint testcases := []struct { Number uint64 AccReward *AccReward @@ -835,16 +835,16 @@ func TestDBManager_AccReward(t *testing.T) { {4000, &AccReward{big.NewInt(0), big.NewInt(0)}}, } for _, tc := range testcases { - assert.Nil(t, dbm.ReadAccReward(tc.Number)) - dbm.WriteAccReward(tc.Number, tc.AccReward) - assert.Equal(t, tc.AccReward, dbm.ReadAccReward(tc.Number)) + assert.Nil(t, dbm.ReadSupplyCheckpoint(tc.Number)) + dbm.WriteSupplyCheckpoint(tc.Number, tc.AccReward) + assert.Equal(t, tc.AccReward, dbm.ReadSupplyCheckpoint(tc.Number)) } - // LastAccRewardBlockNumber + // LastSupplyCheckpointNumber lastNum := uint64(54321) - assert.Zero(t, dbm.ReadLastAccRewardBlockNumber()) - dbm.WriteLastAccRewardBlockNumber(lastNum) - assert.Equal(t, lastNum, dbm.ReadLastAccRewardBlockNumber()) + assert.Zero(t, dbm.ReadLastSupplyCheckpointNumber()) + dbm.WriteLastSupplyCheckpointNumber(lastNum) + assert.Equal(t, lastNum, dbm.ReadLastSupplyCheckpointNumber()) } } diff --git a/storage/database/schema.go b/storage/database/schema.go index dce7d3af9..027f560a4 100644 --- a/storage/database/schema.go +++ b/storage/database/schema.go @@ -132,8 +132,8 @@ var ( stakingInfoPrefix = []byte("stakingInfo") - accRewardPrefix = []byte("accReward") 
- lastAccRewardBlockNumberKey = []byte("lastAccRewardBlockNumber") + supplyCheckpointPrefix = []byte("accReward") + lastSupplyCheckpointNumberKey = []byte("lastAccRewardBlockNumber") chaindatafetcherCheckpointKey = []byte("chaindatafetcherCheckpoint") ) @@ -312,7 +312,6 @@ func (ar *AccReward) Copy() *AccReward { } } -// AccRewardKey = accRewardPrefix + blockNumber -func accRewardKey(blockNumber uint64) []byte { - return append(accRewardPrefix, common.Int64ToByteBigEndian(blockNumber)...) +func supplyCheckpointKey(blockNumber uint64) []byte { + return append(supplyCheckpointPrefix, common.Int64ToByteBigEndian(blockNumber)...) } From 0275f540daba1b4acbef2f2d6ebd9e40ba0d00ad Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 13:23:33 +0900 Subject: [PATCH 25/36] database: Rename SupplyCheckpoint type --- reward/supply_manager.go | 12 ++++++------ storage/database/db_manager.go | 14 +++++++------- storage/database/db_manager_test.go | 10 +++++----- storage/database/schema.go | 10 +++++----- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 04725666b..fa9e79527 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -134,9 +134,9 @@ func (sm *supplyManager) Stop() { } } -func (sm *supplyManager) GetAccReward(num uint64) (*database.AccReward, error) { +func (sm *supplyManager) GetAccReward(num uint64) (*database.SupplyCheckpoint, error) { if accReward, ok := sm.accRewardCache.Get(num); ok { - return accReward.(*database.AccReward), nil + return accReward.(*database.SupplyCheckpoint), nil } lastNum := sm.db.ReadLastSupplyCheckpointNumber() @@ -301,7 +301,7 @@ func (sm *supplyManager) catchup() { logger.Error("totalSupplyFromState failed", "number", 0, "err", err) return } - sm.db.WriteSupplyCheckpoint(0, &database.AccReward{ + sm.db.WriteSupplyCheckpoint(0, &database.SupplyCheckpoint{ Minted: genesisTotalSupply, BurntFee: big.NewInt(0), }) @@ -380,7 +380,7 @@ func (sm 
*supplyManager) totalSupplyFromState(num uint64) (*big.Int, error) { return totalSupply, nil } -func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, error) { +func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.SupplyCheckpoint, error) { accReward := sm.db.ReadSupplyCheckpoint(num) if accReward != nil { return accReward, nil @@ -388,7 +388,7 @@ func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, // Trace back to the last stored accumulated reward. var fromNum uint64 - var fromAcc *database.AccReward + var fromAcc *database.SupplyCheckpoint // Fast path using checkpointInterval if accReward := sm.db.ReadSupplyCheckpoint(num - num%sm.checkpointInterval); accReward != nil { @@ -418,7 +418,7 @@ func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.AccReward, // If `write` is true, the result will be written to the database. // If `write` is false, the result will not be written to the database, // to prevent overwriting LastAccRewardBlockNumber (essentially rollback) and to keep the disk size small (only store at checkpointInterval). -func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.AccReward, write bool) (*database.AccReward, error) { +func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.SupplyCheckpoint, write bool) (*database.SupplyCheckpoint, error) { accReward := fromAcc.Copy() // make a copy because we're updating it in-place. 
for num := from + 1; num <= to; num++ { diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index b857986df..c4b8695a6 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -301,8 +301,8 @@ type DBManager interface { DeleteStakingInfo(blockNum uint64) // TotalSupply checkpoint functions - ReadSupplyCheckpoint(blockNum uint64) *AccReward - WriteSupplyCheckpoint(blockNum uint64, accReward *AccReward) + ReadSupplyCheckpoint(blockNum uint64) *SupplyCheckpoint + WriteSupplyCheckpoint(blockNum uint64, checkpoint *SupplyCheckpoint) ReadLastSupplyCheckpointNumber() uint64 WriteLastSupplyCheckpointNumber(blockNum uint64) @@ -2877,7 +2877,7 @@ func (dbm *databaseManager) ReadGovernanceState() ([]byte, error) { // ReadSupplyCheckpoint retrieves the SupplyCheckpoint for a block number // Returns nil if the SupplyCheckpoint is not found. -func (dbm *databaseManager) ReadSupplyCheckpoint(blockNum uint64) *AccReward { +func (dbm *databaseManager) ReadSupplyCheckpoint(blockNum uint64) *SupplyCheckpoint { db := dbm.getDatabase(MiscDB) data, err := db.Get(supplyCheckpointKey(blockNum)) if len(data) == 0 || err != nil { @@ -2890,21 +2890,21 @@ func (dbm *databaseManager) ReadSupplyCheckpoint(blockNum uint64) *AccReward { if err := rlp.DecodeBytes(data, &stored); err != nil { logger.Crit("Corrupt accumulated reward", "err", err) } - return &AccReward{ + return &SupplyCheckpoint{ Minted: new(big.Int).SetBytes(stored.Minted), BurntFee: new(big.Int).SetBytes(stored.BurntFee), } } // WriteSupplyCheckpoint stores the SupplyCheckpoint for a specific block number. 
-func (dbm *databaseManager) WriteSupplyCheckpoint(blockNum uint64, accReward *AccReward) { +func (dbm *databaseManager) WriteSupplyCheckpoint(blockNum uint64, checkpoint *SupplyCheckpoint) { db := dbm.getDatabase(MiscDB) stored := struct { Minted []byte BurntFee []byte }{ - Minted: accReward.Minted.Bytes(), - BurntFee: accReward.BurntFee.Bytes(), + Minted: checkpoint.Minted.Bytes(), + BurntFee: checkpoint.BurntFee.Bytes(), } data, err := rlp.EncodeToBytes(stored) if err != nil { diff --git a/storage/database/db_manager_test.go b/storage/database/db_manager_test.go index 212397f67..0573d4369 100644 --- a/storage/database/db_manager_test.go +++ b/storage/database/db_manager_test.go @@ -827,12 +827,12 @@ func TestDBManager_AccReward(t *testing.T) { // SupplyCheckpoint testcases := []struct { Number uint64 - AccReward *AccReward + AccReward *SupplyCheckpoint }{ - {1000, &AccReward{big.NewInt(1111), big.NewInt(99)}}, - {2000, &AccReward{big.NewInt(0), big.NewInt(88)}}, - {3000, &AccReward{big.NewInt(2222), big.NewInt(0)}}, - {4000, &AccReward{big.NewInt(0), big.NewInt(0)}}, + {1000, &SupplyCheckpoint{big.NewInt(1111), big.NewInt(99)}}, + {2000, &SupplyCheckpoint{big.NewInt(0), big.NewInt(88)}}, + {3000, &SupplyCheckpoint{big.NewInt(2222), big.NewInt(0)}}, + {4000, &SupplyCheckpoint{big.NewInt(0), big.NewInt(0)}}, } for _, tc := range testcases { assert.Nil(t, dbm.ReadSupplyCheckpoint(tc.Number)) diff --git a/storage/database/schema.go b/storage/database/schema.go index 027f560a4..de326847d 100644 --- a/storage/database/schema.go +++ b/storage/database/schema.go @@ -300,15 +300,15 @@ func parsePruningMarkKey(key []byte) PruningMark { } } -type AccReward struct { +type SupplyCheckpoint struct { Minted *big.Int BurntFee *big.Int } -func (ar *AccReward) Copy() *AccReward { - return &AccReward{ - Minted: new(big.Int).Set(ar.Minted), - BurntFee: new(big.Int).Set(ar.BurntFee), +func (c *SupplyCheckpoint) Copy() *SupplyCheckpoint { + return &SupplyCheckpoint{ + Minted: 
new(big.Int).Set(c.Minted), + BurntFee: new(big.Int).Set(c.BurntFee), } } From 0c5aa14965a52d796dd2f4ab092b4db45da8d9b9 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 13:33:28 +0900 Subject: [PATCH 26/36] reward: Rename accReward to checkpoint --- reward/supply_manager.go | 96 ++++++++++++++--------------- reward/supply_manager_test.go | 38 ++++++------ storage/database/db_manager.go | 8 +-- storage/database/db_manager_test.go | 10 +-- 4 files changed, 76 insertions(+), 76 deletions(-) diff --git a/reward/supply_manager.go b/reward/supply_manager.go index fa9e79527..96f8a0624 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -45,7 +45,7 @@ var ( deadBurnAddress = common.HexToAddress("0xdead") errSupplyManagerQuit = errors.New("supply manager quit") - errNoAccReward = errors.New("accumulated reward not stored") + errNoCheckpoint = errors.New("no supply checkpoint found") errNoBlock = errors.New("block not found") errNoRebalanceMemo = errors.New("rebalance memo not yet stored") ) @@ -95,17 +95,17 @@ type supplyManager struct { checkpointInterval uint64 // Internal data structures - accRewardCache *lru.ARCCache // Cache (number uint64) -> (accReward *database.AccReward) - memoCache *lru.ARCCache // Cache (address Address) -> (memo.Burnt *big.Int) - quit uint32 // Stop the goroutine in initial catchup stage - quitCh chan struct{} // Stop the goroutine in event subscription state - wg sync.WaitGroup // background goroutine wait group for shutting down + checkpointCache *lru.ARCCache // Cache (number uint64) -> (checkpoint *database.SupplyCheckpoint) + memoCache *lru.ARCCache // Cache (address Address) -> (memo.Burnt *big.Int) + quit uint32 // Stop the goroutine in initial catchup stage + quitCh chan struct{} // Stop the goroutine in event subscription state + wg sync.WaitGroup // background goroutine wait group for shutting down } // NewSupplyManager creates a new supply manager. 
// The TotalSupply data is stored every checkpointInterval blocks. func NewSupplyManager(chain blockChain, gov governanceHelper, db database.DBManager, checkpointInterval uint) *supplyManager { - accRewardCache, _ := lru.NewARC(supplyCacheSize) + checkpointCache, _ := lru.NewARC(supplyCacheSize) memoCache, _ := lru.NewARC(10) return &supplyManager{ @@ -114,7 +114,7 @@ func NewSupplyManager(chain blockChain, gov governanceHelper, db database.DBMana gov: gov, db: db, checkpointInterval: uint64(checkpointInterval), - accRewardCache: accRewardCache, + checkpointCache: checkpointCache, memoCache: memoCache, quitCh: make(chan struct{}, 1), // make sure Stop() doesn't block if catchup() has exited before Stop() } @@ -134,23 +134,23 @@ func (sm *supplyManager) Stop() { } } -func (sm *supplyManager) GetAccReward(num uint64) (*database.SupplyCheckpoint, error) { - if accReward, ok := sm.accRewardCache.Get(num); ok { - return accReward.(*database.SupplyCheckpoint), nil +func (sm *supplyManager) GetCheckpoint(num uint64) (*database.SupplyCheckpoint, error) { + if checkpoint, ok := sm.checkpointCache.Get(num); ok { + return checkpoint.(*database.SupplyCheckpoint), nil } lastNum := sm.db.ReadLastSupplyCheckpointNumber() if lastNum < num { // soft deleted - return nil, errNoAccReward + return nil, errNoCheckpoint } - accReward, err := sm.getAccRewardUncached(num) + checkpoint, err := sm.getCheckpointUncached(num) if err != nil { return nil, err } - sm.accRewardCache.Add(num, accReward.Copy()) - return accReward, nil + sm.checkpointCache.Add(num, checkpoint.Copy()) + return checkpoint, nil } func (sm *supplyManager) GetCanonicalBurn(num uint64) (zero *big.Int, dead *big.Int, err error) { @@ -234,14 +234,14 @@ func (sm *supplyManager) GetTotalSupply(num uint64) (*TotalSupply, error) { errs := make([]error, 0) ts := new(TotalSupply) - // Read accumulated rewards (minted, burntFee) + // Read accumulated supply checkpoint (minted, burntFee) // This is an essential component, so 
failure to read it immediately aborts the function. - accReward, err := sm.GetAccReward(num) + checkpoint, err := sm.GetCheckpoint(num) if err != nil { return nil, err } - ts.TotalMinted = accReward.Minted - ts.BurntFee = accReward.BurntFee + ts.TotalMinted = checkpoint.Minted + ts.BurntFee = checkpoint.BurntFee // Read canonical burn address balances // Leave them nil if the historic state isn't available. @@ -288,7 +288,7 @@ func (sm *supplyManager) catchup() { ) if lastNum > 0 && sm.db.ReadSupplyCheckpoint(lastNum) == nil { - logger.Error("Last accumulated reward not found. Restarting supply catchup", "last", lastNum, "head", headNum) + logger.Error("Last supply checkpoint not found. Restarting supply catchup", "last", lastNum, "head", headNum) sm.db.WriteLastSupplyCheckpointNumber(0) // soft reset to genesis lastNum = 0 } @@ -311,15 +311,15 @@ func (sm *supplyManager) catchup() { } lastNum = sm.db.ReadLastSupplyCheckpointNumber() - lastAccReward := sm.db.ReadSupplyCheckpoint(lastNum) + lastCheckpoint := sm.db.ReadSupplyCheckpoint(lastNum) // Big-step catchup; accumulate until the head block as of now. // The head block can be obsolete by the time catchup finished, so the big-step can end up being a bit short. // Repeat until the difference is close enough so that the headNum stays the same after one iteration. 
for lastNum < headNum { - logger.Info("Total supply big step catchup", "last", lastNum, "head", headNum, "minted", lastAccReward.Minted.String(), "burntFee", lastAccReward.BurntFee.String()) + logger.Info("Total supply big step catchup", "last", lastNum, "head", headNum, "minted", lastCheckpoint.Minted.String(), "burntFee", lastCheckpoint.BurntFee.String()) - accReward, err := sm.accumulateReward(lastNum, headNum, lastAccReward, true) + checkpoint, err := sm.accumulateReward(lastNum, headNum, lastCheckpoint, true) if err != nil { if err != errSupplyManagerQuit { logger.Error("Total supply accumulate failed", "from", lastNum, "to", headNum, "err", err) @@ -328,10 +328,10 @@ func (sm *supplyManager) catchup() { } lastNum = headNum - lastAccReward = accReward + lastCheckpoint = checkpoint headNum = sm.chain.CurrentBlock().NumberU64() } - logger.Info("Total supply big step catchup done", "last", lastNum, "minted", lastAccReward.Minted.String(), "burntFee", lastAccReward.BurntFee.String()) + logger.Info("Total supply big step catchup done", "last", lastNum, "minted", lastCheckpoint.Minted.String(), "burntFee", lastCheckpoint.BurntFee.String()) // Subscribe to chain head events and accumulate on demand. 
sm.chainHeadSub = sm.chain.SubscribeChainHeadEvent(sm.chainHeadChan) @@ -342,7 +342,7 @@ func (sm *supplyManager) catchup() { case head := <-sm.chainHeadChan: headNum = head.Block.NumberU64() - supply, err := sm.accumulateReward(lastNum, headNum, lastAccReward, true) + supply, err := sm.accumulateReward(lastNum, headNum, lastCheckpoint, true) if err != nil { if err != errSupplyManagerQuit { logger.Error("Total supply accumulate failed", "from", lastNum, "to", headNum, "err", err) @@ -351,7 +351,7 @@ func (sm *supplyManager) catchup() { } lastNum = headNum - lastAccReward = supply + lastCheckpoint = supply } } } @@ -380,33 +380,33 @@ func (sm *supplyManager) totalSupplyFromState(num uint64) (*big.Int, error) { return totalSupply, nil } -func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.SupplyCheckpoint, error) { - accReward := sm.db.ReadSupplyCheckpoint(num) - if accReward != nil { - return accReward, nil +func (sm *supplyManager) getCheckpointUncached(num uint64) (*database.SupplyCheckpoint, error) { + checkpoint := sm.db.ReadSupplyCheckpoint(num) + if checkpoint != nil { + return checkpoint, nil } - // Trace back to the last stored accumulated reward. + // Trace back to the last stored supply checkpoint. var fromNum uint64 var fromAcc *database.SupplyCheckpoint // Fast path using checkpointInterval - if accReward := sm.db.ReadSupplyCheckpoint(num - num%sm.checkpointInterval); accReward != nil { + if checkpoint := sm.db.ReadSupplyCheckpoint(num - num%sm.checkpointInterval); checkpoint != nil { fromNum = num - num%sm.checkpointInterval - fromAcc = accReward + fromAcc = checkpoint } else { // Slow path in case the checkpoint has changed or checkpoint is missing. 
for i := uint64(1); i < supplyReaccLimit; i++ { - accReward = sm.db.ReadSupplyCheckpoint(num - i) - if accReward != nil { + checkpoint = sm.db.ReadSupplyCheckpoint(num - i) + if checkpoint != nil { fromNum = num - i - fromAcc = accReward + fromAcc = checkpoint break } } } if fromAcc == nil { - return nil, errNoAccReward + return nil, errNoCheckpoint } logger.Trace("on-demand reaccumulating rewards", "from", fromNum, "to", num) @@ -414,12 +414,12 @@ func (sm *supplyManager) getAccRewardUncached(num uint64) (*database.SupplyCheck } // accumulateReward calculates the total supply from the last block to the current block. -// Given supply at `from` is `fromSupply`, calculate the supply until `to`, inclusive. +// Given the supply at `from` is `fromCheckpoint`, calculate the supply until `to`, inclusive. // If `write` is true, the result will be written to the database. // If `write` is false, the result will not be written to the database, -// to prevent overwriting LastAccRewardBlockNumber (essentially rollback) and to keep the disk size small (only store at checkpointInterval). -func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.SupplyCheckpoint, write bool) (*database.SupplyCheckpoint, error) { - accReward := fromAcc.Copy() // make a copy because we're updating it in-place. +// to prevent overwriting LastSupplyCheckpointNumber (essentially rollback) and to keep the disk size small (only store at checkpointInterval). +func (sm *supplyManager) accumulateReward(from, to uint64, fromCheckpoint *database.SupplyCheckpoint, write bool) (*database.SupplyCheckpoint, error) { + checkpoint := fromCheckpoint.Copy() // make a copy because we're updating it in-place. 
for num := from + 1; num <= to; num++ { // Abort upon quit signal @@ -442,18 +442,18 @@ func (sm *supplyManager) accumulateReward(from, to uint64, fromAcc *database.Sup if err != nil { return nil, err } - accReward.Minted.Add(accReward.Minted, blockTotal.Minted) - accReward.BurntFee.Add(accReward.BurntFee, blockTotal.BurntFee) + checkpoint.Minted.Add(checkpoint.Minted, blockTotal.Minted) + checkpoint.BurntFee.Add(checkpoint.BurntFee, blockTotal.BurntFee) // Store to database, print progress log. - sm.accRewardCache.Add(num, accReward.Copy()) + sm.checkpointCache.Add(num, checkpoint.Copy()) if write && (num%sm.checkpointInterval) == 0 { - sm.db.WriteSupplyCheckpoint(num, accReward) + sm.db.WriteSupplyCheckpoint(num, checkpoint) sm.db.WriteLastSupplyCheckpointNumber(num) } if (num % supplyLogInterval) == 0 { - logger.Info("Accumulated block rewards", "number", num, "minted", accReward.Minted.String(), "burntFee", accReward.BurntFee.String()) + logger.Info("Accumulated block rewards", "number", num, "minted", checkpoint.Minted.String(), "burntFee", checkpoint.BurntFee.String()) } } - return accReward, nil + return checkpoint, nil } diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index f1faa7e68..3086219bf 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -65,7 +65,7 @@ func TestSupplyManagerError(t *testing.T) { assert.True(t, shouldRequestUpstream(errors.Join(errNoRebalanceBurn(errNoRebalanceMemo), errNoCanonicalBurn(mtn)))) // Other errors should not trigger Upstream EN retry - assert.False(t, shouldRequestUpstream(errNoAccReward)) + assert.False(t, shouldRequestUpstream(errNoCheckpoint)) assert.False(t, shouldRequestUpstream(errNoRebalanceBurn(errNoRebalanceMemo))) } @@ -201,14 +201,14 @@ func (s *SupplyTestSuite) TestCatchupBigStep() { s.setupHistory() // head block is already 400 s.sm.Start() // start catchup defer s.sm.Stop() - s.waitAccReward() // Wait for catchup to finish + s.waitCatchup() // Wait for 
catchup to finish testcases := s.testcases() for _, tc := range testcases { - accReward, err := s.sm.GetAccReward(tc.number) + checkpoint, err := s.sm.GetCheckpoint(tc.number) require.NoError(t, err) - bigEqual(t, tc.expectTotalSupply.TotalMinted, accReward.Minted, tc.number) - bigEqual(t, tc.expectTotalSupply.BurntFee, accReward.BurntFee, tc.number) + bigEqual(t, tc.expectTotalSupply.TotalMinted, checkpoint.Minted, tc.number) + bigEqual(t, tc.expectTotalSupply.BurntFee, checkpoint.BurntFee, tc.number) } } @@ -219,14 +219,14 @@ func (s *SupplyTestSuite) TestCatchupEventSubscription() { defer s.sm.Stop() time.Sleep(10 * time.Millisecond) // yield to the catchup goroutine to start s.setupHistory() // block is inserted after the catchup started - s.waitAccReward() + s.waitCatchup() testcases := s.testcases() for _, tc := range testcases { - accReward, err := s.sm.GetAccReward(tc.number) + checkpoint, err := s.sm.GetCheckpoint(tc.number) require.NoError(t, err) - bigEqual(t, tc.expectTotalSupply.TotalMinted, accReward.Minted, tc.number) - bigEqual(t, tc.expectTotalSupply.BurntFee, accReward.BurntFee, tc.number) + bigEqual(t, tc.expectTotalSupply.TotalMinted, checkpoint.Minted, tc.number) + bigEqual(t, tc.expectTotalSupply.BurntFee, checkpoint.BurntFee, tc.number) } } @@ -236,7 +236,7 @@ func (s *SupplyTestSuite) TestTotalSupply() { s.setupHistory() s.sm.Start() defer s.sm.Stop() - s.waitAccReward() + s.waitCatchup() testcases := s.testcases() for _, tc := range testcases { @@ -261,7 +261,7 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { t := s.T() s.setupHistory() s.sm.Start() - s.waitAccReward() + s.waitCatchup() s.sm.Stop() var num uint64 = 200 @@ -305,21 +305,21 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { // No AccReward s.db.WriteLastSupplyCheckpointNumber(num - 1) - s.sm.accRewardCache.Purge() + s.sm.checkpointCache.Purge() ts, err = s.sm.GetTotalSupply(num) - assert.ErrorIs(t, err, errNoAccReward) + assert.ErrorIs(t, err, 
errNoCheckpoint) assert.Nil(t, ts) } -// Test that when db.AccReward are missing, GetTotalSupply will re-accumulate from the nearest stored AccReward. +// Test that when SupplyCheckpoint are missing, GetTotalSupply will re-accumulate from the nearest stored SupplyCheckpoint. func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { t := s.T() s.setupHistory() s.sm.Start() defer s.sm.Stop() - s.waitAccReward() + s.waitCatchup() - // Delete AccRewards not on the default block interval (128). + // Delete checkpoints not on the default block interval (128). // This happens on full nodes, and archive nodes with default BlockInterval config. // Note that archive nodes ars allowed to have BlockInterval > 1, still tries are committed every block. for num := uint64(0); num <= 400; num++ { @@ -332,7 +332,7 @@ func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { // Still, all block data must be available. testcases := s.testcases() for _, tc := range testcases { - s.sm.accRewardCache.Purge() + s.sm.checkpointCache.Purge() ts, err := s.sm.GetTotalSupply(tc.number) require.NoError(t, err) @@ -349,7 +349,7 @@ func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { } } -func (s *SupplyTestSuite) waitAccReward() { +func (s *SupplyTestSuite) waitCatchup() { for i := 0; i < 1000; i++ { // wait 10 seconds until catchup complete if s.db.ReadLastSupplyCheckpointNumber() >= 400 { break @@ -557,7 +557,7 @@ func (s *SupplyTestSuite) testcases() []supplyTestTC { zeroBurn = bigMult(amount1B, big.NewInt(1)) deadBurn = bigMult(amount1B, big.NewInt(2)) ) - // accumulated rewards: segment sums + // supply checkpoints: segment sums minted := make(map[uint64]*big.Int) burntFee := make(map[uint64]*big.Int) diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index c4b8695a6..757aba990 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -2888,7 +2888,7 @@ func (dbm *databaseManager) ReadSupplyCheckpoint(blockNum uint64) *SupplyCheckpo 
BurntFee []byte }{} if err := rlp.DecodeBytes(data, &stored); err != nil { - logger.Crit("Corrupt accumulated reward", "err", err) + logger.Crit("Corrupt supply checkpoint", "err", err) } return &SupplyCheckpoint{ Minted: new(big.Int).SetBytes(stored.Minted), @@ -2908,10 +2908,10 @@ func (dbm *databaseManager) WriteSupplyCheckpoint(blockNum uint64, checkpoint *S } data, err := rlp.EncodeToBytes(stored) if err != nil { - logger.Crit("Failed to write accumulated reward", "err", err) + logger.Crit("Failed to write supply checkpoint", "err", err) } if err := db.Put(supplyCheckpointKey(blockNum), data); err != nil { - logger.Crit("Failed to write accumulated reward", "err", err) + logger.Crit("Failed to write supply checkpoint", "err", err) } } @@ -2931,7 +2931,7 @@ func (dbm *databaseManager) WriteLastSupplyCheckpointNumber(blockNum uint64) { db := dbm.getDatabase(MiscDB) data := common.Int64ToByteBigEndian(blockNum) if err := db.Put(lastSupplyCheckpointNumberKey, data); err != nil { - logger.Crit("Failed to write last accumulated reward block number", "err", err) + logger.Crit("Failed to write last supply checkpoint number", "err", err) } } diff --git a/storage/database/db_manager_test.go b/storage/database/db_manager_test.go index 0573d4369..43442bb58 100644 --- a/storage/database/db_manager_test.go +++ b/storage/database/db_manager_test.go @@ -822,12 +822,12 @@ func TestDBManager_Governance(t *testing.T) { // TODO-Kaia-Database Implement this! 
} -func TestDBManager_AccReward(t *testing.T) { +func TestDBManager_SupplyCheckpoint(t *testing.T) { for _, dbm := range dbManagers { // SupplyCheckpoint testcases := []struct { - Number uint64 - AccReward *SupplyCheckpoint + Number uint64 + Checkpoint *SupplyCheckpoint }{ {1000, &SupplyCheckpoint{big.NewInt(1111), big.NewInt(99)}}, {2000, &SupplyCheckpoint{big.NewInt(0), big.NewInt(88)}}, @@ -836,8 +836,8 @@ func TestDBManager_AccReward(t *testing.T) { } for _, tc := range testcases { assert.Nil(t, dbm.ReadSupplyCheckpoint(tc.Number)) - dbm.WriteSupplyCheckpoint(tc.Number, tc.AccReward) - assert.Equal(t, tc.AccReward, dbm.ReadSupplyCheckpoint(tc.Number)) + dbm.WriteSupplyCheckpoint(tc.Number, tc.Checkpoint) + assert.Equal(t, tc.Checkpoint, dbm.ReadSupplyCheckpoint(tc.Number)) } // LastSupplyCheckpointNumber From 968342eaa2127ef965382f9a2430c93e06c8c741 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 13:47:28 +0900 Subject: [PATCH 27/36] database: Add DeleteSupplyCheckpoint --- reward/supply_manager_test.go | 5 ++--- storage/database/db_manager.go | 9 +++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index 3086219bf..750ea719b 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -303,7 +303,7 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { assert.Nil(t, ts.Kip103Burn) assert.Equal(t, expected.Kip160Burn, ts.Kip160Burn) - // No AccReward + // No SupplyCheckpoint s.db.WriteLastSupplyCheckpointNumber(num - 1) s.sm.checkpointCache.Purge() ts, err = s.sm.GetTotalSupply(num) @@ -324,8 +324,7 @@ func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { // Note that archive nodes ars allowed to have BlockInterval > 1, still tries are committed every block. for num := uint64(0); num <= 400; num++ { if num%128 != 0 { - // Because it's for testing, we do not add db.DeleteAccReward method. 
- s.db.GetMiscDB().Delete(append([]byte("accReward"), common.Int64ToByteBigEndian(num)...)) + s.db.DeleteSupplyCheckpoint(num) } } diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index 757aba990..6d162e408 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -303,6 +303,7 @@ type DBManager interface { // TotalSupply checkpoint functions ReadSupplyCheckpoint(blockNum uint64) *SupplyCheckpoint WriteSupplyCheckpoint(blockNum uint64, checkpoint *SupplyCheckpoint) + DeleteSupplyCheckpoint(blockNum uint64) ReadLastSupplyCheckpointNumber() uint64 WriteLastSupplyCheckpointNumber(blockNum uint64) @@ -2915,6 +2916,14 @@ func (dbm *databaseManager) WriteSupplyCheckpoint(blockNum uint64, checkpoint *S } } +// DeleteSupplyCheckpoint removes the SupplyCheckpoint for a specific block number. +func (dbm *databaseManager) DeleteSupplyCheckpoint(blockNum uint64) { + db := dbm.getDatabase(MiscDB) + if err := db.Delete(supplyCheckpointKey(blockNum)); err != nil { + logger.Crit("Failed to delete supply checkpoint", "err", err) + } +} + // ReadLastSupplyCheckpointNumber retrieves the highest number for which the SupplyCheckpoint is stored. 
func (dbm *databaseManager) ReadLastSupplyCheckpointNumber() uint64 { db := dbm.getDatabase(MiscDB) From f3e1748ac14b0fb0336232c11ac2040e0d5aa54a Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 14:14:07 +0900 Subject: [PATCH 28/36] reward: Tidy SupplyTestSuite --- reward/supply_manager_test.go | 60 ++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index 750ea719b..27f7bebcb 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -93,6 +93,7 @@ type SupplyTestSuite struct { db database.DBManager genesis *types.Block chain *blockchain.BlockChain + blocks []*types.Block sm *supplyManager } @@ -103,11 +104,11 @@ func TestSupplyManager(t *testing.T) { // ---------------------------------------------------------------------------- // Test cases -// Tests totalSupplyFromState as well as the setupHistory() itself. -// This accounts for (AccMinted - AccBurntFee - kip103Burn - kip160Burn) but not (- zeroBurn - deadBurn). +// Tests totalSupplyFromState as well as the insertBlocks() itself. +// totalSupplyFromState accounts for (AccMinted - AccBurntFee - kip103Burn - kip160Burn) but not (- zeroBurn - deadBurn). func (s *SupplyTestSuite) TestFromState() { t := s.T() - s.setupHistory() + s.insertBlocks() s.sm.Start() // start catchup testcases := s.testcases() @@ -126,7 +127,7 @@ func (s *SupplyTestSuite) TestFromState() { // Test reading canonical burn amounts from the state. func (s *SupplyTestSuite) TestCanonicalBurn() { t := s.T() - s.setupHistory() + s.insertBlocks() // Delete state at 199 root := s.db.ReadBlockByNumber(199).Root() @@ -148,7 +149,7 @@ func (s *SupplyTestSuite) TestCanonicalBurn() { // Test reading rebalance memo from the contract. 
func (s *SupplyTestSuite) TestRebalanceMemo() { t := s.T() - s.setupHistory() + s.insertBlocks() // rebalance not configured amount, err := s.sm.GetRebalanceBurn(199, nil, common.Address{}) @@ -177,7 +178,7 @@ func (s *SupplyTestSuite) TestRebalanceMemo() { // Tests sm.Stop() does not block. func (s *SupplyTestSuite) TestStop() { - s.setupHistory() // head block is already 400 + s.insertBlocks() // head block is already 400 s.sm.Start() // start catchup, enters big-step mode endCh := make(chan struct{}) @@ -198,7 +199,7 @@ func (s *SupplyTestSuite) TestStop() { // Tests the insertBlock -> go catchup, where the catchup enters the big-step mode. func (s *SupplyTestSuite) TestCatchupBigStep() { t := s.T() - s.setupHistory() // head block is already 400 + s.insertBlocks() // head block is already 400 s.sm.Start() // start catchup defer s.sm.Stop() s.waitCatchup() // Wait for catchup to finish @@ -217,8 +218,8 @@ func (s *SupplyTestSuite) TestCatchupEventSubscription() { t := s.T() s.sm.Start() // start catchup defer s.sm.Stop() - time.Sleep(10 * time.Millisecond) // yield to the catchup goroutine to start - s.setupHistory() // block is inserted after the catchup started + time.Sleep(100 * time.Millisecond) // yield to the catchup goroutine to start + s.insertBlocks() // block is inserted after the catchup started s.waitCatchup() testcases := s.testcases() @@ -233,7 +234,7 @@ func (s *SupplyTestSuite) TestCatchupEventSubscription() { // Tests all supply components. func (s *SupplyTestSuite) TestTotalSupply() { t := s.T() - s.setupHistory() + s.insertBlocks() s.sm.Start() defer s.sm.Stop() s.waitCatchup() @@ -259,7 +260,7 @@ func (s *SupplyTestSuite) TestTotalSupply() { // Test that when some data are missing, GetTotalSupply leaves some fields nil and returns an error. 
func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { t := s.T() - s.setupHistory() + s.insertBlocks() s.sm.Start() s.waitCatchup() s.sm.Stop() @@ -314,7 +315,7 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { // Test that when SupplyCheckpoint are missing, GetTotalSupply will re-accumulate from the nearest stored SupplyCheckpoint. func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { t := s.T() - s.setupHistory() + s.insertBlocks() s.sm.Start() defer s.sm.Stop() s.waitCatchup() @@ -350,14 +351,12 @@ func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { func (s *SupplyTestSuite) waitCatchup() { for i := 0; i < 1000; i++ { // wait 10 seconds until catchup complete - if s.db.ReadLastSupplyCheckpointNumber() >= 400 { - break + if s.sm.checkpointCache.Contains(uint64(400)) { + return } time.Sleep(10 * time.Millisecond) } - if s.db.ReadLastSupplyCheckpointNumber() < 400 { - s.T().Fatal("Catchup not finished in time") - } + s.T().Fatal("Catchup not finished in time") } // ---------------------------------------------------------------------------- @@ -485,12 +484,6 @@ func (s *SupplyTestSuite) SetupTest() { require.NoError(t, err) s.chain = chain - s.sm = NewSupplyManager(s.chain, s.gov, s.db, 1) // 1 interval for testing -} - -func (s *SupplyTestSuite) setupHistory() { - t := s.T() - var ( // Generate blocks with 1 tx per block. Send 1 kei from Genesis4 to Genesis3. 
signer = types.LatestSignerForChainID(s.config.ChainID) key = keyGenesis4 @@ -516,13 +509,22 @@ func (s *SupplyTestSuite) setupHistory() { } } ) - blocks, _ := blockchain.GenerateChain(s.config, s.genesis, s.engine, s.db, 400, genFunc) - require.NotEmpty(t, blocks) + s.blocks, _ = blockchain.GenerateChain(s.config, s.genesis, s.engine, s.db, 400, genFunc) + require.NotEmpty(t, s.blocks) - // Insert s.chain - _, err := s.chain.InsertChain(blocks) - require.NoError(t, err) - expected := blocks[len(blocks)-1] + s.sm = NewSupplyManager(s.chain, s.gov, s.db, 1) // 1 interval for testing +} + +func (s *SupplyTestSuite) insertBlocks() { + t := s.T() + + // Insert blocks to chain. Note that duplicating blocks will automatically be ignored. + // ChainHeadEvent will be published after each block insertion. + for _, block := range s.blocks { + _, err := s.chain.InsertChain([]*types.Block{block}) + require.NoError(t, err) + } + expected := s.blocks[len(s.blocks)-1] actual := s.chain.CurrentBlock() assert.Equal(t, expected.Hash(), actual.Hash()) } From 008e6c0e0e28e190354d1b988cabcb59b18afb4c Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 14:16:09 +0900 Subject: [PATCH 29/36] reward: Add TestCatchupRewind --- reward/supply_manager_test.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index 27f7bebcb..873ec0889 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -231,6 +231,29 @@ func (s *SupplyTestSuite) TestCatchupEventSubscription() { } } +// Tests go catchup -> insertBlock case, where the catchup follows the chain head event. 
+func (s *SupplyTestSuite) TestCatchupRewind() { + t := s.T() + s.sm.Start() // start catchup + defer s.sm.Stop() + time.Sleep(100 * time.Millisecond) // yield to the catchup goroutine to start + s.insertBlocks() // block is inserted after the catchup started + s.waitCatchup() // catchup to block 400 + + // Rewind to 200, and re-insert blocks. + // The catchup thread should correctly handle ChainHeadEvents less than 400. + s.chain.SetHead(200) + s.insertBlocks() + + testcases := s.testcases() + for _, tc := range testcases { + checkpoint, err := s.sm.GetCheckpoint(tc.number) + require.NoError(t, err) + bigEqual(t, tc.expectTotalSupply.TotalMinted, checkpoint.Minted, tc.number) + bigEqual(t, tc.expectTotalSupply.BurntFee, checkpoint.BurntFee, tc.number) + } +} + // Tests all supply components. func (s *SupplyTestSuite) TestTotalSupply() { t := s.T() From 5a10244a62325e5cb74067971f90554f9bb80dd3 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 14:16:48 +0900 Subject: [PATCH 30/36] reward: Fix SupplyManager catchup with out-of-order ChainHeadEvents --- reward/supply_manager.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 96f8a0624..709211202 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -342,16 +342,18 @@ func (sm *supplyManager) catchup() { case head := <-sm.chainHeadChan: headNum = head.Block.NumberU64() - supply, err := sm.accumulateReward(lastNum, headNum, lastCheckpoint, true) - if err != nil { - if err != errSupplyManagerQuit { - logger.Error("Total supply accumulate failed", "from", lastNum, "to", headNum, "err", err) + if lastNum < headNum { + supply, err := sm.accumulateReward(lastNum, headNum, lastCheckpoint, true) + if err != nil { + if err != errSupplyManagerQuit { + logger.Error("Total supply accumulate failed", "from", lastNum, "to", headNum, "err", err) + } + return } - return - } - lastNum = headNum - 
lastCheckpoint = supply + lastNum = headNum + lastCheckpoint = supply + } } } } From 2620be24c682b0f168978d63652d04fef97599c1 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 15:10:28 +0900 Subject: [PATCH 31/36] blockchain: SetHead deletes SupplyCheckpoints --- blockchain/blockchain.go | 6 ++++++ params/governance_params.go | 3 ++- reward/supply_manager_test.go | 9 +++++++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index d96620189..58ff3b0e7 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -611,6 +611,11 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo // to low, so it's safe the update in-memory markers directly. bc.currentFastBlock.Store(newHeadFastBlock) } + + // Rewind the supply checkpoint + newLastSupplyCheckpointNumber := header.Number.Uint64() - (header.Number.Uint64() % params.SupplyCheckpointInterval) + bc.db.WriteLastSupplyCheckpointNumber(newLastSupplyCheckpointNumber) + return bc.CurrentBlock().Number().Uint64(), nil } @@ -629,6 +634,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo if bc.Config().Istanbul.ProposerPolicy == params.WeightedRandom && !bc.Config().IsKaiaForkEnabled(new(big.Int).SetUint64(num)) && params.IsStakingUpdateInterval(num) { bc.db.DeleteStakingInfo(num) } + bc.db.DeleteSupplyCheckpoint(num) } // If SetHead was only called as a chain reparation method, try to skip diff --git a/params/governance_params.go b/params/governance_params.go index 82b08a056..3c41776e0 100644 --- a/params/governance_params.go +++ b/params/governance_params.go @@ -38,7 +38,8 @@ const ( // The prefix for governance cache GovernanceCachePrefix = "governance" - CheckpointInterval = 1024 + CheckpointInterval = 1024 // For Istanbul snapshot + SupplyCheckpointInterval = 128 // for SupplyManager tracking native token supply ) const ( diff --git a/reward/supply_manager_test.go 
b/reward/supply_manager_test.go index 873ec0889..bc07810c5 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -240,9 +240,14 @@ func (s *SupplyTestSuite) TestCatchupRewind() { s.insertBlocks() // block is inserted after the catchup started s.waitCatchup() // catchup to block 400 - // Rewind to 200, and re-insert blocks. - // The catchup thread should correctly handle ChainHeadEvents less than 400. + // Rewind to 200; relevant data must have been deleted. s.chain.SetHead(200) + assert.Equal(t, uint64(128), s.db.ReadLastSupplyCheckpointNumber()) + assert.NotNil(t, s.db.ReadSupplyCheckpoint(128)) + assert.Nil(t, s.db.ReadSupplyCheckpoint(256)) + + // Re-insert blocks. + // The catchup thread should correctly handle ChainHeadEvents less than 400. s.insertBlocks() testcases := s.testcases() From e0774552621937d7481f54ce2808471fb23143be Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 15:16:13 +0900 Subject: [PATCH 32/36] reward: SupplyManager checkpointInterval is constant --- node/cn/backend.go | 4 +-- reward/supply_manager.go | 38 +++++++-------------------- reward/supply_manager_test.go | 48 +++++------------------------------ 3 files changed, 17 insertions(+), 73 deletions(-) diff --git a/node/cn/backend.go b/node/cn/backend.go index 47c6b14b0..46025c5e2 100644 --- a/node/cn/backend.go +++ b/node/cn/backend.go @@ -363,9 +363,7 @@ func New(ctx *node.ServiceContext, config *Config) (*CN, error) { // NewStakingManager is called with proper non-nil parameters reward.NewStakingManager(cn.blockchain, governance, cn.chainDB) } - // Note: archive nodes might have TrieBlockInterval == 128, then SupplyManager will store checkpoints every 128 blocks. - // Still it is not a problem since SupplyManager can re-accumulate from the nearest checkpoint. 
- cn.supplyManager = reward.NewSupplyManager(cn.blockchain, cn.governance, cn.chainDB, config.TrieBlockInterval) + cn.supplyManager = reward.NewSupplyManager(cn.blockchain, cn.governance, cn.chainDB) // Governance states which are not yet applied to the db remains at in-memory storage // It disappears during the node restart, so restoration is needed before the sync starts diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 709211202..6f04c65b0 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -34,13 +34,13 @@ import ( "github.com/kaiachain/kaia/common" "github.com/kaiachain/kaia/contracts/contracts/system_contracts/rebalance" "github.com/kaiachain/kaia/event" + "github.com/kaiachain/kaia/params" "github.com/kaiachain/kaia/storage/database" ) var ( supplyCacheSize = 86400 // A day; Some total supply consumers might want daily supply. supplyLogInterval = uint64(102400) // Periodic total supply log. - supplyReaccLimit = uint64(1024) // Re-accumulate from the last accumulated block. zeroBurnAddress = common.HexToAddress("0x0") deadBurnAddress = common.HexToAddress("0xdead") @@ -104,7 +104,7 @@ type supplyManager struct { // NewSupplyManager creates a new supply manager. // The TotalSupply data is stored every checkpointInterval blocks. 
-func NewSupplyManager(chain blockChain, gov governanceHelper, db database.DBManager, checkpointInterval uint) *supplyManager { +func NewSupplyManager(chain blockChain, gov governanceHelper, db database.DBManager) *supplyManager { checkpointCache, _ := lru.NewARC(supplyCacheSize) memoCache, _ := lru.NewARC(10) @@ -113,7 +113,7 @@ func NewSupplyManager(chain blockChain, gov governanceHelper, db database.DBMana chainHeadChan: make(chan blockchain.ChainHeadEvent, chainHeadChanSize), gov: gov, db: db, - checkpointInterval: uint64(checkpointInterval), + checkpointInterval: uint64(params.SupplyCheckpointInterval), checkpointCache: checkpointCache, memoCache: memoCache, quitCh: make(chan struct{}, 1), // make sure Stop() doesn't block if catchup() has exited before Stop() @@ -139,11 +139,6 @@ func (sm *supplyManager) GetCheckpoint(num uint64) (*database.SupplyCheckpoint, return checkpoint.(*database.SupplyCheckpoint), nil } - lastNum := sm.db.ReadLastSupplyCheckpointNumber() - if lastNum < num { // soft deleted - return nil, errNoCheckpoint - } - checkpoint, err := sm.getCheckpointUncached(num) if err != nil { return nil, err @@ -383,36 +378,21 @@ func (sm *supplyManager) totalSupplyFromState(num uint64) (*big.Int, error) { } func (sm *supplyManager) getCheckpointUncached(num uint64) (*database.SupplyCheckpoint, error) { + // Read from DB checkpoint := sm.db.ReadSupplyCheckpoint(num) if checkpoint != nil { return checkpoint, nil } - // Trace back to the last stored supply checkpoint. - var fromNum uint64 - var fromAcc *database.SupplyCheckpoint - - // Fast path using checkpointInterval - if checkpoint := sm.db.ReadSupplyCheckpoint(num - num%sm.checkpointInterval); checkpoint != nil { - fromNum = num - num%sm.checkpointInterval - fromAcc = checkpoint - } else { - // Slow path in case the checkpoint has changed or checkpoint is missing. 
- for i := uint64(1); i < supplyReaccLimit; i++ { - checkpoint = sm.db.ReadSupplyCheckpoint(num - i) - if checkpoint != nil { - fromNum = num - i - fromAcc = checkpoint - break - } - } - } - if fromAcc == nil { + // Re-accumulate from the nearest checkpoint + fromNum := num - (num % sm.checkpointInterval) + fromCheckpoint := sm.db.ReadSupplyCheckpoint(fromNum) + if fromCheckpoint == nil { return nil, errNoCheckpoint } logger.Trace("on-demand reaccumulating rewards", "from", fromNum, "to", num) - return sm.accumulateReward(fromNum, num, fromAcc, false) + return sm.accumulateReward(fromNum, num, fromCheckpoint, false) } // accumulateReward calculates the total supply from the last block to the current block. diff --git a/reward/supply_manager_test.go b/reward/supply_manager_test.go index bc07810c5..797525cd6 100644 --- a/reward/supply_manager_test.go +++ b/reward/supply_manager_test.go @@ -269,6 +269,7 @@ func (s *SupplyTestSuite) TestTotalSupply() { testcases := s.testcases() for _, tc := range testcases { + s.sm.checkpointCache.Purge() // To test re-accumulate ts, err := s.sm.GetTotalSupply(tc.number) require.NoError(t, err) @@ -286,12 +287,12 @@ func (s *SupplyTestSuite) TestTotalSupply() { } // Test that when some data are missing, GetTotalSupply leaves some fields nil and returns an error. 
-func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { +func (s *SupplyTestSuite) TestPartialInfo() { t := s.T() s.insertBlocks() s.sm.Start() + defer s.sm.Stop() s.waitCatchup() - s.sm.Stop() var num uint64 = 200 var expected *TotalSupply @@ -333,50 +334,15 @@ func (s *SupplyTestSuite) TestTotalSupplyPartialInfo() { assert.Equal(t, expected.Kip160Burn, ts.Kip160Burn) // No SupplyCheckpoint - s.db.WriteLastSupplyCheckpointNumber(num - 1) + s.db.WriteLastSupplyCheckpointNumber(num - (num % 128)) + s.db.DeleteSupplyCheckpoint(num - (num % 128)) s.sm.checkpointCache.Purge() + ts, err = s.sm.GetTotalSupply(num) assert.ErrorIs(t, err, errNoCheckpoint) assert.Nil(t, ts) } -// Test that when SupplyCheckpoint are missing, GetTotalSupply will re-accumulate from the nearest stored SupplyCheckpoint. -func (s *SupplyTestSuite) TestTotalSupplyReaccumulate() { - t := s.T() - s.insertBlocks() - s.sm.Start() - defer s.sm.Stop() - s.waitCatchup() - - // Delete checkpoints not on the default block interval (128). - // This happens on full nodes, and archive nodes with default BlockInterval config. - // Note that archive nodes ars allowed to have BlockInterval > 1, still tries are committed every block. - for num := uint64(0); num <= 400; num++ { - if num%128 != 0 { - s.db.DeleteSupplyCheckpoint(num) - } - } - - // Still, all block data must be available. 
- testcases := s.testcases() - for _, tc := range testcases { - s.sm.checkpointCache.Purge() - ts, err := s.sm.GetTotalSupply(tc.number) - require.NoError(t, err) - - expected := tc.expectTotalSupply - actual := ts - bigEqual(t, expected.TotalSupply, actual.TotalSupply, tc.number) - bigEqual(t, expected.TotalMinted, actual.TotalMinted, tc.number) - bigEqual(t, expected.TotalBurnt, actual.TotalBurnt, tc.number) - bigEqual(t, expected.BurntFee, actual.BurntFee, tc.number) - bigEqual(t, expected.ZeroBurn, actual.ZeroBurn, tc.number) - bigEqual(t, expected.DeadBurn, actual.DeadBurn, tc.number) - bigEqual(t, expected.Kip103Burn, actual.Kip103Burn, tc.number) - bigEqual(t, expected.Kip160Burn, actual.Kip160Burn, tc.number) - } -} - func (s *SupplyTestSuite) waitCatchup() { for i := 0; i < 1000; i++ { // wait 10 seconds until catchup complete if s.sm.checkpointCache.Contains(uint64(400)) { @@ -540,7 +506,7 @@ func (s *SupplyTestSuite) SetupTest() { s.blocks, _ = blockchain.GenerateChain(s.config, s.genesis, s.engine, s.db, 400, genFunc) require.NotEmpty(t, s.blocks) - s.sm = NewSupplyManager(s.chain, s.gov, s.db, 1) // 1 interval for testing + s.sm = NewSupplyManager(s.chain, s.gov, s.db) } func (s *SupplyTestSuite) insertBlocks() { From 2ce37f01c0efc63073eaf6936cc227b5546aa71e Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 15:17:53 +0900 Subject: [PATCH 33/36] node: SupplyManager restarted by SetHead API call --- node/cn/api_backend.go | 2 ++ node/cn/api_backend_test.go | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/node/cn/api_backend.go b/node/cn/api_backend.go index 89e7afdd4..58ba82f51 100644 --- a/node/cn/api_backend.go +++ b/node/cn/api_backend.go @@ -100,6 +100,8 @@ func (b *CNAPIBackend) SetHead(number uint64) error { b.cn.protocolManager.Downloader().Cancel() b.cn.protocolManager.SetSyncStop(true) defer b.cn.protocolManager.SetSyncStop(false) + b.cn.supplyManager.Stop() + defer b.cn.supplyManager.Start() return 
doSetHead(b.cn.blockchain, b.cn.engine, b.cn.governance, b.gpo, number) } diff --git a/node/cn/api_backend_test.go b/node/cn/api_backend_test.go index 6dff66337..21da8995d 100644 --- a/node/cn/api_backend_test.go +++ b/node/cn/api_backend_test.go @@ -173,6 +173,18 @@ func testGov() *governance.MixedEngine { return governance.NewMixedEngine(config, db) } +type testSupplyManager struct{} + +func (sm *testSupplyManager) Start() { +} + +func (sm *testSupplyManager) Stop() { +} + +func (sm *testSupplyManager) GetTotalSupply(num uint64) (*reward.TotalSupply, error) { + return &reward.TotalSupply{}, nil +} + func TestCNAPIBackend_SetHead(t *testing.T) { mockCtrl, mockBlockChain, _, api := newCNAPIBackend(t) defer mockCtrl.Finish() @@ -183,6 +195,7 @@ func TestCNAPIBackend_SetHead(t *testing.T) { api.cn.protocolManager = pm api.cn.engine = gxhash.NewFullFaker() api.cn.governance = testGov() + api.cn.supplyManager = &testSupplyManager{} api.gpo = gasprice.NewOracle(api, gasprice.Config{}, nil, api.cn.governance) number := uint64(123) From 22e64e2abe83e49b22162571624b5ebe7632782e Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Tue, 13 Aug 2024 16:20:11 +0900 Subject: [PATCH 34/36] database: Change schema prefix for SupplyCheckpoint Makes items created by v1.0.1 or prior obsolete. Nodes upgraded to v1.0.2 are re-accumulated from the genesis. 
--- storage/database/schema.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/database/schema.go b/storage/database/schema.go index de326847d..0dbaecd8e 100644 --- a/storage/database/schema.go +++ b/storage/database/schema.go @@ -132,8 +132,8 @@ var ( stakingInfoPrefix = []byte("stakingInfo") - supplyCheckpointPrefix = []byte("accReward") - lastSupplyCheckpointNumberKey = []byte("lastAccRewardBlockNumber") + supplyCheckpointPrefix = []byte("supplyCheckpoint") + lastSupplyCheckpointNumberKey = []byte("lastSupplyCheckpointNumber") chaindatafetcherCheckpointKey = []byte("chaindatafetcherCheckpoint") ) From 0342eb573af9919ee6bd6eef1cb9983fd4cb71f1 Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Fri, 16 Aug 2024 19:30:43 +0900 Subject: [PATCH 35/36] api: Fix recoverFromTransaction with Legacy type --- api/api_public_transaction_pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/api_public_transaction_pool.go b/api/api_public_transaction_pool.go index 9d1edb5c0..560b19179 100644 --- a/api/api_public_transaction_pool.go +++ b/api/api_public_transaction_pool.go @@ -592,7 +592,7 @@ func (s *PublicTransactionPoolAPI) RecoverFromTransaction(ctx context.Context, e if err != nil { return common.Address{}, err } - return tx.From() + return types.Sender(signer, tx) } // RecoverFromMessage validates that the message is signed by one of the keys in the given account. 
From a71c262d4126fe657b9c23a203498f6040c4009b Mon Sep 17 00:00:00 2001 From: "ollie.j" Date: Thu, 8 Aug 2024 13:58:27 +0900 Subject: [PATCH 36/36] params: Set Mainnet Kaia and KIP160 fork block number --- params/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/params/config.go b/params/config.go index bac9c8249..d81edf074 100644 --- a/params/config.go +++ b/params/config.go @@ -51,7 +51,7 @@ var ( KoreCompatibleBlock: big.NewInt(119750400), ShanghaiCompatibleBlock: big.NewInt(135456000), CancunCompatibleBlock: big.NewInt(147534000), - KaiaCompatibleBlock: big.NewInt(999999999), // TODO-Kaia: set Mainnet KaiaCompatibleBlock + KaiaCompatibleBlock: big.NewInt(162900480), RandaoCompatibleBlock: big.NewInt(147534000), RandaoRegistry: &RegistryConfig{ Records: map[string]common.Address{ @@ -61,7 +61,7 @@ var ( }, Kip103CompatibleBlock: big.NewInt(119750400), Kip103ContractAddress: common.HexToAddress("0xD5ad6D61Dd87EdabE2332607C328f5cc96aeCB95"), - Kip160CompatibleBlock: big.NewInt(999999999), // TODO-Kaia: set Mainnet Kip160CompatibleBlock + Kip160CompatibleBlock: big.NewInt(162900480), Kip160ContractAddress: common.HexToAddress("0xa4df15717Da40077C0aD528296AdBBd046579Ee9"), DeriveShaImpl: 2, Governance: &GovernanceConfig{