From 494c652c02f61277485f552eb573088982360768 Mon Sep 17 00:00:00 2001
From: Jack <87339414+algojack@users.noreply.github.com>
Date: Wed, 25 Jan 2023 14:42:56 -0500
Subject: [PATCH 01/81] CICD: Adding github release notes template (#5044)

---
 .github/{.release.yml => release.yml} | 1 +
 1 file changed, 1 insertion(+)
 rename .github/{.release.yml => release.yml} (99%)

diff --git a/.github/.release.yml b/.github/release.yml
similarity index 99%
rename from .github/.release.yml
rename to .github/release.yml
index 1ea816c33d..0b15a11879 100644
--- a/.github/.release.yml
+++ b/.github/release.yml
@@ -18,3 +18,4 @@ changelog:
     - title: Other
       labels:
         - "*"
+

From 427a11963a201f38af9dd99fd43097a8889f59fc Mon Sep 17 00:00:00 2001
From: cce <51567+cce@users.noreply.github.com>
Date: Wed, 25 Jan 2023 17:22:58 -0500
Subject: [PATCH 02/81] build: add -Wno-deprecated to sortition (#5050)

---
 data/committee/sortition/sortition.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/data/committee/sortition/sortition.go b/data/committee/sortition/sortition.go
index 05f3476f7f..9bc5bac022 100644
--- a/data/committee/sortition/sortition.go
+++ b/data/committee/sortition/sortition.go
@@ -17,7 +17,7 @@
 package sortition
 
 // #cgo CFLAGS: -O3
-// #cgo CXXFLAGS: -std=c++11
+// #cgo CXXFLAGS: -std=c++11 -Wno-deprecated
 // #include <stdint.h>
 // #include <stdlib.h>
 // #include "sortition.h"

From 2ba3f4b7045a151af8cc5be64ab2ebd2239a43c0 Mon Sep 17 00:00:00 2001
From: Musab Alturki <42160792+malturki@users.noreply.github.com>
Date: Wed, 25 Jan 2023 20:13:06 -0600
Subject: [PATCH 03/81] Fix the inline comment for proposalAccepted (#4889)

Co-authored-by: cce <51567+cce@users.noreply.github.com>
---
 agreement/events.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agreement/events.go b/agreement/events.go
index 5304b1798f..0c858aa9f1 100644
--- a/agreement/events.go
+++ b/agreement/events.go
@@ -146,7 +146,7 @@ const (
 	// that a certificate has formed for that proposal-value.
 	proposalCommittable
 
-	// proposalCommittable is returned by the proposal state machines when a
+	// proposalAccepted is returned by the proposal state machines when a
 	// proposal-value is accepted.
 	proposalAccepted
 

From 008d23c0fede77490049e7b5dcde9e71dc390d19 Mon Sep 17 00:00:00 2001
From: Will Winder
Date: Thu, 26 Jan 2023 10:24:27 -0500
Subject: [PATCH 04/81] development: tool to convert validated blocks.
(#5048) --- daemon/algod/api/server/v2/delta.go | 7 +- daemon/algod/api/server/v2/delta_test.go | 2 +- daemon/algod/api/server/v2/handlers.go | 13 +-- tools/debug/vbconvert/README.md | 4 + tools/debug/vbconvert/main.go | 125 +++++++++++++++++++++++ 5 files changed, 137 insertions(+), 14 deletions(-) create mode 100644 tools/debug/vbconvert/README.md create mode 100644 tools/debug/vbconvert/main.go diff --git a/daemon/algod/api/server/v2/delta.go b/daemon/algod/api/server/v2/delta.go index f48115a3f7..d70e8275a6 100644 --- a/daemon/algod/api/server/v2/delta.go +++ b/daemon/algod/api/server/v2/delta.go @@ -70,8 +70,11 @@ func convertAssetResourceRecordToGenerated(asset ledgercore.AssetResourceRecord) } } -// stateDeltaToLedgerDelta converts ledgercore.StateDelta to v2.model.LedgerStateDelta -func stateDeltaToLedgerDelta(sDelta ledgercore.StateDelta, consensus config.ConsensusParams, rewardsLevel uint64, round uint64) (response model.LedgerStateDelta, err error) { +// StateDeltaToLedgerDelta converts ledgercore.StateDelta to v2.model.LedgerStateDelta +func StateDeltaToLedgerDelta(sDelta ledgercore.StateDelta, consensus config.ConsensusParams) (response model.LedgerStateDelta, err error) { + rewardsLevel := sDelta.Hdr.RewardsLevel + round := sDelta.Hdr.Round + var accts []model.AccountBalanceRecord var apps []model.AppResourceRecord var assets []model.AssetResourceRecord diff --git a/daemon/algod/api/server/v2/delta_test.go b/daemon/algod/api/server/v2/delta_test.go index 596954f43d..29135ce47c 100644 --- a/daemon/algod/api/server/v2/delta_test.go +++ b/daemon/algod/api/server/v2/delta_test.go @@ -110,7 +110,7 @@ func TestDelta(t *testing.T) { Totals: ledgercore.AccountTotals{}, } - converted, err := stateDeltaToLedgerDelta(original, config.Consensus[protocol.ConsensusCurrentVersion], 25, 4) + converted, err := StateDeltaToLedgerDelta(original, config.Consensus[protocol.ConsensusCurrentVersion]) require.NoError(t, err) require.Equal(t, original.Accts.Len(), len(*converted.Accts.Accounts)) expAccDelta := original.Accts.Accts[0] diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 21e1f91703..264d4f1445 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -1083,20 +1083,11 @@ func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round uint64) error { if err != nil { return internalError(ctx, err, errFailedRetrievingStateDelta, v2.Log) } - consensusParams, err := v2.Node.LedgerForAPI().ConsensusParams(basics.Round(round)) - if err != nil { - return internalError(ctx, fmt.Errorf("unable to retrieve consensus params for round %d", round), errInternalFailure, v2.Log) - } - hdr, err := v2.Node.LedgerForAPI().BlockHdr(basics.Round(round)) - if err != nil { - return internalError(ctx, fmt.Errorf("unable to retrieve block header for round %d", round), errInternalFailure, v2.Log) - } - - response, err := stateDeltaToLedgerDelta(sDelta, consensusParams, hdr.RewardsLevel, round) + consensusParams := config.Consensus[sDelta.Hdr.CurrentProtocol] + response, err := StateDeltaToLedgerDelta(sDelta, consensusParams) if err != nil { return internalError(ctx, err, errInternalFailure, v2.Log) } - return ctx.JSON(http.StatusOK, response) } diff --git a/tools/debug/vbconvert/README.md b/tools/debug/vbconvert/README.md new file mode 100644 index 0000000000..3ac7747494 --- /dev/null +++ b/tools/debug/vbconvert/README.md @@ -0,0 +1,4 @@ +# vbconvert + +Utility tool to assist converting ledgercore.ValidatedBlock objects into 
a
+format that can be parsed using types in the Algorand Go SDK.
diff --git a/tools/debug/vbconvert/main.go b/tools/debug/vbconvert/main.go
new file mode 100644
index 0000000000..a6acc19cb1
--- /dev/null
+++ b/tools/debug/vbconvert/main.go
@@ -0,0 +1,125 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+package main
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"github.com/algorand/go-codec/codec"
+
+	"github.com/algorand/go-algorand/config"
+	v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
+	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+	"github.com/algorand/go-algorand/data/bookkeeping"
+	"github.com/algorand/go-algorand/ledger/ledgercore"
+	"github.com/algorand/go-algorand/protocol"
+)
+
+type arguments struct {
+	inputFile  string
+	outputFile string
+	format     string
+}
+
+type algodVB struct {
+	Blk   bookkeeping.Block
+	Delta ledgercore.StateDelta
+}
+
+type conduitVB struct {
+	Blk   bookkeeping.Block
+	Delta model.LedgerStateDelta
+}
+
+func run(args arguments) {
+	var algodType algodVB
+
+	// Read
+	data, err := os.ReadFile(args.inputFile)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Unable to read input file '%s': %s\n", args.inputFile, err)
+		os.Exit(1)
+	}
+
+	err = protocol.DecodeReflect(data, &algodType)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Unable to decode input file '%s': %s\n", args.inputFile, err)
+		os.Exit(1)
+	}
+
+	// Convert
+	consensusParams := config.Consensus[algodType.Delta.Hdr.CurrentProtocol]
+	modelDelta, err := v2.StateDeltaToLedgerDelta(algodType.Delta, consensusParams)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Unable to convert ledgercore.StateDelta from input file '%s': %s\n", args.inputFile, err)
+		os.Exit(1)
+	}
+
+	// Write
+	outputFile, err := os.Create(args.outputFile)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Unable to open output file '%s': %s\n", args.outputFile, err)
+		os.Exit(1)
+	}
+
+	var enc *codec.Encoder
+	switch strings.ToLower(args.format) {
+	case "json":
+		enc = protocol.NewJSONEncoder(outputFile)
+	case "msgp":
+		enc = protocol.NewEncoder(outputFile)
+	default:
+		fmt.Fprintf(os.Stderr, "Unknown encoder type '%s', valid encoders: json, msgp.\n", args.format)
+		os.Exit(1)
+	}
+
+	conduitType := conduitVB{
+		Blk:   algodType.Blk,
+		Delta: modelDelta,
+	}
+	err = enc.Encode(conduitType)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Unable to encode output file '%s': %s\n", args.outputFile, err)
+		os.Exit(1)
+	}
+}
+
+func main() {
+	var args arguments
+
+	command := &cobra.Command{
+		Use:  "vbconvert",
+		Long: "Convert a ledgercore.ValidatedBlock into the conduit version of a ValidatedBlock.",
+		Run: func(_ *cobra.Command, _ []string) {
+			run(args)
+		},
+	}
+
+	command.Flags().StringVarP(&args.inputFile, "input", "i", "", "Input filename.")
+	command.Flags().StringVarP(&args.outputFile, "output", "o",
"", "Optional output filename. If not present a default .convert is created.") + command.Flags().StringVarP(&args.format, "format", "f", "json", "Optional output format. Valid formats are 'json' and 'msgp'.") + command.MarkFlagRequired("input") + command.MarkFlagRequired("output") + + if err := command.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "An error occurred while running vbconvert: %s.\n", err) + os.Exit(1) + } +} From fe832867b036470319615434822451035a1bf43e Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 26 Jan 2023 10:33:05 -0500 Subject: [PATCH 05/81] e2e tests: log pending transactions in case of failure (#5062) --- test/framework/fixtures/restClientFixture.go | 27 +++++++++++++++++--- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go index 1599843544..37d78797a3 100644 --- a/test/framework/fixtures/restClientFixture.go +++ b/test/framework/fixtures/restClientFixture.go @@ -276,22 +276,41 @@ func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAn for txid, addr := range txidsAndAddresses { _, err := f.WaitForConfirmedTxn(roundTimeout, addr, txid) if err != nil { - f.t.Logf("txn failed to confirm: ", addr, txid) + f.t.Logf("txn failed to confirm: addr=%s, txid=%s", addr, txid) pendingTxns, err := f.LibGoalClient.GetParsedPendingTransactions(0) if err == nil { pendingTxids := make([]string, 0, pendingTxns.TotalTransactions) for _, txn := range pendingTxns.TopTransactions { pendingTxids = append(pendingTxids, txn.Txn.ID().String()) } - f.t.Logf("pending txids: ", pendingTxids) + f.t.Logf("pending txids: %v", pendingTxids) } else { - f.t.Logf("unable to log pending txns, ", err) + f.t.Logf("unable to log pending txns: %v", err) } allTxids := make([]string, 0, len(txidsAndAddresses)) for txID := range txidsAndAddresses { allTxids = append(allTxids, txID) } - f.t.Logf("all txids: ", allTxids) + f.t.Logf("all txids: %s", allTxids) + + dataDirs := f.network.NodeDataDirs() + for _, nodedir := range dataDirs { + client, err := libgoal.MakeClientWithBinDir(f.binDir, nodedir, nodedir, libgoal.FullClient) + if err != nil { + f.t.Logf("failed to make a node client for %s: %v", nodedir, err) + continue + } + pendingTxns, err := client.GetParsedPendingTransactions(0) + if err != nil { + f.t.Logf("failed to get pending txns for %s: %v", nodedir, err) + continue + } + pendingTxids := make([]string, 0, pendingTxns.TotalTransactions) + for _, txn := range pendingTxns.TopTransactions { + pendingTxids = append(pendingTxids, txn.Txn.ID().String()) + } + f.t.Logf("pending txids at node %s: %v", nodedir, pendingTxids) + } return false } } From 26a860fdb4bc6c2612191c3839003a44148f8ce7 Mon Sep 17 00:00:00 2001 From: Jacob Daitzman Date: Thu, 26 Jan 2023 21:37:55 -0500 Subject: [PATCH 06/81] build: add start anchor to paralleltest exclusion regex (#5046) --- .golangci.yml | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 30b4d28128..335da20685 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -117,61 +117,61 @@ issues: - staticcheck - typecheck # Ignore missing parallel tests in existing packages - - path: agreement.*_test\.go + - path: ^agreement.*_test\.go linters: - paralleltest - - path: catchup.*_test\.go + - path: ^catchup.*_test\.go linters: - paralleltest - - path: cmd.*_test\.go + - path: ^cmd.*_test\.go linters: - 
paralleltest - - path: config.*_test\.go + - path: ^config.*_test\.go linters: - paralleltest - - path: crypto.*_test\.go + - path: ^crypto.*_test\.go linters: - paralleltest - - path: daemon.*_test\.go + - path: ^daemon.*_test\.go linters: - paralleltest - - path: data.*_test\.go + - path: ^data.*_test\.go linters: - paralleltest - - path: gen.*_test\.go + - path: ^gen.*_test\.go linters: - paralleltest - - path: ledger.*_test\.go + - path: ^ledger.*_test\.go linters: - paralleltest - - path: logging.*_test\.go + - path: ^logging.*_test\.go linters: - paralleltest - - path: netdeploy.*_test\.go + - path: ^netdeploy.*_test\.go linters: - paralleltest - - path: network.*_test\.go + - path: ^network.*_test\.go linters: - paralleltest - - path: node.*_test\.go + - path: ^node.*_test\.go linters: - paralleltest - - path: protocol.*_test\.go + - path: ^protocol.*_test\.go linters: - paralleltest - - path: rpcs.*_test\.go + - path: ^rpcs.*_test\.go linters: - paralleltest - - path: stateproof.*_test\.go + - path: ^stateproof.*_test\.go linters: - paralleltest - - path: test.*_test\.go + - path: ^test.*_test\.go linters: - paralleltest - - path: tools.*_test\.go + - path: ^tools.*_test\.go linters: - paralleltest - - path: util.*_test\.go + - path: ^util.*_test\.go linters: - paralleltest # Add all linters here -- Comment this block out for testing linters From ec75266d2f8323506cf83048b84d01f55c1d2ba1 Mon Sep 17 00:00:00 2001 From: abebeos <110243666+abebeos@users.noreply.github.com> Date: Fri, 27 Jan 2023 22:27:03 +0200 Subject: [PATCH 07/81] goal: allow relative dataDir via -d cmd option (#5067) --- cmd/goal/commands.go | 9 ++++++-- cmd/goal/commands_test.go | 44 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go index ce17c9de48..2a790f3c58 100644 --- a/cmd/goal/commands.go +++ b/cmd/goal/commands.go @@ -278,8 +278,13 @@ func resolveDataDir() string { // Figure out what data directory to tell algod to use. // If not specified on cmdline with '-d', look for default in environment. var dir string - if len(dataDirs) > 0 { - dir = dataDirs[0] + if (len(dataDirs) > 0) && (dataDirs[0] != "") { + // calculate absolute path, see https://github.com/algorand/go-algorand/issues/589 + absDir, err := filepath.Abs(dataDirs[0]) + if err != nil { + reportErrorf("Absolute path conversion error: %s", err) + } + dir = absDir } if dir == "" { dir = os.Getenv("ALGORAND_DATA") diff --git a/cmd/goal/commands_test.go b/cmd/goal/commands_test.go index d83751331d..5a2e6cd021 100644 --- a/cmd/goal/commands_test.go +++ b/cmd/goal/commands_test.go @@ -31,3 +31,47 @@ func TestEnsureDataDirReturnsWhenDataDirIsProvided(t *testing.T) { actualDir := ensureFirstDataDir() require.Equal(t, expectedDir, actualDir) } + +func TestEnsureDataDirReturnsWhenWorkDirIsProvided(t *testing.T) { + partitiontest.PartitionTest(t) + expectedDir, err := os.Getwd() + if err != nil { + reportErrorf("Error getting work dir: %s", err) + } + dataDirs[0] = "." 
+ actualDir := ensureFirstDataDir() + require.Equal(t, expectedDir, actualDir) +} + +func TestEnsureDataDirReturnsWhenRelPath1IsProvided(t *testing.T) { + partitiontest.PartitionTest(t) + expectedDir, err := os.Getwd() + if err != nil { + reportErrorf("Error getting work dir: %s", err) + } + dataDirs[0] = "./../goal" + actualDir := ensureFirstDataDir() + require.Equal(t, expectedDir, actualDir) +} + +func TestEnsureDataDirReturnsWhenRelPath2IsProvided(t *testing.T) { + partitiontest.PartitionTest(t) + expectedDir, err := os.Getwd() + if err != nil { + reportErrorf("Error getting work dir: %s", err) + } + dataDirs[0] = "../goal" + actualDir := ensureFirstDataDir() + require.Equal(t, expectedDir, actualDir) +} + +func TestEnsureDataDirReturnsWhenRelPath3IsProvided(t *testing.T) { + partitiontest.PartitionTest(t) + expectedDir, err := os.Getwd() + if err != nil { + reportErrorf("Error getting work dir: %s", err) + } + dataDirs[0] = "../../cmd/goal" + actualDir := ensureFirstDataDir() + require.Equal(t, expectedDir, actualDir) +} From cb9dfd2825883b22d90574705d4998299f511e2a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Jan 2023 19:40:50 -0500 Subject: [PATCH 08/81] build(deps): bump github.com/aws/aws-sdk-go from 1.16.5 to 1.33.0 (#4955) --- go.mod | 5 ++--- go.sum | 17 +++++++---------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index a1f20d7dfe..a63e13e5c2 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/algorand/msgp v1.1.53 github.com/algorand/oapi-codegen v1.12.0-algorand.0 github.com/algorand/websocket v1.4.5 - github.com/aws/aws-sdk-go v1.16.5 + github.com/aws/aws-sdk-go v1.33.0 github.com/consensys/gnark-crypto v0.7.0 github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 github.com/dchest/siphash v1.2.1 @@ -49,7 +49,7 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/invopop/yaml v0.1.0 // indirect - github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect + github.com/jmespath/go-jmespath v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -69,7 +69,6 @@ require ( golang.org/x/net v0.1.0 // indirect golang.org/x/term v0.1.0 // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect - google.golang.org/appengine v1.6.7 // indirect gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 68a08e0848..0bf38663bd 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/algorand/websocket v1.4.5 h1:Cs6UTaCReAl02evYxmN8k57cNHmBILRcspfSxYg4 github.com/algorand/websocket v1.4.5/go.mod h1:79n6FSZY08yQagHzE/YWZqTPBYfY5wc3IS+UTZe1W5c= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= -github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc= -github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.33.0 h1:Bq5Y6VTLbfnJp1IV8EL/qUU5qO1DYHda/zis/sqevkY= +github.com/aws/aws-sdk-go v1.33.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/bmatcuk/doublestar v1.1.1/go.mod 
h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas= github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE= @@ -47,13 +47,13 @@ github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUe github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g= github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -67,8 +67,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/invopop/yaml v0.1.0 h1:YW3WGUoJEXYfzWBjn00zIlrw7brGVD0fUKRYDPAPhrc= github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -149,9 +149,9 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= @@ -176,7 +176,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -191,8 +190,6 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= From 1ad3db9b5426db2a35a8c2fc2d8a19489be8bebb Mon Sep 17 00:00:00 2001 From: Ben Guidarelli Date: Mon, 30 Jan 2023 07:19:15 -0500 Subject: [PATCH 09/81] docs: fix typo in avm/teal spec (#5074) * remove duped word * change needs to be in README_in --- data/transactions/logic/README.md | 2 +- data/transactions/logic/README_in.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md index 850d02a3e1..324cd7d536 100644 --- a/data/transactions/logic/README.md +++ b/data/transactions/logic/README.md @@ -212,7 +212,7 @@ Constants can be pushed onto the stack in two different ways: 2. Constants can be loaded into storage separate from the stack and scratch space, using two opcodes `intcblock` and - `bytecblock`. Then, constants from this storage can be pushed + `bytecblock`. Then, constants from this storage can be pushed onto the stack by referring to the type and index using `intc`, `intc_[0123]`, `bytec`, and `bytec_[0123]`. This method is more efficient for constants that are used multiple times. diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md index 6dfcfe9664..58652ebcf7 100644 --- a/data/transactions/logic/README_in.md +++ b/data/transactions/logic/README_in.md @@ -212,7 +212,7 @@ Constants can be pushed onto the stack in two different ways: 2. Constants can be loaded into storage separate from the stack and scratch space, using two opcodes `intcblock` and - `bytecblock`. Then, constants from this storage can be pushed + `bytecblock`. 
Then, constants from this storage can be pushed onto the stack by referring to the type and index using `intc`, `intc_[0123]`, `bytec`, and `bytec_[0123]`. This method is more efficient for constants that are used multiple times. From f7e96d2f18d6e878e4ea3ef30456d17c337dacdb Mon Sep 17 00:00:00 2001 From: Mark Ciccarello Date: Mon, 30 Jan 2023 10:37:25 -0800 Subject: [PATCH 10/81] goal: add --start option for network create command (#4902) --- cmd/goal/network.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cmd/goal/network.go b/cmd/goal/network.go index 35565fb6fa..715b5707d5 100644 --- a/cmd/goal/network.go +++ b/cmd/goal/network.go @@ -35,6 +35,7 @@ var startNode string var noImportKeys bool var noClean bool var devModeOverride bool +var startOnCreation bool func init() { networkCmd.AddCommand(networkCreateCmd) @@ -47,6 +48,7 @@ func init() { networkCreateCmd.Flags().BoolVarP(&noImportKeys, "noimportkeys", "K", false, "Do not import root keys when creating the network (by default will import)") networkCreateCmd.Flags().BoolVar(&noClean, "noclean", false, "Prevents auto-cleanup on error - for diagnosing problems") networkCreateCmd.Flags().BoolVar(&devModeOverride, "devMode", false, "Forces the configuration to enable DevMode, returns an error if the template is not compatible with DevMode.") + networkCreateCmd.Flags().BoolVarP(&startOnCreation, "start", "s", false, "Automatically start the network after creating it.") networkStartCmd.Flags().StringVarP(&startNode, "node", "n", "", "Specify the name of a specific node to start") @@ -112,6 +114,15 @@ var networkCreateCmd = &cobra.Command{ } reportInfof(infoNetworkCreated, network.Name(), networkRootDir) + + if startOnCreation { + network, binDir := getNetworkAndBinDir() + err := network.Start(binDir, false) + if err != nil { + reportErrorf(errorStartingNetwork, err) + } + reportInfof(infoNetworkStarted, networkRootDir) + } }, } From 8970ddf11aebf0712201413d74cc94c54aab4311 Mon Sep 17 00:00:00 2001 From: Ignacio Corderi Date: Mon, 30 Jan 2023 16:21:31 -0300 Subject: [PATCH 11/81] ledger: abstract store and remove all db.Atomic direct usages (#5021) --- ledger/acctdeltas_test.go | 4 +- ledger/acctonline.go | 8 +- ledger/acctonline_test.go | 8 +- ledger/acctupdates.go | 10 +-- ledger/acctupdates_test.go | 27 +++--- ledger/archival_test.go | 3 +- ledger/catchpointtracker.go | 50 +++++------ ledger/catchpointtracker_test.go | 11 ++- ledger/catchpointwriter_test.go | 147 +++++++++++++++---------------- ledger/catchupaccessor.go | 47 +++++----- ledger/ledger.go | 14 +-- ledger/ledger_test.go | 8 +- ledger/store/interface.go | 7 ++ ledger/store/store.go | 146 ++++++++++++++++++++++++++++++ ledger/store/testing.go | 13 +++ ledger/tracker.go | 12 +-- ledger/trackerdb.go | 2 +- ledger/txtail.go | 3 +- ledger/txtail_test.go | 56 ++++++------ 19 files changed, 371 insertions(+), 205 deletions(-) create mode 100644 ledger/store/store.go diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 21ac1e5dcc..017677a96c 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -924,7 +924,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo normalizedAccountBalances, err := prepareNormalizedBalancesV6(chunk.Balances, proto) require.NoError(b, err) b.StartTimer() - err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) err = 
crw.WriteCatchpointStagingBalances(ctx, normalizedAccountBalances) return @@ -937,7 +937,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo last64KDuration := time.Since(last64KStart) - last64KAccountCreationTime fmt.Printf("%-82s%-7d (last 64k) %-6d ns/account %d accounts/sec\n", b.Name(), last64KSize, (last64KDuration / time.Duration(last64KSize)).Nanoseconds(), int(float64(last64KSize)/float64(last64KDuration.Seconds()))) } - stats, err := l.trackerDBs.Wdb.Vacuum(context.Background()) + stats, err := l.trackerDBs.Vacuum(context.Background()) require.NoError(b, err) fmt.Printf("%-82sdb fragmentation %.1f%%\n", b.Name(), float32(stats.PagesBefore-stats.PagesAfter)*100/float32(stats.PagesBefore)) b.ReportMetric(float64(b.N)/float64((time.Since(accountsWritingStarted)-accountsGenerationDuration).Seconds()), "accounts/sec") diff --git a/ledger/acctonline.go b/ledger/acctonline.go index f73b153e24..2cacc0efe7 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -62,7 +62,7 @@ type cachedOnlineAccount struct { // onlineAccounts tracks history of online accounts type onlineAccounts struct { // Connection to the database. - dbs db.Pair + dbs store.TrackerStore // Prepared SQL statements for fast accounts DB lookups. accountsq store.OnlineAccountsReader @@ -151,7 +151,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou ao.dbs = l.trackerDB() ao.log = l.trackerLog() - err = ao.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = ao.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) error { arw := store.NewAccountsSQLReaderWriter(tx) var err0 error var endRound basics.Round @@ -175,7 +175,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou return } - ao.accountsq, err = store.OnlineAccountsInitDbQueries(ao.dbs.Rdb.Handle) + ao.accountsq, err = ao.dbs.CreateOnlineAccountsReader() if err != nil { return } @@ -815,7 +815,7 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou var accts map[basics.Address]*ledgercore.OnlineAccount start := time.Now() ledgerAccountsonlinetopCount.Inc(nil) - err = ao.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ao.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) accts, err = arw.AccountsOnlineTop(rnd, batchOffset, batchSize, genesisProto) if err != nil { diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index d892c21b67..050785e10a 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -81,7 +81,7 @@ func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracke err := lt.prepareCommit(dcc) require.NoError(t, err) } - err := ml.trackers.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ml.trackers.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) for _, lt := range ml.trackers.trackers { err0 := lt.commitRound(ctx, tx, dcc) @@ -807,7 +807,7 @@ func TestAcctOnlineRoundParamsCache(t *testing.T) { var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData var endRound basics.Round - err := ao.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ao.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) dbOnlineRoundParams, endRound, err = arw.AccountsOnlineRoundParams() return err @@ -1292,7 +1292,7 @@ func 
TestAcctOnlineVotersLongerHistory(t *testing.T) { // DB has all the required history tho var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData var endRound basics.Round - err = oa.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = oa.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) dbOnlineRoundParams, endRound, err = arw.AccountsOnlineRoundParams() return err @@ -1680,7 +1680,7 @@ func TestAcctOnlineTopDBBehindMemRound(t *testing.T) { go func() { time.Sleep(2 * time.Second) // tweak the database to move backwards - err = oa.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = oa.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { _, err = tx.Exec("update acctrounds set rnd = 1 WHERE id='acctbase' ") return }) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index dbaaefca69..dc92684ea0 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -154,9 +154,9 @@ type modifiedKvValue struct { type accountUpdates struct { // Connection to the database. - dbs db.Pair + dbs store.TrackerStore - // Prepared SQL statements for fast accounts DB lookups. + // Optimized reader for fast accounts DB lookups. accountsq store.AccountsReader // cachedDBRound is always exactly tracker DB round (and therefore, accountsRound()), @@ -928,7 +928,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou start := time.Now() ledgerAccountsinitCount.Inc(nil) - err = au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = au.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) error { arw := store.NewAccountsSQLReaderWriter(tx) totals, err0 := arw.AccountsTotals(ctx, false) if err0 != nil { @@ -944,7 +944,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou return } - au.accountsq, err = store.AccountsInitDbQueries(au.dbs.Rdb.Handle) + au.accountsq, err = au.dbs.CreateAccountsReader() if err != nil { return } @@ -1962,7 +1962,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) { }() ledgerVacuumCount.Inc(nil) - vacuumStats, err := au.dbs.Wdb.Vacuum(ctx) + vacuumStats, err := au.dbs.Vacuum(ctx) close(vacuumExitCh) vacuumLoggingAbort.Wait() diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 723e2c1986..69e4506517 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -51,7 +51,7 @@ var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36} type mockLedgerForTracker struct { - dbs db.Pair + dbs store.TrackerStore blocks []blockEntry deltas []ledgercore.StateDelta log logging.Logger @@ -94,9 +94,8 @@ func setupAccts(niter int) []map[basics.Address]basics.AccountData { } func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, l logging.Logger) *mockLedgerForTracker { - dbs, fileName := storetesting.DbOpenTest(t, inMemory) - dbs.Rdb.SetLogger(l) - dbs.Wdb.SetLogger(l) + dbs, fileName := store.DbOpenTrackerTest(t, inMemory) + dbs.SetLogger(l) blocks := randomInitChain(consensusVersion, initialBlocksCount) deltas := make([]ledgercore.StateDelta, initialBlocksCount) @@ -154,7 
+153,7 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker { copy(newLedgerTracker.deltas, ml.deltas) // calling Vacuum implies flushing the database content to disk.. - ml.dbs.Wdb.Vacuum(context.Background()) + ml.dbs.Vacuum(context.Background()) // copy the database files. for _, ext := range []string{"", "-shm", "-wal"} { bytes, err := os.ReadFile(ml.filename + ext) @@ -167,7 +166,7 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker { dbs.Rdb.SetLogger(dblogger) dbs.Wdb.SetLogger(dblogger) - newLedgerTracker.dbs = dbs + newLedgerTracker.dbs = store.CreateTrackerSQLStore(dbs) return newLedgerTracker } @@ -220,7 +219,7 @@ func (ml *mockLedgerForTracker) BlockHdr(rnd basics.Round) (bookkeeping.BlockHea return ml.blocks[int(rnd)].block.BlockHeader, nil } -func (ml *mockLedgerForTracker) trackerDB() db.Pair { +func (ml *mockLedgerForTracker) trackerDB() store.TrackerStore { return ml.dbs } @@ -264,7 +263,7 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address return } - err = au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = au.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) error { var err0 error bals, err0 = accountsAll(tx) return err0 @@ -572,7 +571,7 @@ func TestAcctUpdates(t *testing.T) { // check the account totals. var dbRound basics.Round - err := ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ml.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) dbRound, err = arw.AccountsRound() return @@ -586,7 +585,7 @@ func TestAcctUpdates(t *testing.T) { expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{}) var actualTotals ledgercore.AccountTotals - err = ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) actualTotals, err = arw.AccountsTotals(ctx, false) return @@ -1578,14 +1577,14 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) { i++ } - err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ml.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1)) return }) require.NoError(b, err) } - err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ml.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) return arw.UpdateAccountsHashRound(ctx, 1) }) @@ -2352,7 +2351,7 @@ func TestAcctUpdatesResources(t *testing.T) { err := au.prepareCommit(dcc) require.NoError(t, err) - err = ml.trackers.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackers.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) err = au.commitRound(ctx, tx, dcc) if err != nil { @@ -2636,7 +2635,7 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe err := au.prepareCommit(dcc) require.NoError(t, err) - err = ml.trackers.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackers.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) err = au.commitRound(ctx, tx, dcc) if err != nil { diff --git 
a/ledger/archival_test.go b/ledger/archival_test.go index c7f00b7d6a..cf96cb05c3 100644 --- a/ledger/archival_test.go +++ b/ledger/archival_test.go @@ -39,6 +39,7 @@ import ( "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/ledger/store/blockdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -75,7 +76,7 @@ func (wl *wrappedLedger) Latest() basics.Round { return wl.l.Latest() } -func (wl *wrappedLedger) trackerDB() db.Pair { +func (wl *wrappedLedger) trackerDB() store.TrackerStore { return wl.l.trackerDB() } diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 6968f580a0..220c6af6be 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -77,11 +77,6 @@ func catchpointStage1Decoder(r io.Reader) (io.ReadCloser, error) { return snappyReadCloser{snappy.NewReader(r)}, nil } -type catchpointStore interface { - store.CatchpointWriter - store.CatchpointReader -} - type catchpointTracker struct { // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated dbDirectory string @@ -103,8 +98,8 @@ type catchpointTracker struct { log logging.Logger // Connection to the database. - dbs db.Pair - catchpointStore catchpointStore + dbs store.TrackerStore + catchpointStore store.CatchpointReaderWriter // The last catchpoint label that was written to the database. Should always align with what's in the database. // note that this is the last catchpoint *label* and not the catchpoint file. @@ -216,7 +211,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic } } - f := func(ctx context.Context, tx *sql.Tx) error { + return ct.dbs.Batch(func(ctx context.Context, tx *sql.Tx) error { crw := store.NewCatchpointSQLReaderWriter(tx) err := ct.recordFirstStageInfo(ctx, tx, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen) if err != nil { @@ -225,8 +220,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic // Clear the db record. 
return crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateWritingFirstStageInfo, 0) - } - return ct.dbs.Wdb.Atomic(f) + }) } // Possibly finish generating first stage catchpoint db record and data file after @@ -319,7 +313,10 @@ func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round) error { func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Round) (err error) { ct.log = l.trackerLog() ct.dbs = l.trackerDB() - ct.catchpointStore = store.NewCatchpointSQLReaderWriter(l.trackerDB().Wdb.Handle) + ct.catchpointStore, err = l.trackerDB().CreateCatchpointReaderWriter() + if err != nil { + return err + } ct.roundDigest = nil ct.catchpointDataWriting = 0 @@ -327,14 +324,14 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Rou ct.catchpointDataSlowWriting = make(chan struct{}, 1) close(ct.catchpointDataSlowWriting) - err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = ct.dbs.Batch(func(ctx context.Context, tx *sql.Tx) error { return ct.initializeHashes(ctx, tx, dbRound) }) if err != nil { return err } - ct.accountsq, err = store.AccountsInitDbQueries(ct.dbs.Rdb.Handle) + ct.accountsq, err = ct.dbs.CreateAccountsReader() if err != nil { return } @@ -777,9 +774,9 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound return err } - err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ct.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) - err = ct.recordCatchpointFile(ctx, tx, round, relCatchpointFilePath, fileInfo.Size()) + err = ct.recordCatchpointFile(ctx, crw, round, relCatchpointFilePath, fileInfo.Size()) if err != nil { return err } @@ -1090,7 +1087,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account var catchpointWriter *catchpointWriter start := time.Now() ledgerGeneratecatchpointCount.Inc(nil) - err = ct.dbs.Rdb.AtomicContext(ctx, func(dbCtx context.Context, tx *sql.Tx) (err error) { + err = ct.dbs.BatchContext(ctx, func(dbCtx context.Context, tx *sql.Tx) (err error) { catchpointWriter, err = makeCatchpointWriter(dbCtx, catchpointDataFilePath, tx, ResourcesPerCatchpointFileChunk) if err != nil { return @@ -1213,8 +1210,7 @@ func makeCatchpointDataFilePath(accountsRound basics.Round) string { // after a successful insert operation to the database, it would delete up to 2 old entries, as needed. // deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the // database and storage realign. -func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, e db.Executable, round basics.Round, relCatchpointFilePath string, fileSize int64) (err error) { - crw := store.NewCatchpointSQLReaderWriter(e) +func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, crw store.CatchpointReaderWriter, round basics.Round, relCatchpointFilePath string, fileSize int64) (err error) { if ct.catchpointFileHistoryLength != 0 { err = crw.StoreCatchpoint(ctx, round, relCatchpointFilePath, "", fileSize) if err != nil { @@ -1257,7 +1253,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS ledgerGetcatchpointCount.Inc(nil) // TODO: we need to generalize this, check @cce PoC PR, he has something // somewhat broken for some KVs.. 
- err := ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ct.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) dbFileName, _, fileSize, err = crw.GetCatchpoint(ctx, round) return @@ -1277,8 +1273,11 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS if os.IsNotExist(err) { // the database told us that we have this file.. but we couldn't find it. // delete it from the database. - err := ct.recordCatchpointFile( - context.Background(), ct.dbs.Wdb.Handle, round, "", 0) + crw, err := ct.dbs.CreateCatchpointReaderWriter() + if err != nil { + return nil, err + } + err = ct.recordCatchpointFile(context.Background(), crw, round, "", 0) if err != nil { ct.log.Warnf("catchpointTracker.GetCatchpointStream() unable to delete missing catchpoint entry: %v", err) return nil, err @@ -1302,10 +1301,11 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS // we couldn't get the stat, so just return with the file. return &readCloseSizer{ReadCloser: file, size: -1}, nil } - - err = ct.recordCatchpointFile( - context.Background(), ct.dbs.Wdb.Handle, round, relCatchpointFilePath, - fileInfo.Size()) + crw, err := ct.dbs.CreateCatchpointReaderWriter() + if err != nil { + return nil, err + } + err = ct.recordCatchpointFile(context.Background(), crw, round, relCatchpointFilePath, fileInfo.Size()) if err != nil { ct.log.Warnf("catchpointTracker.GetCatchpointStream() unable to save missing catchpoint entry: %v", err) } diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index 33cfe9baed..22f38e599f 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -353,7 +353,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { // at this point, the database was created. We want to fill the accounts data accountsNumber := 6000000 * b.N - err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward @@ -1002,7 +1002,8 @@ func TestFirstStagePersistence(t *testing.T) { defer ml2.Close() ml.Close() - cps2 := store.NewCatchpointSQLReaderWriter(ml2.dbs.Wdb.Handle) + cps2, err := ml2.dbs.CreateCatchpointReaderWriter() + require.NoError(t, err) // Insert unfinished first stage record. err = cps2.WriteCatchpointStateUint64( @@ -1131,7 +1132,8 @@ func TestSecondStagePersistence(t *testing.T) { err = os.WriteFile(catchpointDataFilePath, catchpointData, 0644) require.NoError(t, err) - cps2 := store.NewCatchpointSQLReaderWriter(ml2.dbs.Wdb.Handle) + cps2, err := ml2.dbs.CreateCatchpointReaderWriter() + require.NoError(t, err) // Restore the first stage database record. err = cps2.InsertOrReplaceCatchpointFirstStageInfo(context.Background(), firstStageRound, &firstStageInfo) @@ -1322,7 +1324,8 @@ func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T) defer ml2.Close() ml.Close() - cps2 := store.NewCatchpointSQLReaderWriter(ml2.dbs.Wdb.Handle) + cps2, err := ml2.dbs.CreateCatchpointReaderWriter() + require.NoError(t, err) // Sanity check: first stage record should be deleted. 
_, exists, err := cps2.SelectCatchpointFirstStageInfo(context.Background(), firstStageRound) diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go index 478df2bb25..5bba123e84 100644 --- a/ledger/catchpointwriter_test.go +++ b/ledger/catchpointwriter_test.go @@ -46,7 +46,6 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" - "github.com/algorand/go-algorand/util/db" "github.com/algorand/msgp/msgp" ) @@ -129,8 +128,7 @@ func TestBasicCatchpointWriter(t *testing.T) { au.close() fileName := filepath.Join(temporaryDirectory, "15.data") - readDb := ml.trackerDB().Rdb - err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { writer, err := makeCatchpointWriter(context.Background(), fileName, tx, ResourcesPerCatchpointFileChunk) if err != nil { return err @@ -185,7 +183,7 @@ func TestBasicCatchpointWriter(t *testing.T) { require.Equal(t, io.EOF, err) } -func testWriteCatchpoint(t *testing.T, rdb db.Accessor, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader { +func testWriteCatchpoint(t *testing.T, rdb store.TrackerStore, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader { var totalAccounts uint64 var totalChunks uint64 var biggestChunkLen uint64 @@ -195,7 +193,7 @@ func testWriteCatchpoint(t *testing.T, rdb db.Accessor, datapath string, filepat maxResourcesPerChunk = ResourcesPerCatchpointFileChunk } - err := rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := rdb.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { writer, err := makeCatchpointWriter(context.Background(), datapath, tx, maxResourcesPerChunk) arw := store.NewAccountsSQLReaderWriter(tx) @@ -285,9 +283,8 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) { require.NoError(t, err) au.close() catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") - readDb := ml.trackerDB().Rdb - err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { expectedTotalAccounts := uint64(1) totalAccountsWritten := uint64(0) totalResources := 0 @@ -372,9 +369,8 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) { require.NoError(t, err) au.close() catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") - readDb := ml.trackerDB().Rdb - err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) expectedTotalAccounts, err := arw.TotalAccounts(ctx) if err != nil { @@ -444,9 +440,9 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint") const maxResourcesPerChunk = 5 - testWriteCatchpoint(t, ml.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, maxResourcesPerChunk) + testWriteCatchpoint(t, ml.trackerDB(), catchpointDataFilePath, catchpointFilePath, maxResourcesPerChunk) - l := testNewLedgerFromCatchpoint(t, ml.trackerDB().Rdb, catchpointFilePath) + l := testNewLedgerFromCatchpoint(t, ml.trackerDB(), catchpointFilePath) defer l.Close() // verify that the account data aligns with what we 
originally stored : @@ -463,65 +459,68 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { // now manually construct the MT and ensure the reading makeOrderedAccountsIter works as expected: // no errors on read, hashes match ctx := context.Background() - tx, err := l.trackerDBs.Wdb.Handle.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) - require.NoError(t, err) - defer tx.Rollback() - - arw := store.NewAccountsSQLReaderWriter(tx) - - // save the existing hash - committer, err := store.MakeMerkleCommitter(tx, false) - require.NoError(t, err) - trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) - require.NoError(t, err) + // tx, err := l.trackerDBs.Wdb.Handle.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) + err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx *sql.Tx) (err error) { + arw := store.NewAccountsSQLReaderWriter(tx) - h1, err := trie.RootHash() - require.NoError(t, err) - require.NotEmpty(t, h1) + // save the existing hash + committer, err := store.MakeMerkleCommitter(tx, false) + require.NoError(t, err) + trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + require.NoError(t, err) - // reset hashes - err = arw.ResetAccountHashes(ctx) - require.NoError(t, err) + h1, err := trie.RootHash() + require.NoError(t, err) + require.NotEmpty(t, h1) - // rebuild the MT - committer, err = store.MakeMerkleCommitter(tx, false) - require.NoError(t, err) - trie, err = merkletrie.MakeTrie(committer, store.TrieMemoryConfig) - require.NoError(t, err) + // reset hashes + err = arw.ResetAccountHashes(ctx) + require.NoError(t, err) - h, err := trie.RootHash() - require.NoError(t, err) - require.Zero(t, h) + // rebuild the MT + committer, err = store.MakeMerkleCommitter(tx, false) + require.NoError(t, err) + trie, err = merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + require.NoError(t, err) - iter := store.MakeOrderedAccountsIter(tx, trieRebuildAccountChunkSize) - defer iter.Close(ctx) - for { - accts, _, err := iter.Next(ctx) - if err == sql.ErrNoRows { - // the account builder would return sql.ErrNoRows when no more data is available. - err = nil - break - } else if err != nil { - require.NoError(t, err) - } + h, err := trie.RootHash() + require.NoError(t, err) + require.Zero(t, h) - if len(accts) > 0 { - for _, acct := range accts { - added, err := trie.Add(acct.Digest) + iter := store.MakeOrderedAccountsIter(tx, trieRebuildAccountChunkSize) + defer iter.Close(ctx) + for { + accts, _, err := iter.Next(ctx) + if err == sql.ErrNoRows { + // the account builder would return sql.ErrNoRows when no more data is available. + err = nil + break + } else if err != nil { require.NoError(t, err) - require.True(t, added) + } + + if len(accts) > 0 { + for _, acct := range accts { + added, err := trie.Add(acct.Digest) + require.NoError(t, err) + require.True(t, added) + } } } - } - require.NoError(t, err) - h2, err := trie.RootHash() - require.NoError(t, err) - require.NotEmpty(t, h2) - require.Equal(t, h1, h2) + require.NoError(t, err) + h2, err := trie.RootHash() + require.NoError(t, err) + require.NotEmpty(t, h2) + + require.Equal(t, h1, h2) + + return nil + }) + require.NoError(t, err) } -func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess db.Accessor, filepath string) *Ledger { +func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess store.TrackerStore, filepath string) *Ledger { // create a ledger. 
var initState ledgercore.InitState initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion @@ -573,16 +572,16 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess db.Acc err = accessor.BuildMerkleTrie(context.Background(), nil) require.NoError(t, err) - err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) error { crw := store.NewCatchpointSQLReaderWriter(tx) err := crw.ApplyCatchpointStagingBalances(ctx, 0, 0) return err }) require.NoError(t, err) - balanceTrieStats := func(db db.Accessor) merkletrie.Stats { + balanceTrieStats := func(db store.TrackerStore) merkletrie.Stats { var stats merkletrie.Stats - err = db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = db.Transaction(func(ctx context.Context, tx *sql.Tx) (err error) { committer, err := store.MakeMerkleCommitter(tx, false) if err != nil { return err @@ -606,7 +605,7 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess db.Acc // Skip invariant check for tests using mocks that do _not_ update // balancesTrie by checking for zero value stats. if ws != (merkletrie.Stats{}) { - require.Equal(t, ws, balanceTrieStats(l.trackerDBs.Rdb), "Invariant broken - Catchpoint writer and reader merkle tries should _always_ agree") + require.Equal(t, ws, balanceTrieStats(l.trackerDBs), "Invariant broken - Catchpoint writer and reader merkle tries should _always_ agree") } return l @@ -640,9 +639,9 @@ func TestFullCatchpointWriter(t *testing.T) { catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint") - testWriteCatchpoint(t, ml.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0) + testWriteCatchpoint(t, ml.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - l := testNewLedgerFromCatchpoint(t, ml.trackerDB().Rdb, catchpointFilePath) + l := testNewLedgerFromCatchpoint(t, ml.trackerDB(), catchpointFilePath) defer l.Close() // verify that the account data aligns with what we originally stored : for addr, acct := range accts { @@ -688,10 +687,10 @@ func TestExactAccountChunk(t *testing.T) { catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data") catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") - cph := testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0) + cph := testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) require.EqualValues(t, cph.TotalChunks, 1) - l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB().Rdb, catchpointFilePath) + l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() } @@ -739,10 +738,10 @@ func TestCatchpointAfterTxns(t *testing.T) { catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data") catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") - cph := testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0) + cph := testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) require.EqualValues(t, 2, cph.TotalChunks) - l := testNewLedgerFromCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointFilePath) + l := testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), catchpointFilePath) defer l.Close() values, err := l.LookupKeysByPrefix(l.Latest(), "bx:", 10) 
require.NoError(t, err) @@ -755,12 +754,12 @@ func TestCatchpointAfterTxns(t *testing.T) { dl.fullBlock(&newacctpay) // Write and read back in, and ensure even the last effect exists. - cph = testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0) + cph = testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) require.EqualValues(t, cph.TotalChunks, 2) // Still only 2 chunks, as last was in a recent block // Drive home the point that `last` is _not_ included in the catchpoint by inspecting balance read from catchpoint. { - l = testNewLedgerFromCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointFilePath) + l = testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), catchpointFilePath) defer l.Close() _, _, algos, err := l.LookupLatest(last) require.NoError(t, err) @@ -771,10 +770,10 @@ func TestCatchpointAfterTxns(t *testing.T) { dl.fullBlock(pay.Noted(strconv.Itoa(i))) } - cph = testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0) + cph = testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) require.EqualValues(t, cph.TotalChunks, 3) - l = testNewLedgerFromCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointFilePath) + l = testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), catchpointFilePath) defer l.Close() values, err = l.LookupKeysByPrefix(l.Latest(), "bx:", 10) require.NoError(t, err) @@ -860,10 +859,10 @@ func TestCatchpointAfterBoxTxns(t *testing.T) { catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data") catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") - cph := testWriteCatchpoint(t, dl.generator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0) + cph := testWriteCatchpoint(t, dl.generator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) require.EqualValues(t, 2, cph.TotalChunks) - l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB().Rdb, catchpointFilePath) + l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() values, err := l.LookupKeysByPrefix(l.Latest(), "bx:", 10) diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 41e14a167e..67d83f1f94 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -103,18 +103,18 @@ type stagingWriter interface { } type stagingWriterImpl struct { - wdb db.Accessor + wdb store.TrackerStore } func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) return crw.WriteCatchpointStagingBalances(ctx, balances) }) } func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecordV6) error { - return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) keys := make([][]byte, len(kvrs)) @@ -131,14 +131,14 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor } func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + return w.wdb.Transaction(func(ctx context.Context, 
tx *sql.Tx) error { crw := store.NewCatchpointSQLReaderWriter(tx) return crw.WriteCatchpointStagingCreatable(ctx, balances) }) } func (w *stagingWriterImpl) writeHashes(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) error { crw := store.NewCatchpointSQLReaderWriter(tx) err := crw.WriteCatchpointStagingHashes(ctx, balances) return err @@ -152,7 +152,7 @@ func (w *stagingWriterImpl) isShared() bool { // catchpointCatchupAccessorImpl is the concrete implementation of the CatchpointCatchupAccessor interface type catchpointCatchupAccessorImpl struct { ledger *Ledger - catchpointStore catchpointStore + catchpointStore store.CatchpointReaderWriter stagingWriter stagingWriter @@ -204,10 +204,11 @@ type CatchupAccessorClientLedger interface { // MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor { + crw, _ := ledger.trackerDB().CreateCatchpointReaderWriter() return &catchpointCatchupAccessorImpl{ ledger: ledger, - catchpointStore: store.NewCatchpointSQLReaderWriter(ledger.trackerDB().Wdb.Handle), - stagingWriter: &stagingWriterImpl{wdb: ledger.trackerDB().Wdb}, + catchpointStore: crw, + stagingWriter: &stagingWriterImpl{wdb: ledger.trackerDB()}, log: log, } } @@ -260,13 +261,12 @@ func (c *catchpointCatchupAccessorImpl) SetLabel(ctx context.Context, label stri // ResetStagingBalances resets the current staging balances, preparing for a new set of balances to be added func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context, newCatchup bool) (err error) { - wdb := c.ledger.trackerDB().Wdb if !newCatchup { c.ledger.setSynchronousMode(ctx, c.ledger.synchronousMode) } start := time.Now() ledgerResetstagingbalancesCount.Inc(nil) - err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) err = crw.ResetCatchpointStagingBalances(ctx, newCatchup) if err != nil { @@ -353,10 +353,9 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex // the following fields are now going to be ignored. 
We could add these to the database and validate these // later on: // TotalAccounts, TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound - wdb := c.ledger.trackerDB().Wdb start := time.Now() ledgerProcessstagingcontentCount.Inc(nil) - err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) arw := store.NewAccountsSQLReaderWriter(tx) @@ -642,9 +641,8 @@ func countHashes(hashes [][]byte) (accountCount, kvCount uint64) { // BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error) { - wdb := c.ledger.trackerDB().Wdb - rdb := c.ledger.trackerDB().Rdb - err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + trackerdb := c.ledger.trackerDB() + err = trackerdb.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) // creating the index can take a while, so ensure we don't generate false alerts for no good reason. db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(120*time.Second)) @@ -667,7 +665,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro defer wg.Done() defer close(writerQueue) - err := rdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err := trackerdb.Snapshot(func(transactionCtx context.Context, tx *sql.Tx) (err error) { it := store.MakeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize, tx) var hashes [][]byte for { @@ -705,7 +703,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro accountHashesWritten, kvHashesWritten := uint64(0), uint64(0) var mc *store.MerkleCommitter - err := wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err := trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // create the merkle trie for the balances mc, err = store.MakeMerkleCommitter(tx, true) if err != nil { @@ -734,7 +732,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro continue } - err = rdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err = trackerdb.Snapshot(func(transactionCtx context.Context, tx *sql.Tx) (err error) { mc, err = store.MakeMerkleCommitter(tx, true) if err != nil { return @@ -764,7 +762,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro } if uncommitedHashesCount >= trieRebuildCommitFrequency { - err = wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err = trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // set a long 30-second window for the evict before warning is generated. db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second)) mc, err = store.MakeMerkleCommitter(tx, true) @@ -794,7 +792,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro return } if uncommitedHashesCount > 0 { - err = wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err = trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // set a long 30-second window for the evict before warning is generated. 
db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second)) mc, err = store.MakeMerkleCommitter(tx, true) @@ -835,7 +833,6 @@ func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) { - rdb := c.ledger.trackerDB().Rdb var balancesHash crypto.Digest var blockRound basics.Round var totals ledgercore.AccountTotals @@ -855,7 +852,7 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl start := time.Now() ledgerVerifycatchpointCount.Inc(nil) - err = rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = c.ledger.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) // create the merkle trie for the balances mc, err0 := store.MakeMerkleCommitter(tx, true) @@ -905,10 +902,9 @@ func (c *catchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, catchpointLookback = config.Consensus[blk.CurrentProtocol].MaxBalLookback } balancesRound := blk.Round() - basics.Round(catchpointLookback) - wdb := c.ledger.trackerDB().Wdb start := time.Now() ledgerStorebalancesroundCount.Inc(nil) - err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBalancesRound, uint64(balancesRound)) if err != nil { @@ -1002,10 +998,9 @@ func (c *catchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (er // finishBalances concludes the catchup of the balances(tracker) database. func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err error) { - wdb := c.ledger.trackerDB().Wdb start := time.Now() ledgerCatchpointFinishBalsCount.Inc(nil) - err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { crw := store.NewCatchpointSQLReaderWriter(tx) arw := store.NewAccountsSQLReaderWriter(tx) diff --git a/ledger/ledger.go b/ledger/ledger.go index cd023e5023..bf3e6f0a74 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -35,6 +35,7 @@ import ( "github.com/algorand/go-algorand/ledger/apply" "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/ledger/store/blockdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -48,7 +49,7 @@ type Ledger struct { // Database connections to the DBs storing blocks and tracker state. // We use potentially different databases to avoid SQLite contention // during catchup. 
- trackerDBs db.Pair + trackerDBs store.TrackerStore blockDBs db.Pair // blockQ is the buffer of added blocks that will be flushed to @@ -138,8 +139,7 @@ func OpenLedger( err = fmt.Errorf("OpenLedger.openLedgerDB %v", err) return nil, err } - l.trackerDBs.Rdb.SetLogger(log) - l.trackerDBs.Wdb.SetLogger(log) + l.trackerDBs.SetLogger(log) l.blockDBs.Rdb.SetLogger(log) l.blockDBs.Wdb.SetLogger(log) @@ -273,7 +273,7 @@ func (l *Ledger) verifyMatchingGenesisHash() (err error) { return } -func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs db.Pair, blockDBs db.Pair, err error) { +func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs store.TrackerStore, blockDBs db.Pair, err error) { // Backwards compatibility: we used to store both blocks and tracker // state in a single SQLite db file. var trackerDBFilename string @@ -297,7 +297,7 @@ func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs db.Pair, blockDBs outErr := make(chan error, 2) go func() { var lerr error - trackerDBs, lerr = db.OpenPair(trackerDBFilename, dbMem) + trackerDBs, lerr = store.OpenTrackerSQLStore(trackerDBFilename, dbMem) outErr <- lerr }() @@ -328,7 +328,7 @@ func (l *Ledger) setSynchronousMode(ctx context.Context, synchronousMode db.Sync return } - err = l.trackerDBs.Wdb.SetSynchronousMode(ctx, synchronousMode, synchronousMode >= db.SynchronousModeFull) + err = l.trackerDBs.SetSynchronousMode(ctx, synchronousMode, synchronousMode >= db.SynchronousModeFull) if err != nil { l.log.Warnf("ledger.setSynchronousMode unable to set synchronous mode on trackers db: %v", err) return @@ -765,7 +765,7 @@ func (l *Ledger) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) } // ledgerForTracker methods -func (l *Ledger) trackerDB() db.Pair { +func (l *Ledger) trackerDB() store.TrackerStore { return l.trackerDBs } diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index b9a1c64037..6a2ca25e65 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -2279,7 +2279,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { // reset tables and re-init again, similarly to the catchpoint apply code // since the ledger has only genesis accounts, this recreates them - err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) error { arw := store.NewAccountsSQLReaderWriter(tx) err0 := arw.AccountsReset(ctx) if err0 != nil { @@ -2335,7 +2335,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { // drop new tables // reloadLedger should migrate db properly - err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) error { var resetExprs = []string{ `DROP TABLE IF EXISTS onlineaccounts`, `DROP TABLE IF EXISTS txtail`, @@ -2458,7 +2458,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) { blockDB.Close() }() // create tables so online accounts can still be written - err = trackerDB.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = trackerDB.Batch(func(ctx context.Context, tx *sql.Tx) error { if err := store.AccountsUpdateSchemaTest(ctx, tx); err != nil { return err } @@ -2635,7 +2635,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) { cfg.MaxAcctLookback = shorterLookback store.AccountDBVersion = 7 // delete tables since we want to check they can be made from other data - err = trackerDB.Wdb.Atomic(func(ctx context.Context, tx
*sql.Tx) error { if _, err := tx.ExecContext(ctx, "DROP TABLE IF EXISTS onlineaccounts"); err != nil { return err } diff --git a/ledger/store/interface.go b/ledger/store/interface.go index d2f8fc4e00..3a4d7a7e3a 100644 --- a/ledger/store/interface.go +++ b/ledger/store/interface.go @@ -90,6 +90,7 @@ type CatchpointWriter interface { InsertUnfinishedCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest) error DeleteUnfinishedCatchpoint(ctx context.Context, round basics.Round) error DeleteOldCatchpointFirstStageInfo(ctx context.Context, maxRoundToDelete basics.Round) error + InsertOrReplaceCatchpointFirstStageInfo(ctx context.Context, round basics.Round, info *CatchpointFirstStageInfo) error DeleteStoredCatchpoints(ctx context.Context, dbDirectory string) (err error) } @@ -107,3 +108,9 @@ type CatchpointReader interface { SelectCatchpointFirstStageInfo(ctx context.Context, round basics.Round) (CatchpointFirstStageInfo, bool /*exists*/, error) SelectOldCatchpointFirstStageInfoRounds(ctx context.Context, maxRound basics.Round) ([]basics.Round, error) } + +// CatchpointReaderWriter is CatchpointReader+CatchpointWriter +type CatchpointReaderWriter interface { + CatchpointReader + CatchpointWriter +} diff --git a/ledger/store/store.go b/ledger/store/store.go new file mode 100644 index 0000000000..071c29a246 --- /dev/null +++ b/ledger/store/store.go @@ -0,0 +1,146 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package store + +import ( + "context" + "database/sql" + + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/util/db" +) + +type trackerSQLStore struct { + // expose the internals for now so we can slowly change the code depending on them + pair db.Pair +} + +// TODO: maintain a SQL tx for now +type batchFn func(ctx context.Context, tx *sql.Tx) error + +// TODO: maintain a SQL tx for now +type snapshotFn func(ctx context.Context, tx *sql.Tx) error + +// TODO: maintain a SQL tx for now +type transactionFn func(ctx context.Context, tx *sql.Tx) error + +// TrackerStore is the interface for the tracker db. 
+type TrackerStore interface { + SetLogger(log logging.Logger) + SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error) + IsSharedCacheConnection() bool + + Batch(fn batchFn) (err error) + BatchContext(ctx context.Context, fn batchFn) (err error) + + Snapshot(fn snapshotFn) (err error) + SnapshotContext(ctx context.Context, fn snapshotFn) (err error) + + Transaction(fn transactionFn) (err error) + TransactionContext(ctx context.Context, fn transactionFn) (err error) + + CreateAccountsReader() (AccountsReader, error) + CreateOnlineAccountsReader() (OnlineAccountsReader, error) + + CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) + + Vacuum(ctx context.Context) (stats db.VacuumStats, err error) + Close() +} + +// OpenTrackerSQLStore opens the sqlite database store +func OpenTrackerSQLStore(dbFilename string, dbMem bool) (store *trackerSQLStore, err error) { + db, err := db.OpenPair(dbFilename, dbMem) + if err != nil { + return + } + + return &trackerSQLStore{db}, nil +} + +// CreateTrackerSQLStore creates a tracker SQL db from sql db handle. +func CreateTrackerSQLStore(pair db.Pair) *trackerSQLStore { + return &trackerSQLStore{pair} +} + +// SetLogger sets the Logger, mainly for unit test quietness +func (s *trackerSQLStore) SetLogger(log logging.Logger) { + s.pair.Rdb.SetLogger(log) + s.pair.Wdb.SetLogger(log) +} + +func (s *trackerSQLStore) SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error) { + return s.pair.Wdb.SetSynchronousMode(ctx, mode, fullfsync) +} + +func (s *trackerSQLStore) IsSharedCacheConnection() bool { + return s.pair.Wdb.IsSharedCacheConnection() +} + +func (s *trackerSQLStore) Batch(fn batchFn) (err error) { + return s.BatchContext(context.Background(), fn) +} + +func (s *trackerSQLStore) BatchContext(ctx context.Context, fn batchFn) (err error) { + return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { + return fn(ctx, tx) + }) +} + +func (s *trackerSQLStore) Snapshot(fn snapshotFn) (err error) { + return s.SnapshotContext(context.Background(), fn) +} + +func (s *trackerSQLStore) SnapshotContext(ctx context.Context, fn snapshotFn) (err error) { + return s.pair.Rdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { + return fn(ctx, tx) + }) +} + +func (s *trackerSQLStore) Transaction(fn transactionFn) (err error) { + return s.TransactionContext(context.Background(), fn) +} + +func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn transactionFn) (err error) { + return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { + return fn(ctx, tx) + }) +} + +func (s *trackerSQLStore) CreateAccountsReader() (AccountsReader, error) { + return AccountsInitDbQueries(s.pair.Rdb.Handle) +} + +func (s *trackerSQLStore) CreateOnlineAccountsReader() (OnlineAccountsReader, error) { + return OnlineAccountsInitDbQueries(s.pair.Rdb.Handle) +} + +func (s *trackerSQLStore) CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) { + w := NewCatchpointSQLReaderWriter(s.pair.Wdb.Handle) + return w, nil +} + +// TODO: rename: this is a sqlite specific name, this could also be used to trigger compact on KV stores.
+// it seems to only be used during a v2 migration +func (s *trackerSQLStore) Vacuum(ctx context.Context) (stats db.VacuumStats, err error) { + stats, err = s.pair.Wdb.Vacuum(ctx) + return +} + +func (s *trackerSQLStore) Close() { + s.pair.Close() +} diff --git a/ledger/store/testing.go b/ledger/store/testing.go index 0e426a28c3..babeca5094 100644 --- a/ledger/store/testing.go +++ b/ledger/store/testing.go @@ -19,15 +19,28 @@ package store import ( "context" "database/sql" + "fmt" + "strings" "testing" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" "github.com/stretchr/testify/require" ) +// DbOpenTrackerTest opens a sqlite db file for testing purposes. +func DbOpenTrackerTest(t testing.TB, inMemory bool) (TrackerStore, string) { + fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64()) + + dbs, err := db.OpenPair(fn, inMemory) + require.NoErrorf(t, err, "Filename : %s\nInMemory: %v", fn, inMemory) + + return &trackerSQLStore{dbs}, fn +} + // AccountsInitLightTest initializes an empty database for testing without the extra methods being called. func AccountsInitLightTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { newDB, err := accountsInit(tx, initAccounts, proto) diff --git a/ledger/tracker.go b/ledger/tracker.go index b87c6fbe3f..d43faec3f5 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -134,7 +134,7 @@ type ledgerTracker interface { // ledgerForTracker defines the part of the ledger that a tracker can // access. This is particularly useful for testing trackers in isolation. type ledgerForTracker interface { - trackerDB() db.Pair + trackerDB() store.TrackerStore blockDB() db.Pair trackerLog() logging.Logger trackerEvalVerified(bookkeeping.Block, internal.LedgerForEvaluator) (ledgercore.StateDelta, error) @@ -174,7 +174,7 @@ type trackerRegistry struct { // cached to avoid SQL queries. dbRound basics.Round - dbs db.Pair + dbs store.TrackerStore log logging.Logger // the synchronous mode that would be used for the account database.
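For orientation, a minimal, hypothetical usage sketch of the TrackerStore introduced above (illustrative, not part of the patch): reads run through Snapshot against the read connection, writes through Batch against the write connection. The database filename and variable names here are invented, and the snippet assumes a tracker database whose accounts schema already exists.

	package main

	import (
		"context"
		"database/sql"

		"github.com/algorand/go-algorand/ledger/store"
	)

	func main() {
		// Open a tracker store backed by a read/write sqlite pair.
		tdb, err := store.OpenTrackerSQLStore("tracker.sqlite", false)
		if err != nil {
			panic(err)
		}
		defer tdb.Close()

		// Read-only work goes through Snapshot (the Rdb connection);
		// writes would go through Batch (the Wdb connection) instead.
		err = tdb.Snapshot(func(ctx context.Context, tx *sql.Tx) error {
			arw := store.NewAccountsSQLReaderWriter(tx)
			rnd, err := arw.AccountsRound() // latest round persisted by the trackers
			_ = rnd
			return err
		})
		if err != nil {
			panic(err)
		}
	}

The point of the indirection is that ledger code stops naming Rdb/Wdb directly; the callbacks still receive a raw *sql.Tx at this stage, which is what a later patch in this series replaces with scoped interfaces.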
@@ -279,7 +279,7 @@ func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTrack tr.dbs = l.trackerDB() tr.log = l.trackerLog() - err = tr.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = tr.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) tr.dbRound, err = arw.AccountsRound() return err @@ -510,7 +510,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { start := time.Now() ledgerCommitroundCount.Inc(nil) - err := tr.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := tr.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) for _, lt := range tr.trackers { err0 := lt.commitRound(ctx, tx, dcc) @@ -631,7 +631,7 @@ func (tr *trackerRegistry) replay(l ledgerForTracker) (err error) { defer func() { if rollbackSynchronousMode { // restore default synchronous mode - err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.synchronousMode, tr.synchronousMode >= db.SynchronousModeFull) + err0 := tr.dbs.SetSynchronousMode(context.Background(), tr.synchronousMode, tr.synchronousMode >= db.SynchronousModeFull) // override the returned error only in case there is no error - since this // operation has a lower criticality. if err == nil { @@ -662,7 +662,7 @@ func (tr *trackerRegistry) replay(l ledgerForTracker) (err error) { if !rollbackSynchronousMode { // switch to rebuild synchronous mode to improve performance - err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.accountsRebuildSynchronousMode, tr.accountsRebuildSynchronousMode >= db.SynchronousModeFull) + err0 := tr.dbs.SetSynchronousMode(context.Background(), tr.accountsRebuildSynchronousMode, tr.accountsRebuildSynchronousMode >= db.SynchronousModeFull) if err0 != nil { tr.log.Warnf("trackerRegistry.replay was unable to switch to rebuild synchronous mode : %v", err0) } else { diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go index 17c6872d95..e65b7ba2cb 100644 --- a/ledger/trackerdb.go +++ b/ledger/trackerdb.go @@ -39,7 +39,7 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi return } - err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err = dbs.Batch(func(ctx context.Context, tx *sql.Tx) error { arw := store.NewAccountsSQLReaderWriter(tx) tp := store.TrackerDBParams{ diff --git a/ledger/txtail.go b/ledger/txtail.go index 879f0ee7d3..4fc1ea63d4 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -91,14 +91,13 @@ type txTail struct { } func (t *txTail) loadFromDisk(l ledgerForTracker, dbRound basics.Round) error { - rdb := l.trackerDB().Rdb t.log = l.trackerLog() var roundData []*store.TxTailRound var roundTailHashes []crypto.Digest var baseRound basics.Round if dbRound > 0 { - err := rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err := l.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) roundData, roundTailHashes, baseRound, err = arw.LoadTxTail(ctx, dbRound) return diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index fce531aa9f..f794699a1e 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -18,6 +18,7 @@ package ledger import ( "context" + "database/sql" "errors" "fmt" "testing" @@ -150,32 +151,34 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse // create a corresponding blockdb.
inMemory := true t.blockDBs, _ = storetesting.DbOpenTest(ts, inMemory) - t.trackerDBs, _ = storetesting.DbOpenTest(ts, inMemory) + t.trackerDBs, _ = store.DbOpenTrackerTest(ts, inMemory) t.protoVersion = protoVersion - tx, err := t.trackerDBs.Wdb.Handle.Begin() - require.NoError(ts, err) - - arw := store.NewAccountsSQLReaderWriter(tx) - - accts := ledgertesting.RandomAccounts(20, true) - proto := config.Consensus[protoVersion] - newDB := store.AccountsInitTest(ts, tx, accts, protoVersion) - require.True(ts, newDB) - - roundData := make([][]byte, 0, proto.MaxTxnLife) - startRound := t.Latest() - basics.Round(proto.MaxTxnLife) + 1 - for i := startRound; i <= t.Latest(); i++ { - blk, err := t.Block(i) - require.NoError(ts, err) - tail, err := store.TxTailRoundFromBlock(blk) + err := t.trackerDBs.Transaction(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + arw := store.NewAccountsSQLReaderWriter(tx) + + accts := ledgertesting.RandomAccounts(20, true) + proto := config.Consensus[protoVersion] + newDB := store.AccountsInitTest(ts, tx, accts, protoVersion) + require.True(ts, newDB) + + roundData := make([][]byte, 0, proto.MaxTxnLife) + startRound := t.Latest() - basics.Round(proto.MaxTxnLife) + 1 + for i := startRound; i <= t.Latest(); i++ { + blk, err := t.Block(i) + require.NoError(ts, err) + tail, err := store.TxTailRoundFromBlock(blk) + require.NoError(ts, err) + encoded, _ := tail.Encode() + roundData = append(roundData, encoded) + } + err = arw.TxtailNewRound(context.Background(), startRound, roundData, 0) require.NoError(ts, err) - encoded, _ := tail.Encode() - roundData = append(roundData, encoded) - } - err = arw.TxtailNewRound(context.Background(), startRound, roundData, 0) + + return nil + }) require.NoError(ts, err) - tx.Commit() + return nil } @@ -296,12 +299,13 @@ func TestTxTailDeltaTracking(t *testing.T) { err = txtail.prepareCommit(dcc) require.NoError(t, err) - tx, err := ledger.trackerDBs.Wdb.Handle.Begin() + err := ledger.trackerDBs.Transaction(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err = txtail.commitRound(context.Background(), tx, dcc) + require.NoError(t, err) + return nil + }) require.NoError(t, err) - err = txtail.commitRound(context.Background(), tx, dcc) - require.NoError(t, err) - tx.Commit() proto := config.Consensus[protoVersion] retainSize := proto.MaxTxnLife + proto.DeeperBlockHeaderHistory if uint64(i) > proto.MaxTxnLife*2 { From 23748597a57c969153cc93b3dd06685c2fdd7671 Mon Sep 17 00:00:00 2001 From: Ignacio Corderi Date: Mon, 30 Jan 2023 17:14:48 -0300 Subject: [PATCH 12/81] ledger: remove sql.Tx from the transaction callback (#5031) --- ledger/catchpointtracker.go | 2 +- ledger/catchpointwriter_test.go | 19 ++++++++++-------- ledger/catchupaccessor.go | 35 ++++++++++++++++++++++----------- ledger/store/interface.go | 19 ++++++++++++++++++ ledger/store/merkle_commiter.go | 18 ++++++++++------- ledger/store/store.go | 34 +++++++++++++++++++++++++++----- ledger/txtail_test.go | 4 ++-- 7 files changed, 97 insertions(+), 34 deletions(-) diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 220c6af6be..ef205d9e5f 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -498,7 +498,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d arw := store.NewAccountsSQLReaderWriter(tx) if ct.catchpointEnabled() { - var mc *store.MerkleCommitter + var mc store.MerkleCommitter mc, err = store.MakeMerkleCommitter(tx, false) if err != nil { return diff --git 
a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go index 5bba123e84..68d5854380 100644 --- a/ledger/catchpointwriter_test.go +++ b/ledger/catchpointwriter_test.go @@ -459,12 +459,15 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { // now manually construct the MT and ensure the reading makeOrderedAccountsIter works as expected: // no errors on read, hashes match ctx := context.Background() - // tx, err := l.trackerDBs.Wdb.Handle.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) - err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + + err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.CreateAccountsReaderWriter() + if err != nil { + return err + } // save the existing hash - committer, err := store.MakeMerkleCommitter(tx, false) + committer, err := tx.CreateMerkleCommitter(false) require.NoError(t, err) trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) require.NoError(t, err) @@ -478,7 +481,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.NoError(t, err) // rebuild the MT - committer, err = store.MakeMerkleCommitter(tx, false) + committer, err = tx.CreateMerkleCommitter(false) require.NoError(t, err) trie, err = merkletrie.MakeTrie(committer, store.TrieMemoryConfig) require.NoError(t, err) @@ -487,7 +490,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.NoError(t, err) require.Zero(t, h) - iter := store.MakeOrderedAccountsIter(tx, trieRebuildAccountChunkSize) + iter := tx.CreateOrderedAccountsIter(trieRebuildAccountChunkSize) defer iter.Close(ctx) for { accts, _, err := iter.Next(ctx) @@ -581,8 +584,8 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess store.
balanceTrieStats := func(db store.TrackerStore) merkletrie.Stats { var stats merkletrie.Stats - err = db.Transaction(func(ctx context.Context, tx *sql.Tx) (err error) { - committer, err := store.MakeMerkleCommitter(tx, false) + err = db.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + committer, err := tx.CreateMerkleCommitter(false) if err != nil { return err } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 67d83f1f94..88c57d392b 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -107,15 +107,21 @@ type stagingWriterImpl struct { } func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) + return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + crw, err := tx.CreateCatchpointReaderWriter() + if err != nil { + return err + } return crw.WriteCatchpointStagingBalances(ctx, balances) }) } func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecordV6) error { - return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) + return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + crw, err := tx.CreateCatchpointReaderWriter() + if err != nil { + return err + } keys := make([][]byte, len(kvrs)) values := make([][]byte, len(kvrs)) @@ -131,17 +137,24 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor } func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) error { - crw := store.NewCatchpointSQLReaderWriter(tx) + return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + crw, err := tx.CreateCatchpointReaderWriter() + if err != nil { + return err + } + return crw.WriteCatchpointStagingCreatable(ctx, balances) }) } func (w *stagingWriterImpl) writeHashes(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Transaction(func(ctx context.Context, tx *sql.Tx) error { - crw := store.NewCatchpointSQLReaderWriter(tx) - err := crw.WriteCatchpointStagingHashes(ctx, balances) - return err + return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + crw, err := tx.CreateCatchpointReaderWriter() + if err != nil { + return err + } + + return crw.WriteCatchpointStagingHashes(ctx, balances) }) } @@ -701,7 +714,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro uncommitedHashesCount := 0 keepWriting := true accountHashesWritten, kvHashesWritten := uint64(0), uint64(0) - var mc *store.MerkleCommitter + var mc store.MerkleCommitter err := trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // create the merkle trie for the balances diff --git a/ledger/store/interface.go b/ledger/store/interface.go index 3a4d7a7e3a..98a46e485a 100644 --- a/ledger/store/interface.go +++ b/ledger/store/interface.go @@ -43,6 +43,13 @@ type AccountsWriter interface { Close() } +// AccountsWriterExt is the write interface used inside transactions and batch operations. 
+type AccountsWriterExt interface { + ResetAccountHashes(ctx context.Context) (err error) + TxtailNewRound(ctx context.Context, baseRound basics.Round, roundData [][]byte, forgetBeforeRound basics.Round) error + UpdateAccountsRound(rnd basics.Round) (err error) +} + // AccountsReader is the read interface for: // - accounts, resources, app kvs, creatables type AccountsReader interface { @@ -61,6 +68,13 @@ type AccountsReader interface { Close() } +// AccountsReaderWriter is AccountsReader+AccountsWriter +type AccountsReaderWriter interface { + // AccountsReader + // AccountsWriter + AccountsWriterExt +} + // OnlineAccountsWriter is the write interface for: // - online accounts type OnlineAccountsWriter interface { @@ -87,6 +101,11 @@ type CatchpointWriter interface { WriteCatchpointStateUint64(ctx context.Context, stateName CatchpointState, setValue uint64) (err error) WriteCatchpointStateString(ctx context.Context, stateName CatchpointState, setValue string) (err error) + WriteCatchpointStagingBalances(ctx context.Context, bals []NormalizedAccountBalance) error + WriteCatchpointStagingKVs(ctx context.Context, keys [][]byte, values [][]byte, hashes [][]byte) error + WriteCatchpointStagingCreatable(ctx context.Context, bals []NormalizedAccountBalance) error + WriteCatchpointStagingHashes(ctx context.Context, bals []NormalizedAccountBalance) error + InsertUnfinishedCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest) error DeleteUnfinishedCatchpoint(ctx context.Context, round basics.Round) error DeleteOldCatchpointFirstStageInfo(ctx context.Context, maxRoundToDelete basics.Round) error diff --git a/ledger/store/merkle_commiter.go b/ledger/store/merkle_commiter.go index bc7502dac6..f908c24d02 100644 --- a/ledger/store/merkle_commiter.go +++ b/ledger/store/merkle_commiter.go @@ -18,20 +18,24 @@ package store import "database/sql" -// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database. -// //msgp:ignore MerkleCommitter -type MerkleCommitter struct { +type merkleCommitter struct { tx *sql.Tx deleteStmt *sql.Stmt insertStmt *sql.Stmt selectStmt *sql.Stmt } +// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database. +type MerkleCommitter interface { + StorePage(page uint64, content []byte) error + LoadPage(page uint64) (content []byte, err error) +} + // MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading // merkletrie pages from a sqlite database. -func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *MerkleCommitter, err error) { - mc = &MerkleCommitter{tx: tx} +func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err error) { + mc = &merkleCommitter{tx: tx} accountHashesTable := "accounthashes" if staging { accountHashesTable = "catchpointaccounthashes" @@ -52,7 +56,7 @@ func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *MerkleCommitter, err err } // StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqlite database table. -func (mc *MerkleCommitter) StorePage(page uint64, content []byte) error { +func (mc *merkleCommitter) StorePage(page uint64, content []byte) error { if len(content) == 0 { _, err := mc.deleteStmt.Exec(page) return err @@ -62,7 +66,7 @@ func (mc *MerkleCommitter) StorePage(page uint64, content []byte) error { } // LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqlite database table. 
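Since MerkleCommitter is now an interface rather than the concrete sqlite-backed struct, the merkle trie can in principle be driven by any page store. A hypothetical in-memory implementation, sketched here for illustration only (the type name is invented; it mirrors the delete-on-empty and nil-on-missing semantics of the sqlite committer shown in this diff):

	// memCommitter is a hypothetical in-memory page store satisfying store.MerkleCommitter.
	type memCommitter struct {
		pages map[uint64][]byte
	}

	func (m *memCommitter) StorePage(page uint64, content []byte) error {
		if len(content) == 0 {
			delete(m.pages, page) // empty content deletes the page, matching the sqlite implementation
			return nil
		}
		m.pages[page] = append([]byte(nil), content...) // copy so the trie can reuse its buffer
		return nil
	}

	func (m *memCommitter) LoadPage(page uint64) (content []byte, err error) {
		return m.pages[page], nil // a nil slice signals a missing page, like the sql.ErrNoRows path below
	}
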
-func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) { +func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) { err = mc.selectStmt.QueryRow(page).Scan(&content) if err == sql.ErrNoRows { content = nil diff --git a/ledger/store/store.go b/ledger/store/store.go index 071c29a246..e3e09345ae 100644 --- a/ledger/store/store.go +++ b/ledger/store/store.go @@ -29,14 +29,22 @@ type trackerSQLStore struct { pair db.Pair } -// TODO: maintain a SQL tx for now type batchFn func(ctx context.Context, tx *sql.Tx) error -// TODO: maintain a SQL tx for now type snapshotFn func(ctx context.Context, tx *sql.Tx) error -// TODO: maintain a SQL tx for now -type transactionFn func(ctx context.Context, tx *sql.Tx) error +type transactionFn func(ctx context.Context, tx TransactionScope) error + +// TransactionScope read/write scope to the store. +type TransactionScope interface { + CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) + CreateAccountsReaderWriter() (AccountsReaderWriter, error) + CreateMerkleCommitter(staging bool) (MerkleCommitter, error) + CreateOrderedAccountsIter(accountCount int) *orderedAccountsIter +} +type sqlTransactionScope struct { + tx *sql.Tx +} // TrackerStore is the interface for the tracker db. type TrackerStore interface { @@ -117,7 +125,7 @@ func (s *trackerSQLStore) Transaction(fn transactionFn) (err error) { func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn transactionFn) (err error) { return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { - return fn(ctx, tx) + return fn(ctx, sqlTransactionScope{tx}) }) } @@ -144,3 +152,19 @@ func (s *trackerSQLStore) Vacuum(ctx context.Context) (stats db.VacuumStats, err func (s *trackerSQLStore) Close() { s.pair.Close() } + +func (txs sqlTransactionScope) CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) { + return NewCatchpointSQLReaderWriter(txs.tx), nil +} + +func (txs sqlTransactionScope) CreateAccountsReaderWriter() (AccountsReaderWriter, error) { + return NewAccountsSQLReaderWriter(txs.tx), nil +} + +func (txs sqlTransactionScope) CreateMerkleCommitter(staging bool) (MerkleCommitter, error) { + return MakeMerkleCommitter(txs.tx, staging) +} + +func (txs sqlTransactionScope) CreateOrderedAccountsIter(accountCount int) *orderedAccountsIter { + return MakeOrderedAccountsIter(txs.tx, accountCount) +} diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index f794699a1e..5831db3239 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -154,7 +154,7 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse t.trackerDBs, _ = store.DbOpenTrackerTest(ts, inMemory) t.protoVersion = protoVersion - err := t.trackerDBs.Transaction(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err := t.trackerDBs.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { arw := store.NewAccountsSQLReaderWriter(tx) accts := ledgertesting.RandomAccounts(20, true) @@ -299,7 +299,7 @@ func TestTxTailDeltaTracking(t *testing.T) { err = txtail.prepareCommit(dcc) require.NoError(t, err) - err := ledger.trackerDBs.Transaction(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err := ledger.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { err = txtail.commitRound(context.Background(), tx, dcc) require.NoError(t, err) return nil From e85d752ebeb5f23682ab6633577697da91914a62 Mon Sep 17 00:00:00 2001 From: Ian Suvak Date: Mon, 30 Jan 2023 14:34:22 -0600 
Subject: [PATCH 13/81] network: Add GetUnderlyingConnTcpInfo method to wsPeer (#5077) --- network/wsNetwork.go | 11 +---------- network/wsPeer.go | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index b617cf63b1..549acac867 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -1842,16 +1842,7 @@ func (wn *WebsocketNetwork) getPeerConnectionTelemetryDetails(now time.Time, pee AVCount: atomic.LoadUint64(&peer.avMessageCount), PPCount: atomic.LoadUint64(&peer.ppMessageCount), } - // unwrap websocket.Conn, requestTrackedConnection, rejectingLimitListenerConn - var uconn net.Conn = peer.conn.UnderlyingConn() - for i := 0; i < 10; i++ { - wconn, ok := uconn.(wrappedConn) - if !ok { - break - } - uconn = wconn.UnderlyingConn() - } - if tcpInfo, err := util.GetConnTCPInfo(uconn); err == nil && tcpInfo != nil { + if tcpInfo, err := peer.GetUnderlyingConnTCPInfo(); err == nil && tcpInfo != nil { connDetail.TCP = *tcpInfo } if peer.outgoing { diff --git a/network/wsPeer.go b/network/wsPeer.go index b608d5dd1b..7bec904821 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -37,6 +37,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/metrics" ) @@ -299,6 +300,12 @@ type UnicastPeer interface { Respond(ctx context.Context, reqMsg IncomingMessage, topics Topics) (e error) } +// TCPInfoUnicastPeer exposes information about the underlying connection if available on the platform +type TCPInfoUnicastPeer interface { + UnicastPeer + GetUnderlyingConnTCPInfo() (*util.TCPInfo, error) +} + // Create a wsPeerCore object func makePeerCore(net *WebsocketNetwork, rootURL string, roundTripper http.RoundTripper, originAddress string) wsPeerCore { return wsPeerCore{ @@ -350,6 +357,22 @@ func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) err return err } +// GetUnderlyingConnTCPInfo unwraps the connection and returns statistics about it on supported underlying implementations +// +// (Implements TCPInfoUnicastPeer) +func (wp *wsPeer) GetUnderlyingConnTCPInfo() (*util.TCPInfo, error) { + // unwrap websocket.Conn, requestTrackedConnection, rejectingLimitListenerConn + var uconn net.Conn = wp.conn.UnderlyingConn() + for i := 0; i < 10; i++ { + wconn, ok := uconn.(wrappedConn) + if !ok { + break + } + uconn = wconn.UnderlyingConn() + } + return util.GetConnTCPInfo(uconn) +} + // Respond sends the response of a request message func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseTopics Topics) (e error) { From c8b5debee20ec295a34af0f0aaf2e6173288b86d Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 30 Jan 2023 15:34:43 -0500 Subject: [PATCH 14/81] tests: add more logging to expect test (#5078) --- netdeploy/networkTemplate.go | 7 +++++-- .../cli/goal/expect/goalExpectCommon.exp | 21 +++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index b0a913a445..f4e2c1aaa3 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -133,14 +133,17 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin if importKeys && hasWallet { var client libgoal.Client client, err = libgoal.MakeClientWithBinDir(binDir, nodeDir, "", 
libgoal.KmdClient) + if err != nil { + return + } _, err = client.CreateWallet(libgoal.UnencryptedWalletName, nil, crypto.MasterDerivationKey{}) if err != nil { return } - _, _, err = util.ExecAndCaptureOutput(importKeysCmd, "account", "importrootkey", "-w", string(libgoal.UnencryptedWalletName), "-d", nodeDir) + stdout, stderr, err := util.ExecAndCaptureOutput(importKeysCmd, "account", "importrootkey", "-w", string(libgoal.UnencryptedWalletName), "-d", nodeDir) if err != nil { - return + return nil, nil, fmt.Errorf("goal account importrootkey failed: %w\nstdout: %s\nstderr: %s", err, stdout, stderr) } } diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index 2f2f4f8265..a0e174b12a 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -64,6 +64,13 @@ proc ::AlgorandGoal::Abort { ERROR } { set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log] puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog" set LOGS_COLLECTED 1 + + set outLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-out.log] + puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-out.log:\r\n$outLog" + set errLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-err.log] + puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-err.log:\r\n$errLog" + set kmdLog [exec -- tail -n 50 $NODE_DATA_DIR/kmd-v0.5/kmd.log] + puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd.log:\r\n$kmdLog" } set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Node puts "Node path $NODE_DATA_DIR" @@ -75,6 +82,13 @@ proc ::AlgorandGoal::Abort { ERROR } { set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log] puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog" set LOGS_COLLECTED 1 + + set outLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-out.log] + puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-out.log:\r\n$outLog" + set errLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-err.log] + puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-err.log:\r\n$errLog" + set kmdLog [exec -- tail -n 50 $NODE_DATA_DIR/kmd-v0.5/kmd.log] + puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd.log:\r\n$kmdLog" } } @@ -91,6 +105,13 @@ proc ::AlgorandGoal::Abort { ERROR } { puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-err.log:\r\n$errLog" set nodeLog [exec -- tail -n 50 $::GLOBAL_TEST_ALGO_DIR/node.log] puts "\n$::GLOBAL_TEST_ALGO_DIR/node.log:\r\n$nodeLog" + + set outLog [exec cat $GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-out.log] + puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-out.log:\r\n$outLog" + set errLog [exec cat $GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-err.log] + puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-err.log:\r\n$errLog" + set kmdLog [exec -- tail -n 50 $GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd.log] + puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd.log:\r\n$kmdLog" } } From 8e5df380ff5ef5e547f4e461f8de1d4779ec31cf Mon Sep 17 00:00:00 2001 From: abebeos <110243666+abebeos@users.noreply.github.com> Date: Tue, 31 Jan 2023 21:45:29 +0200 Subject: [PATCH 15/81] goal: resolve data dir code duplication (#5073) --- cmd/algocfg/datadir.go | 72 ----------------- cmd/algocfg/getCommand.go | 3 +- cmd/algocfg/main.go | 3 +- cmd/algocfg/resetCommand.go | 3 +- cmd/algocfg/setCommand.go | 3 +- cmd/goal/account.go | 51 ++++++------ cmd/goal/application.go | 3 +- cmd/goal/asset.go | 15 ++-- cmd/goal/clerk.go | 15 ++-- cmd/goal/commands.go | 77 ++---------------- cmd/goal/commands_test.go | 19 ++--- cmd/goal/interact.go | 5 +- cmd/goal/kmd.go | 5 +- cmd/goal/ledger.go | 5 +- cmd/goal/logging.go | 9 ++- cmd/goal/multisig.go | 5 +- cmd/goal/network.go | 3 +- cmd/goal/node.go | 25 +++--- cmd/goal/wallet.go | 7 +- cmd/util/datadir/datadir.go 
| 95 +++++++++++++++++++++++ cmd/{algocfg => util/datadir}/messages.go | 2 +- cmd/util/datadir/report.go | 39 ++++++++++ 22 files changed, 239 insertions(+), 225 deletions(-) delete mode 100644 cmd/algocfg/datadir.go create mode 100644 cmd/util/datadir/datadir.go rename cmd/{algocfg => util/datadir}/messages.go (98%) create mode 100644 cmd/util/datadir/report.go diff --git a/cmd/algocfg/datadir.go b/cmd/algocfg/datadir.go deleted file mode 100644 index 93aa4d7a10..0000000000 --- a/cmd/algocfg/datadir.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2019-2023 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . - -package main - -import "os" - -var dataDirs []string - -func resolveDataDir() string { - // Figure out what data directory to tell algod to use. - // If not specified on cmdline with '-d', look for default in environment. - var dir string - if len(dataDirs) > 0 { - dir = dataDirs[0] - } - if dir == "" { - dir = os.Getenv("ALGORAND_DATA") - } - return dir -} - -func ensureFirstDataDir() string { - // Get the target data directory to work against, - // then handle the scenario where no data directory is provided. - dir := resolveDataDir() - if dir == "" { - reportErrorln(errorNoDataDirectory) - } - return dir -} - -func ensureSingleDataDir() string { - if len(dataDirs) > 1 { - reportErrorln(errorOneDataDirSupported) - } - return ensureFirstDataDir() -} - -func getDataDirs() (dirs []string) { - if len(dataDirs) == 0 { - reportErrorln(errorNoDataDirectory) - } - dirs = append(dirs, ensureFirstDataDir()) - dirs = append(dirs, dataDirs[1:]...) 
- return -} - -func onDataDirs(action func(dataDir string)) { - dirs := getDataDirs() - report := len(dirs) > 1 - - for _, dir := range dirs { - if report { - reportInfof(infoDataDir, dir) - } - action(dir) - } -} diff --git a/cmd/algocfg/getCommand.go b/cmd/algocfg/getCommand.go index 3ad9a17943..016de6cd34 100644 --- a/cmd/algocfg/getCommand.go +++ b/cmd/algocfg/getCommand.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" ) @@ -43,7 +44,7 @@ var getCmd = &cobra.Command{ Args: cobra.NoArgs, Run: func(cmd *cobra.Command, _ []string) { anyError := false - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { cfg, err := config.LoadConfigFromDisk(dataDir) if err != nil && !os.IsNotExist(err) { reportWarnf("Error loading config file from '%s' - %s", dataDir, err) diff --git a/cmd/algocfg/main.go b/cmd/algocfg/main.go index d3ee5c8a96..9fe71879e3 100644 --- a/cmd/algocfg/main.go +++ b/cmd/algocfg/main.go @@ -20,13 +20,14 @@ import ( "fmt" "os" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/spf13/cobra" ) func init() { // Config defaultDataDirValue := []string{""} - rootCmd.PersistentFlags().StringArrayVarP(&dataDirs, "datadir", "d", defaultDataDirValue, "Data directory for the node") + rootCmd.PersistentFlags().StringArrayVarP(&datadir.DataDirs, "datadir", "d", defaultDataDirValue, "Data directory for the node") } var rootCmd = &cobra.Command{ diff --git a/cmd/algocfg/resetCommand.go b/cmd/algocfg/resetCommand.go index 6e57bb2329..24f9cf1dad 100644 --- a/cmd/algocfg/resetCommand.go +++ b/cmd/algocfg/resetCommand.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/util/codecs" ) @@ -45,7 +46,7 @@ var resetCmd = &cobra.Command{ Args: cobra.NoArgs, Run: func(cmd *cobra.Command, _ []string) { anyError := false - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { cfg, err := config.LoadConfigFromDisk(dataDir) if err != nil && !os.IsNotExist(err) { reportWarnf("Error loading config file from '%s' - %s", dataDir, err) diff --git a/cmd/algocfg/setCommand.go b/cmd/algocfg/setCommand.go index ac1d7c6ea7..8367857592 100644 --- a/cmd/algocfg/setCommand.go +++ b/cmd/algocfg/setCommand.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/util/codecs" ) @@ -49,7 +50,7 @@ var setCmd = &cobra.Command{ Args: cobra.NoArgs, Run: func(cmd *cobra.Command, _ []string) { anyError := false - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { cfg, err := config.LoadConfigFromDisk(dataDir) if err != nil && !os.IsNotExist(err) { reportWarnf("Error loading config file from '%s' - %s", dataDir, err) diff --git a/cmd/goal/account.go b/cmd/goal/account.go index 792d39b097..c8becdb8fd 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/passphrase" @@ -240,7 +241,7 @@ var accountCmd = &cobra.Command{ Long: `Collection of commands to support the creation and management of accounts / wallets tied to a specific Algorand node instance.`, Args: 
validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - accountList := makeAccountsList(ensureSingleDataDir()) + accountList := makeAccountsList(datadir.EnsureSingleDataDir()) // Update the default account if defaultAccountName != "" { @@ -275,7 +276,7 @@ var renameCmd = &cobra.Command{ Long: `Change the human-friendly name of an account. This is a local-only name, it is not stored on the network.`, Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { - accountList := makeAccountsList(ensureSingleDataDir()) + accountList := makeAccountsList(datadir.EnsureSingleDataDir()) oldName := args[0] newName := args[1] @@ -307,7 +308,7 @@ var newCmd = &cobra.Command{ Long: `Coordinates the creation of a new account with KMD. The name specified here is stored in a local configuration file and is only used by goal when working against that specific node instance.`, Args: cobra.RangeArgs(0, 1), Run: func(cmd *cobra.Command, args []string) { - accountList := makeAccountsList(ensureSingleDataDir()) + accountList := makeAccountsList(datadir.EnsureSingleDataDir()) // Choose an account name if len(args) == 0 { accountName = accountList.getUnnamed() @@ -325,7 +326,7 @@ var newCmd = &cobra.Command{ reportErrorf(errorNameAlreadyTaken, accountName) } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() // Get a wallet handle wh := ensureWalletHandle(dataDir, walletName) @@ -355,7 +356,7 @@ var deletePartKeyCmd = &cobra.Command{ Long: `Delete the indicated participation key.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) @@ -373,7 +374,7 @@ var deleteCmd = &cobra.Command{ Long: `Delete the indicated account. The key management daemon will no longer know about this account, although the account will still exist on the network.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) client := ensureKmdClient(dataDir) @@ -394,7 +395,7 @@ var newMultisigCmd = &cobra.Command{ Long: `Create a new multisig account from a list of existing non-multisig addresses`, Args: cobra.MinimumNArgs(1), Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) // Get a wallet handle to the default wallet @@ -437,7 +438,7 @@ var deleteMultisigCmd = &cobra.Command{ Long: `Delete a multisig account. 
Like ordinary account delete, the local node will no longer know about the account, but it may still exist on the network.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) client := ensureKmdClient(dataDir) @@ -458,7 +459,7 @@ var infoMultisigCmd = &cobra.Command{ Long: `Print information about a multisig account, such as its Algorand multisig version, or the number of keys needed to validate a transaction from the multisig account.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureKmdClient(dataDir) wh := ensureWalletHandle(dataDir, walletName) @@ -482,7 +483,7 @@ var listCmd = &cobra.Command{ Long: `Show the list of Algorand accounts on this machine. Indicates whether the account is [offline] or [online], and if the account is the default account for goal. Also displays account information with --info.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) // Get a wallet handle to the specified wallet @@ -539,7 +540,7 @@ var infoCmd = &cobra.Command{ Long: `Retrieve information about the assets and applications the specified account has created or opted into.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) response, err := client.AccountInformation(accountAddress, true) if err != nil { @@ -729,7 +730,7 @@ var balanceCmd = &cobra.Command{ Long: `Retrieve the balance record for the specified account. Algo balance is displayed in microAlgos.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) response, err := client.AccountInformation(accountAddress, false) if err != nil { @@ -746,7 +747,7 @@ var dumpCmd = &cobra.Command{ Long: `Dump the balance record for the specified account to terminal as JSON or to a file as MessagePack.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) rawAddress, err := basics.UnmarshalChecksumAddress(accountAddress) if err != nil { @@ -774,7 +775,7 @@ var rewardsCmd = &cobra.Command{ Long: `Retrieve the rewards for the specified account, including pending rewards. Units displayed are microAlgos.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) response, err := client.AccountInformation(accountAddress, false) if err != nil { @@ -801,7 +802,7 @@ var changeOnlineCmd = &cobra.Command{ reportErrorf("Going offline does not support --partkeyfile\n") } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() var client libgoal.Client if statusChangeTxFile != "" { // writing out a txn, don't need kmd @@ -891,7 +892,7 @@ var addParticipationKeyCmd = &cobra.Command{ Long: `Generate and install participation key for the specified account. 
This participation key can then be used for going online and participating in consensus.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() if partKeyOutDir != "" && !util.IsDir(partKeyOutDir) { reportErrorf(errorDirectoryNotExist, partKeyOutDir) @@ -939,7 +940,7 @@ system security. No --delete-input flag specified, exiting without installing key.`) } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) addResponse, err := client.AddParticipationKey(partKeyFile) @@ -967,7 +968,7 @@ var renewParticipationKeyCmd = &cobra.Command{ Long: `Generate a participation key for the specified account and issue the necessary transaction to register it.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) @@ -1046,7 +1047,7 @@ var renewAllParticipationKeyCmd = &cobra.Command{ Long: `Generate new participation keys for all existing accounts with participation keys and issue the necessary transactions to register them.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { fmt.Printf("Renewing participation keys in %s...\n", dataDir) err := renewPartKeysInDir(dataDir, roundLastValid, transactionFee, scLeaseBytes(cmd), keyDilution, walletName) if err != nil { @@ -1138,7 +1139,7 @@ var listParticipationKeysCmd = &cobra.Command{ Long: `List all participation keys tracked by algod along with summary of additional information. For detailed key information use 'partkeyinfo'.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureGoalClient(dataDir, libgoal.DynamicClient) parts, err := client.ListParticipationKeys() @@ -1202,7 +1203,7 @@ var importCmd = &cobra.Command{ Short: "Import an account key from mnemonic", Long: "Import an account key from a mnemonic generated by the export command or by algokey (NOT a mnemonic from the goal wallet command). The imported account will be listed alongside your wallet-generated accounts, but will not be tied to your wallet.", Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) // Choose an account name if len(args) == 0 { @@ -1260,7 +1261,7 @@ var exportCmd = &cobra.Command{ Short: "Export an account key for use with account import", Long: "Export an account mnemonic seed, for use with account import. This exports the seed for a single account and should NOT be confused with the wallet mnemonic.", Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureKmdClient(dataDir) wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true) @@ -1294,7 +1295,7 @@ var importRootKeysCmd = &cobra.Command{ Long: "Import .rootkey files from the data directory into a kmd wallet. 
This is analogous to using the import command with an account seed mnemonic: the imported account will be displayed alongside your wallet-derived accounts, but will not be tied to your wallet mnemonic.", Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() // Generate a participation keys database and install it client := ensureKmdClient(dataDir) @@ -1386,7 +1387,7 @@ var partkeyInfoCmd = &cobra.Command{ Long: `Output details about all available part keys in the specified data directory(ies), such as key validity period.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { fmt.Printf("Dumping participation key info from %s...\n", dataDir) client := ensureAlgodClient(dataDir) @@ -1428,7 +1429,7 @@ var markNonparticipatingCmd = &cobra.Command{ checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) firstTxRound, lastTxRound, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds) if err != nil { diff --git a/cmd/goal/application.go b/cmd/goal/application.go index e01e1fdee5..0920793300 100644 --- a/cmd/goal/application.go +++ b/cmd/goal/application.go @@ -32,6 +32,7 @@ import ( "github.com/algorand/avm-abi/abi" "github.com/algorand/avm-abi/apps" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/crypto" apiclient "github.com/algorand/go-algorand/daemon/algod/api/client" "github.com/algorand/go-algorand/data/basics" @@ -398,7 +399,7 @@ func mustParseOnCompletion(ocString string) (oc transactions.OnCompletion) { } func getDataDirAndClient() (dataDir string, client libgoal.Client) { - dataDir = ensureSingleDataDir() + dataDir = datadir.EnsureSingleDataDir() client = ensureFullClient(dataDir) return } diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go index 97606c4cba..f882f85c0d 100644 --- a/cmd/goal/asset.go +++ b/cmd/goal/asset.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/libgoal" ) @@ -229,7 +230,7 @@ var createAssetCmd = &cobra.Command{ reportErrorf("The [--clawback] flag and the [--no-clawback] flag are mutually exclusive, do not provide both flags.") } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := makeAccountsList(dataDir) creator := accountList.getAddressByName(assetCreator) @@ -345,7 +346,7 @@ var destroyAssetCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := makeAccountsList(dataDir) @@ -421,7 +422,7 @@ var configAssetCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := makeAccountsList(dataDir) @@ -514,7 +515,7 @@ var sendAssetCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := 
makeAccountsList(dataDir) @@ -601,7 +602,7 @@ var freezeAssetCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := makeAccountsList(dataDir) @@ -684,7 +685,7 @@ var optinAssetCmd = &cobra.Command{ Run: func(cmd *cobra.Command, _ []string) { checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := makeAccountsList(dataDir) // Opt in txns are always 0 @@ -757,7 +758,7 @@ var infoAssetCmd = &cobra.Command{ Long: `Look up asset information stored on the network, such as asset creator, management addresses, or asset name.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accountList := makeAccountsList(dataDir) creator := accountList.getAddressByName(assetCreator) diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index dce74a81d4..ed226733d0 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -27,6 +27,7 @@ import ( "strings" "time" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" @@ -321,7 +322,7 @@ var sendCmd = &cobra.Command{ checkTxValidityPeriodCmdFlags(cmd) - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) var fromAddressResolved string @@ -543,7 +544,7 @@ var rawsendCmd = &cobra.Command{ } dec := protocol.NewMsgpDecoderBytes(data) - client := ensureAlgodClient(ensureSingleDataDir()) + client := ensureAlgodClient(datadir.EnsureSingleDataDir()) txnIDs := make(map[transactions.Txid]transactions.SignedTxn) var txns []transactions.SignedTxn @@ -710,7 +711,7 @@ func getProto(versArg string) (protocol.ConsensusVersion, config.ConsensusParams if versArg != "" { cvers = protocol.ConsensusVersion(versArg) } else { - dataDir := maybeSingleDataDir() + dataDir := datadir.MaybeSingleDataDir() if dataDir != "" { client := ensureAlgodClient(dataDir) params, err := client.SuggestedParams() @@ -760,7 +761,7 @@ var signCmd = &cobra.Command{ } if lsig.Logic == nil { // sign the usual way - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client = ensureKmdClient(dataDir) wh, pw = ensureWalletHandleMaybePassword(dataDir, walletName, true) } else if signerAddress != "" { @@ -1058,7 +1059,7 @@ var compileCmd = &cobra.Command{ program, sourceMap := assembleFileWithMap(fname, true) outblob := program if signProgram { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) client := ensureKmdClient(dataDir) wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true) @@ -1135,7 +1136,7 @@ var dryrunCmd = &cobra.Command{ proto, params := getProto(protoVersion) if dumpForDryrun { // Write dryrun data to file - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) accts, err := unmarshalSlice(dumpForDryrunAccts) if err != nil { @@ -1192,7 +1193,7 @@ var dryrunRemoteCmd = &cobra.Command{ reportErrorf(fileReadError, txFilename, err) } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client 
:= ensureFullClient(dataDir) resp, err := client.Dryrun(data) if err != nil { diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go index 2a790f3c58..a12f08b2e6 100644 --- a/cmd/goal/commands.go +++ b/cmd/goal/commands.go @@ -30,6 +30,7 @@ import ( "github.com/spf13/cobra/doc" "golang.org/x/crypto/ssh/terminal" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/spec/common" "github.com/algorand/go-algorand/data/bookkeeping" @@ -41,8 +42,6 @@ import ( var log = logging.Base() -var dataDirs []string - var defaultCacheDir = "goal.cache" var verboseVersionPrint bool @@ -95,7 +94,7 @@ func init() { // Config defaultDataDirValue := []string{""} - rootCmd.PersistentFlags().StringArrayVarP(&dataDirs, "datadir", "d", defaultDataDirValue, "Data directory for the node") + rootCmd.PersistentFlags().StringArrayVarP(&datadir.DataDirs, "datadir", "d", defaultDataDirValue, "Data directory for the node") rootCmd.PersistentFlags().StringVarP(&kmdDataDirFlag, "kmddir", "k", "", "Data directory for kmd") } @@ -161,7 +160,7 @@ var versionCmd = &cobra.Command{ Short: "The current version of the Algorand daemon (algod)", Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { response, err := ensureAlgodClient(dataDir).AlgodVersions() if err != nil { fmt.Println(err) @@ -204,7 +203,7 @@ var reportCmd = &cobra.Command{ } fmt.Println(string(data)) - dirs := getDataDirs() + dirs := datadir.GetDataDirs() report := len(dirs) > 1 for _, dir := range dirs { if report { @@ -218,7 +217,7 @@ var reportCmd = &cobra.Command{ fmt.Printf("Genesis ID from genesis.json: %s\n", genesis.ID()) } fmt.Println() - onDataDirs(getStatus) + datadir.OnDataDirs(getStatus) }, } @@ -254,7 +253,7 @@ func resolveKmdDataDir(dataDir string) string { return out } if dataDir == "" { - dataDir = resolveDataDir() + dataDir = datadir.ResolveDataDir() } if libgoal.AlgorandDataIsPrivate(dataDir) { algodKmdPath, _ := filepath.Abs(filepath.Join(dataDir, libgoal.DefaultKMDDataDir)) @@ -274,70 +273,6 @@ func resolveKmdDataDir(dataDir string) string { return filepath.Join(cu.HomeDir, ".algorand", genesis.ID(), libgoal.DefaultKMDDataDir) } -func resolveDataDir() string { - // Figure out what data directory to tell algod to use. - // If not specified on cmdline with '-d', look for default in environment. - var dir string - if (len(dataDirs) > 0) && (dataDirs[0] != "") { - // calculate absolute path, see https://github.com/algorand/go-algorand/issues/589 - absDir, err := filepath.Abs(dataDirs[0]) - if err != nil { - reportErrorf("Absolute path conversion error: %s", err) - } - dir = absDir - } - if dir == "" { - dir = os.Getenv("ALGORAND_DATA") - } - return dir -} - -func ensureFirstDataDir() string { - // Get the target data directory to work against, - // then handle the scenario where no data directory is provided. 
- dir := resolveDataDir() - if dir == "" { - reportErrorln(errorNoDataDirectory) - } - return dir -} - -func ensureSingleDataDir() string { - if len(dataDirs) > 1 { - reportErrorln(errorOneDataDirSupported) - } - return ensureFirstDataDir() -} - -// like ensureSingleDataDir() but doesn't exit() -func maybeSingleDataDir() string { - if len(dataDirs) > 1 { - return "" - } - return resolveDataDir() -} - -func getDataDirs() (dirs []string) { - if len(dataDirs) == 0 { - reportErrorln(errorNoDataDirectory) - } - dirs = append(dirs, ensureFirstDataDir()) - dirs = append(dirs, dataDirs[1:]...) - return -} - -func onDataDirs(action func(dataDir string)) { - dirs := getDataDirs() - report := len(dirs) > 1 - - for _, dir := range dirs { - if report { - reportInfof(infoDataDir, dir) - } - action(dir) - } -} - func ensureCacheDir(dataDir string) string { var err error if libgoal.AlgorandDataIsPrivate(dataDir) { diff --git a/cmd/goal/commands_test.go b/cmd/goal/commands_test.go index 5a2e6cd021..78794793c6 100644 --- a/cmd/goal/commands_test.go +++ b/cmd/goal/commands_test.go @@ -20,6 +20,7 @@ import ( "os" "testing" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) @@ -28,7 +29,7 @@ func TestEnsureDataDirReturnsWhenDataDirIsProvided(t *testing.T) { partitiontest.PartitionTest(t) expectedDir := "~/.algorand" os.Setenv("ALGORAND_DATA", expectedDir) - actualDir := ensureFirstDataDir() + actualDir := datadir.EnsureFirstDataDir() require.Equal(t, expectedDir, actualDir) } @@ -38,8 +39,8 @@ func TestEnsureDataDirReturnsWhenWorkDirIsProvided(t *testing.T) { if err != nil { reportErrorf("Error getting work dir: %s", err) } - dataDirs[0] = "." - actualDir := ensureFirstDataDir() + datadir.DataDirs[0] = "." 
+ actualDir := datadir.EnsureFirstDataDir() require.Equal(t, expectedDir, actualDir) } @@ -49,8 +50,8 @@ func TestEnsureDataDirReturnsWhenRelPath1IsProvided(t *testing.T) { if err != nil { reportErrorf("Error getting work dir: %s", err) } - dataDirs[0] = "./../goal" - actualDir := ensureFirstDataDir() + datadir.DataDirs[0] = "./../goal" + actualDir := datadir.EnsureFirstDataDir() require.Equal(t, expectedDir, actualDir) } @@ -60,8 +61,8 @@ func TestEnsureDataDirReturnsWhenRelPath2IsProvided(t *testing.T) { if err != nil { reportErrorf("Error getting work dir: %s", err) } - dataDirs[0] = "../goal" - actualDir := ensureFirstDataDir() + datadir.DataDirs[0] = "../goal" + actualDir := datadir.EnsureFirstDataDir() require.Equal(t, expectedDir, actualDir) } @@ -71,7 +72,7 @@ func TestEnsureDataDirReturnsWhenRelPath3IsProvided(t *testing.T) { if err != nil { reportErrorf("Error getting work dir: %s", err) } - dataDirs[0] = "../../cmd/goal" - actualDir := ensureFirstDataDir() + datadir.DataDirs[0] = "../../cmd/goal" + actualDir := datadir.EnsureFirstDataDir() require.Equal(t, expectedDir, actualDir) } diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go index 000b7e8459..fcdcc3953c 100644 --- a/cmd/goal/interact.go +++ b/cmd/goal/interact.go @@ -30,6 +30,7 @@ import ( "github.com/spf13/cobra" "github.com/algorand/avm-abi/apps" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/crypto" apiclient "github.com/algorand/go-algorand/daemon/algod/api/client" "github.com/algorand/go-algorand/data/basics" @@ -478,7 +479,7 @@ var appExecuteCmd = &cobra.Command{ Short: "Execute a procedure on an application", Args: cobra.MinimumNArgs(1), Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) header := parseAppHeader() @@ -654,7 +655,7 @@ var appQueryCmd = &cobra.Command{ Short: "Query local or global state from an application", Args: cobra.MinimumNArgs(1), Run: func(cmd *cobra.Command, args []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureFullClient(dataDir) header := parseAppHeader() diff --git a/cmd/goal/kmd.go b/cmd/goal/kmd.go index 3e9901f5a3..517d975fa9 100644 --- a/cmd/goal/kmd.go +++ b/cmd/goal/kmd.go @@ -19,6 +19,7 @@ package main import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/util" ) @@ -69,7 +70,7 @@ var startKMDCmd = &cobra.Command{ panic(err) } - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { kdd := resolveKmdDataDir(dataDir) startKMDForDataDir(binDir, dataDir, kdd) }) @@ -86,7 +87,7 @@ var stopKMDCmd = &cobra.Command{ panic(err) } - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { nc := nodecontrol.MakeNodeController(binDir, dataDir) kdd := resolveKmdDataDir(dataDir) nc.SetKMDDataDir(kdd) diff --git a/cmd/goal/ledger.go b/cmd/goal/ledger.go index 208655ad6e..84a92352ff 100644 --- a/cmd/goal/ledger.go +++ b/cmd/goal/ledger.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/protocol/transcode" ) @@ -59,7 +60,7 @@ var supplyCmd = &cobra.Command{ Long: `Show ledger token supply. All units are in microAlgos. The "Total Money" is all algos held by online+offline accounts (excludes non-participating accounts). 
The "Online Money" is the amount held solely by online accounts.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() response, err := ensureAlgodClient(dataDir).LedgerSupply() if err != nil { reportErrorf(errorRequestFail, err) @@ -80,7 +81,7 @@ var blockCmd = &cobra.Command{ reportErrorf(errParsingRoundNumber, err) } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureAlgodClient(dataDir) response, err := client.RawBlock(round) if err != nil { diff --git a/cmd/goal/logging.go b/cmd/goal/logging.go index eb05c73996..68c0a696c4 100644 --- a/cmd/goal/logging.go +++ b/cmd/goal/logging.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" ) @@ -51,7 +52,7 @@ var loggingCmd = &cobra.Command{ Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { reportWarnln("`goal logging` deprecated, use `diagcfg telemetry status`") - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") // If error loading config, can't disable / no need to disable @@ -73,7 +74,7 @@ var enableCmd = &cobra.Command{ Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { reportWarnln("`goal logging enable` deprecated, use `diagcfg telemetry enable`") - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") if err != nil { fmt.Println(err) @@ -94,7 +95,7 @@ var disableCmd = &cobra.Command{ Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { reportWarnf("`goal logging disable` deprecated, use `diagcfg telemetry disable`") - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") // If error loading config, can't disable / no need to disable @@ -125,7 +126,7 @@ var loggingSendCmd = &cobra.Command{ counter := uint(1) errcount := 0 var firsterr error = nil - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { cfg, err := logging.EnsureTelemetryConfig(&dataDir, "") if err != nil { fmt.Println(err) diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go index 5db3d09af9..88b9a45638 100644 --- a/cmd/goal/multisig.go +++ b/cmd/goal/multisig.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" @@ -90,7 +91,7 @@ var addSigCmd = &cobra.Command{ reportErrorf(addrNoSigError) } - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureKmdClient(dataDir) wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true) @@ -146,7 +147,7 @@ var signProgramCmd = &cobra.Command{ Long: `Start a multisig LogicSig, or add a signature to an existing multisig, for a given program.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() client := ensureKmdClient(dataDir) wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true) var program []byte diff --git a/cmd/goal/network.go b/cmd/goal/network.go index 715b5707d5..d7212123f5 100644 --- 
a/cmd/goal/network.go +++ b/cmd/goal/network.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/netdeploy" "github.com/algorand/go-algorand/util" @@ -96,7 +97,7 @@ var networkCreateCmd = &cobra.Command{ panic(err) } - dataDir := maybeSingleDataDir() + dataDir := datadir.MaybeSingleDataDir() var consensus config.ConsensusProtocols if dataDir != "" { // try to load the consensus from there. If there is none, we can just use the built in one. diff --git a/cmd/goal/node.go b/cmd/goal/node.go index fcfa27d6dc..803b305992 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -33,6 +33,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -152,7 +153,7 @@ var catchupCmd = &cobra.Command{ Example: "goal node catchup 6500000#1234567890ABCDEF01234567890ABCDEF0\tStart catching up to round 6500000 with the provided catchpoint\ngoal node catchup --abort\t\t\t\t\tAbort the current catchup", Args: catchpointCmdArgument, Run: func(cmd *cobra.Command, args []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { if !abortCatchup && len(args) == 0 { client := ensureAlgodClient(dataDir) vers, err := client.AlgodVersions() @@ -206,7 +207,7 @@ var startCmd = &cobra.Command{ if err != nil { panic(err) } - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { if libgoal.AlgorandDaemonSystemdManaged(dataDir) { reportErrorf(errorNodeManagedBySystemd) } @@ -247,7 +248,7 @@ var shutdownCmd = &cobra.Command{ if err != nil { panic(err) } - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { nc := nodecontrol.MakeNodeController(binDir, dataDir) err := nc.Shutdown() @@ -278,7 +279,7 @@ var stopCmd = &cobra.Command{ if err != nil { panic(err) } - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { if libgoal.AlgorandDaemonSystemdManaged(dataDir) { reportErrorf(errorNodeManagedBySystemd) } @@ -309,7 +310,7 @@ var restartCmd = &cobra.Command{ if err != nil { panic(err) } - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { if libgoal.AlgorandDaemonSystemdManaged(dataDir) { reportErrorf(errorNodeManagedBySystemd) } @@ -366,7 +367,7 @@ var generateTokenCmd = &cobra.Command{ Short: "Generate and install a new API token", Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { // Ensure the node is stopped -- HealthCheck should fail clientConfig := libgoal.ClientConfig{ AlgodDataDir: dataDir, @@ -399,7 +400,7 @@ var statusCmd = &cobra.Command{ Long: `Show the current status of the running Algorand node.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - onDataDirs(getStatus) + datadir.OnDataDirs(getStatus) }, } @@ -517,7 +518,7 @@ var lastroundCmd = &cobra.Command{ Long: `Prints the most recent round confirmed by the Algorand node.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { round, err := ensureAlgodClient(dataDir).CurrentRound() if err != nil { reportErrorf(errorNodeStatus, err) @@ -538,7 +539,7 @@ var cloneCmd = &cobra.Command{ 
if err != nil { panic(err) } - nc := nodecontrol.MakeNodeController(binDir, ensureSingleDataDir()) + nc := nodecontrol.MakeNodeController(binDir, datadir.EnsureSingleDataDir()) err = nc.Clone(targetDir, !noLedger) if err != nil { reportErrorf(errorCloningNode, err) @@ -555,7 +556,7 @@ var pendingTxnsCmd = &cobra.Command{ Long: `Get a snapshot of current pending transactions on this node, cut off at MAX transactions (-m), default 0. If MAX=0, fetches as many transactions as possible.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { client := ensureAlgodClient(dataDir) statusTxnPool, err := client.GetParsedPendingTransactions(maxPendingTransactions) if err != nil { @@ -589,7 +590,7 @@ var waitCmd = &cobra.Command{ Long: "Waits for the node to make progress, which includes catching up.", Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - client := ensureAlgodClient(ensureSingleDataDir()) + client := ensureAlgodClient(datadir.EnsureSingleDataDir()) stat, err := client.Status() if err != nil { reportErrorf(errorNodeStatus, err) @@ -698,7 +699,7 @@ var createCmd = &cobra.Command{ } func catchup(dataDir string, args []string) { - client := ensureAlgodClient(ensureSingleDataDir()) + client := ensureAlgodClient(datadir.EnsureSingleDataDir()) if abortCatchup { err := client.AbortCatchup() if err != nil { diff --git a/cmd/goal/wallet.go b/cmd/goal/wallet.go index 800d4b179b..0b55c18f14 100644 --- a/cmd/goal/wallet.go +++ b/cmd/goal/wallet.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/passphrase" "github.com/algorand/go-algorand/daemon/kmd/lib/kmdapi" @@ -53,7 +54,7 @@ var walletCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { // Update the default wallet if defaultWalletName != "" { - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) // Check that the new default wallet exists and isn't a duplicate @@ -84,7 +85,7 @@ var newWalletCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { var err error - dataDir := ensureSingleDataDir() + dataDir := datadir.EnsureSingleDataDir() accountList := makeAccountsList(dataDir) client := ensureKmdClient(dataDir) walletName := []byte(args[0]) @@ -188,7 +189,7 @@ var listWalletsCmd = &cobra.Command{ Short: "List wallets managed by kmd", Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, _ []string) { - onDataDirs(func(dataDir string) { + datadir.OnDataDirs(func(dataDir string) { client := ensureKmdClient(dataDir) wallets, err := client.ListWallets() if err != nil { diff --git a/cmd/util/datadir/datadir.go b/cmd/util/datadir/datadir.go new file mode 100644 index 0000000000..20ba92b8e3 --- /dev/null +++ b/cmd/util/datadir/datadir.go @@ -0,0 +1,95 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see <https://www.gnu.org/licenses/>. + +package datadir + +import ( + "os" + "path/filepath" +) + +// DataDirs contains the list of data directories +var DataDirs []string + +// ResolveDataDir determines the data directory to use. +// If not specified on cmdline with '-d', look for default in environment. +func ResolveDataDir() string { + var dir string + if (len(DataDirs) > 0) && (DataDirs[0] != "") { + // calculate absolute path, see https://github.com/algorand/go-algorand/issues/589 + absDir, err := filepath.Abs(DataDirs[0]) + if err != nil { + reportErrorf("Absolute path conversion error: %s", err) + } + dir = absDir + } + if dir == "" { + dir = os.Getenv("ALGORAND_DATA") + } + return dir +} + +// EnsureFirstDataDir retrieves the first data directory. +// Reports an error and exits when no data directory can be found. +func EnsureFirstDataDir() string { + dir := ResolveDataDir() + if dir == "" { + reportErrorln(errorNoDataDirectory) + } + return dir +} + +// EnsureSingleDataDir retrieves the single configured data directory. +// Reports an error and exits when more than one data directory is configured. +func EnsureSingleDataDir() string { + if len(DataDirs) > 1 { + reportErrorln(errorOneDataDirSupported) + } + return EnsureFirstDataDir() +} + +// MaybeSingleDataDir retrieves the single configured data directory without exiting. +// Returns the empty string "" when more than one data directory is configured. +func MaybeSingleDataDir() string { + if len(DataDirs) > 1 { + return "" + } + return ResolveDataDir() +} + +// GetDataDirs returns the list of available data directories as strings. +// Reports an error and exits when no data directories are available. +func GetDataDirs() (dirs []string) { + if len(DataDirs) == 0 { + reportErrorln(errorNoDataDirectory) + } + dirs = append(dirs, EnsureFirstDataDir()) + dirs = append(dirs, DataDirs[1:]...) + return +} + +// OnDataDirs runs the given action on each configured data directory, reporting each directory when more than one is configured. +func OnDataDirs(action func(dataDir string)) { + dirs := GetDataDirs() + doreport := len(dirs) > 1 + + for _, dir := range dirs { + if doreport { + reportInfof(infoDataDir, dir) + } + action(dir) + } +} diff --git a/cmd/algocfg/messages.go b/cmd/util/datadir/messages.go similarity index 98% rename from cmd/algocfg/messages.go rename to cmd/util/datadir/messages.go index 5225cfc860..0ef5e6a49a 100644 --- a/cmd/algocfg/messages.go +++ b/cmd/util/datadir/messages.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. -package main +package datadir const ( // General diff --git a/cmd/util/datadir/report.go b/cmd/util/datadir/report.go new file mode 100644 index 0000000000..c75aa4924e --- /dev/null +++ b/cmd/util/datadir/report.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details.
+// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see <https://www.gnu.org/licenses/>. + +package datadir + +// TODO: eliminate duplication (copied from cmd/algocfg/report.go) + +import ( + "fmt" + "os" +) + +func reportInfof(format string, args ...interface{}) { + fmt.Printf(format+"\n", args...) +} + +func reportErrorln(args ...interface{}) { + fmt.Fprintln(os.Stderr, args...) + os.Exit(1) +} + +// TODO: Replace all report functions with the higher grade ones from cmd/algo + +func reportErrorf(format string, args ...interface{}) { + reportErrorln(fmt.Sprintf(format, args...)) +} From 4f53f6a46d1a77a96641b3967379547a98000ca7 Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Wed, 1 Feb 2023 07:26:24 -0800 Subject: [PATCH 16/81] node: Add follower node for sync mode (#5009) Co-authored-by: cce <51567+cce@users.noreply.github.com> --- catchup/service.go | 17 + config/localTemplate.go | 5 + config/local_defaults.go | 1 + daemon/algod/api/client/restClient.go | 38 +- daemon/algod/api/server/lib/common.go | 10 +- daemon/algod/api/server/router.go | 28 +- daemon/algod/api/server/v2/handlers.go | 2 + .../algod/api/server/v2/test/handlers_test.go | 10 +- daemon/algod/server.go | 23 +- installer/config.json.example | 1 + libgoal/libgoal.go | 27 ++ node/error.go | 21 + node/follower_node.go | 441 ++++++++++++++++++ node/follower_node_test.go | 97 ++++ node/node.go | 107 ++--- .../features/followerNode/syncDeltas_test.go | 104 +++++ 16 files changed, 858 insertions(+), 74 deletions(-) create mode 100644 node/follower_node.go create mode 100644 node/follower_node_test.go create mode 100644 test/e2e-go/features/followerNode/syncDeltas_test.go diff --git a/catchup/service.go b/catchup/service.go index 2204212df2..21c104ea48 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -95,6 +95,9 @@ type Service struct { protocolErrorLogged bool lastSupportedRound basics.Round unmatchedPendingCertificates <-chan PendingUnmatchedCertificate + // This channel signals periodicSync to attempt catchup immediately. This allows us to start fetching rounds from + // the network as soon as disableSyncRound is modified. + syncNow chan struct{} } // A BlockAuthenticator authenticates blocks given a certificate.
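The syncNow channel added above uses a standard Go idiom: a loop that normally wakes on a timer also selects on a signal channel, so a caller can force an immediate iteration instead of waiting out the sleep. A minimal, self-contained sketch of the idiom (the worker, kick, and stop names are illustrative and not part of this patch):

package main

import (
	"fmt"
	"time"
)

// worker wakes up periodically, but can be woken early through kick,
// the way periodicSync is woken through syncNow.
func worker(kick, stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		case <-kick:
			fmt.Println("immediate sync requested")
		case <-time.After(5 * time.Second):
			fmt.Println("periodic sync")
		}
	}
}

func main() {
	kick := make(chan struct{})
	stop := make(chan struct{})
	go worker(kick, stop)
	kick <- struct{}{} // analogous to SetDisableSyncRound signaling syncNow
	time.Sleep(100 * time.Millisecond)
	close(stop)
}

A send on an unbuffered channel blocks until the loop is ready to receive, which is presumably why the patch guards the send with an IsSynchronizing check.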
@@ -122,6 +125,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode s.parallelBlocks = config.CatchupParallelBlocks s.deadlineTimeout = agreement.DeadlineTimeout() s.blockValidationPool = blockValidationPool + s.syncNow = make(chan struct{}) return s } @@ -160,12 +164,18 @@ func (s *Service) SetDisableSyncRound(rnd uint64) error { return ErrSyncRoundInvalid } atomic.StoreUint64(&s.disableSyncRound, rnd) + if syncing, initial := s.IsSynchronizing(); !syncing && !initial { + s.syncNow <- struct{}{} + } return nil } // UnsetDisableSyncRound removes any previously set disabled sync round func (s *Service) UnsetDisableSyncRound() { atomic.StoreUint64(&s.disableSyncRound, 0) + if syncing, initial := s.IsSynchronizing(); !syncing && !initial { + s.syncNow <- struct{}{} + } } // GetDisableSyncRound returns the disabled sync round @@ -575,6 +585,13 @@ func (s *Service) periodicSync() { // we want to sleep for a random duration since it would "de-syncronize" us from the ledger advance sync sleepDuration = time.Duration(crypto.RandUint63()) % s.deadlineTimeout continue + case <-s.syncNow: + if s.parallelBlocks == 0 || s.ledger.IsWritingCatchpointDataFile() { + continue + } + s.suspendForCatchpointWriting = false + s.log.Info("Immediate resync triggered; resyncing") + s.sync() case <-time.After(sleepDuration): if sleepDuration < s.deadlineTimeout || s.cfg.DisableNetworking { sleepDuration = s.deadlineTimeout diff --git a/config/localTemplate.go b/config/localTemplate.go index 7caa5e9dee..e1b7259417 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -490,6 +490,11 @@ type Local struct { // EnableExperimentalAPI enables experimental API endpoint. Note that these endpoints have no // guarantees in terms of functionality or future support. EnableExperimentalAPI bool `version[26]:"false"` + + // EnableFollowMode launches the node in "follower" mode. This turns off the agreement service, + // and APIs related to broadcasting transactions, and enables APIs which can retrieve detailed information + // from ledger caches and can control the ledger round. + EnableFollowMode bool `version[27]:"false"` } // DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers diff --git a/config/local_defaults.go b/config/local_defaults.go index 18604556f6..f0e739a2db 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -61,6 +61,7 @@ var defaultLocal = Local{ EnableCatchupFromArchiveServers: false, EnableDeveloperAPI: false, EnableExperimentalAPI: false, + EnableFollowMode: false, EnableGossipBlockService: true, EnableIncomingMessageFilter: false, EnableLedgerService: false, diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go index 3cf61e9036..f3f5415734 100644 --- a/daemon/algod/api/client/restClient.go +++ b/daemon/algod/api/client/restClient.go @@ -266,8 +266,8 @@ func (client RestClient) getRaw(response RawResponse, path string, request inter // post sends a POST request to the given path with the given request object. // No query parameters will be sent if request is nil. // response must be a pointer to an object as post writes the response there. 
-func (client RestClient) post(response interface{}, path string, request interface{}) error { - return client.submitForm(response, path, request, "POST", true /* encodeJSON */, true /* decodeJSON */, false) +func (client RestClient) post(response interface{}, path string, request interface{}, expectNoContent bool) error { + return client.submitForm(response, path, request, "POST", true /* encodeJSON */, true /* decodeJSON */, expectNoContent) } // Status retrieves the StatusResponse from the running node @@ -504,7 +504,7 @@ func (client RestClient) SuggestedParams() (response model.TransactionParameters // SendRawTransaction gets a SignedTxn and broadcasts it to the network func (client RestClient) SendRawTransaction(txn transactions.SignedTxn) (response model.PostTransactionsResponse, err error) { - err = client.post(&response, "/v2/transactions", protocol.Encode(&txn)) + err = client.post(&response, "/v2/transactions", protocol.Encode(&txn), false) return } @@ -518,7 +518,7 @@ func (client RestClient) SendRawTransactionGroup(txgroup []transactions.SignedTx } var response model.PostTransactionsResponse - return client.post(&response, "/v2/transactions", enc) + return client.post(&response, "/v2/transactions", enc, false) } // Block gets the block info for the given round @@ -538,7 +538,7 @@ func (client RestClient) RawBlock(round uint64) (response []byte, err error) { // Shutdown requests the node to shut itself down func (client RestClient) Shutdown() (err error) { response := 1 - err = client.post(&response, "/v2/shutdown", nil) + err = client.post(&response, "/v2/shutdown", nil, false) return } @@ -650,7 +650,7 @@ func (client RestClient) TransactionProof(txid string, round uint64, hashType cr // PostParticipationKey sends a key file to the node. func (client RestClient) PostParticipationKey(file []byte) (response model.PostParticipationResponse, err error) { - err = client.post(&response, "/v2/participation", file) + err = client.post(&response, "/v2/participation", file, false) return } @@ -672,3 +672,29 @@ func (client RestClient) RemoveParticipationKeyByID(participationID string) (err return } + +/* Endpoint registered for follower nodes */ + +// SetSyncRound sets the sync round for the catchup service +func (client RestClient) SetSyncRound(round uint64) (err error) { + err = client.post(nil, fmt.Sprintf("/v2/ledger/sync/%d", round), nil, true) + return +} + +// UnsetSyncRound deletes the sync round constraint +func (client RestClient) UnsetSyncRound() (err error) { + err = client.delete(nil, "/v2/ledger/sync", nil, true) + return +} + +// GetSyncRound retrieves the sync round (if set) +func (client RestClient) GetSyncRound() (response model.GetSyncRoundResponse, err error) { + err = client.get(&response, "/v2/ledger/sync", nil) + return +} + +// GetLedgerStateDelta retrieves the ledger state delta for the round +func (client RestClient) GetLedgerStateDelta(round uint64) (response model.LedgerStateDeltaResponse, err error) { + err = client.get(&response, fmt.Sprintf("/v2/deltas/%d", round), nil) + return +} diff --git a/daemon/algod/api/server/lib/common.go b/daemon/algod/api/server/lib/common.go index 2599774a8b..a04d85a951 100644 --- a/daemon/algod/api/server/lib/common.go +++ b/daemon/algod/api/server/lib/common.go @@ -21,13 +21,19 @@ import ( "github.com/labstack/echo/v4" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/logging" - "github.com/algorand/go-algorand/node" ) // GenesisJSONText is initialized when the node starts. 
var GenesisJSONText string +// NodeInterface defines the node's methods required by the common APIs +type NodeInterface interface { + GenesisHash() crypto.Digest + GenesisID() string +} + // HandlerFunc defines a wrapper for http.HandlerFunc that includes a context type HandlerFunc func(ReqContext, echo.Context) @@ -45,7 +51,7 @@ type Routes []Route // ReqContext is passed to each of the handlers below via wrapCtx, allowing // handlers to interact with the node type ReqContext struct { - Node *node.AlgorandFullNode + Node NodeInterface Log logging.Logger Context echo.Context Shutdown <-chan struct{} diff --git a/daemon/algod/api/server/router.go b/daemon/algod/api/server/router.go index ab79ee3051..632e8cbb9f 100644 --- a/daemon/algod/api/server/router.go +++ b/daemon/algod/api/server/router.go @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server/lib/middlewares" "github.com/algorand/go-algorand/daemon/algod/api/server/v1/routes" v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/data" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/experimental" npprivate "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/nonparticipating/private" nppublic "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/nonparticipating/public" @@ -40,6 +41,12 @@ import ( "github.com/algorand/go-algorand/util/tokens" ) +// APINodeInterface describes all the node methods required by common and v2 APIs, and the server/router +type APINodeInterface interface { + lib.NodeInterface + v2.NodeInterface +} + const ( apiV1Tag = "/v1" // TokenHeader is the header where we put the token. @@ -63,7 +70,7 @@ func registerHandlers(router *echo.Echo, prefix string, routes lib.Routes, ctx l } // NewRouter builds and returns a new router with our REST handlers registered. -func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-chan struct{}, apiToken string, adminAPIToken string, listener net.Listener, numConnectionsLimit uint64) *echo.Echo { +func NewRouter(logger logging.Logger, node APINodeInterface, shutdown <-chan struct{}, apiToken string, adminAPIToken string, listener net.Listener, numConnectionsLimit uint64) *echo.Echo { if err := tokens.ValidateAPIToken(apiToken); err != nil { logger.Errorf("Invalid apiToken was passed to NewRouter ('%s'): %v", apiToken, err) } @@ -104,7 +111,7 @@ func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-ch // Registering v2 routes v2Handler := v2.Handlers{ - Node: apiNode{node}, + Node: node, Log: logger, Shutdown: shutdown, } @@ -113,6 +120,10 @@ func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-ch ppublic.RegisterHandlers(e, &v2Handler, apiAuthenticator) pprivate.RegisterHandlers(e, &v2Handler, adminAuthenticator) + if node.Config().EnableFollowMode { + data.RegisterHandlers(e, &v2Handler, apiAuthenticator) + } + if node.Config().EnableExperimentalAPI { experimental.RegisterHandlers(e, &v2Handler, apiAuthenticator) } @@ -120,7 +131,14 @@ func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-ch return e } -// apiNode wraps the AlgorandFullNode to provide v2.NodeInterface. -type apiNode struct{ *node.AlgorandFullNode } +// FollowerNode wraps the AlgorandFollowerNode to provide v2.NodeInterface. 
+type FollowerNode struct{ *node.AlgorandFollowerNode } + +// LedgerForAPI implements the v2.Handlers interface +func (n FollowerNode) LedgerForAPI() v2.LedgerForAPI { return n.Ledger() } + +// APINode wraps the AlgorandFullNode to provide v2.NodeInterface. +type APINode struct{ *node.AlgorandFullNode } -func (n apiNode) LedgerForAPI() v2.LedgerForAPI { return n.Ledger() } +// LedgerForAPI implements the v2.Handlers interface +func (n APINode) LedgerForAPI() v2.LedgerForAPI { return n.Ledger() } diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 264d4f1445..206d036fcc 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -1292,6 +1292,8 @@ func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string) error { code = http.StatusOK case *node.CatchpointUnableToStartError: return badRequest(ctx, err, err.Error(), v2.Log) + case *node.CatchpointSyncRoundFailure: + return badRequest(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log) default: return internalError(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log) } diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index dedd902c94..fd233e4c38 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -706,11 +706,7 @@ func startCatchupTest(t *testing.T, catchpoint string, nodeError error, expected defer releasefunc() dummyShutdownChan := make(chan struct{}) mockNode := makeMockNode(mockLedger, t.Name(), nodeError, false) - handler := v2.Handlers{ - Node: mockNode, - Log: logging.Base(), - Shutdown: dummyShutdownChan, - } + handler := v2.Handlers{Node: mockNode, Log: logging.Base(), Shutdown: dummyShutdownChan} e := echo.New() req := httptest.NewRequest(http.MethodPost, "/", nil) rec := httptest.NewRecorder() @@ -737,6 +733,10 @@ func TestStartCatchup(t *testing.T) { badCatchPoint := "bad catchpoint" startCatchupTest(t, badCatchPoint, nil, 400) + + // Test that a catchup fails w/ 400 when the catchpoint round is > syncRound (while syncRound is set) + syncRoundError := node.MakeCatchpointSyncRoundFailure(goodCatchPoint, 1) + startCatchupTest(t, goodCatchPoint, syncRoundError, 400) } func abortCatchupTest(t *testing.T, catchpoint string, expectedCode int) { diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 0c65b2efab..54b4acd103 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -49,6 +49,14 @@ import ( var server http.Server +// ServerNode is the required methods for any node the server fronts +type ServerNode interface { + apiServer.APINodeInterface + ListeningAddress() (string, bool) + Start() + Stop() +} + // Server represents an instance of the REST API HTTP server type Server struct { RootPath string @@ -57,7 +65,7 @@ type Server struct { netFile string netListenFile string log logging.Logger - node *node.AlgorandFullNode + node ServerNode metricCollector *metrics.MetricService metricServiceStarted bool stopping chan struct{} @@ -171,14 +179,23 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes NodeExporterPath: cfg.NodeExporterPath, }) - s.node, err = node.MakeFull(s.log, s.RootPath, cfg, phonebookAddresses, s.Genesis) + var serverNode ServerNode + if cfg.EnableFollowMode { + var followerNode *node.AlgorandFollowerNode + followerNode, err = node.MakeFollower(s.log, s.RootPath, cfg, phonebookAddresses, s.Genesis) + serverNode = 
apiServer.FollowerNode{AlgorandFollowerNode: followerNode} + } else { + var fullNode *node.AlgorandFullNode + fullNode, err = node.MakeFull(s.log, s.RootPath, cfg, phonebookAddresses, s.Genesis) + serverNode = apiServer.APINode{AlgorandFullNode: fullNode} + } if os.IsNotExist(err) { return fmt.Errorf("node has not been installed: %s", err) } if err != nil { return fmt.Errorf("couldn't initialize the node: %s", err) } - + s.node = serverNode return nil } diff --git a/installer/config.json.example b/installer/config.json.example index 137578e0fa..52b86764ed 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -40,6 +40,7 @@ "EnableCatchupFromArchiveServers": false, "EnableDeveloperAPI": false, "EnableExperimentalAPI": false, + "EnableFollowMode": false, "EnableGossipBlockService": true, "EnableIncomingMessageFilter": false, "EnableLedgerService": false, diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index 4a27b70587..4756aa155f 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -1287,3 +1287,30 @@ func (c *Client) LightBlockHeaderProof(round uint64) (resp model.LightBlockHeade } return } + +// SetSyncRound sets the sync round on a node w/ EnableFollowMode +func (c *Client) SetSyncRound(round uint64) (err error) { + algod, err := c.ensureAlgodClient() + if err == nil { + return algod.SetSyncRound(round) + } + return +} + +// GetSyncRound gets the sync round on a node w/ EnableFollowMode +func (c *Client) GetSyncRound() (rep model.GetSyncRoundResponse, err error) { + algod, err := c.ensureAlgodClient() + if err == nil { + return algod.GetSyncRound() + } + return +} + +// GetLedgerStateDelta gets the LedgerStateDelta on a node w/ EnableFollowMode +func (c *Client) GetLedgerStateDelta(round uint64) (rep model.LedgerStateDeltaResponse, err error) { + algod, err := c.ensureAlgodClient() + if err == nil { + return algod.GetLedgerStateDelta(round) + } + return +} diff --git a/node/error.go b/node/error.go index d177f0c870..dbe036b605 100644 --- a/node/error.go +++ b/node/error.go @@ -62,3 +62,24 @@ func (e *CatchpointUnableToStartError) Error() string { e.catchpointRequested, e.catchpointRunning) } + +// CatchpointSyncRoundFailure indicates that the requested catchpoint is beyond the currently set sync round +type CatchpointSyncRoundFailure struct { + catchpoint string + syncRound uint64 +} + +// MakeCatchpointSyncRoundFailure creates the error type +func MakeCatchpointSyncRoundFailure(catchpoint string, syncRound uint64) *CatchpointSyncRoundFailure { + return &CatchpointSyncRoundFailure{ + catchpoint: catchpoint, + syncRound: syncRound, + } +} + +// Error satisfies the builtin `error` interface +func (e *CatchpointSyncRoundFailure) Error() string { + return fmt.Sprintf( + "unable to start catchpoint catchup for '%s' - resulting round is beyond current sync round '%v'", + e.catchpoint, e.syncRound) +} diff --git a/node/follower_node.go b/node/follower_node.go new file mode 100644 index 0000000000..591a6cda55 --- /dev/null +++ b/node/follower_node.go @@ -0,0 +1,441 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// Package node is the Algorand node itself, with functions exposed to the frontend +package node + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/algorand/go-deadlock" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/catchup" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger" + "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/rpcs" + "github.com/algorand/go-algorand/util/execpool" +) + +// AlgorandFollowerNode implements follower mode/ledger delta APIs and disables participation-related methods +type AlgorandFollowerNode struct { + mu deadlock.Mutex + ctx context.Context + cancelCtx context.CancelFunc + config config.Local + + ledger *data.Ledger + net network.GossipNode + + catchupService *catchup.Service + catchpointCatchupService *catchup.CatchpointCatchupService + blockService *rpcs.BlockService + + rootDir string + genesisID string + genesisHash crypto.Digest + devMode bool // is this node operates in a developer mode ? 
( benign agreement, broadcasting transaction generates a new block ) + + log logging.Logger + + // syncStatusMu used for locking lastRoundTimestamp and hasSyncedSinceStartup + // syncStatusMu added so OnNewBlock wouldn't be blocked by oldKeyDeletionThread during catchup + syncStatusMu deadlock.Mutex + lastRoundTimestamp time.Time + hasSyncedSinceStartup bool + + cryptoPool execpool.ExecutionPool + lowPriorityCryptoVerificationPool execpool.BacklogPool + catchupBlockAuth blockAuthenticatorImpl +} + +// MakeFollower sets up an Algorand data node +func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phonebookAddresses []string, genesis bookkeeping.Genesis) (*AlgorandFollowerNode, error) { + node := new(AlgorandFollowerNode) + node.rootDir = rootDir + node.log = log.With("name", cfg.NetAddress) + node.genesisID = genesis.ID() + node.genesisHash = genesis.Hash() + node.devMode = genesis.DevMode + + if node.devMode { + log.Errorf("Cannot run follower node in devMode--submitting txns won't work") + return nil, fmt.Errorf("cannot run with both EnableFollowMode and DevMode") + } + node.config = cfg + + // tie network, block fetcher, and agreement services together + p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, nil) + if err != nil { + log.Errorf("could not create websocket node: %v", err) + return nil, err + } + p2pNode.DeregisterMessageInterest(protocol.AgreementVoteTag) + p2pNode.DeregisterMessageInterest(protocol.ProposalPayloadTag) + p2pNode.DeregisterMessageInterest(protocol.VoteBundleTag) + node.net = p2pNode + + // load stored data + genesisDir := filepath.Join(rootDir, genesis.ID()) + ledgerPathnamePrefix := filepath.Join(genesisDir, config.LedgerFilenamePrefix) + + // create initial ledger, if it doesn't exist + err = os.Mkdir(genesisDir, 0700) + if err != nil && !os.IsExist(err) { + log.Errorf("Unable to create genesis directory: %v", err) + return nil, err + } + genalloc, err := genesis.Balances() + if err != nil { + log.Errorf("Cannot load genesis allocation: %v", err) + return nil, err + } + + node.cryptoPool = execpool.MakePool(node) + node.lowPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.LowPriority, node) + node.ledger, err = data.LoadLedger(node.log, ledgerPathnamePrefix, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) + if err != nil { + log.Errorf("Cannot initialize ledger (%s): %v", ledgerPathnamePrefix, err) + return nil, err + } + + blockListeners := []ledgercore.BlockListener{ + node, + } + + node.ledger.RegisterBlockListeners(blockListeners) + node.blockService = rpcs.MakeBlockService(node.log, cfg, node.ledger, p2pNode, node.genesisID) + node.catchupBlockAuth = blockAuthenticatorImpl{Ledger: node.ledger, AsyncVoteVerifier: agreement.MakeAsyncVoteVerifier(node.lowPriorityCryptoVerificationPool)} + node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, make(chan catchup.PendingUnmatchedCertificate), node.lowPriorityCryptoVerificationPool) + + // Initialize sync round to the next round so that nothing falls out of the cache on Start + err = node.SetSyncRound(uint64(node.Ledger().NextRound())) + if err != nil { + log.Errorf("unable to set sync round to Ledger.NextRound %v", err) + return nil, err + } + + catchpointCatchupState, err := node.ledger.GetCatchpointCatchupState(context.Background()) + if err != nil { + 
log.Errorf("unable to determine catchpoint catchup state: %v", err) + return nil, err + } + if catchpointCatchupState != ledger.CatchpointCatchupStateInactive { + accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log) + node.catchpointCatchupService, err = catchup.MakeResumedCatchpointCatchupService(context.Background(), node, node.log, node.net, accessor, node.config) + if err != nil { + log.Errorf("unable to create catchpoint catchup service: %v", err) + return nil, err + } + node.log.Infof("resuming catchpoint catchup from state %d", catchpointCatchupState) + } + + return node, err +} + +// Config returns a copy of the node's Local configuration +func (node *AlgorandFollowerNode) Config() config.Local { + return node.config +} + +// Start the node: connect to peers while obtaining a lock. Doesn't wait for initial sync. +func (node *AlgorandFollowerNode) Start() { + node.mu.Lock() + defer node.mu.Unlock() + + // Set up a context we can use to cancel goroutines on Stop() + node.ctx, node.cancelCtx = context.WithCancel(context.Background()) + + // The start network is being called only after the various services start up. + // We want to do so in order to let the services register their callbacks with the + // network package before any connections are being made. + startNetwork := func() { + if !node.config.DisableNetworking { + // start accepting connections + node.net.Start() + node.config.NetAddress, _ = node.net.Address() + } + } + + if node.catchpointCatchupService != nil { + startNetwork() + node.catchpointCatchupService.Start(node.ctx) + } else { + node.catchupService.Start() + node.blockService.Start() + startNetwork() + } +} + +// ListeningAddress retrieves the node's current listening address, if any. +// Returns true if currently listening, false otherwise. +func (node *AlgorandFollowerNode) ListeningAddress() (string, bool) { + node.mu.Lock() + defer node.mu.Unlock() + return node.net.Address() +} + +// Stop stops running the node. Once a node is closed, it can never start again. +func (node *AlgorandFollowerNode) Stop() { + node.mu.Lock() + defer node.mu.Unlock() + + node.net.ClearHandlers() + if !node.config.DisableNetworking { + node.net.Stop() + } + if node.catchpointCatchupService != nil { + node.catchpointCatchupService.Stop() + } else { + node.catchupService.Stop() + node.blockService.Stop() + } + node.catchupBlockAuth.Quit() + node.lowPriorityCryptoVerificationPool.Shutdown() + node.cryptoPool.Shutdown() + node.cancelCtx() +} + +// Ledger exposes the node's ledger handle to the algod API code +func (node *AlgorandFollowerNode) Ledger() *data.Ledger { + return node.ledger +} + +// BroadcastSignedTxGroup errors in follower mode +func (node *AlgorandFollowerNode) BroadcastSignedTxGroup(_ []transactions.SignedTxn) (err error) { + return fmt.Errorf("cannot broadcast txns in sync mode") +} + +// BroadcastInternalSignedTxGroup errors in follower mode +func (node *AlgorandFollowerNode) BroadcastInternalSignedTxGroup(_ []transactions.SignedTxn) (err error) { + return fmt.Errorf("cannot broadcast internal signed txn group in sync mode") +} + +// Simulate speculatively runs a transaction group against the current +// blockchain state and returns the effects and/or errors that would result. 
+func (node *AlgorandFollowerNode) Simulate(_ []transactions.SignedTxn) (vb *ledgercore.ValidatedBlock, missingSignatures bool, err error) { + err = fmt.Errorf("cannot simulate in data mode") + return +} + +// GetPendingTransaction no-ops in follower mode +func (node *AlgorandFollowerNode) GetPendingTransaction(_ transactions.Txid) (res TxnWithStatus, found bool) { + return +} + +// Status returns a StatusReport structure reporting our status as Active and with our ledger's LastRound +func (node *AlgorandFollowerNode) Status() (s StatusReport, err error) { + node.syncStatusMu.Lock() + s.LastRoundTimestamp = node.lastRoundTimestamp + s.HasSyncedSinceStartup = node.hasSyncedSinceStartup + node.syncStatusMu.Unlock() + + node.mu.Lock() + defer node.mu.Unlock() + if node.catchpointCatchupService != nil { + return catchpointCatchupStatus(node.catchpointCatchupService.GetLatestBlockHeader(), node.catchpointCatchupService.GetStatistics()), nil + } + return latestBlockStatus(node.ledger, node.catchupService) +} + +// GenesisID returns the ID of the genesis node. +func (node *AlgorandFollowerNode) GenesisID() string { + return node.genesisID +} + +// GenesisHash returns the hash of the genesis configuration. +func (node *AlgorandFollowerNode) GenesisHash() crypto.Digest { + return node.genesisHash +} + +// SuggestedFee no-ops in follower mode +func (node *AlgorandFollowerNode) SuggestedFee() basics.MicroAlgos { + return basics.MicroAlgos{} +} + +// GetPendingTxnsFromPool returns an empty array in follower mode. +func (node *AlgorandFollowerNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) { + return []transactions.SignedTxn{}, nil +} + +// ListParticipationKeys returns an empty list in follower mode +func (node *AlgorandFollowerNode) ListParticipationKeys() (partKeys []account.ParticipationRecord, err error) { + return []account.ParticipationRecord{}, nil +} + +// GetParticipationKey returns an error in follower mode +func (node *AlgorandFollowerNode) GetParticipationKey(_ account.ParticipationID) (account.ParticipationRecord, error) { + return account.ParticipationRecord{}, fmt.Errorf("cannot get participation key in follower mode") +} + +// RemoveParticipationKey returns an error in follower mode +func (node *AlgorandFollowerNode) RemoveParticipationKey(_ account.ParticipationID) error { + return fmt.Errorf("cannot remove participation key in follower mode") +} + +// AppendParticipationKeys returns an error in follower mode +func (node *AlgorandFollowerNode) AppendParticipationKeys(_ account.ParticipationID, _ account.StateProofKeys) error { + return fmt.Errorf("cannot append participation keys in follower mode") +} + +// InstallParticipationKey returns an error in follower mode +func (node *AlgorandFollowerNode) InstallParticipationKey(_ []byte) (account.ParticipationID, error) { + return account.ParticipationID{}, fmt.Errorf("cannot install participation key in follower mode") +} + +// OnNewBlock implements the BlockListener interface so we're notified after each block is written to the ledger +func (node *AlgorandFollowerNode) OnNewBlock(block bookkeeping.Block, _ ledgercore.StateDelta) { + if node.ledger.Latest() > block.Round() { + return + } + node.syncStatusMu.Lock() + node.lastRoundTimestamp = time.Now() + node.hasSyncedSinceStartup = true + node.syncStatusMu.Unlock() +} + +// StartCatchup starts the catchpoint mode and attempt to get to the provided catchpoint +// this function is intended to be called externally via the REST api interface. 
+func (node *AlgorandFollowerNode) StartCatchup(catchpoint string) error { + node.mu.Lock() + defer node.mu.Unlock() + if node.catchpointCatchupService != nil { + stats := node.catchpointCatchupService.GetStatistics() + // No need to return an error + if catchpoint == stats.CatchpointLabel { + return MakeCatchpointAlreadyInProgressError(catchpoint) + } + return MakeCatchpointUnableToStartError(stats.CatchpointLabel, catchpoint) + } + cpRound, _, err := ledgercore.ParseCatchpointLabel(catchpoint) + if err != nil { + return err + } + sRound := node.GetSyncRound() + if sRound > 0 && uint64(cpRound) > sRound { + return MakeCatchpointSyncRoundFailure(catchpoint, sRound) + } + accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log) + node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, accessor, node.config) + if err != nil { + node.log.Warnf("unable to create catchpoint catchup service : %v", err) + return err + } + node.catchpointCatchupService.Start(node.ctx) + node.log.Infof("starting catching up toward catchpoint %s", catchpoint) + return nil +} + +// AbortCatchup aborts the given catchpoint +// this function is intended to be called externally via the REST api interface. +func (node *AlgorandFollowerNode) AbortCatchup(catchpoint string) error { + node.mu.Lock() + defer node.mu.Unlock() + if node.catchpointCatchupService == nil { + return nil + } + stats := node.catchpointCatchupService.GetStatistics() + if stats.CatchpointLabel != catchpoint { + return fmt.Errorf("unable to abort catchpoint catchup for '%s' - already catching up '%s'", catchpoint, stats.CatchpointLabel) + } + node.catchpointCatchupService.Abort() + return nil +} + +// SetCatchpointCatchupMode change the node's operational mode from catchpoint catchup mode and back, it returns a +// channel which contains the updated node context. This function need to work asynchronously so that the caller could +// detect and handle the use case where the node is being shut down while we're switching to/from catchup mode without +// deadlocking on the shared node mutex. +func (node *AlgorandFollowerNode) SetCatchpointCatchupMode(catchpointCatchupMode bool) (outCtxCh <-chan context.Context) { + // create a non-buffered channel to return the newly created context. The fact that it's non-buffered here + // is important, as it allows us to synchronize the "receiving" of the new context before canceling of the previous + // one. + ctxCh := make(chan context.Context) + outCtxCh = ctxCh + go func() { + node.mu.Lock() + // check that the node wasn't canceled. If it have been canceled, it means that the node.Stop() was called, in which case + // we should close the channel. + if node.ctx.Err() == context.Canceled { + close(ctxCh) + node.mu.Unlock() + return + } + if catchpointCatchupMode { + // stop.. + defer func() { + node.mu.Unlock() + }() + node.net.ClearHandlers() + node.catchupService.Stop() + node.blockService.Stop() + + prevNodeCancelFunc := node.cancelCtx + + // Set up a context we can use to cancel goroutines on Stop() + node.ctx, node.cancelCtx = context.WithCancel(context.Background()) + ctxCh <- node.ctx + + prevNodeCancelFunc() + return + } + defer node.mu.Unlock() + // start + node.catchupService.Start() + node.blockService.Start() + + // Set up a context we can use to cancel goroutines on Stop() + node.ctx, node.cancelCtx = context.WithCancel(context.Background()) + + // at this point, the catchpoint catchup is done ( either successfully or not.. 
) + node.catchpointCatchupService = nil + + ctxCh <- node.ctx + }() + return +} + +// SetSyncRound sets the minimum sync round on the catchup service +func (node *AlgorandFollowerNode) SetSyncRound(rnd uint64) error { + // Calculate the first round for which we want to disable catchup from the network. + // This is based on the size of the cache used in the ledger. + disableSyncRound := rnd + node.Config().MaxAcctLookback + return node.catchupService.SetDisableSyncRound(disableSyncRound) +} + +// GetSyncRound retrieves the sync round, removes cache offset used during SetSyncRound +func (node *AlgorandFollowerNode) GetSyncRound() uint64 { + return basics.SubSaturate(node.catchupService.GetDisableSyncRound(), node.Config().MaxAcctLookback) +} + +// UnsetSyncRound removes the sync round constraint on the catchup service +func (node *AlgorandFollowerNode) UnsetSyncRound() { + node.catchupService.UnsetDisableSyncRound() +} diff --git a/node/follower_node_test.go b/node/follower_node_test.go new file mode 100644 index 0000000000..7be1c620e6 --- /dev/null +++ b/node/follower_node_test.go @@ -0,0 +1,97 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package node + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func setupFollowNode(t *testing.T) *AlgorandFollowerNode { + cfg := config.GetDefaultLocal() + cfg.EnableFollowMode = true + genesis := bookkeeping.Genesis{ + SchemaID: "go-test-follower-node-genesis", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + Allocation: []bookkeeping.GenesisAllocation{ + { + Address: poolAddr.String(), + State: basics.AccountData{ + MicroAlgos: basics.MicroAlgos{Raw: 1000000000}, + }, + }, + }, + } + node, err := MakeFollower(logging.Base(), t.TempDir(), cfg, []string{}, genesis) + require.NoError(t, err) + return node +} + +func TestSyncRound(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + node := setupFollowNode(t) + b := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: 1, + }, + } + b.CurrentProtocol = protocol.ConsensusCurrentVersion + err := node.Ledger().AddBlock(b, agreement.Certificate{}) + require.NoError(t, err) + latestRound := uint64(node.Ledger().Latest()) + // Sync Round should be initialized to the ledger's latest round + require.Equal(t, latestRound, node.GetSyncRound()) + // Set a new sync round + require.NoError(t, node.SetSyncRound(latestRound+10)) + // Ensure it is persisted + require.Equal(t, latestRound+10, node.GetSyncRound()) + // Unset the sync round and make sure get returns 0 + node.UnsetSyncRound() + require.Equal(t, uint64(0), node.GetSyncRound()) +} + +func TestErrors(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + // Validates that expected functions are disabled + node := setupFollowNode(t) + require.Error(t, node.BroadcastSignedTxGroup([]transactions.SignedTxn{})) + require.Error(t, node.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{})) + _, _, err := node.Simulate([]transactions.SignedTxn{}) + require.Error(t, err) + _, err = node.GetParticipationKey(account.ParticipationID{}) + require.Error(t, err) + require.Error(t, node.RemoveParticipationKey(account.ParticipationID{})) + require.Error(t, node.AppendParticipationKeys(account.ParticipationID{}, account.StateProofKeys{})) + _, err = node.InstallParticipationKey([]byte{}) + require.Error(t, err) +} diff --git a/node/node.go b/node/node.go index 94bb876d88..f648779697 100644 --- a/node/node.go +++ b/node/node.go @@ -687,52 +687,57 @@ func (node *AlgorandFullNode) Status() (s StatusReport, err error) { node.mu.Lock() defer node.mu.Unlock() if node.catchpointCatchupService != nil { - // we're in catchpoint catchup mode. - lastBlockHeader := node.catchpointCatchupService.GetLatestBlockHeader() - s.LastRound = lastBlockHeader.Round - s.LastVersion = lastBlockHeader.CurrentProtocol - s.NextVersion, s.NextVersionRound, s.NextVersionSupported = lastBlockHeader.NextVersionInfo() - s.StoppedAtUnsupportedRound = s.LastRound+1 == s.NextVersionRound && !s.NextVersionSupported - - // for now, I'm leaving this commented out. Once we refactor some of the ledger locking mechanisms, we - // should be able to make this call work. 
- //s.LastCatchpoint = node.ledger.GetLastCatchpointLabel() - - // report back the catchpoint catchup progress statistics - stats := node.catchpointCatchupService.GetStatistics() - s.Catchpoint = stats.CatchpointLabel - s.CatchpointCatchupTotalAccounts = stats.TotalAccounts - s.CatchpointCatchupProcessedAccounts = stats.ProcessedAccounts - s.CatchpointCatchupVerifiedAccounts = stats.VerifiedAccounts - s.CatchpointCatchupTotalKVs = stats.TotalKVs - s.CatchpointCatchupProcessedKVs = stats.ProcessedKVs - s.CatchpointCatchupVerifiedKVs = stats.VerifiedKVs - s.CatchpointCatchupTotalBlocks = stats.TotalBlocks - s.CatchpointCatchupAcquiredBlocks = stats.AcquiredBlocks - s.CatchupTime = time.Now().Sub(stats.StartTime) - } else { - // we're not in catchpoint catchup mode - var b bookkeeping.BlockHeader - s.LastRound = node.ledger.Latest() - b, err = node.ledger.BlockHdr(s.LastRound) - if err != nil { - return - } - s.LastVersion = b.CurrentProtocol - s.NextVersion, s.NextVersionRound, s.NextVersionSupported = b.NextVersionInfo() - - s.StoppedAtUnsupportedRound = s.LastRound+1 == s.NextVersionRound && !s.NextVersionSupported - s.LastCatchpoint = node.ledger.GetLastCatchpointLabel() - s.SynchronizingTime = node.catchupService.SynchronizingTime() - s.CatchupTime = node.catchupService.SynchronizingTime() + return catchpointCatchupStatus(node.catchpointCatchupService.GetLatestBlockHeader(), node.catchpointCatchupService.GetStatistics()), nil + } + return latestBlockStatus(node.ledger, node.catchupService) +} - s.UpgradePropose = b.UpgradeVote.UpgradePropose - s.UpgradeApprove = b.UpgradeApprove - s.UpgradeDelay = uint64(b.UpgradeVote.UpgradeDelay) - s.NextProtocolVoteBefore = b.NextProtocolVoteBefore - s.NextProtocolApprovals = b.UpgradeState.NextProtocolApprovals +func catchpointCatchupStatus(lastBlockHeader bookkeeping.BlockHeader, stats catchup.CatchpointCatchupStats) (s StatusReport) { + // we're in catchpoint catchup mode. + s.LastRound = lastBlockHeader.Round + s.LastVersion = lastBlockHeader.CurrentProtocol + s.NextVersion, s.NextVersionRound, s.NextVersionSupported = lastBlockHeader.NextVersionInfo() + s.StoppedAtUnsupportedRound = s.LastRound+1 == s.NextVersionRound && !s.NextVersionSupported + + // for now, I'm leaving this commented out. Once we refactor some of the ledger locking mechanisms, we + // should be able to make this call work. 
+ //s.LastCatchpoint = node.ledger.GetLastCatchpointLabel() + + // report back the catchpoint catchup progress statistics + s.Catchpoint = stats.CatchpointLabel + s.CatchpointCatchupTotalAccounts = stats.TotalAccounts + s.CatchpointCatchupProcessedAccounts = stats.ProcessedAccounts + s.CatchpointCatchupVerifiedAccounts = stats.VerifiedAccounts + s.CatchpointCatchupTotalKVs = stats.TotalKVs + s.CatchpointCatchupProcessedKVs = stats.ProcessedKVs + s.CatchpointCatchupVerifiedKVs = stats.VerifiedKVs + s.CatchpointCatchupTotalBlocks = stats.TotalBlocks + s.CatchpointCatchupAcquiredBlocks = stats.AcquiredBlocks + s.CatchupTime = time.Since(stats.StartTime) + return +} +func latestBlockStatus(ledger *data.Ledger, catchupService *catchup.Service) (s StatusReport, err error) { + // we're not in catchpoint catchup mode + var b bookkeeping.BlockHeader + s.LastRound = ledger.Latest() + b, err = ledger.BlockHdr(s.LastRound) + if err != nil { + return } + s.LastVersion = b.CurrentProtocol + s.NextVersion, s.NextVersionRound, s.NextVersionSupported = b.NextVersionInfo() + + s.StoppedAtUnsupportedRound = s.LastRound+1 == s.NextVersionRound && !s.NextVersionSupported + s.LastCatchpoint = ledger.GetLastCatchpointLabel() + s.SynchronizingTime = catchupService.SynchronizingTime() + s.CatchupTime = catchupService.SynchronizingTime() + + s.UpgradePropose = b.UpgradeVote.UpgradePropose + s.UpgradeApprove = b.UpgradeApprove + s.UpgradeDelay = uint64(b.UpgradeVote.UpgradeDelay) + s.NextProtocolVoteBefore = b.NextProtocolVoteBefore + s.NextProtocolApprovals = b.UpgradeState.NextProtocolApprovals return } @@ -1402,20 +1407,16 @@ func (node *AlgorandFullNode) IsParticipating() bool { return node.accountManager.HasLiveKeys(round, round+10) } -// SetSyncRound sets the minimum sync round on the catchup service -func (node *AlgorandFullNode) SetSyncRound(rnd uint64) error { - // Calculate the first round for which we want to disable catchup from the network. - // This is based on the size of the cache used in the ledger. - disableSyncRound := rnd + node.Config().MaxAcctLookback - return node.catchupService.SetDisableSyncRound(disableSyncRound) +// SetSyncRound no-ops +func (node *AlgorandFullNode) SetSyncRound(_ uint64) error { + return nil } -// GetSyncRound retrieves the sync round, removes cache offset used during SetSyncRound +// GetSyncRound returns 0 (not set) in the base node implementation func (node *AlgorandFullNode) GetSyncRound() uint64 { - return node.catchupService.GetDisableSyncRound() - node.Config().MaxAcctLookback + return 0 } -// UnsetSyncRound removes the sync round constraint on the catchup service +// UnsetSyncRound no-ops func (node *AlgorandFullNode) UnsetSyncRound() { - node.catchupService.UnsetDisableSyncRound() } diff --git a/test/e2e-go/features/followerNode/syncDeltas_test.go b/test/e2e-go/features/followerNode/syncDeltas_test.go new file mode 100644 index 0000000000..df83e6d970 --- /dev/null +++ b/test/e2e-go/features/followerNode/syncDeltas_test.go @@ -0,0 +1,104 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package followerNode + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/test/framework/fixtures" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestBasicSyncMode(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + if testing.Short() { + t.Skip() + } + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + // Overview of this test: + // Start a two-node network (primary has 0%, secondary has 100%) + // Let it run for a few blocks. + // Spin up a third node in follower mode and retrieve deltas for some rounds using sync round calls. + + var fixture fixtures.RestClientFixture + // Give the second node (which starts up last) all the stake so that its proposal always has better credentials, + // and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake + // distribution so this is fine. + fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100Second.json")) + defer fixture.Shutdown() + + // Get 2nd node so we wait until we know they're at target block + nc, err := fixture.GetNodeController("Node") + a.NoError(err) + + // Let the network make some progress + a.NoError(err) + waitForRound := uint64(5) + err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + a.NoError(err) + + // Now spin up third node in follower mode + cloneDataDir := filepath.Join(fixture.PrimaryDataDir(), "../clone") + cloneLedger := false + err = fixture.NC.Clone(cloneDataDir, cloneLedger) + a.NoError(err) + // Set config.Local::EnableFollowMode = true + cfg := config.GetDefaultLocal() + cfg.EnableFollowMode = true + cloneCfg := filepath.Join(cloneDataDir, config.ConfigFilename) + err = cfg.SaveToFile(cloneCfg) + a.NoError(err) + // Start the node + cloneClient, err := fixture.StartNode(cloneDataDir) + a.NoError(err) + defer shutdownClonedNode(cloneDataDir, &fixture, t) + // Now, catch up round by round, retrieving state deltas for each + for round := uint64(1); round <= waitForRound; round++ { + // assert sync round set + rResp, err := cloneClient.GetSyncRound() + a.NoError(err) + a.Equal(round, rResp.Round) + // retrieve state delta + gResp, err := cloneClient.GetLedgerStateDelta(round) + a.NoError(err) + a.NotNil(gResp) + // set sync round next + err = cloneClient.SetSyncRound(round + 1) + a.NoError(err) + } + err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + a.NoError(err) +} + +// shutdownClonedNode replicates the behavior of fixture.Shutdown() for network nodes on cloned node +// It deletes the directory if the test passes, otherwise it preserves it +func shutdownClonedNode(nodeDataDir string, f *fixtures.RestClientFixture, t *testing.T) { + nc := f.LibGoalFixture.GetNodeControllerForDataDir(nodeDataDir) + nc.FullStop() + if !t.Failed() { + os.RemoveAll(nodeDataDir) + } +} From ee593c10ba77baeda91ed2014d5927c0ea3c1460 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> 
Date: Wed, 1 Feb 2023 11:20:48 -0500 Subject: [PATCH 17/81] goal: allow ConfigJSONOverride in local network templates (#5017) --- netdeploy/networkTemplate.go | 8 ++++++++ netdeploy/networkTemplates_test.go | 21 +++++++++++++++++++++ netdeploy/remote/nodeConfig.go | 9 +++++---- test/testdata/nettemplates/perf/mkconf.py | 13 ++++++++++++- 4 files changed, 46 insertions(+), 5 deletions(-) diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index f4e2c1aaa3..4d5aa67b95 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -265,5 +265,13 @@ func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes in if node.DeadlockDetection != 0 { cfg.DeadlockDetection = node.DeadlockDetection } + + if node.ConfigJSONOverride != "" { + reader := strings.NewReader(node.ConfigJSONOverride) + dec := json.NewDecoder(reader) + if err := dec.Decode(&cfg); err != nil { + return err + } + } return cfg.SaveToFile(configFile) } diff --git a/netdeploy/networkTemplates_test.go b/netdeploy/networkTemplates_test.go index f8e3ee6857..62bf8be5fe 100644 --- a/netdeploy/networkTemplates_test.go +++ b/netdeploy/networkTemplates_test.go @@ -17,8 +17,10 @@ package netdeploy import ( + "encoding/json" "os" "path/filepath" + "strings" "testing" "github.com/stretchr/testify/require" @@ -95,3 +97,22 @@ func TestValidate(t *testing.T) { err = template.Validate() a.NoError(err) } + +type overlayTestStruct struct { + A string + B string +} + +// TestJsonOverlay ensures that encoding/json will only clobber fields present in the json and leave other fields unchanged +func TestJsonOverlay(t *testing.T) { + partitiontest.PartitionTest(t) + before := overlayTestStruct{A: "one", B: "two"} + setB := "{\"B\":\"other\"}" + dec := json.NewDecoder(strings.NewReader(setB)) + after := before + err := dec.Decode(&after) + a := require.New(t) + a.NoError(err) + a.Equal("one", after.A) + a.Equal("other", after.B) +} diff --git a/netdeploy/remote/nodeConfig.go b/netdeploy/remote/nodeConfig.go index 2a6eee0bfb..c5a9b1a6d9 100644 --- a/netdeploy/remote/nodeConfig.go +++ b/netdeploy/remote/nodeConfig.go @@ -53,8 +53,9 @@ func (nc NodeConfig) IsRelay() bool { // NodeConfigGoal represents is a simplified version of NodeConfig used with 'goal network' commands type NodeConfigGoal struct { - Name string - IsRelay bool `json:",omitempty"` - Wallets []NodeWalletData - DeadlockDetection int `json:"-"` + Name string + IsRelay bool `json:",omitempty"` + Wallets []NodeWalletData + DeadlockDetection int `json:"-"` + ConfigJSONOverride string `json:",omitempty"` // Raw json to merge into config.json after other modifications are complete } diff --git a/test/testdata/nettemplates/perf/mkconf.py b/test/testdata/nettemplates/perf/mkconf.py index 0428aee15a..fd58cb6c3e 100644 --- a/test/testdata/nettemplates/perf/mkconf.py +++ b/test/testdata/nettemplates/perf/mkconf.py @@ -6,6 +6,7 @@ "Genesis": { "NetworkName": "tbd", "ConsensusProtocol": "test-big-blocks", + "LastPartKeyRound": 3000, "Wallets": [], }, "Nodes": [], @@ -33,10 +34,20 @@ d["Nodes"].append(node) +npn_nodes = 0 +for n in range(0, npn_nodes): + node = { + "Name": "NPNode%d" % n, + "Wallets": [], + "DeadlockDetection": -1, + "ConfigJSONOverride": '{"ForceFetchTransactions":true}' + } + d["Nodes"].append(node) + d["Nodes"].append({ "Name": "Relay", "IsRelay": True, "Wallets": [], }) -print json.dumps(d, indent=True) +print(json.dumps(d, indent=True)) From f329ffddfdd43c9f7a0f6edf1595dfdc2174da22 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy 
<65323360+algorandskiy@users.noreply.github.com> Date: Wed, 1 Feb 2023 11:22:44 -0500 Subject: [PATCH 18/81] tests: reenable catchpoint tests (#4419) Co-authored-by: chris erway --- ledger/acctupdates_test.go | 165 +----------------------- ledger/catchpointtracker.go | 59 ++++----- ledger/catchpointtracker_test.go | 209 ++++++++++++++++++++++++++++--- 3 files changed, 220 insertions(+), 213 deletions(-) diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 69e4506517..607e324919 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -300,6 +300,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base latest := au.latest() require.Equal(t, latestRnd, latest) + // the log has "onlineAccounts failed to fetch online totals for rnd" warning that is expected _, err := ao.onlineTotals(latest + 1) require.Error(t, err) @@ -596,82 +597,6 @@ func TestAcctUpdates(t *testing.T) { } } -// TestAcctUpdatesFastUpdates tests catchpoint label writing datarace -func TestAcctUpdatesFastUpdates(t *testing.T) { - partitiontest.PartitionTest(t) - - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes travis builds to time out") - } - proto := config.Consensus[protocol.ConsensusCurrentVersion] - - accts := setupAccts(20) - rewardsLevels := []uint64{0} - - conf := config.GetDefaultLocal() - conf.CatchpointInterval = 1 - initialBlocksCount := int(conf.MaxAcctLookback) - ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion, accts) - defer ml.Close() - - au, ao := newAcctUpdates(t, ml, conf) - defer au.close() - defer ao.close() - - // Remove the txtail from the list of trackers since it causes a data race that - // wouldn't be observed under normal execution because commitedUpTo and newBlock - // are protected by the tracker mutex. 
- ml.trackers.trackers = ml.trackers.trackers[:2] - - // cover 10 genesis blocks - rewardLevel := uint64(0) - for i := 1; i < initialBlocksCount; i++ { - accts = append(accts, accts[0]) - rewardsLevels = append(rewardsLevels, rewardLevel) - } - - checkAcctUpdates(t, au, ao, 0, basics.Round(initialBlocksCount)-1, accts, rewardsLevels, proto) - - wg := sync.WaitGroup{} - - for i := basics.Round(initialBlocksCount); i < basics.Round(proto.CatchpointLookback+15); i++ { - rewardLevelDelta := crypto.RandUint64() % 5 - rewardLevel += rewardLevelDelta - updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel) - prevRound, prevTotals, err := au.LatestTotals() - require.Equal(t, i-1, prevRound) - require.NoError(t, err) - - newPool := totals[testPoolAddr] - newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta - updates.Upsert(testPoolAddr, newPool) - totals[testPoolAddr] = newPool - newAccts := applyPartialDeltas(accts[i-1], updates) - - blk := bookkeeping.Block{ - BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), - }, - } - blk.RewardsLevel = rewardLevel - blk.CurrentProtocol = protocol.ConsensusCurrentVersion - - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) - delta.Accts.MergeAccounts(updates) - ml.trackers.newBlock(blk, delta) - accts = append(accts, newAccts) - rewardsLevels = append(rewardsLevels, rewardLevel) - - wg.Add(1) - go func(round basics.Round) { - defer wg.Done() - ml.trackers.committedUpTo(round) - }(i) - } - ml.trackers.waitAccountsWriting() - wg.Wait() -} - func BenchmarkBalancesChanges(b *testing.B) { if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { b.Skip("This test is too slow on ARM and causes travis builds to time out") @@ -784,94 +709,6 @@ func BenchmarkCalibrateCacheNodeSize(b *testing.B) { store.TrieCachedNodesCount = defaultTrieCachedNodesCount } -// TestLargeAccountCountCatchpointGeneration creates a ledger containing a large set of accounts ( i.e. 100K accounts ) -// and attempts to have the accountUpdates create the associated catchpoint. It's designed precisely around setting an -// environment which would quickly ( i.e. after 32 rounds ) would start producing catchpoints. -func TestLargeAccountCountCatchpointGeneration(t *testing.T) { - partitiontest.PartitionTest(t) - - t.Skip("TODO: move to catchpointtracker_test and add catchpoint tracker into trackers list") - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes travis builds to time out") - } - - // The next operations are heavy on the memory. 
- // Garbage collection helps prevent trashing - runtime.GC() - - // create new protocol version, which has lower lookback - testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestLargeAccountCountCatchpointGeneration") - protoParams := config.Consensus[protocol.ConsensusCurrentVersion] - // TODO: fix MaxBalLookback after updating catchpoint round - protoParams.MaxBalLookback = 32 - protoParams.SeedLookback = 2 - protoParams.SeedRefreshInterval = 8 - config.Consensus[testProtocolVersion] = protoParams - defer func() { - delete(config.Consensus, testProtocolVersion) - os.RemoveAll(store.CatchpointDirName) - }() - - accts := setupAccts(100000) - rewardsLevels := []uint64{0} - conf := config.GetDefaultLocal() - conf.CatchpointInterval = 1 - conf.Archival = true - initialBlocksCount := int(conf.MaxAcctLookback) - ml := makeMockLedgerForTracker(t, true, initialBlocksCount, testProtocolVersion, accts) - defer ml.Close() - - au, _ := newAcctUpdates(t, ml, conf) - defer au.close() - - // cover 10 genesis blocks - rewardLevel := uint64(0) - for i := 1; i < initialBlocksCount; i++ { - accts = append(accts, accts[0]) - rewardsLevels = append(rewardsLevels, rewardLevel) - } - - start := basics.Round(initialBlocksCount) - end := basics.Round(protoParams.MaxBalLookback + 5) - for i := start; i < end; i++ { - rewardLevelDelta := crypto.RandUint64() % 5 - rewardLevel += rewardLevelDelta - updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel) - - prevRound, prevTotals, err := au.LatestTotals() - require.Equal(t, i-1, prevRound) - require.NoError(t, err) - - newPool := totals[testPoolAddr] - newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta - updates.Upsert(testPoolAddr, newPool) - totals[testPoolAddr] = newPool - newAccts := applyPartialDeltas(accts[i-1], updates) - - blk := bookkeeping.Block{ - BlockHeader: bookkeeping.BlockHeader{ - Round: basics.Round(i), - }, - } - blk.RewardsLevel = rewardLevel - blk.CurrentProtocol = testProtocolVersion - - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) - delta.Accts.MergeAccounts(updates) - ml.trackers.newBlock(blk, delta) - accts = append(accts, newAccts) - rewardsLevels = append(rewardsLevels, rewardLevel) - - ml.trackers.committedUpTo(i) - if i%2 == 1 || i == end-1 { - ml.trackers.waitAccountsWriting() - } - } - - // Garbage collection helps prevent trashing for next tests - runtime.GC() -} - // The TestAcctUpdatesUpdatesCorrectness conduct a correctless test for the accounts update in the following way - // Each account is initialized with 100 algos. // On every round, each account move variable amount of funds to an accumulating account. 
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index ef205d9e5f..b6b996da4d 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -461,11 +461,8 @@ func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round, dcr.catchpointDataWriting = &ct.catchpointDataWriting dcr.enableGeneratingCatchpointFiles = ct.enableGeneratingCatchpointFiles - { - rounds := calculateCatchpointRounds( - dcr.oldBase+1, dcr.oldBase+basics.Round(dcr.offset), ct.catchpointInterval) - dcr.catchpointSecondStage = (len(rounds) > 0) - } + rounds := ct.calculateCatchpointRounds(dcr) + dcr.catchpointSecondStage = (len(rounds) > 0) return dcr } @@ -488,8 +485,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d dbRound := dcc.oldBase defer func() { - if err != nil && dcc.catchpointFirstStage && - ct.enableGeneratingCatchpointFiles { + if err != nil && dcc.catchpointFirstStage && ct.enableGeneratingCatchpointFiles { atomic.StoreInt32(&ct.catchpointDataWriting, 0) } }() @@ -549,7 +545,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d return err } - for _, round := range ct.calculateCatchpointRounds(dcc) { + for _, round := range ct.calculateCatchpointRounds(&dcc.deferredCommitRange) { err = crw.InsertUnfinishedCatchpoint(ctx, round, dcc.committedRoundDigests[round-dcc.oldBase-1]) if err != nil { return err @@ -579,22 +575,20 @@ func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommit } func doRepackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestChunkLen uint64, in *tar.Reader, out *tar.Writer) error { - { - bytes := protocol.Encode(&header) + bytes := protocol.Encode(&header) - err := out.WriteHeader(&tar.Header{ - Name: "content.msgpack", - Mode: 0600, - Size: int64(len(bytes)), - }) - if err != nil { - return err - } + err := out.WriteHeader(&tar.Header{ + Name: "content.msgpack", + Mode: 0600, + Size: int64(len(bytes)), + }) + if err != nil { + return err + } - _, err = out.Write(bytes) - if err != nil { - return err - } + _, err = out.Write(bytes) + if err != nil { + return err } // make buffer for re-use that can fit biggest chunk @@ -818,13 +812,19 @@ func (ct *catchpointTracker) finishCatchpoint(ctx context.Context, round basics. // Calculate catchpoint round numbers in [min, max]. `catchpointInterval` must be // non-zero. func calculateCatchpointRounds(min basics.Round, max basics.Round, catchpointInterval uint64) []basics.Round { - var res []basics.Round - // The smallest integer i such that i * ct.catchpointInterval >= first. + // The smallest integer i such that i * ct.catchpointInterval >= min. l := (uint64(min) + catchpointInterval - 1) / catchpointInterval - // The largest integer i such that i * ct.catchpointInterval <= last. + // The largest integer i such that i * ct.catchpointInterval <= max. 
r := uint64(max) / catchpointInterval + // handle situations when max - min < catchpointInterval, + // for example min=11, max=19, catchpointInterval = 10 + if l > r { + return nil + } + + res := make([]basics.Round, 0, r-l+1) for i := l; i <= r; i++ { round := basics.Round(i * catchpointInterval) res = append(res, round) @@ -833,7 +833,7 @@ func calculateCatchpointRounds(min basics.Round, max basics.Round, catchpointInt return res } -func (ct *catchpointTracker) calculateCatchpointRounds(dcc *deferredCommitContext) []basics.Round { +func (ct *catchpointTracker) calculateCatchpointRounds(dcc *deferredCommitRange) []basics.Round { if ct.catchpointInterval == 0 { return nil } @@ -842,7 +842,8 @@ func (ct *catchpointTracker) calculateCatchpointRounds(dcc *deferredCommitContex if dcc.catchpointLookback+1 > uint64(min) { min = basics.Round(dcc.catchpointLookback) + 1 } - return calculateCatchpointRounds(min, dcc.newBase, ct.catchpointInterval) + max := dcc.oldBase + basics.Round(dcc.offset) + return calculateCatchpointRounds(min, max, ct.catchpointInterval) } // Delete old first stage catchpoint records and data files. @@ -875,7 +876,7 @@ func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferr } // Generate catchpoints for rounds in (dcc.oldBase, dcc.newBase]. - for _, round := range ct.calculateCatchpointRounds(dcc) { + for _, round := range ct.calculateCatchpointRounds(&dcc.deferredCommitRange) { err := ct.finishCatchpoint( ctx, round, dcc.committedRoundDigests[round-dcc.oldBase-1], dcc.catchpointLookback) if err != nil { @@ -896,7 +897,7 @@ func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferr } // handleUnorderedCommit is a special method for handling deferred commits that are out of order. -// Tracker might update own state in this case. For example, account updates tracker cancels +// Tracker might update own state in this case. For example, account catchpoint tracker cancels // scheduled catchpoint writing that deferred commit. func (ct *catchpointTracker) handleUnorderedCommit(dcc *deferredCommitContext) { // if the node is configured to generate catchpoint files, we might need to update the catchpointWriting variable. 
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index 22f38e599f..b85a380f1b 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -27,6 +27,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" "sync/atomic" "testing" "time" @@ -321,16 +322,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { proto := config.Consensus[protocol.ConsensusCurrentVersion] accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(5, true)} - - pooldata := basics.AccountData{} - pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 - pooldata.Status = basics.NotParticipating - accts[0][testPoolAddr] = pooldata - - sinkdata := basics.AccountData{} - sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 - sinkdata.Status = basics.NotParticipating - accts[0][testSinkAddr] = sinkdata + addSinkAndPoolAccounts(accts) ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion, accts) defer ml.Close() @@ -399,18 +391,9 @@ func TestReproducibleCatchpointLabels(t *testing.T) { }() accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)} + addSinkAndPoolAccounts(accts) rewardsLevels := []uint64{0} - pooldata := basics.AccountData{} - pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000 - pooldata.Status = basics.NotParticipating - accts[0][testPoolAddr] = pooldata - - sinkdata := basics.AccountData{} - sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 - sinkdata.Status = basics.NotParticipating - accts[0][testSinkAddr] = sinkdata - ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts) defer ml.Close() @@ -1485,3 +1468,189 @@ func TestHashContract(t *testing.T) { } } } + +// TestCatchpoint_FastUpdates tests catchpoint label writing data race +func TestCatchpointFastUpdates(t *testing.T) { + partitiontest.PartitionTest(t) + + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + t.Skip("This test is too slow on ARM and causes travis builds to time out") + } + proto := config.Consensus[protocol.ConsensusCurrentVersion] + + accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)} + addSinkAndPoolAccounts(accts) + rewardsLevels := []uint64{0} + + conf := config.GetDefaultLocal() + conf.CatchpointInterval = 1 + conf.CatchpointTracking = 1 + initialBlocksCount := int(conf.MaxAcctLookback) + ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion, accts) + defer ml.Close() + + ct := newCatchpointTracker(t, ml, conf, ".") + au := ml.trackers.accts + ao := ml.trackers.acctsOnline + + // Remove the txtail from the list of trackers since it causes a data race that + // wouldn't be observed under normal execution because commitedUpTo and newBlock + // are protected by the tracker mutex. 
+ trackers := make([]ledgerTracker, 0, len(ml.trackers.trackers)) + for _, tracker := range ml.trackers.trackers { + if _, ok := tracker.(*txTail); !ok { + trackers = append(trackers, tracker) + } + } + ml.trackers.trackers = trackers + + // cover 10 genesis blocks + rewardLevel := uint64(0) + for i := 1; i < initialBlocksCount; i++ { + accts = append(accts, accts[0]) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + checkAcctUpdates(t, au, ao, 0, basics.Round(initialBlocksCount)-1, accts, rewardsLevels, proto) + + wg := sync.WaitGroup{} + + for i := basics.Round(initialBlocksCount); i < basics.Round(proto.CatchpointLookback+15); i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel) + prevRound, prevTotals, err := au.LatestTotals() + require.Equal(t, i-1, prevRound) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + newAccts := applyPartialDeltas(accts[i-1], updates) + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = protocol.ConsensusCurrentVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.trackers.newBlock(blk, delta) + accts = append(accts, newAccts) + rewardsLevels = append(rewardsLevels, rewardLevel) + + wg.Add(1) + go func(round basics.Round) { + defer wg.Done() + ml.trackers.committedUpTo(round) + }(i) + } + ml.trackers.waitAccountsWriting() + wg.Wait() + + require.NotEmpty(t, ct.GetLastCatchpointLabel()) +} + +// TestCatchpoint_LargeAccountCountCatchpointGeneration creates a ledger containing a large set of accounts ( i.e. 100K accounts ) +// and attempts to have the catchpoint tracker create the associated catchpoint. It's designed precisely around setting an +// environment which would quickly ( i.e. after 32 rounds ) would start producing catchpoints. +func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) { + partitiontest.PartitionTest(t) + + if strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" || testing.Short() { + t.Skip("This test is too slow on CI executors: cannot repack catchpoint") + } + + // The next operations are heavy on the memory. 
+ // Garbage collection helps prevent trashing + runtime.GC() + + // create new protocol version, which has lower lookback + testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestLargeAccountCountCatchpointGeneration") + protoParams := config.Consensus[protocol.ConsensusCurrentVersion] + protoParams.CatchpointLookback = 16 + config.Consensus[testProtocolVersion] = protoParams + defer func() { + delete(config.Consensus, testProtocolVersion) + }() + + accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(100000, true)} + addSinkAndPoolAccounts(accts) + rewardsLevels := []uint64{0} + + conf := config.GetDefaultLocal() + conf.CatchpointInterval = 32 + conf.CatchpointTracking = 1 + conf.Archival = true + initialBlocksCount := int(conf.MaxAcctLookback) + ml := makeMockLedgerForTracker(t, true, initialBlocksCount, testProtocolVersion, accts) + defer ml.Close() + + ct := newCatchpointTracker(t, ml, conf, ".") + temporaryDirectory := t.TempDir() + catchpointsDirectory := filepath.Join(temporaryDirectory, store.CatchpointDirName) + err := os.Mkdir(catchpointsDirectory, 0777) + require.NoError(t, err) + defer os.RemoveAll(catchpointsDirectory) + + ct.dbDirectory = temporaryDirectory + + au := ml.trackers.accts + + // cover 10 genesis blocks + rewardLevel := uint64(0) + for i := 1; i < initialBlocksCount; i++ { + accts = append(accts, accts[0]) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + start := basics.Round(initialBlocksCount) + min := conf.CatchpointInterval + if min < protoParams.CatchpointLookback { + min = protoParams.CatchpointLookback + } + end := basics.Round(min + conf.MaxAcctLookback + 3) // few more rounds to commit and generate the second stage + for i := start; i < end; i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel) + + prevRound, prevTotals, err := au.LatestTotals() + require.Equal(t, i-1, prevRound) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + newAccts := applyPartialDeltas(accts[i-1], updates) + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = testProtocolVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.trackers.newBlock(blk, delta) + accts = append(accts, newAccts) + rewardsLevels = append(rewardsLevels, rewardLevel) + + ml.trackers.committedUpTo(i) + if i%2 == 1 || i == end-1 { + ml.trackers.waitAccountsWriting() + } + } + + require.NotEmpty(t, ct.GetLastCatchpointLabel()) + + // Garbage collection helps prevent trashing for next tests + runtime.GC() +} From 4e652f226ee7bf4ddffa3c5e32ab2076d49667c6 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 1 Feb 2023 12:43:36 -0500 Subject: [PATCH 19/81] algod: split SetFdSoftLimit calls for relay and non-relay nodes (#5070) --- config/config_test.go | 154 +++++++++++++++++++++++++++------------- config/localTemplate.go | 36 ++++++++++ daemon/algod/server.go | 41 +++++++++-- network/wsNetwork.go | 4 +- util/util.go | 10 +++ util/util_windows.go | 6 ++ 6 files changed, 196 insertions(+), 55 deletions(-) diff --git a/config/config_test.go b/config/config_test.go 
index c2ee070fc4..1e24fa4591 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -42,8 +42,9 @@ var defaultConfig = Local{ BaseLoggerDebugLevel: 1, //Info level } -func TestSaveThenLoad(t *testing.T) { +func TestLocal_SaveThenLoad(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() c1, err := loadWithoutDefaults(defaultConfig) require.NoError(t, err) @@ -53,13 +54,10 @@ func TestSaveThenLoad(t *testing.T) { ser1 := json.NewEncoder(&b1) ser1.Encode(c1) - os.RemoveAll("testdir") - err = os.Mkdir("testdir", 0777) - require.NoError(t, err) - - c1.SaveToDisk("testdir") + tempDir := t.TempDir() + c1.SaveToDisk(tempDir) - c2, err := LoadConfigFromDisk("testdir") + c2, err := LoadConfigFromDisk(tempDir) require.NoError(t, err) var b2 bytes.Buffer @@ -67,23 +65,23 @@ func TestSaveThenLoad(t *testing.T) { ser2.Encode(c2) require.True(t, bytes.Equal(b1.Bytes(), b2.Bytes())) - - os.RemoveAll("testdir") } -func TestLoadMissing(t *testing.T) { +func TestConfig_LoadMissing(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() - os.RemoveAll("testdir") - _, err := LoadConfigFromDisk("testdir") + tempDir := t.TempDir() + os.RemoveAll(tempDir) + _, err := LoadConfigFromDisk(tempDir) require.True(t, os.IsNotExist(err)) } -func TestMergeConfig(t *testing.T) { +func TestLocal_MergeConfig(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() - os.RemoveAll("testdir") - err := os.Mkdir("testdir", 0777) + tempDir := t.TempDir() c1 := struct { GossipFanout int @@ -98,7 +96,7 @@ func TestMergeConfig(t *testing.T) { c1.NetAddress = testString // write our reduced version of the Local struct - fileToMerge := filepath.Join("testdir", ConfigFilename) + fileToMerge := filepath.Join(tempDir, ConfigFilename) f, err := os.OpenFile(fileToMerge, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { enc := json.NewEncoder(f) @@ -110,7 +108,7 @@ func TestMergeConfig(t *testing.T) { // Take defaultConfig and merge with the saved custom settings. 
// This should result in c2 being the same as defaultConfig except for the value(s) in our custom c1 - c2, err := mergeConfigFromDir("testdir", defaultConfig) + c2, err := mergeConfigFromDir(tempDir, defaultConfig) require.NoError(t, err) require.Equal(t, defaultConfig.Archival || c1.NetAddress != "", c2.Archival) @@ -119,12 +117,10 @@ func TestMergeConfig(t *testing.T) { require.Equal(t, c1.NetAddress, c2.NetAddress) require.Equal(t, c1.GossipFanout, c2.GossipFanout) - - os.RemoveAll("testdir") } -func saveFullPhonebook(phonebook phonebookBlackWhiteList) error { - filename := filepath.Join("testdir", PhonebookFilename) +func saveFullPhonebook(phonebook phonebookBlackWhiteList, saveToDir string) error { + filename := filepath.Join(saveToDir, PhonebookFilename) f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { defer f.Close() @@ -142,52 +138,48 @@ var phonebookToMerge = phonebookBlackWhiteList{ Include: []string{"test1", "addThisOne"}, } -var expectedMerged = []string{ - "test1", "test2", "addThisOne", -} - func TestLoadPhonebook(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() - os.RemoveAll("testdir") - err := os.Mkdir("testdir", 0777) - require.NoError(t, err) + tempDir := t.TempDir() - err = saveFullPhonebook(phonebook) + err := saveFullPhonebook(phonebook, tempDir) require.NoError(t, err) - phonebookEntries, err := LoadPhonebook("testdir") + phonebookEntries, err := LoadPhonebook(tempDir) require.NoError(t, err) require.Equal(t, 3, len(phonebookEntries)) for index, entry := range phonebookEntries { require.Equal(t, phonebook.Include[index], entry) } - os.RemoveAll("testdir") } func TestLoadPhonebookMissing(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() - os.RemoveAll("testdir") - _, err := LoadPhonebook("testdir") + tempDir := t.TempDir() + _, err := LoadPhonebook(tempDir) require.True(t, os.IsNotExist(err)) } func TestArchivalIfRelay(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() testArchivalIfRelay(t, true) } func TestArchivalIfNotRelay(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() testArchivalIfRelay(t, false) } func testArchivalIfRelay(t *testing.T, relay bool) { - os.RemoveAll("testdir") - err := os.Mkdir("testdir", 0777) + tempDir := t.TempDir() c1 := struct { NetAddress string @@ -197,7 +189,7 @@ func testArchivalIfRelay(t *testing.T, relay bool) { } // write our reduced version of the Local struct - fileToMerge := filepath.Join("testdir", ConfigFilename) + fileToMerge := filepath.Join(tempDir, ConfigFilename) f, err := os.OpenFile(fileToMerge, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { enc := json.NewEncoder(f) @@ -207,18 +199,16 @@ func testArchivalIfRelay(t *testing.T, relay bool) { require.NoError(t, err) require.False(t, defaultConfig.Archival, "Default should be non-archival") - c2, err := mergeConfigFromDir("testdir", defaultConfig) + c2, err := mergeConfigFromDir(tempDir, defaultConfig) require.NoError(t, err) if relay { require.True(t, c2.Archival, "Relay should be archival") } else { require.False(t, c2.Archival, "Non-relay should still be non-archival") } - - os.RemoveAll("testdir") } -func TestConfigExampleIsCorrect(t *testing.T) { +func TestLocal_ConfigExampleIsCorrect(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) @@ -261,7 +251,7 @@ func loadWithoutDefaults(cfg Local) (Local, error) { return cfg, err } -func TestConfigMigrate(t *testing.T) { +func TestLocal_ConfigMigrate(t *testing.T) { partitiontest.PartitionTest(t) a := 
require.New(t) @@ -288,7 +278,7 @@ func TestConfigMigrate(t *testing.T) { a.NotEqual(defaultLocal, c0Modified) } -func TestConfigMigrateFromDisk(t *testing.T) { +func TestLocal_ConfigMigrateFromDisk(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) @@ -311,7 +301,7 @@ func TestConfigMigrateFromDisk(t *testing.T) { } // Verify that nobody is changing the shipping default configurations -func TestConfigInvariant(t *testing.T) { +func TestLocal_ConfigInvariant(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) @@ -328,8 +318,9 @@ func TestConfigInvariant(t *testing.T) { } } -func TestConfigLatestVersion(t *testing.T) { +func TestLocal_ConfigLatestVersion(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) @@ -391,6 +382,7 @@ func TestConsensusLatestVersion(t *testing.T) { func TestLocal_DNSBootstrapArray(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() type fields struct { DNSBootstrapID string @@ -434,6 +426,7 @@ func TestLocal_DNSBootstrapArray(t *testing.T) { func TestLocal_DNSBootstrap(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() type fields struct { DNSBootstrapID string @@ -480,8 +473,9 @@ func TestLocal_DNSBootstrap(t *testing.T) { } } -func TestLocalStructTags(t *testing.T) { +func TestLocal_StructTags(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() localType := reflect.TypeOf(Local{}) @@ -523,8 +517,9 @@ func TestLocalStructTags(t *testing.T) { } } -func TestGetVersionedDefaultLocalConfig(t *testing.T) { +func TestLocal_GetVersionedDefaultLocalConfig(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() for i := uint32(0); i < getLatestConfigVersion(); i++ { localVersion := getVersionedDefaultLocalConfig(i) @@ -533,8 +528,9 @@ func TestGetVersionedDefaultLocalConfig(t *testing.T) { } // TestLocalVersionField - ensures the Version contains only versions tags, the versions are all contiguous, and that no non-version tags are included there. 
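
// Throughout this file, tests now take isolated directories from t.TempDir(), which the
// test framework removes automatically, so the shared "testdir" plus its manual
// os.RemoveAll cleanup disappears and t.Parallel() becomes safe. A minimal sketch of the
// idiom (illustrative only, not part of this patch; it assumes the SaveToDisk and
// LoadConfigFromDisk signatures exercised above):
//
//	func TestLocal_SaveLoadScratch(t *testing.T) {
//		t.Parallel()
//		tempDir := t.TempDir() // unique per test, removed on cleanup
//		cfg := GetDefaultLocal()
//		cfg.GossipFanout = 42
//		require.NoError(t, cfg.SaveToDisk(tempDir))
//		loaded, err := LoadConfigFromDisk(tempDir)
//		require.NoError(t, err)
//		require.Equal(t, 42, loaded.GossipFanout)
//	}
//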
-func TestLocalVersionField(t *testing.T) { +func TestLocal_VersionField(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() localType := reflect.TypeOf(Local{}) field, ok := localType.FieldByName("Version") @@ -553,8 +549,9 @@ func TestLocalVersionField(t *testing.T) { require.Equal(t, expectedTag, string(field.Tag)) } -func TestGetNonDefaultConfigValues(t *testing.T) { +func TestLocal_GetNonDefaultConfigValues(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() cfg := GetDefaultLocal() @@ -583,6 +580,8 @@ func TestGetNonDefaultConfigValues(t *testing.T) { func TestLocal_TxFiltering(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + cfg := GetDefaultLocal() // ensure the default @@ -605,3 +604,62 @@ func TestLocal_TxFiltering(t *testing.T) { require.True(t, cfg.TxFilterRawMsgEnabled()) require.True(t, cfg.TxFilterCanonicalEnabled()) } + +func TestLocal_IsGossipServer(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := GetDefaultLocal() + require.False(t, cfg.IsGossipServer()) + + cfg.NetAddress = ":4160" + require.True(t, cfg.IsGossipServer()) +} + +func TestLocal_RecalculateConnectionLimits(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var tests = []struct { + maxFDs uint64 + reservedIn uint64 + restSoftIn uint64 + restHardIn uint64 + incomingIn int + + updated bool + restSoftExp uint64 + restHardExp uint64 + incomingExp int + }{ + {100, 10, 20, 40, 50, false, 20, 40, 50}, // no change + {100, 10, 20, 50, 50, true, 20, 40, 50}, // borrow from rest + {100, 10, 25, 50, 50, true, 25, 40, 50}, // borrow from rest + {100, 10, 50, 50, 50, true, 40, 40, 50}, // borrow from rest, update soft + {100, 10, 9, 19, 81, true, 9, 10, 80}, // borrow from both rest and incoming + {100, 10, 10, 20, 80, true, 10, 10, 80}, // borrow from both rest and incoming + {100, 50, 10, 30, 40, true, 10, 10, 40}, // borrow from both rest and incoming + {100, 90, 10, 30, 40, true, 10, 10, 0}, // borrow from both rest and incoming, clear incoming + {4096, 256, 1024, 2048, 2400, true, 1024, 1440, 2400}, // real numbers + {5000, 256, 1024, 2048, 2400, false, 1024, 2048, 2400}, // real numbers + } + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Parallel() + + c := Local{ + RestConnectionsSoftLimit: test.restSoftIn, + RestConnectionsHardLimit: test.restHardIn, + IncomingConnectionsLimit: test.incomingIn, + } + requireFDs := test.reservedIn + test.restHardIn + uint64(test.incomingIn) + res := c.AdjustConnectionLimits(requireFDs, test.maxFDs) + require.Equal(t, test.updated, res) + require.Equal(t, test.restSoftExp, c.RestConnectionsSoftLimit) + require.Equal(t, test.restHardExp, c.RestConnectionsHardLimit) + require.Equal(t, test.incomingExp, c.IncomingConnectionsLimit) + }) + } +} diff --git a/config/localTemplate.go b/config/localTemplate.go index e1b7259417..020361918d 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -103,6 +103,8 @@ type Local struct { // that RLIMIT_NOFILE >= IncomingConnectionsLimit + RestConnectionsHardLimit + // ReservedFDs. ReservedFDs are meant to leave room for short-lived FDs like // DNS queries, SQLite files, etc. This parameter shouldn't be changed. + // If RLIMIT_NOFILE < IncomingConnectionsLimit + RestConnectionsHardLimit + ReservedFDs + // then either RestConnectionsHardLimit or IncomingConnectionsLimit decreased. 
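+	// For example: with RLIMIT_NOFILE=100, ReservedFDs=10, RestConnectionsHardLimit=50
+	// and IncomingConnectionsLimit=50, the node wants 110 descriptors but may only open
+	// 100; AdjustConnectionLimits (added below) first lowers the REST hard limit toward a
+	// floor of 10 reserved REST connections and only then shrinks IncomingConnectionsLimit.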
ReservedFDs uint64 `version[2]:"256"`

 	// local server
@@ -593,3 +595,37 @@ func (cfg Local) TxFilterRawMsgEnabled() bool {
 func (cfg Local) TxFilterCanonicalEnabled() bool {
 	return cfg.TxIncomingFilteringFlags&txFilterCanonical != 0
 }
+
+// IsGossipServer returns true if NetAddress is set and this node is supposed
+// to start a websocket server
+func (cfg Local) IsGossipServer() bool {
+	return cfg.NetAddress != ""
+}
+
+// AdjustConnectionLimits updates RestConnectionsSoftLimit, RestConnectionsHardLimit, IncomingConnectionsLimit
+// if requiredFDs is greater than maxFDs
+func (cfg *Local) AdjustConnectionLimits(requiredFDs, maxFDs uint64) bool {
+	if maxFDs >= requiredFDs {
+		return false
+	}
+	const reservedRESTConns = 10
+	diff := requiredFDs - maxFDs
+
+	if cfg.RestConnectionsHardLimit <= diff+reservedRESTConns {
+		restDelta := diff + reservedRESTConns - cfg.RestConnectionsHardLimit
+		cfg.RestConnectionsHardLimit = reservedRESTConns
+		if cfg.IncomingConnectionsLimit > int(restDelta) {
+			cfg.IncomingConnectionsLimit -= int(restDelta)
+		} else {
+			cfg.IncomingConnectionsLimit = 0
+		}
+	} else {
+		cfg.RestConnectionsHardLimit -= diff
+	}
+
+	if cfg.RestConnectionsSoftLimit > cfg.RestConnectionsHardLimit {
+		cfg.RestConnectionsSoftLimit = cfg.RestConnectionsHardLimit
+	}
+
+	return true
+}
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index 54b4acd103..97ede14578 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -117,18 +117,49 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
 	// Set large enough soft file descriptors limit.
 	var ot basics.OverflowTracker
-	fdRequired := ot.Add(
-		cfg.ReservedFDs,
-		ot.Add(uint64(cfg.IncomingConnectionsLimit), cfg.RestConnectionsHardLimit))
+	fdRequired := ot.Add(cfg.ReservedFDs, cfg.RestConnectionsHardLimit)
 	if ot.Overflowed {
 		return errors.New(
-			"Initialize() overflowed when adding up ReservedFDs, IncomingConnectionsLimit " +
-				"RestConnectionsHardLimit; decrease them")
+			"Initialize() overflowed when adding up ReservedFDs and RestConnectionsHardLimit; decrease them")
 	}
 	err = util.SetFdSoftLimit(fdRequired)
 	if err != nil {
 		return fmt.Errorf("Initialize() err: %w", err)
 	}
+	if cfg.IsGossipServer() {
+		var ot basics.OverflowTracker
+		fdRequired := ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit))
+		if ot.Overflowed {
+			return errors.New("Initialize() overflowed when adding up IncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease RestConnectionsHardLimit or IncomingConnectionsLimit")
+		}
+		_, hard, err := util.GetFdLimits()
+		if err != nil {
+			s.log.Errorf("Failed to get RLIMIT_NOFILE values: %s", err.Error())
+		} else {
+			maxFDs := fdRequired
+			if fdRequired > hard {
+				// claim as many descriptors as possible
+				maxFDs = hard
+				// but try to keep cfg.ReservedFDs untouched by decreasing other limits
+				if cfg.AdjustConnectionLimits(fdRequired, hard) {
+					s.log.Warnf(
+						"Updated connection limits: RestConnectionsSoftLimit=%d, RestConnectionsHardLimit=%d, IncomingConnectionsLimit=%d",
+						cfg.RestConnectionsSoftLimit,
+						cfg.RestConnectionsHardLimit,
+						cfg.IncomingConnectionsLimit,
+					)
+					if cfg.IncomingConnectionsLimit == 0 {
+						return errors.New("Initialize() failed to adjust connection limits")
+					}
+				}
+			}
+			err = util.SetFdSoftLimit(maxFDs)
+			if err != nil {
+				// do not fail but log the error
+				s.log.Errorf("Failed to set a new RLIMIT_NOFILE value to %d (max %d): %s", fdRequired, hard, err.Error())
+			}
+		}
+	}

 	// configure the deadlock detector library
 	switch {
diff --git
a/network/wsNetwork.go b/network/wsNetwork.go
index 549acac867..9bc945aee9 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -724,7 +724,7 @@ func (wn *WebsocketNetwork) setup() {
 	wn.server.IdleTimeout = httpServerIdleTimeout
 	wn.server.MaxHeaderBytes = httpServerMaxHeaderBytes
 	wn.ctx, wn.ctxCancel = context.WithCancel(context.Background())
-	wn.relayMessages = wn.config.NetAddress != "" || wn.config.ForceRelayMessages
+	wn.relayMessages = wn.config.IsGossipServer() || wn.config.ForceRelayMessages
 	if wn.relayMessages || wn.config.ForceFetchTransactions {
 		wn.wantTXGossip = wantTXGossipYes
 	}
@@ -798,7 +798,7 @@ func (wn *WebsocketNetwork) Start() {
 		wn.messagesOfInterestEnc = MarshallMessageOfInterestMap(wn.messagesOfInterest)
 	}
-	if wn.config.NetAddress != "" {
+	if wn.config.IsGossipServer() {
 		listener, err := net.Listen("tcp", wn.config.NetAddress)
 		if err != nil {
 			wn.log.Errorf("network could not listen %v: %s", wn.config.NetAddress, err)
diff --git a/util/util.go b/util/util.go
index 0ecf357344..f3699188c6 100644
--- a/util/util.go
+++ b/util/util.go
@@ -26,6 +26,16 @@ import (

 /* misc */

+// GetFdLimits returns the current values of the file descriptor limits.
+func GetFdLimits() (soft uint64, hard uint64, err error) {
+	var rLimit syscall.Rlimit
+	err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
+	if err != nil {
+		return 0, 0, fmt.Errorf("GetFdLimits() err: %w", err)
+	}
+	return rLimit.Cur, rLimit.Max, nil
+}
+
 // SetFdSoftLimit sets a new file descriptors soft limit.
 func SetFdSoftLimit(newLimit uint64) error {
 	var rLimit syscall.Rlimit
diff --git a/util/util_windows.go b/util/util_windows.go
index fa91a4f8a8..b485f8e253 100644
--- a/util/util_windows.go
+++ b/util/util_windows.go
@@ -18,12 +18,18 @@ package util

 import (
 	"errors"
+	"math"
 	"syscall"
 	"time"
 )

 /* misc */

+// GetFdLimits returns the current values of the file descriptor limits.
+func GetFdLimits() (soft uint64, hard uint64, err error) {
+	return math.MaxUint64, math.MaxUint64, nil // syscall.RLIM_INFINITY
+}
+
 // SetFdSoftLimit sets a new file descriptors soft limit.
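// (Windows has no RLIMIT_NOFILE, so on this platform both GetFdLimits and
// SetFdSoftLimit are effectively no-ops: math.MaxUint64 stands in for RLIM_INFINITY
// and the setter silently succeeds.)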
func SetFdSoftLimit(_ uint64) error { return nil From b74346632587f212962b3ed31b43232d75fff6a2 Mon Sep 17 00:00:00 2001 From: algolucky <105239720+algolucky@users.noreply.github.com> Date: Wed, 1 Feb 2023 13:36:23 -0600 Subject: [PATCH 20/81] container: support kmd (#4984) --- .github/workflows/container.yml | 24 +++- Dockerfile | 6 +- docker/README.md | 25 ++-- .../{build => run}/kmd_config.json.example | 0 docker/files/run/run.sh | 121 +++++++++++------- 5 files changed, 108 insertions(+), 68 deletions(-) rename docker/files/{build => run}/kmd_config.json.example (100%) diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml index fa4b39c4cf..f01392632c 100644 --- a/.github/workflows/container.yml +++ b/.github/workflows/container.yml @@ -5,13 +5,11 @@ on: push: branches: - master - - rel/* - feature/* - tags: - - "*" jobs: - build: + build-and-push: + name: Build and Push to DockerHub runs-on: ubuntu-latest steps: - name: Checkout Code @@ -25,9 +23,7 @@ jobs: docker.io/${{ github.repository_owner }}/algod tags: | type=sha,format=long,prefix= - type=ref,event=tag type=ref,event=branch - type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'rel/stable') }} - name: Login to Docker Hub uses: docker/login-action@v2 @@ -55,3 +51,19 @@ jobs: SHA=${{ github.sha }} URL=${{ github.server_url }}/${{ github.repository }}.git BRANCH=${{ github.ref_name }} + + update-repo-description: + name: Update DockerHub Repository Description + runs-on: ubuntu-latest + if: github.ref == format('refs/heads/{0}', 'master') + steps: + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Update DockerHub Repository Description + uses: peter-evans/dockerhub-description@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + repository: ${{ github.repository_owner }}/algod + readme-filepath: ./docker/README.md diff --git a/Dockerfile b/Dockerfile index ab55a90940..aaa7b6c3bd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,7 @@ RUN /dist/files/build/install.sh \ FROM debian:bullseye-slim as final -ENV PATH="/node/bin:${PATH}" ALGOD_PORT="8080" ALGORAND_DATA="/algod/data" +ENV PATH="/node/bin:${PATH}" ALGOD_PORT="8080" KMD_PORT="7833" ALGORAND_DATA="/algod/data" # curl is needed to lookup the fast catchup url RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl && \ @@ -58,8 +58,8 @@ USER algorand COPY --chown=algorand:algorand --from=builder "/dist/bin/" "/node/bin/" COPY --chown=algorand:algorand --from=builder "/dist/files/run/" "/node/run/" -# Expose Algod REST API, Algod Gossip, and Prometheus Metrics ports -EXPOSE $ALGOD_PORT 4160 9100 +# Expose Algod REST API, KMD REST API, Algod Gossip, and Prometheus Metrics ports +EXPOSE $ALGOD_PORT $KMD_PORT 4160 9100 WORKDIR /algod diff --git a/docker/README.md b/docker/README.md index 78099e6027..5db6b20a1d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -14,12 +14,7 @@ By default the following config.json overrides are applied: | Setting | Value | | ------- | ----- | -| GossipFanout | 1 | | EndpointAddress | 0.0.0.0:8080 | -| IncomingConnectionsLimit | 0 | -| Archival | false | -| IsIndexerActive | false | -| EnableDeveloperAPI | true | ### Environment Variables @@ -28,12 +23,14 @@ The following environment variables can be supplied. Except when noted, it is po | Variable | Description | | -------- | ----------- | | NETWORK | Leave blank for a private network, otherwise specify one of mainnet, betanet, testnet, or devnet. 
Only used during a data directory initialization. | -| FAST_CATCHUP | If set on a public network, attempt to start fast-catchup during initial config. | +| FAST_CATCHUP | If set to 1 on a public network, attempt to start fast-catchup during initial config. | | TELEMETRY_NAME| If set on a public network, telemetry is reported with this name. | -| DEV_MODE | If set on a private network, enable dev mode. Only used during data directory initialization. | +| DEV_MODE | If set to 1 on a private network, enable dev mode. Only used during data directory initialization. | | NUM_ROUNDS | If set on a private network, override default of 30000 participation keys. | | TOKEN | If set, overrides the REST API token. | | ADMIN_TOKEN | If set, overrides the REST API admin token. | +| KMD_TOKEN | If set along with `START_KMD`, override the KMD REST API token. | +| START_KMD | When set to 1, start kmd service with no timeout. THIS SHOULD NOT BE USED IN PRODUCTION. | ### Special Files @@ -55,10 +52,12 @@ The following command launches a container configured with one of the public net ```bash docker run --rm -it \ -p 4190:8080 \ + -p 4191:7833 \ -e NETWORK=mainnet \ -e FAST_CATCHUP=1 \ -e TELEMETRY_NAME=name \ -e TOKEN=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \ + -e START_KMD=1 \ -v ${PWD}/data:/algod/data/ \ --name mainnet-container \ algorand/algod:latest @@ -66,11 +65,13 @@ docker run --rm -it \ Explanation of parts: -* `-p 4190:8080` maps the internal algod REST API to local port 4190 -* `-e NETWORK=` can be set to any of the supported public networks. -* `-e FAST_CATCHUP=` causes fast catchup to start shortly after launching the network. -* `-e TELEMETRY_NAME=` enables telemetry reporting to Algorand for network health analysis. The value of this variable takes precedence over the `name` attribute set in `/etc/algorand/logging.config`. -* `-e TOKEN=` sets the REST API token to use. +* `-p 4190:8080` maps the internal algod REST API to local port 4190. +* `-p 4191:7833` maps the internal kmd REST API to local port 4191. +* `-e NETWORK=mainnet` can be set to any of the supported public networks. +* `-e TELEMETRY_NAME=name` enables telemetry reporting to Algorand for network health analysis. The value of this variable takes precedence over the `name` attribute set in `/etc/algorand/logging.config`. +* `-e FAST_CATCHUP=1` causes fast catchup to start shortly after launching the network. +* `-e START_KMD=1` signals to entrypoint to start the kmd REST API (THIS SHOULD NOT BE USED IN PRODUCTION). +* `-e TOKEN=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` sets the REST API token to use. * `-v ${PWD}/data:/algod/data/` mounts a local volume to the data directory, which can be used to restart and upgrade the deployment. ## Mounting the Data Directory diff --git a/docker/files/build/kmd_config.json.example b/docker/files/run/kmd_config.json.example similarity index 100% rename from docker/files/build/kmd_config.json.example rename to docker/files/run/kmd_config.json.example diff --git a/docker/files/run/run.sh b/docker/files/run/run.sh index 627665dd6f..bd4e79fe04 100755 --- a/docker/files/run/run.sh +++ b/docker/files/run/run.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash -set -ex +set -e + +if [ "$DEBUG" = "1" ]; then + set -x +fi # Script to configure or resume a network. Based on environment settings the # node will be setup with a private network or connect to a public network. 
@@ -9,8 +13,35 @@ set -ex # Helper functions # #################### -function apply_configuration() { +function catchup() { + local FAST_CATCHUP_URL="https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/CHANNEL/latest.catchpoint" + local CATCHPOINT=$(curl -s ${FAST_CATCHUP_URL/CHANNEL/$NETWORK}) + if [[ "$(echo $CATCHPOINT | wc -l | tr -d ' ')" != "1" ]]; then + echo "Problem starting fast catchup." + exit 1 + fi + + sleep 5 + goal node catchup "$CATCHPOINT" +} + +function start_public_network() { + cd "$ALGORAND_DATA" + + configure_data_dir + start_kmd & + + if [ "$FAST_CATCHUP" = "1" ]; then + catchup & + fi + + # redirect output to stdout + algod -o +} + +function configure_data_dir() { cd "$ALGORAND_DATA" + algocfg -d . set -p EndpointAddress -v "0.0.0.0:${ALGOD_PORT}" # check for config file overrides. if [ -f "/etc/algorand/config.json" ]; then @@ -26,7 +57,7 @@ function apply_configuration() { cp /etc/algorand/logging.config logging.config fi - # check for environment variable overrides. + # check for token overrides if [ "$TOKEN" != "" ]; then echo "$TOKEN" >algod.token fi @@ -41,40 +72,30 @@ function apply_configuration() { else diagcfg telemetry disable fi -} -function catchup() { - local FAST_CATCHUP_URL="https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/CHANNEL/latest.catchpoint" - local CATCHPOINT=$(curl -s ${FAST_CATCHUP_URL/CHANNEL/$NETWORK}) - if [[ "$(echo $CATCHPOINT | wc -l | tr -d ' ')" != "1" ]]; then - echo "Problem starting fast catchup." - exit 1 + # start kmd + if [ "$START_KMD" = "1" ]; then + local KMD_DIR="kmd-v0.5" + # on intial bootstrap, this directory won't exist. + mkdir -p "$KMD_DIR" + chmod 0700 "$KMD_DIR" + cd "$KMD_DIR" + if [ -f "/etc/algorand/kmd_config.json" ]; then + cp /etc/algorand/kmd_config.json kmd_config.json + else + echo "{ \"address\":\"0.0.0.0:${KMD_PORT}\", \"allowed_origins\":[\"*\"] }" >kmd_config.json + fi + + if [ "$KMD_TOKEN" != "" ]; then + echo "$KMD_TOKEN" >kmd.token + fi fi - - sleep 5 - goal node catchup "$CATCHPOINT" } -function start_public_network() { - cd "$ALGORAND_DATA" - - apply_configuration - - if [[ $FAST_CATCHUP ]]; then - catchup & +function start_kmd() { + if [ "$START_KMD" = "1" ]; then + goal kmd start -d "$ALGORAND_DATA" fi - # redirect output to stdout - algod -o -} - -function configure_data_dir() { - cd "$ALGORAND_DATA" - algocfg -d . set -p GossipFanout -v 1 - algocfg -d . set -p EndpointAddress -v "0.0.0.0:${ALGOD_PORT}" - algocfg -d . set -p IncomingConnectionsLimit -v 0 - algocfg -d . set -p Archival -v false - algocfg -d . set -p IsIndexerActive -v false - algocfg -d . set -p EnableDeveloperAPI -v true } function start_new_public_network() { @@ -95,12 +116,15 @@ function start_new_public_network() { local ID case $NETWORK in - mainnet) ID=".algorand.network";; - testnet) ID=".algorand.network";; - betanet) ID=".algodev.network";; - alphanet) ID=".algodev.network";; - devnet) ID=".algodev.network";; - *) echo "Unknown network"; exit 1;; + mainnet) ID=".algorand.network" ;; + testnet) ID=".algorand.network" ;; + betanet) ID=".algodev.network" ;; + alphanet) ID=".algodev.network" ;; + devnet) ID=".algodev.network" ;; + *) + echo "Unknown network" + exit 1 + ;; esac set -p DNSBootstrapID -v "$ID" @@ -108,7 +132,8 @@ function start_new_public_network() { } function start_private_network() { - apply_configuration + configure_data_dir + start_kmd # TODO: Is there a way to properly exec a private network? goal network start -r "${ALGORAND_DATA}/.." 
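# Note that configure_data_dir is now the single place that renders kmd_config.json and
# kmd.token, and start_kmd guards on START_KMD=1, so both the public- and private-network
# paths gain kmd support without changing the default container behavior.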
@@ -117,7 +142,7 @@ function start_private_network() { function start_new_private_network() { local TEMPLATE="template.json" - if [ "$DEV_MODE" ]; then + if [ "$DEV_MODE" = "1" ]; then TEMPLATE="devmode_template.json" fi sed -i "s/NUM_ROUNDS/${NUM_ROUNDS:-30000}/" "/node/run/$TEMPLATE" @@ -131,13 +156,15 @@ function start_new_private_network() { ############## echo "Starting Algod Docker Container" -echo " ALGORAND_DATA: $ALGORAND_DATA" -echo " NETWORK: $NETWORK" -echo " ALGOD_PORT: $ALGOD_PORT" -echo " FAST_CATCHUP: $FAST_CATCHUP" -echo " DEV_MODE: $DEV_MODE" -echo " TOKEN: $TOKEN" -echo " TELEMETRY_NAME $TELEMETRY_NAME" +echo " ALGORAND_DATA: $ALGORAND_DATA" +echo " NETWORK: $NETWORK" +echo " ALGOD_PORT: $ALGOD_PORT" +echo " FAST_CATCHUP: $FAST_CATCHUP" +echo " DEV_MODE: $DEV_MODE" +echo " TOKEN: ${TOKEN:-"Not Set"}" +echo " KMD_TOKEN: ${KMD_TOKEN:-"Not Set"}" +echo " TELEMETRY_NAME: $TELEMETRY_NAME" +echo " START_KMD: ${START_KMD:-"Not Set"}" # If data directory is initialized, start existing environment. if [ -f "$ALGORAND_DATA/../network.json" ]; then From 513a88a5cdf0485989f111a6ac4fe800256c6d21 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Wed, 1 Feb 2023 15:21:33 -0500 Subject: [PATCH 21/81] network: Ignore invalid tags (#4517) --- agreement/service_test.go | 9 ++-- logging/telemetryspec/event.go | 2 +- network/wsNetwork.go | 1 + network/wsNetwork_test.go | 39 ++++++++++++++ network/wsPeer.go | 16 +++++- network/wsPeer_test.go | 99 ++++++++++++++++++++++++++++++++++ protocol/hash_test.go | 41 ++++++++++++++ protocol/tags.go | 8 +-- protocol/tags_test.go | 98 +++++++++++++++++++++++++++++++++ rpcs/blockService.go | 1 - tools/debug/algodump/main.go | 1 - util/metrics/metrics.go | 2 + 12 files changed, 303 insertions(+), 14 deletions(-) create mode 100644 protocol/hash_test.go create mode 100644 protocol/tags_test.go diff --git a/agreement/service_test.go b/agreement/service_test.go index 2cf6969629..e24f81890c 100644 --- a/agreement/service_test.go +++ b/agreement/service_test.go @@ -190,6 +190,9 @@ type multicastParams struct { exclude nodeID } +// UnknownMsgTag ensures the testingNetwork implementation below will drop a message. +const UnknownMsgTag protocol.Tag = "??" 
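+// Two bytes keeps it wire-compatible with real tags (protocol tags must be 2 bytes
+// long), while guaranteeing it never matches an entry in protocol.TagList now that the
+// protocol package itself no longer exports an UnknownMsgTag.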
+ func (n *testingNetwork) multicast(tag protocol.Tag, data []byte, source nodeID, exclude nodeID) { // fmt.Println("mc", source, "x", exclude) n.mu.Lock() @@ -262,7 +265,7 @@ func (n *testingNetwork) multicast(tag protocol.Tag, data []byte, source nodeID, msgChans = n.bundleMessages case protocol.ProposalPayloadTag: msgChans = n.payloadMessages - case protocol.UnknownMsgTag: + case UnknownMsgTag: // We use this intentionally - just drop it return default: @@ -1682,7 +1685,7 @@ func TestAgreementRecoverGlobalStartingValueBadProposal(t *testing.T) { // intercept all proposals for the next period; replace with unexpected baseNetwork.intercept(func(params multicastParams) multicastParams { if params.tag == protocol.ProposalPayloadTag { - params.tag = protocol.UnknownMsgTag + params.tag = UnknownMsgTag } return params }) @@ -2281,7 +2284,7 @@ func TestAgreementCertificateDoesNotStallSingleRelay(t *testing.T) { return params } } - params.tag = protocol.UnknownMsgTag + params.tag = UnknownMsgTag } return params diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go index 34d161bce5..b50ac7eab7 100644 --- a/logging/telemetryspec/event.go +++ b/logging/telemetryspec/event.go @@ -296,7 +296,7 @@ type PeerConnectionDetails struct { // DuplicateFilterCount is the number of times this peer has sent us a message hash to filter that it had already sent before. DuplicateFilterCount uint64 // These message counters count received messages from this peer. - TXCount, MICount, AVCount, PPCount uint64 + TXCount, MICount, AVCount, PPCount, UNKCount uint64 // TCPInfo provides connection measurements from TCP. TCP util.TCPInfo `json:",omitempty"` } diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 9bc945aee9..e8c9e97421 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -1841,6 +1841,7 @@ func (wn *WebsocketNetwork) getPeerConnectionTelemetryDetails(now time.Time, pee MICount: atomic.LoadUint64(&peer.miMessageCount), AVCount: atomic.LoadUint64(&peer.avMessageCount), PPCount: atomic.LoadUint64(&peer.ppMessageCount), + UNKCount: atomic.LoadUint64(&peer.unkMessageCount), } if tcpInfo, err := peer.GetUnderlyingConnTCPInfo(); err == nil && tcpInfo != nil { connDetail.TCP = *tcpInfo diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 726db954c6..ac9a757214 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -53,6 +53,11 @@ import ( const sendBufferLength = 1000 +func init() { + // this allows test code to use out-of-protocol message tags and have them go through + allowCustomTags = true +} + func TestMain(m *testing.M) { logging.Base().SetLevel(logging.Debug) os.Exit(m.Run()) @@ -326,6 +331,40 @@ func TestWebsocketNetworkBasic(t *testing.T) { } } +// Set up two nodes, test that B drops invalid tags when A ends them. 
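+// It flips allowCustomTags off (the package test init above sets it to true) and adds
+// "XX" to defaultSendMessageTags so netA may send it; netB's readLoop should then count
+// only the two valid TX messages and never invoke the "XX" handler.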
+func TestWebsocketNetworkBasicInvalidTags(t *testing.T) { // nolint:paralleltest // changes global variable allowCustomTags + partitiontest.PartitionTest(t) + // disallow custom tags for this test + allowCustomTags = false + defaultSendMessageTags["XX"] = true + defer func() { + allowCustomTags = true + delete(defaultSendMessageTags, "XX") + }() + + netA, netB, counter, closeFunc := setupWebsocketNetworkAB(t, 2) + defer closeFunc() + counterDone := counter.done + // register a handler that should never get called, because the message will + // be dropped before it gets to the handlers if allowCustomTags = false + netB.RegisterHandlers([]TaggedMessageHandler{ + {Tag: "XX", MessageHandler: HandlerFunc(func(msg IncomingMessage) OutgoingMessage { + require.Fail(t, "MessageHandler for out-of-protocol tag should not be called") + return OutgoingMessage{} + })}}) + // send 2 valid and 2 invalid tags + netA.Broadcast(context.Background(), "TX", []byte("foo"), false, nil) + netA.Broadcast(context.Background(), "XX", []byte("foo"), false, nil) + netA.Broadcast(context.Background(), "TX", []byte("bar"), false, nil) + netA.Broadcast(context.Background(), "XX", []byte("bar"), false, nil) + + select { + case <-counterDone: + case <-time.After(2 * time.Second): + t.Errorf("timeout, count=%d, wanted 2", counter.count) + } +} + // Set up two nodes, send proposal func TestWebsocketProposalPayloadCompression(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/network/wsPeer.go b/network/wsPeer.go index 7bec904821..d769d122b7 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -52,6 +52,9 @@ const msgsInReadBufferPerPeer = 10 var tagStringList []string +// allowCustomTags is set by tests to allow non-protocol-defined message tags. It is false in non-test code. +var allowCustomTags bool + func init() { tagStringList = make([]string, len(protocol.TagList)) for i, t := range protocol.TagList { @@ -97,6 +100,7 @@ var duplicateNetworkMessageReceivedBytesTotal = metrics.MakeCounter(metrics.Dupl var duplicateNetworkFilterReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkFilterReceivedTotal) var outgoingNetworkMessageFilteredOutTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutTotal) var outgoingNetworkMessageFilteredOutBytesTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutBytesTotal) +var unknownProtocolTagMessagesTotal = metrics.MakeCounter(metrics.UnknownProtocolTagMessagesTotal) // defaultSendMessageTags is the default list of messages which a peer would // allow to be sent without receiving any explicit request. @@ -110,7 +114,6 @@ var defaultSendMessageTags = map[protocol.Tag]bool{ protocol.TopicMsgRespTag: true, protocol.MsgOfInterestTag: true, protocol.TxnTag: true, - protocol.UniCatchupReqTag: true, protocol.UniEnsBlockReqTag: true, protocol.VoteBundleTag: true, } @@ -192,7 +195,7 @@ type wsPeer struct { duplicateFilterCount uint64 // These message counters need to be 64-bit aligned as well. 
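	// (The atomic 64-bit helpers require 8-byte alignment; on 32-bit platforms Go only
	// guarantees that for the first word of an allocated struct, which is why
	// TestAtomicVariablesAlignment asserts offset%8 == 0 for every counter.)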
- txMessageCount, miMessageCount, ppMessageCount, avMessageCount uint64 + txMessageCount, miMessageCount, ppMessageCount, avMessageCount, unkMessageCount uint64 wsPeerCore @@ -566,6 +569,15 @@ func (wp *wsPeer) readLoop() { atomic.AddUint64(&wp.avMessageCount, 1) case protocol.ProposalPayloadTag: atomic.AddUint64(&wp.ppMessageCount, 1) + // the remaining valid tags: no special handling here + case protocol.NetPrioResponseTag, protocol.PingTag, protocol.PingReplyTag, + protocol.StateProofSigTag, protocol.UniEnsBlockReqTag, protocol.VoteBundleTag: + default: // unrecognized tag + unknownProtocolTagMessagesTotal.Inc(nil) + atomic.AddUint64(&wp.unkMessageCount, 1) + if !allowCustomTags { + continue // drop message, skip adding it to queue + } } if len(msg.Data) > 0 && wp.incomingMsgFilter != nil && dedupSafeTag(msg.Tag) { if wp.incomingMsgFilter.CheckIncomingMessage(msg.Tag, msg.Data, true, true) { diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go index 7ed49b17c2..ceb2121aa4 100644 --- a/network/wsPeer_test.go +++ b/network/wsPeer_test.go @@ -19,12 +19,18 @@ package network import ( "encoding/binary" "fmt" + "go/ast" + "go/parser" + "go/token" + "path/filepath" + "sort" "strings" "testing" "time" "unsafe" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" "github.com/algorand/go-algorand/util/metrics" "github.com/stretchr/testify/require" @@ -104,6 +110,11 @@ func TestAtomicVariablesAlignment(t *testing.T) { require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0) require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0) require.True(t, (unsafe.Offsetof(p.duplicateFilterCount)%8) == 0) + require.True(t, (unsafe.Offsetof(p.txMessageCount)%8) == 0) + require.True(t, (unsafe.Offsetof(p.miMessageCount)%8) == 0) + require.True(t, (unsafe.Offsetof(p.ppMessageCount)%8) == 0) + require.True(t, (unsafe.Offsetof(p.avMessageCount)%8) == 0) + require.True(t, (unsafe.Offsetof(p.unkMessageCount)%8) == 0) } func TestTagCounterFiltering(t *testing.T) { @@ -180,3 +191,91 @@ func TestVersionToFeature(t *testing.T) { }) } } + +func TestPeerReadLoopSwitchAllTags(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + allTags := getProtocolTags(t) + foundTags := []string{} + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "wsPeer.go", nil, 0) + require.NoError(t, err) + + getCases := func(n ast.Node) (ret bool) { + switch x := n.(type) { + case *ast.SwitchStmt: + // look for "switch msg.Tag" + if tagSel, ok := x.Tag.(*ast.SelectorExpr); ok { + if tagSel.Sel.Name != "Tag" { + return false + } + if id, ok := tagSel.X.(*ast.Ident); ok && id.Name != "msg" { + return false + } + } + // found switch msg.Tag, go through case statements + for _, s := range x.Body.List { + cl, ok := s.(*ast.CaseClause) + if !ok { + continue + } + for i := range cl.List { + if selExpr, ok := cl.List[i].(*ast.SelectorExpr); ok { + xid, ok := selExpr.X.(*ast.Ident) + require.True(t, ok) + require.Equal(t, "protocol", xid.Name) + foundTags = append(foundTags, selExpr.Sel.Name) + } + } + } + } + return true + } + + readLoopFound := false + ast.Inspect(f, func(n ast.Node) bool { + // look for "readLoop" function + fn, ok := n.(*ast.FuncDecl) + if ok && fn.Name.Name == "readLoop" { + readLoopFound = true + ast.Inspect(fn, getCases) + return false + } + return true + }) + require.True(t, readLoopFound) + require.NotEmpty(t, foundTags) + sort.Strings(allTags) + sort.Strings(foundTags) + 
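+	// Sorting both slices first makes the comparison order-insensitive: the switch in
+	// readLoop and the const block in protocol/tags.go need not list tags in the same order.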
require.Equal(t, allTags, foundTags) +} + +func getProtocolTags(t *testing.T) []string { + file := filepath.Join("../protocol", "tags.go") + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, file, nil, parser.ParseComments) + + // look for const declarations in protocol/tags.go + var declaredTags []string + // Iterate through the declarations in the file + for _, d := range f.Decls { + genDecl, ok := d.(*ast.GenDecl) + // Check if the declaration is a constant and if not, continue + if !ok || genDecl.Tok != token.CONST { + continue + } + // Iterate through the specs (specifications) in the declaration + for _, spec := range genDecl.Specs { + if valueSpec, ok := spec.(*ast.ValueSpec); ok { + for _, n := range valueSpec.Names { + declaredTags = append(declaredTags, n.Name) + } + } + } + } + // assert these AST-discovered tags are complete (match the size of protocol.TagList) + require.Len(t, declaredTags, len(protocol.TagList)) + return declaredTags +} diff --git a/protocol/hash_test.go b/protocol/hash_test.go new file mode 100644 index 0000000000..e490987c1c --- /dev/null +++ b/protocol/hash_test.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package protocol + +import ( + "strings" + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/assert" +) + +// TestHashIDPrefix checks if any HashID const declared in hash.go is a prefix of another. +func TestHashIDPrefix(t *testing.T) { + t.Parallel() + partitiontest.PartitionTest(t) + + values := getConstValues(t, "hash.go", "HashID") + for i, v1 := range values { + for j, v2 := range values { + if i == j { + continue + } + assert.False(t, strings.HasPrefix(v1, v2), "HashID %s is a prefix of %s", v2, v1) + } + } +} diff --git a/protocol/tags.go b/protocol/tags.go index abe947b65b..876a8c868c 100644 --- a/protocol/tags.go +++ b/protocol/tags.go @@ -25,7 +25,6 @@ type Tag string // are encoded using a comma separator (see network/msgOfInterest.go). // The tags must be 2 bytes long. const ( - UnknownMsgTag Tag = "??" AgreementVoteTag Tag = "AV" MsgOfInterestTag Tag = "MI" MsgDigestSkipTag Tag = "MS" @@ -36,17 +35,15 @@ const ( StateProofSigTag Tag = "SP" TopicMsgRespTag Tag = "TS" TxnTag Tag = "TX" - UniCatchupReqTag Tag = "UC" //Replaced by UniEnsBlockReqTag. Only for backward compatibility. - UniEnsBlockReqTag Tag = "UE" + //UniCatchupReqTag Tag = "UC" was replaced by UniEnsBlockReqTag + UniEnsBlockReqTag Tag = "UE" //UniEnsBlockResTag Tag = "US" was used for wsfetcherservice //UniCatchupResTag Tag = "UT" was used for wsfetcherservice VoteBundleTag Tag = "VB" ) // TagList is a list of all currently used protocol tags. -// TODO: generate this and/or have a test that it is complete. 
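+// (The TODO above is addressed by tags_test.go in this same change: TestTagList
+// rebuilds the expected set from the const declarations via the AST and fails if the
+// two ever diverge.)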
var TagList = []Tag{ - UnknownMsgTag, AgreementVoteTag, MsgOfInterestTag, MsgDigestSkipTag, @@ -57,7 +54,6 @@ var TagList = []Tag{ StateProofSigTag, TopicMsgRespTag, TxnTag, - UniCatchupReqTag, UniEnsBlockReqTag, VoteBundleTag, } diff --git a/protocol/tags_test.go b/protocol/tags_test.go new file mode 100644 index 0000000000..087e32a82d --- /dev/null +++ b/protocol/tags_test.go @@ -0,0 +1,98 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package protocol + +import ( + "go/ast" + "go/parser" + "go/token" + "strconv" + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +// getConstValues uses the AST to get a list of the values of declared const +// variables of the provided typeName in a specified fileName. +func getConstValues(t *testing.T, fileName string, typeName string) []string { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fileName, nil, 0) + require.NoError(t, err) + + var ret []string + + // Iterate through the declarations in the file + for _, d := range f.Decls { + gen, ok := d.(*ast.GenDecl) + // Check if the declaration is a constant + if !ok || gen.Tok != token.CONST { + continue + } + // Iterate through the specifications in the declaration + for _, spec := range gen.Specs { + // Check if the spec is a value spec + v, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + // Check if the typeName specified is being declared + if v.Type == nil || v.Type.(*ast.Ident).Name != typeName { + continue + } + // Iterate through the expressions in the value spec + for _, expr := range v.Values { + val, ok := expr.(*ast.BasicLit) + // Check if the expression is a basic literal and if not, continue + if !ok { + continue + } + // Unquote the value of the basic literal to remove the quotes + tagVal, err := strconv.Unquote(val.Value) + require.NoError(t, err) + ret = append(ret, tagVal) + } + } + } + return ret +} + +// TestTagList checks that the TagList global variable contains +// all the constant Tag variables declared in tags.go. 
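+// It reuses getConstValues above, so any Tag constant added to tags.go without a
+// matching TagList entry (or vice versa) fails this test.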
+func TestTagList(t *testing.T) { + t.Parallel() + partitiontest.PartitionTest(t) + + constTags := getConstValues(t, "tags.go", "Tag") + + // Verify that TagList is not empty and has the same length as constTags + require.NotEmpty(t, TagList) + require.Len(t, TagList, len(constTags), "TagList is not complete") + tagListMap := make(map[Tag]bool) + for _, tag := range TagList { + tagListMap[tag] = true + } + // Iterate through constTags and check that each element exists in tagListMap + for _, constTag := range constTags { + if tagListMap[Tag(constTag)] { + delete(tagListMap, Tag(constTag)) // check off as seen + } else { + require.Fail(t, "const Tag %s is not in TagList", constTag) + } + } + require.Empty(t, tagListMap, "Unseen tags remain in TagList") +} diff --git a/rpcs/blockService.go b/rpcs/blockService.go index cd4a33ad20..f668a1c52a 100644 --- a/rpcs/blockService.go +++ b/rpcs/blockService.go @@ -132,7 +132,6 @@ func (bs *BlockService) Start() { defer bs.mu.Unlock() if bs.enableServiceOverGossip { handlers := []network.TaggedMessageHandler{ - {Tag: protocol.UniCatchupReqTag, MessageHandler: network.HandlerFunc(bs.processIncomingMessage)}, {Tag: protocol.UniEnsBlockReqTag, MessageHandler: network.HandlerFunc(bs.processIncomingMessage)}, } diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go index be80768fc5..429cec9f9d 100644 --- a/tools/debug/algodump/main.go +++ b/tools/debug/algodump/main.go @@ -149,7 +149,6 @@ func setDumpHandlers(n network.GossipNode) { {Tag: protocol.ProposalPayloadTag, MessageHandler: &dh}, {Tag: protocol.TopicMsgRespTag, MessageHandler: &dh}, {Tag: protocol.TxnTag, MessageHandler: &dh}, - {Tag: protocol.UniCatchupReqTag, MessageHandler: &dh}, {Tag: protocol.UniEnsBlockReqTag, MessageHandler: &dh}, {Tag: protocol.VoteBundleTag, MessageHandler: &dh}, } diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go index 11973e85b7..f2437f2675 100644 --- a/util/metrics/metrics.go +++ b/util/metrics/metrics.go @@ -55,6 +55,8 @@ var ( OutgoingNetworkMessageFilteredOutTotal = MetricName{Name: "algod_outgoing_network_message_filtered_out_total", Description: "Total number of messages that were not sent per peer request"} // OutgoingNetworkMessageFilteredOutBytesTotal Total number of bytes saved by not sending messages that were asked not to be sent by peer OutgoingNetworkMessageFilteredOutBytesTotal = MetricName{Name: "algod_outgoing_network_message_filtered_out_bytes_total", Description: "Total number of bytes saved by not sending messages that were asked not to be sent by peer"} + // UnknownProtocolTagMessagesTotal Total number of out-of-protocol tag messages received from the network + UnknownProtocolTagMessagesTotal = MetricName{Name: "algod_network_unk_tag_messages_total", Description: "Total number of unknown protocol tag messages received from the network"} // CryptoGenSigSecretsTotal Total number of calls to GenerateSignatureSecrets() CryptoGenSigSecretsTotal = MetricName{Name: "algod_crypto_signature_secrets_generate_total", Description: "Total number of calls to GenerateSignatureSecrets"} // CryptoSigSecretsSignTotal Total number of calls to SignatureSecrets.Sign From b326672b2f0502f6f02ff7c57bbe5e4267d7a679 Mon Sep 17 00:00:00 2001 From: Mark Ciccarello Date: Wed, 1 Feb 2023 12:54:18 -0800 Subject: [PATCH 22/81] goal: goal network create default templates (#4891) --- cmd/goal/defaultNetworkTemplate.json | 46 +++++++++++++++++++++++ cmd/goal/network.go | 30 ++++++++++++--- netdeploy/network.go | 16 +++++--- netdeploy/networkTemplate.go | 
10 ++--- test/framework/fixtures/libgoalFixture.go | 4 +- test/scripts/test_private_network.sh | 12 ++++++ 6 files changed, 102 insertions(+), 16 deletions(-) create mode 100644 cmd/goal/defaultNetworkTemplate.json diff --git a/cmd/goal/defaultNetworkTemplate.json b/cmd/goal/defaultNetworkTemplate.json new file mode 100644 index 0000000000..2ca207bf44 --- /dev/null +++ b/cmd/goal/defaultNetworkTemplate.json @@ -0,0 +1,46 @@ +{ + "Genesis": { + "NetworkName": "Default Network Template", + "RewardsPoolBalance": 0, + "FirstPartKeyRound": 0, + "LastPartKeyRound": 30000, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 40, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 40, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 20, + "Online": false + } + ] + }, + "Nodes": [ + { + "Name": "Node", + "IsRelay": false, + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + }, + { + "Name": "Wallet2", + "ParticipationOnly": false + }, + { + "Name": "Wallet3", + "ParticipationOnly": false + } + ] + } + ] +} + diff --git a/cmd/goal/network.go b/cmd/goal/network.go index d7212123f5..ddad6f99d1 100644 --- a/cmd/goal/network.go +++ b/cmd/goal/network.go @@ -17,9 +17,12 @@ package main import ( + _ "embed" "fmt" + "io" "os" "path/filepath" + "strings" "github.com/spf13/cobra" @@ -45,7 +48,6 @@ func init() { networkCreateCmd.Flags().StringVarP(&networkName, "network", "n", "", "Specify the name to use for the private network") networkCreateCmd.Flags().StringVarP(&networkTemplateFile, "template", "t", "", "Specify the path to the template file for the network") - networkCreateCmd.MarkFlagRequired("template") networkCreateCmd.Flags().BoolVarP(&noImportKeys, "noimportkeys", "K", false, "Do not import root keys when creating the network (by default will import)") networkCreateCmd.Flags().BoolVar(&noClean, "noclean", false, "Prevents auto-cleanup on error - for diagnosing problems") networkCreateCmd.Flags().BoolVar(&devModeOverride, "devMode", false, "Forces the configuration to enable DevMode, returns an error if the template is not compatible with DevMode.") @@ -73,6 +75,9 @@ The basic idea is that we create one or more data directories and wallets to for }, } +//go:embed defaultNetworkTemplate.json +var defaultNetworkTemplate string + var networkCreateCmd = &cobra.Command{ Use: "create", Short: "Create a private named network from a template", @@ -83,10 +88,25 @@ var networkCreateCmd = &cobra.Command{ if err != nil { panic(err) } - networkTemplateFile, err := filepath.Abs(networkTemplateFile) - if err != nil { - panic(err) + + var templateReader io.Reader + + if networkTemplateFile == "" { + templateReader = strings.NewReader(defaultNetworkTemplate) + } else { + networkTemplateFile, err = filepath.Abs(networkTemplateFile) + if err != nil { + panic(err) + } + file, err := os.Open(networkTemplateFile) + if err != nil { + reportErrorf(errorCreateNetwork, err) + } + + defer file.Close() + templateReader = file } + // Make sure target directory does not exist or is empty if util.FileExists(networkRootDir) && !util.IsEmpty(networkRootDir) { reportErrorf(infoNetworkAlreadyExists, networkRootDir) @@ -104,7 +124,7 @@ var networkCreateCmd = &cobra.Command{ consensus, _ = config.PreloadConfigurableConsensusProtocols(dataDir) } - network, err := netdeploy.CreateNetworkFromTemplate(networkName, networkRootDir, networkTemplateFile, binDir, !noImportKeys, nil, consensus, devModeOverride) + network, err := netdeploy.CreateNetworkFromTemplate(networkName, networkRootDir, templateReader, 
binDir, !noImportKeys, nil, consensus, devModeOverride) if err != nil { if noClean { reportInfof(" ** failed ** - Preserving network rootdir '%s'", networkRootDir) diff --git a/netdeploy/network.go b/netdeploy/network.go index 1d5e503de3..520f185a3c 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -19,6 +19,7 @@ package netdeploy import ( "encoding/json" "fmt" + "io" "os" "path/filepath" "sort" @@ -57,15 +58,18 @@ type Network struct { // CreateNetworkFromTemplate uses the specified template to deploy a new private network // under the specified root directory. -func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, importKeys bool, nodeExitCallback nodecontrol.AlgodExitErrorCallback, consensus config.ConsensusProtocols, overrideDevMode bool) (Network, error) { +func CreateNetworkFromTemplate(name, rootDir string, templateReader io.Reader, binDir string, importKeys bool, nodeExitCallback nodecontrol.AlgodExitErrorCallback, consensus config.ConsensusProtocols, overrideDevMode bool) (Network, error) { n := Network{ rootDir: rootDir, nodeExitCallback: nodeExitCallback, } n.cfg.Name = name - n.cfg.TemplateFile = templateFile - template, err := loadTemplate(templateFile) + var err error + template := defaultNetworkTemplate + + err = loadTemplateFromReader(templateReader, &template) + if err == nil { if overrideDevMode { template.Genesis.DevMode = true @@ -73,9 +77,11 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor template.Nodes[0].IsRelay = false } } - err = template.Validate() + } else { + return n, err } - if err != nil { + + if err = template.Validate(); err != nil { return n, err } diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index 4d5aa67b95..34cd01e1b6 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -165,16 +165,16 @@ func loadTemplate(templateFile string) (NetworkTemplate, error) { } defer f.Close() - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - // for arm machines, use smaller key dilution - template.Genesis.PartKeyDilution = 100 - } - err = loadTemplateFromReader(f, &template) return template, err } func loadTemplateFromReader(reader io.Reader, template *NetworkTemplate) error { + + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + // for arm machines, use smaller key dilution + template.Genesis.PartKeyDilution = 100 + } dec := json.NewDecoder(reader) return dec.Decode(template) } diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index 7294bafc40..c0527fdd62 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -96,7 +96,9 @@ func (f *LibGoalFixture) setup(test TestingTB, testName string, templateFile str os.RemoveAll(f.rootDir) templateFile = filepath.Join(f.testDataDir, templateFile) importKeys := false // Don't automatically import root keys when creating folders, we'll import on-demand - network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, templateFile, f.binDir, importKeys, f.nodeExitWithError, f.consensus, false) + file, err := os.Open(templateFile) + f.failOnError(err, "Template file could not be opened: %v") + network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, file, f.binDir, importKeys, f.nodeExitWithError, f.consensus, false) f.failOnError(err, "CreateNetworkFromTemplate failed: %v") f.network = network diff --git a/test/scripts/test_private_network.sh 
b/test/scripts/test_private_network.sh index 1886a6548e..f1adc7f62b 100755 --- a/test/scripts/test_private_network.sh +++ b/test/scripts/test_private_network.sh @@ -23,6 +23,18 @@ ${GOPATH}/bin/goal network stop -r ${NETROOTPATH} ${GOPATH}/bin/goal network delete -r ${NETROOTPATH} +# default network with no template specified + +rm -rf ${NETROOTPATH} + +${GOPATH}/bin/goal network create -r ${NETROOTPATH} + +${GOPATH}/bin/goal network start -r ${NETROOTPATH} + +${GOPATH}/bin/goal network stop -r ${NETROOTPATH} + +${GOPATH}/bin/goal network delete -r ${NETROOTPATH} + echo "----------------------------------------------------------------------" echo " DONE: test_private_network" echo "----------------------------------------------------------------------" From 7f7939d9438c077d2ec95e4a3fd944084b29de83 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Wed, 1 Feb 2023 16:10:52 -0500 Subject: [PATCH 23/81] ledger: turn deferredCommitContext.newBase into a function (#5093) --- ledger/acctonline.go | 6 +++--- ledger/acctonline_test.go | 7 ++----- ledger/acctupdates.go | 2 +- ledger/acctupdates_test.go | 6 ++---- ledger/catchpointtracker.go | 12 ++++++------ ledger/tracker.go | 6 ++++-- ledger/txtail.go | 8 ++++---- ledger/txtail_test.go | 4 +--- 8 files changed, 23 insertions(+), 28 deletions(-) diff --git a/ledger/acctonline.go b/ledger/acctonline.go index 2cacc0efe7..ea9c411735 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -380,7 +380,7 @@ func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error { if err != nil { return err } - end, err := ao.roundParamsOffset(dcc.newBase) + end, err := ao.roundParamsOffset(dcc.newBase()) if err != nil { return err } @@ -388,7 +388,7 @@ func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error { dcc.onlineRoundParams = ao.onlineRoundParamsData[start+1 : end+1] maxOnlineLookback := basics.Round(ao.maxBalLookback()) - dcc.onlineAccountsForgetBefore = (dcc.newBase + 1).SubSaturate(maxOnlineLookback) + dcc.onlineAccountsForgetBefore = (dcc.newBase() + 1).SubSaturate(maxOnlineLookback) if dcc.lowestRound > 0 && dcc.lowestRound < dcc.onlineAccountsForgetBefore { // extend history as needed dcc.onlineAccountsForgetBefore = dcc.lowestRound @@ -440,7 +440,7 @@ func (ao *onlineAccounts) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe func (ao *onlineAccounts) postCommit(ctx context.Context, dcc *deferredCommitContext) { offset := dcc.offset - newBase := dcc.newBase + newBase := dcc.newBase() ao.accountsMu.Lock() // Drop reference counts to modified accounts, and evict them diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index 050785e10a..14d92fb67f 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -50,8 +50,6 @@ func commitSync(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracker, rnd ml.trackers.accountsWriting.Add(1) // do not take any locks since all operations are synchronous - newBase := basics.Round(dcc.offset) + dcc.oldBase - dcc.newBase = newBase err := ml.trackers.commitRound(dcc) require.NoError(t, err) }() @@ -73,8 +71,7 @@ func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracke ml.trackers.accountsWriting.Add(1) // do not take any locks since all operations are synchronous - newBase := basics.Round(dcc.offset) + dcc.oldBase - dcc.newBase = newBase + newBase := dcc.newBase() dcc.flushTime = time.Now() for _, lt := range ml.trackers.trackers { @@ -102,7 +99,7 @@ func commitSyncPartial(t *testing.T, oa 
*onlineAccounts, ml *mockLedgerForTracke func commitSyncPartialComplete(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracker, dcc *deferredCommitContext) { defer ml.trackers.accountsWriting.Done() - ml.trackers.dbRound = dcc.newBase + ml.trackers.dbRound = dcc.newBase() for _, lt := range ml.trackers.trackers { lt.postCommit(ml.trackers.ctx, dcc) } diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index dc92684ea0..8e5ec2f752 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1729,7 +1729,7 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon offset := dcc.offset dbRound := dcc.oldBase - newBase := dcc.newBase + newBase := dcc.newBase() dcc.updatingBalancesDuration = time.Since(dcc.flushTime) diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 607e324919..7d6c3485c6 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -2183,8 +2183,7 @@ func TestAcctUpdatesResources(t *testing.T) { defer ml.trackers.accountsWriting.Done() // do not take any locks since all operations are synchronous - newBase := basics.Round(dcc.offset) + dcc.oldBase - dcc.newBase = newBase + newBase := dcc.newBase() err := au.prepareCommit(dcc) require.NoError(t, err) @@ -2467,8 +2466,7 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe defer ml.trackers.accountsWriting.Done() // do not take any locks since all operations are synchronous - newBase := basics.Round(dcc.offset) + dcc.oldBase - dcc.newBase = newBase + newBase := dcc.newBase() err := au.prepareCommit(dcc) require.NoError(t, err) diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index b6b996da4d..ecb0301b66 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -518,7 +518,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d dcc.stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano()) } - err = ct.accountsUpdateBalances(dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.oldBase, dcc.newBase) + err = ct.accountsUpdateBalances(dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.oldBase, dcc.newBase()) if err != nil { return err } @@ -867,11 +867,11 @@ func (ct *catchpointTracker) pruneFirstStageRecordsData(ctx context.Context, max func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { if dcc.catchpointFirstStage { - err := ct.finishFirstStage(ctx, dcc.newBase, dcc.updatingBalancesDuration) + err := ct.finishFirstStage(ctx, dcc.newBase(), dcc.updatingBalancesDuration) if err != nil { ct.log.Warnf( "error finishing catchpoint's first stage dcc.newBase: %d err: %v", - dcc.newBase, err) + dcc.newBase(), err) } } @@ -885,13 +885,13 @@ func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferr } // Prune first stage catchpoint records from the database. 
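// (After this patch, newBase is no longer a stored field that every caller
// must remember to populate; it is computed on demand as oldBase + offset by
// the new deferredCommitContext.newBase() accessor added in ledger/tracker.go,
// so the pruning horizon below always matches the commit range being flushed.)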
- if uint64(dcc.newBase) >= dcc.catchpointLookback { + if uint64(dcc.newBase()) >= dcc.catchpointLookback { err := ct.pruneFirstStageRecordsData( - ctx, dcc.newBase-basics.Round(dcc.catchpointLookback)) + ctx, dcc.newBase()-basics.Round(dcc.catchpointLookback)) if err != nil { ct.log.Warnf( "error pruning first stage records and data dcc.newBase: %d err: %v", - dcc.newBase, err) + dcc.newBase(), err) } } } diff --git a/ledger/tracker.go b/ledger/tracker.go index d43faec3f5..caf59f27e6 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -233,7 +233,6 @@ type deferredCommitRange struct { type deferredCommitContext struct { deferredCommitRange - newBase basics.Round flushTime time.Time genesisProto config.ConsensusParams @@ -273,6 +272,10 @@ type deferredCommitContext struct { updateStats bool } +func (dcc deferredCommitContext) newBase() basics.Round { + return dcc.oldBase + basics.Round(dcc.offset) +} + var errMissingAccountUpdateTracker = errors.New("initializeTrackerCaches : called without a valid accounts update tracker") func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTracker, cfg config.Local) (err error) { @@ -495,7 +498,6 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { dcc.offset = offset dcc.oldBase = dbRound - dcc.newBase = newBase dcc.flushTime = time.Now() for _, lt := range tr.trackers { diff --git a/ledger/txtail.go b/ledger/txtail.go index 4fc1ea63d4..cbd1b5266b 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -249,10 +249,10 @@ func (t *txTail) prepareCommit(dcc *deferredCommitContext) (err error) { dcc.txTailDeltas = append(dcc.txTailDeltas, t.roundTailSerializedDeltas[i]) } lowest := t.lowestBlockHeaderRound - proto, ok := config.Consensus[t.blockHeaderData[dcc.newBase].CurrentProtocol] + proto, ok := config.Consensus[t.blockHeaderData[dcc.newBase()].CurrentProtocol] t.tailMu.RUnlock() if !ok { - return fmt.Errorf("round %d not found in blockHeaderData: lowest=%d, base=%d", dcc.newBase, lowest, dcc.oldBase) + return fmt.Errorf("round %d not found in blockHeaderData: lowest=%d, base=%d", dcc.newBase(), lowest, dcc.oldBase) } // get the MaxTxnLife from the consensus params of the latest round in this commit range // preserve data for MaxTxnLife + DeeperBlockHeaderHistory @@ -274,7 +274,7 @@ func (t *txTail) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommi // determine the round to remove data // the formula is similar to the committedUpTo: rnd + 1 - retain size - forgetBeforeRound := (dcc.newBase + 1).SubSaturate(basics.Round(dcc.txTailRetainSize)) + forgetBeforeRound := (dcc.newBase() + 1).SubSaturate(basics.Round(dcc.txTailRetainSize)) baseRound := dcc.oldBase + 1 if err := arw.TxtailNewRound(ctx, baseRound, dcc.txTailDeltas, forgetBeforeRound); err != nil { return fmt.Errorf("txTail: unable to persist new round %d : %w", baseRound, err) @@ -290,7 +290,7 @@ func (t *txTail) postCommit(ctx context.Context, dcc *deferredCommitContext) { // get the MaxTxnLife from the consensus params of the latest round in this commit range // preserve data for MaxTxnLife + DeeperBlockHeaderHistory rounds - newLowestRound := (dcc.newBase + 1).SubSaturate(basics.Round(dcc.txTailRetainSize)) + newLowestRound := (dcc.newBase() + 1).SubSaturate(basics.Round(dcc.txTailRetainSize)) for t.lowestBlockHeaderRound < newLowestRound { delete(t.blockHeaderData, t.lowestBlockHeaderRound) t.lowestBlockHeaderRound++ diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index 5831db3239..1267c8350b 100644 --- 
a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -294,7 +294,6 @@ func TestTxTailDeltaTracking(t *testing.T) { offset: 1, catchpointFirstStage: true, }, - newBase: basics.Round(i), } err = txtail.prepareCommit(dcc) require.NoError(t, err) @@ -363,12 +362,11 @@ func BenchmarkTxTailBlockHeaderCache(b *testing.B) { oldBase: dbRound, lookback: lookback, }, - newBase: dbRound + basics.Round(offset), } err := tail.prepareCommit(dcc) require.NoError(b, err) tail.postCommit(context.Background(), dcc) - dbRound = dcc.newBase + dbRound = dcc.newBase() require.Less(b, len(tail.blockHeaderData), 1001+10) } } From 96e2328189f5d8d37e71e0d2d46ef7009de3c6e3 Mon Sep 17 00:00:00 2001 From: abebeos <110243666+abebeos@users.noreply.github.com> Date: Wed, 1 Feb 2023 23:35:36 +0200 Subject: [PATCH 24/81] link to security doc, minor formatting (#5095) --- CONTRIBUTING.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 517fb17fa8..04f8c9508c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,7 +15,7 @@ Some of our most active projects include: * [py-algorand-sdk](https://github.com/algorand/py-algorand-sdk) - Python SDK * [sandbox](https://github.com/algorand/sandbox) - Algorand node quickstart tool -# Filing Issues +## Filing Issues Did you discover a bug? Do you have a feature request? Filing issues is an easy way anyone can contribute and helps us improve Algorand. We use GitHub Issues to track all known bugs and feature requests. @@ -25,15 +25,13 @@ If you’d like to contribute to any of the repositories, please file a [GitHub See the GitHub help guide for more information on [filing an issue](https://help.github.com/en/articles/creating-an-issue). -## Vulnerabilities +## Security / Vulnerabilities -Please don't create issues for any security vulnerabilities. Instead, we would appreciate it if you reported them through our [vulnerability disclosure form][vuln_url]. This allows us to distribute a fix before the vulnerability is exploited. - -Additionally, if you believe that you've discovered a security vulnerability, you might qualify for our bug bounty program. Visit our [bug bounty site][bug_bounty_url] for details. +Please refer to our [SECURITY](SECURITY.md) document. If you have any questions, don't hesitate to contact us at security@algorand.com. -# Contribution Model +## Contribution Model For each of our repositories we use the same model for contributing code. Developers wanting to contribute must create pull requests. This process is described in the GitHub [Creating a pull request from a fork](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork) documentation. Each pull request should be initiated against the `master` branch in the Algorand repository. After a pull request is submitted the core development team will review the submission and communicate with the developer using the comments sections of the PR. After the submission is reviewed and approved, it will be merged into the `master` branch of the source. These changes will be merged to our release branch on the next viable release date. For the SDKs, this may be immediate. Changes to the node software may take more time as we must ensure and verify the security, as well as apply protocol upgrades in an orderly way. 
@@ -41,7 +39,7 @@ Note: some of our projects are using gitflow, for these you will open pull reque Again, if you have a patch for a critical security vulnerability, please use our [vulnerability disclosure form][vuln_url] instead of creating a PR. We'll follow up with you on distributing the patch before we merge it. -# Code Guidelines +## Code Guidelines For Go code we use the [Golang guidelines defined here](https://golang.org/doc/effective_go.html). * Code must adhere to the official Go formatting guidelines (i.e. uses gofmt). @@ -52,7 +50,7 @@ For JavaScript code we use the [MDN formatting rules](https://developer.mozilla. For Java code we use [Oracle’s standard formatting rules for Java](https://www.oracle.com/technetwork/java/codeconventions-150003.pdf). -# Communication Channels +## Communication Channels The core development team monitors the Algorand community forums and regularly responds to questions and suggestions. Issues and Pull Requests are handled on GitHub. From e544fc8f6131c924d5ef56bbda9eba98bedf335a Mon Sep 17 00:00:00 2001 From: shiqizng <80276844+shiqizng@users.noreply.github.com> Date: Thu, 2 Feb 2023 09:08:14 -0500 Subject: [PATCH 25/81] kv delta fix (#5084) --- daemon/algod/api/server/v2/delta.go | 3 ++- daemon/algod/api/server/v2/delta_test.go | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/daemon/algod/api/server/v2/delta.go b/daemon/algod/api/server/v2/delta.go index d70e8275a6..ae1d2d43e5 100644 --- a/daemon/algod/api/server/v2/delta.go +++ b/daemon/algod/api/server/v2/delta.go @@ -85,9 +85,10 @@ func StateDeltaToLedgerDelta(sDelta ledgercore.StateDelta, consensus config.Cons for key, kvDelta := range sDelta.KvMods { var keyBytes = []byte(key) + var valueBytes = kvDelta.Data keyValues = append(keyValues, model.KvDelta{ Key: &keyBytes, - Value: &kvDelta.Data, + Value: &valueBytes, }) } diff --git a/daemon/algod/api/server/v2/delta_test.go b/daemon/algod/api/server/v2/delta_test.go index 29135ce47c..316dbb8078 100644 --- a/daemon/algod/api/server/v2/delta_test.go +++ b/daemon/algod/api/server/v2/delta_test.go @@ -89,6 +89,12 @@ func TestDelta(t *testing.T) { Data: []byte("foobar"), OldData: []byte("barfoo"), }, + "box2": { + Data: []byte("alpha"), + }, + "box3": { + Data: []byte("beta"), + }, }, Txleases: map[ledgercore.Txlease]basics.Round{ {Sender: poolAddr, Lease: txLease}: 600, @@ -134,6 +140,10 @@ func TestDelta(t *testing.T) { require.Equal(t, len(original.KvMods), len(*converted.KvMods)) require.Equal(t, []uint8("box1"), *(*converted.KvMods)[0].Key) require.Equal(t, original.KvMods["box1"].Data, *(*converted.KvMods)[0].Value) + require.Equal(t, []uint8("box2"), *(*converted.KvMods)[1].Key) + require.Equal(t, original.KvMods["box2"].Data, *(*converted.KvMods)[1].Value) + require.Equal(t, []uint8("box3"), *(*converted.KvMods)[2].Key) + require.Equal(t, original.KvMods["box3"].Data, *(*converted.KvMods)[2].Value) require.Equal(t, txLease[:], (*converted.TxLeases)[0].Lease) require.Equal(t, poolAddr.String(), (*converted.TxLeases)[0].Sender) require.Equal(t, uint64(600), (*converted.TxLeases)[0].Expiration) From 6399f30535eb3684b3034b2ebbf7fb840bdd7634 Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Thu, 2 Feb 2023 06:32:21 -0800 Subject: [PATCH 26/81] Rename enums by default (#5089) --- .../server/v2/generated/model/model_types.yml | 2 ++ .../api/server/v2/generated/model/types.go | 28 +++++++++---------- daemon/algod/api/server/v2/handlers.go | 12 ++++---- daemon/algod/api/server/v2/utils.go | 10 +++---- 4 files changed, 27 
insertions(+), 25 deletions(-) diff --git a/daemon/algod/api/server/v2/generated/model/model_types.yml b/daemon/algod/api/server/v2/generated/model/model_types.yml index ecb10ba22e..ad39740b12 100644 --- a/daemon/algod/api/server/v2/generated/model/model_types.yml +++ b/daemon/algod/api/server/v2/generated/model/model_types.yml @@ -6,3 +6,5 @@ output-options: integer: uint64 skip-prune: true output: ./server/v2/generated/model/types.go +compatibility: + always-prefix-enum-values: true diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go index e9e07b3592..e3fd2d22ff 100644 --- a/daemon/algod/api/server/v2/generated/model/types.go +++ b/daemon/algod/api/server/v2/generated/model/types.go @@ -23,9 +23,9 @@ const ( // Defines values for AddressRole. const ( - FreezeTarget AddressRole = "freeze-target" - Receiver AddressRole = "receiver" - Sender AddressRole = "sender" + AddressRoleFreezeTarget AddressRole = "freeze-target" + AddressRoleReceiver AddressRole = "receiver" + AddressRoleSender AddressRole = "sender" ) // Defines values for Format. @@ -43,13 +43,13 @@ const ( // Defines values for TxType. const ( - Acfg TxType = "acfg" - Afrz TxType = "afrz" - Appl TxType = "appl" - Axfer TxType = "axfer" - Keyreg TxType = "keyreg" - Pay TxType = "pay" - Stpf TxType = "stpf" + TxTypeAcfg TxType = "acfg" + TxTypeAfrz TxType = "afrz" + TxTypeAppl TxType = "appl" + TxTypeAxfer TxType = "axfer" + TxTypeKeyreg TxType = "keyreg" + TxTypePay TxType = "pay" + TxTypeStpf TxType = "stpf" ) // Defines values for TransactionProofResponseHashtype. @@ -66,8 +66,8 @@ const ( // Defines values for AccountInformationParamsExclude. const ( - All AccountInformationParamsExclude = "all" - None AccountInformationParamsExclude = "none" + AccountInformationParamsExcludeAll AccountInformationParamsExclude = "all" + AccountInformationParamsExcludeNone AccountInformationParamsExclude = "none" ) // Defines values for AccountApplicationInformationParamsFormat. @@ -114,8 +114,8 @@ const ( // Defines values for PendingTransactionInformationParamsFormat. const ( - Json PendingTransactionInformationParamsFormat = "json" - Msgpack PendingTransactionInformationParamsFormat = "msgpack" + PendingTransactionInformationParamsFormatJson PendingTransactionInformationParamsFormat = "json" + PendingTransactionInformationParamsFormatMsgpack PendingTransactionInformationParamsFormat = "msgpack" ) // Account Account information at a given round. diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 206d036fcc..16cb477d91 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -341,7 +341,7 @@ func (v2 *Handlers) ShutdownNode(ctx echo.Context, params model.ShutdownNodePara // AccountInformation gets account information for a given account. // (GET /v2/accounts/{address}) func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params model.AccountInformationParams) error { - handle, contentType, err := getCodecHandle((*model.Format)(params.Format)) + handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } @@ -488,7 +488,7 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres // AccountAssetInformation gets account information about a given asset. 
// (GET /v2/accounts/{address}/assets/{asset-id}) func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, assetID uint64, params model.AccountAssetInformationParams) error { - handle, contentType, err := getCodecHandle((*model.Format)(params.Format)) + handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } @@ -541,7 +541,7 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as // AccountApplicationInformation gets account information about a given app. // (GET /v2/accounts/{address}/applications/{application-id}) func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address string, applicationID uint64, params model.AccountApplicationInformationParams) error { - handle, contentType, err := getCodecHandle((*model.Format)(params.Format)) + handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } @@ -598,7 +598,7 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri // GetBlock gets the block for the given round. // (GET /v2/blocks/{round}) func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlockParams) error { - handle, contentType, err := getCodecHandle((*model.Format)(params.Format)) + handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } @@ -1187,7 +1187,7 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, response.Inners = convertInners(&txn) } - handle, contentType, err := getCodecHandle((*model.Format)(params.Format)) + handle, contentType, err := getCodecHandle((*string)(params.Format)) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } @@ -1221,7 +1221,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format addrPtr = &addr } - handle, contentType, err := getCodecHandle((*model.Format)(format)) + handle, contentType, err := getCodecHandle(format) if err != nil { return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go index 52690cb8e3..32f18ea7d4 100644 --- a/daemon/algod/api/server/v2/utils.go +++ b/daemon/algod/api/server/v2/utils.go @@ -206,16 +206,16 @@ func computeAppIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 { } // getCodecHandle converts a format string into the encoder + content type -func getCodecHandle(formatPtr *model.Format) (codec.Handle, string, error) { - format := model.Json +func getCodecHandle(formatPtr *string) (codec.Handle, string, error) { + format := "json" if formatPtr != nil { - format = model.PendingTransactionInformationParamsFormat(strings.ToLower(string(*formatPtr))) + format = strings.ToLower(*formatPtr) } switch format { - case model.Json: + case "json": return protocol.JSONStrictHandle, "application/json", nil - case model.Msgpack: + case "msgpack": fallthrough case "msgp": return protocol.CodecHandle, "application/msgpack", nil From 9ffa72a922c55096ad55453fb6ca559136b57d4e Mon Sep 17 00:00:00 2001 From: Mark Ciccarello Date: Thu, 2 Feb 2023 06:37:21 -0800 Subject: [PATCH 27/81] fix for goal node status crash - no longer getting block (#5100) --- daemon/algod/api/server/v2/handlers.go | 8 +------- 1 file changed, 1 insertion(+), 7 
deletions(-) diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 16cb477d91..5f296c5425 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -772,12 +772,6 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error { return internalError(ctx, err, errFailedRetrievingNodeStatus, v2.Log) } - ledger := v2.Node.LedgerForAPI() - latestBlkHdr, err := ledger.BlockHdr(ledger.Latest()) - if err != nil { - return internalError(ctx, err, errFailedRetrievingLatestBlockHeaderStatus, v2.Log) - } - response := model.NodeStatusResponse{ LastRound: uint64(stat.LastRound), LastVersion: string(stat.LastVersion), @@ -813,7 +807,7 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error { votesNo := votes - votesYes upgradeDelay := uint64(stat.UpgradeDelay) response.UpgradeVotesRequired = &upgradeThreshold - response.UpgradeNodeVote = &latestBlkHdr.UpgradeApprove + response.UpgradeNodeVote = &stat.UpgradeApprove response.UpgradeDelay = &upgradeDelay response.UpgradeVotes = &votes response.UpgradeYesVotes = &votesYes From 2c0a8db3c3286814f72013ec6af47a6eebc88fd0 Mon Sep 17 00:00:00 2001 From: algolucky <105239720+algolucky@users.noreply.github.com> Date: Thu, 2 Feb 2023 10:26:58 -0600 Subject: [PATCH 28/81] fix: disable update-repo-description (#5096) --- .github/workflows/container.yml | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml index f01392632c..d279f141c7 100644 --- a/.github/workflows/container.yml +++ b/.github/workflows/container.yml @@ -52,18 +52,19 @@ jobs: URL=${{ github.server_url }}/${{ github.repository }}.git BRANCH=${{ github.ref_name }} - update-repo-description: - name: Update DockerHub Repository Description - runs-on: ubuntu-latest - if: github.ref == format('refs/heads/{0}', 'master') - steps: - - name: Checkout Code - uses: actions/checkout@v3 + # TODO: uncomment when https://github.com/docker/hub-tool/issues/172 is complete + # update-repo-description: + # name: Update DockerHub Repository Description + # runs-on: ubuntu-latest + # if: github.ref == format('refs/heads/{0}', 'master') + # steps: + # - name: Checkout Code + # uses: actions/checkout@v3 - - name: Update DockerHub Repository Description - uses: peter-evans/dockerhub-description@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - repository: ${{ github.repository_owner }}/algod - readme-filepath: ./docker/README.md + # - name: Update DockerHub Repository Description + # uses: peter-evans/dockerhub-description@v3 + # with: + # username: ${{ secrets.DOCKERHUB_USERNAME }} + # password: ${{ secrets.DOCKERHUB_TOKEN }} + # repository: ${{ github.repository_owner }}/algod + # readme-filepath: ./docker/README.md From 6487b374ca303e98317739a5f129b12a35f76176 Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Thu, 2 Feb 2023 10:46:23 -0800 Subject: [PATCH 29/81] tests: fix TestDelta by sorting results (#5103) --- daemon/algod/api/server/v2/delta_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/daemon/algod/api/server/v2/delta_test.go b/daemon/algod/api/server/v2/delta_test.go index 316dbb8078..31154b951d 100644 --- a/daemon/algod/api/server/v2/delta_test.go +++ b/daemon/algod/api/server/v2/delta_test.go @@ -17,6 +17,8 @@ package v2 import ( + "bytes" + "sort" "testing" "github.com/stretchr/testify/require" @@ -138,6 +140,10 @@ func TestDelta(t *testing.T) { 
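// (KvMods is a Go map, so the converted KvDelta slice comes back in
// nondeterministic order; the fix below sorts the result by key before
// asserting on specific indices.)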
require.Equal(t, expAppDelta.Addr.String(), actAppDelta.Address) require.Equal(t, expAppDelta.Params.Deleted, actAppDelta.AppDeleted) require.Equal(t, len(original.KvMods), len(*converted.KvMods)) + // sort the result so we have deterministic order + sort.Slice(*converted.KvMods, func(i, j int) bool { + return bytes.Compare(*(*converted.KvMods)[i].Key, *(*converted.KvMods)[j].Key) < 0 + }) require.Equal(t, []uint8("box1"), *(*converted.KvMods)[0].Key) require.Equal(t, original.KvMods["box1"].Data, *(*converted.KvMods)[0].Value) require.Equal(t, []uint8("box2"), *(*converted.KvMods)[1].Key) From 4e72c58fd61492c7df0d8eea067b7dbe4adc9f8c Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Mon, 6 Feb 2023 08:35:32 -0800 Subject: [PATCH 30/81] e2e-test: Fix follower e2e test (#5114) --- .../features/followerNode/syncDeltas_test.go | 49 ++++++------------- .../TwoNodesFollower100Second.json | 28 +++++++++++ 2 files changed, 42 insertions(+), 35 deletions(-) create mode 100644 test/testdata/nettemplates/TwoNodesFollower100Second.json diff --git a/test/e2e-go/features/followerNode/syncDeltas_test.go b/test/e2e-go/features/followerNode/syncDeltas_test.go index df83e6d970..83c131512a 100644 --- a/test/e2e-go/features/followerNode/syncDeltas_test.go +++ b/test/e2e-go/features/followerNode/syncDeltas_test.go @@ -17,13 +17,11 @@ package followerNode import ( - "os" "path/filepath" "testing" "github.com/stretchr/testify/require" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/test/framework/fixtures" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -39,19 +37,19 @@ func TestBasicSyncMode(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) // Overview of this test: - // Start a two-node network (primary has 0%, secondary has 100%) + // Start a two-node network--one in follower mode (follower has 0%, secondary has 100%) // Let it run for a few blocks. - // Spin up a third node in follower mode and retrieve deltas for some rounds using sync round calls. + // Retrieve deltas for some rounds using sync round calls on the follower node. var fixture fixtures.RestClientFixture // Give the second node (which starts up last) all the stake so that its proposal always has better credentials, // and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake // distribution so this is fine. 
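// The loop below follows the intended follower-node sequence: read the
// current sync round (GetSyncRound), wait for the follower to reach it,
// fetch that round's state delta (GetLedgerStateDelta), and only then
// advance the sync round (SetSyncRound) so the node can move forward.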
- fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100Second.json")) + fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesFollower100Second.json")) defer fixture.Shutdown() - // Get 2nd node so we wait until we know they're at target block - nc, err := fixture.GetNodeController("Node") + // Get controller for Primary node to see the state of the chain + nc, err := fixture.GetNodeController("Primary") a.NoError(err) // Let the network make some progress @@ -60,45 +58,26 @@ func TestBasicSyncMode(t *testing.T) { err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) a.NoError(err) - // Now spin up third node in follower mode - cloneDataDir := filepath.Join(fixture.PrimaryDataDir(), "../clone") - cloneLedger := false - err = fixture.NC.Clone(cloneDataDir, cloneLedger) + // Get the follower client, and exercise the sync/ledger functionality + followControl, err := fixture.GetNodeController("Follower") a.NoError(err) - // Set config.Local::EnableFollowMode = true - cfg := config.GetDefaultLocal() - cfg.EnableFollowMode = true - cloneCfg := filepath.Join(cloneDataDir, config.ConfigFilename) - err = cfg.SaveToFile(cloneCfg) - a.NoError(err) - // Start the node - cloneClient, err := fixture.StartNode(cloneDataDir) - a.NoError(err) - defer shutdownClonedNode(cloneDataDir, &fixture, t) + followClient := fixture.GetAlgodClientForController(followControl) // Now, catch up round by round, retrieving state deltas for each for round := uint64(1); round <= waitForRound; round++ { // assert sync round set - rResp, err := cloneClient.GetSyncRound() + rResp, err := followClient.GetSyncRound() a.NoError(err) a.Equal(round, rResp.Round) + err = fixture.ClientWaitForRoundWithTimeout(followClient, round) + a.NoError(err) // retrieve state delta - gResp, err := cloneClient.GetLedgerStateDelta(round) + gResp, err := followClient.GetLedgerStateDelta(round) a.NoError(err) a.NotNil(gResp) // set sync round next - err = cloneClient.SetSyncRound(round + 1) + err = followClient.SetSyncRound(round + 1) a.NoError(err) } - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(fixture.LibGoalClient, waitForRound) a.NoError(err) } - -// shutdownClonedNode replicates the behavior of fixture.Shutdown() for network nodes on cloned node -// It deletes the directory if the test passes, otherwise it preserves it -func shutdownClonedNode(nodeDataDir string, f *fixtures.RestClientFixture, t *testing.T) { - nc := f.LibGoalFixture.GetNodeControllerForDataDir(nodeDataDir) - nc.FullStop() - if !t.Failed() { - os.RemoveAll(nodeDataDir) - } -} diff --git a/test/testdata/nettemplates/TwoNodesFollower100Second.json b/test/testdata/nettemplates/TwoNodesFollower100Second.json new file mode 100644 index 0000000000..921066b48b --- /dev/null +++ b/test/testdata/nettemplates/TwoNodesFollower100Second.json @@ -0,0 +1,28 @@ +{ + "Genesis": { + "LastPartKeyRound": 3000, + "NetworkName": "tbd", + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 100, + "Online": true + } + ] + }, + "Nodes": [ + { + "Name": "Follower", + "IsRelay": true, + "Wallets": [], + "ConfigJSONOverride": "{\"EnableFollowMode\":true}" + }, + { + "Name": "Primary", + "Wallets": [ + { "Name": "Wallet1", + "ParticipationOnly": false } + ] + } + ] +} From a99148ba4b5d58e8fda86206c20d1520e3b9dec7 Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Mon, 6 Feb 2023 17:14:04 +0000 Subject: [PATCH 31/81] Bump Version, Remove buildnumber.dat 
and genesistimestamp.dat files. --- buildnumber.dat | 1 - config/version.go | 2 +- genesistimestamp.dat | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 buildnumber.dat delete mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat deleted file mode 100644 index 0cfbf08886..0000000000 --- a/buildnumber.dat +++ /dev/null @@ -1 +0,0 @@ -2 diff --git a/config/version.go b/config/version.go index 4c91e271fd..06701e9f1f 100644 --- a/config/version.go +++ b/config/version.go @@ -33,7 +33,7 @@ const VersionMajor = 3 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 14 +const VersionMinor = 15 // Version is the type holding our full version information. type Version struct { diff --git a/genesistimestamp.dat b/genesistimestamp.dat deleted file mode 100644 index c72c6a7795..0000000000 --- a/genesistimestamp.dat +++ /dev/null @@ -1 +0,0 @@ -1558657885 From 5511cf3d177dc2b67485fe3bcf10fcb5e875900a Mon Sep 17 00:00:00 2001 From: Will Winder Date: Mon, 6 Feb 2023 17:11:09 -0500 Subject: [PATCH 32/81] node: Fix time since last block. (#5113) --- node/follower_node.go | 18 +++++++++++++----- node/node.go | 18 +++++++++++++----- test/scripts/e2e_subs/time.sh | 21 +++++++++++++++++++++ 3 files changed, 47 insertions(+), 10 deletions(-) create mode 100755 test/scripts/e2e_subs/time.sh diff --git a/node/follower_node.go b/node/follower_node.go index 591a6cda55..95c2787d81 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -251,18 +251,26 @@ func (node *AlgorandFollowerNode) GetPendingTransaction(_ transactions.Txid) (re } // Status returns a StatusReport structure reporting our status as Active and with our ledger's LastRound -func (node *AlgorandFollowerNode) Status() (s StatusReport, err error) { +func (node *AlgorandFollowerNode) Status() (StatusReport, error) { node.syncStatusMu.Lock() - s.LastRoundTimestamp = node.lastRoundTimestamp - s.HasSyncedSinceStartup = node.hasSyncedSinceStartup + lastRoundTimestamp := node.lastRoundTimestamp + hasSyncedSinceStartup := node.hasSyncedSinceStartup node.syncStatusMu.Unlock() node.mu.Lock() defer node.mu.Unlock() + var s StatusReport + var err error if node.catchpointCatchupService != nil { - return catchpointCatchupStatus(node.catchpointCatchupService.GetLatestBlockHeader(), node.catchpointCatchupService.GetStatistics()), nil + s = catchpointCatchupStatus(node.catchpointCatchupService.GetLatestBlockHeader(), node.catchpointCatchupService.GetStatistics()) + } else { + s, err = latestBlockStatus(node.ledger, node.catchupService) } - return latestBlockStatus(node.ledger, node.catchupService) + + s.LastRoundTimestamp = lastRoundTimestamp + s.HasSyncedSinceStartup = hasSyncedSinceStartup + + return s, err } // GenesisID returns the ID of the genesis node. 
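Both Status() implementations now share the same locking shape. A minimal
self-contained sketch of that shape follows; node, statusReport, and
inCatchpointCatchup are simplified stand-ins for illustration, not the real
types:

package nodestatus

import (
	"sync"
	"time"
)

type statusReport struct {
	LastRoundTimestamp    time.Time
	HasSyncedSinceStartup bool
}

type node struct {
	syncStatusMu          sync.Mutex
	mu                    sync.Mutex
	lastRoundTimestamp    time.Time
	hasSyncedSinceStartup bool
	inCatchpointCatchup   bool
}

func (n *node) status() (statusReport, error) {
	// Snapshot the sync-tracking fields under their own lock first.
	n.syncStatusMu.Lock()
	lastRoundTimestamp := n.lastRoundTimestamp
	hasSyncedSinceStartup := n.hasSyncedSinceStartup
	n.syncStatusMu.Unlock()

	n.mu.Lock()
	defer n.mu.Unlock()

	var s statusReport
	var err error
	if n.inCatchpointCatchup {
		// build the catchpoint-catchup flavored report here
	} else {
		// build the latest-block flavored report here
	}

	// Attach the snapshot after either branch. Before this patch the
	// catchpoint branch returned its report directly, discarding these
	// fields and freezing "Time since last block" at 0.0s in goal node status.
	s.LastRoundTimestamp = lastRoundTimestamp
	s.HasSyncedSinceStartup = hasSyncedSinceStartup
	return s, err
}

Attaching the snapshot after the report is built is the actual fix: neither
code path can overwrite the timestamp fields by returning early.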
diff --git a/node/node.go b/node/node.go index f648779697..c27cef8b51 100644 --- a/node/node.go +++ b/node/node.go @@ -678,18 +678,26 @@ func (node *AlgorandFullNode) GetPendingTransaction(txID transactions.Txid) (res } // Status returns a StatusReport structure reporting our status as Active and with our ledger's LastRound -func (node *AlgorandFullNode) Status() (s StatusReport, err error) { +func (node *AlgorandFullNode) Status() (StatusReport, error) { node.syncStatusMu.Lock() - s.LastRoundTimestamp = node.lastRoundTimestamp - s.HasSyncedSinceStartup = node.hasSyncedSinceStartup + lastRoundTimestamp := node.lastRoundTimestamp + hasSyncedSinceStartup := node.hasSyncedSinceStartup node.syncStatusMu.Unlock() node.mu.Lock() defer node.mu.Unlock() + var s StatusReport + var err error if node.catchpointCatchupService != nil { - return catchpointCatchupStatus(node.catchpointCatchupService.GetLatestBlockHeader(), node.catchpointCatchupService.GetStatistics()), nil + s = catchpointCatchupStatus(node.catchpointCatchupService.GetLatestBlockHeader(), node.catchpointCatchupService.GetStatistics()) + } else { + s, err = latestBlockStatus(node.ledger, node.catchupService) } - return latestBlockStatus(node.ledger, node.catchupService) + + s.LastRoundTimestamp = lastRoundTimestamp + s.HasSyncedSinceStartup = hasSyncedSinceStartup + + return s, err } func catchpointCatchupStatus(lastBlockHeader bookkeeping.BlockHeader, stats catchup.CatchpointCatchupStats) (s StatusReport) { diff --git a/test/scripts/e2e_subs/time.sh b/test/scripts/e2e_subs/time.sh new file mode 100755 index 0000000000..a248523632 --- /dev/null +++ b/test/scripts/e2e_subs/time.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +filename=$(basename "$0") +scriptname="${filename%.*}" +date "+${scriptname} start %Y%m%d_%H%M%S" + +set -exo pipefail +export SHELLOPTS + +# make sure the time is updating +for i in {1..20}; do + output=$(goal node status) + if [[ $output != *"Time since last block: 0.0s"* ]]; then + exit 0 + fi + sleep 0.5 +done + +echo "Time since last block is still 0.0s after 10 seconds" +goal node status +exit 1 From 0f9080580b69570ca19c351a090e488dce077b4f Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 7 Feb 2023 15:51:34 -0500 Subject: [PATCH 33/81] tests: debug flaky expect test (#5119) --- test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp | 2 +- test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp index d26074e2fa..0a3b8ea48e 100644 --- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp +++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp @@ -182,7 +182,7 @@ if { [catch { # once the node is started we can clear the ::GLOBAL_TEST_ALGO_DIR, so that shutdown would be done as a network. 
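# (The change below replaces the hard-coded round 38 with $CATCHPOINT_ROUND,
# so the wait tracks whatever catchpoint round the test actually created.)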
set ::GLOBAL_TEST_ALGO_DIR "" - ::AlgorandGoal::WaitForRound 38 $TEST_ROOT_DIR/Node + ::AlgorandGoal::WaitForRound $CATCHPOINT_ROUND $TEST_ROOT_DIR/Node ::AlgorandGoal::StopNode $TEST_ROOT_DIR/Node diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index a0e174b12a..e21d3e9ec1 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -52,6 +52,15 @@ proc ::AlgorandGoal::Abort { ERROR } { puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR" puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME" + # dump system and processes information to check memory consumption and if algod procs are still alive + if { $tcl_platform(os) == "Darwin" } { + exec top -l 1 + } elseif { $tcl_platform(os) == "Linux" } { + exec top -n 1 + } else { + # no logging for other platforms + } + ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR log_user 1 @@ -935,7 +944,7 @@ proc ::AlgorandGoal::StartCatchup { NODE_DATA_DIR CATCHPOINT } { return $CATCHPOINT } -# Wait for node to get into catchup mode +# Wait for node to get into fast catchup mode proc ::AlgorandGoal::WaitCatchup { TEST_PRIMARY_NODE_DIR WAIT_DURATION_SEC } { if { [catch { set i 0 From 60f6622dace51e2e58f7e0e2f659d00e3f80d7de Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Tue, 7 Feb 2023 19:34:45 -0800 Subject: [PATCH 34/81] algod: Refactor AccountData conversion (#5098) --- daemon/algod/api/server/v2/account.go | 52 ++++++++++++++++++++++ daemon/algod/api/server/v2/delta.go | 5 +-- daemon/algod/api/server/v2/delta_test.go | 20 +++++---- daemon/algod/api/server/v2/test/helpers.go | 5 --- 4 files changed, 65 insertions(+), 17 deletions(-) diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go index 9c25021b9c..c722ab6825 100644 --- a/daemon/algod/api/server/v2/account.go +++ b/daemon/algod/api/server/v2/account.go @@ -26,6 +26,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/ledgercore" ) // AssetHolding converts between basics.AssetHolding and model.AssetHolding @@ -139,6 +140,57 @@ func AccountDataToAccount( }, nil } +// ledgercoreADToAccount converts a ledgercore.AccountData to model.Account +func ledgercoreADToAccount(addr string, amntWithoutPendingRewards uint64, rnd uint64, + consensus *config.ConsensusParams, ad ledgercore.AccountData) (model.Account, error) { + pendingRewards, overflowed := basics.OSubA(ad.MicroAlgos, basics.MicroAlgos{Raw: amntWithoutPendingRewards}) + if overflowed { + return model.Account{}, errors.New("overflow on pending reward calculation") + } + var apiParticipation *model.AccountParticipation + if ad.VoteID != (crypto.OneTimeSignatureVerifier{}) { + apiParticipation = &model.AccountParticipation{ + VoteParticipationKey: ad.VoteID[:], + SelectionParticipationKey: ad.SelectionID[:], + VoteFirstValid: uint64(ad.VoteFirstValid), + VoteLastValid: uint64(ad.VoteLastValid), + VoteKeyDilution: ad.VoteKeyDilution, + } + if !ad.StateProofID.IsEmpty() { + tmp := ad.StateProofID[:] + apiParticipation.StateProofKey = &tmp + } + } + var authAddr *string = nil + if !ad.AuthAddr.IsZero() { + authAddr = strOrNil(ad.AuthAddr.String()) + } + return model.Account{ + Address: addr, + Amount: ad.MicroAlgos.Raw, + AmountWithoutPendingRewards: amntWithoutPendingRewards, + AppsTotalExtraPages: 
numOrNil(uint64(ad.TotalExtraAppPages)), + AppsTotalSchema: &model.ApplicationStateSchema{ + NumUint: ad.TotalAppSchema.NumUint, + NumByteSlice: ad.TotalAppSchema.NumByteSlice, + }, + AuthAddr: authAddr, + MinBalance: ad.MinBalance(consensus).Raw, + Participation: apiParticipation, + PendingRewards: pendingRewards.Raw, + RewardBase: numOrNil(ad.RewardsBase), + Rewards: ad.RewardedMicroAlgos.Raw, + Round: rnd, + Status: ad.Status.String(), + TotalAppsOptedIn: ad.TotalAppLocalStates, + TotalAssetsOptedIn: ad.TotalAssets, + TotalBoxBytes: numOrNil(ad.TotalBoxBytes), + TotalBoxes: numOrNil(ad.TotalBoxes), + TotalCreatedApps: ad.TotalAppParams, + TotalCreatedAssets: ad.TotalAssetParams, + }, nil +} + func convertTKVToGenerated(tkv *basics.TealKeyValue) *model.TealKeyValueStore { if tkv == nil || len(*tkv) == 0 { return nil diff --git a/daemon/algod/api/server/v2/delta.go b/daemon/algod/api/server/v2/delta.go index ae1d2d43e5..4c4058fcde 100644 --- a/daemon/algod/api/server/v2/delta.go +++ b/daemon/algod/api/server/v2/delta.go @@ -19,7 +19,6 @@ package v2 import ( "errors" "fmt" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" @@ -101,9 +100,7 @@ func StateDeltaToLedgerDelta(sDelta ledgercore.StateDelta, consensus config.Cons return response, errors.New("overflow on pending reward calculation") } - ad := basics.AccountData{} - ledgercore.AssignAccountData(&ad, record.AccountData) - a, err := AccountDataToAccount(record.Addr.String(), &ad, basics.Round(round), &consensus, amountWithoutPendingRewards) + a, err := ledgercoreADToAccount(record.Addr.String(), amountWithoutPendingRewards.Raw, uint64(round), &consensus, record.AccountData) if err != nil { return response, err } diff --git a/daemon/algod/api/server/v2/delta_test.go b/daemon/algod/api/server/v2/delta_test.go index 31154b951d..061912ac61 100644 --- a/daemon/algod/api/server/v2/delta_test.go +++ b/daemon/algod/api/server/v2/delta_test.go @@ -48,13 +48,13 @@ func TestDelta(t *testing.T) { MicroAlgos: basics.MicroAlgos{Raw: 5000}, RewardsBase: 2, RewardedMicroAlgos: basics.MicroAlgos{Raw: 0}, - TotalExtraAppPages: 0, - TotalAppParams: 0, - TotalAppLocalStates: 0, - TotalAssetParams: 0, - TotalAssets: 0, - TotalBoxes: 0, - TotalBoxBytes: 0, + TotalExtraAppPages: 1, + TotalAppParams: 2, + TotalAppLocalStates: 3, + TotalAssetParams: 4, + TotalAssets: 5, + TotalBoxes: 6, + TotalBoxBytes: 7, }, }, }, @@ -69,7 +69,7 @@ func TestDelta(t *testing.T) { ClearStateProgram: []byte("2"), GlobalState: basics.TealKeyValue{}, StateSchemas: basics.StateSchemas{}, - ExtraProgramPages: 0, + ExtraProgramPages: 2, }, Deleted: false, }, @@ -125,6 +125,10 @@ func TestDelta(t *testing.T) { actAccDelta := (*converted.Accts.Accounts)[0] require.Equal(t, expAccDelta.Addr.String(), actAccDelta.Address) require.Equal(t, expAccDelta.Status.String(), actAccDelta.AccountData.Status) + require.Equal(t, expAccDelta.TotalAppLocalStates, actAccDelta.AccountData.TotalAppsOptedIn) + require.Equal(t, expAccDelta.TotalAppParams, actAccDelta.AccountData.TotalCreatedApps) + require.Equal(t, expAccDelta.TotalAssetParams, actAccDelta.AccountData.TotalCreatedAssets) + require.Equal(t, expAccDelta.TotalAssets, actAccDelta.AccountData.TotalAssetsOptedIn) require.Equal(t, uint64(0), actAccDelta.AccountData.PendingRewards) require.Equal(t, len(original.Accts.AssetResources), len(*converted.Accts.Assets)) expAssetDelta := original.Accts.AssetResources[0] diff --git 
a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index 51467b65d9..f27940f33a 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -116,13 +116,8 @@ var poolDeltaResponseGolden = model.LedgerStateDelta{ Amount: 50000000000, AmountWithoutPendingRewards: 50000000000, MinBalance: 100000, - CreatedApps: &[]model.Application{}, AppsTotalSchema: &appsTotalSchema, - AppsLocalState: &[]model.ApplicationLocalState{}, Status: "Not Participating", - RewardBase: &poolAddrRewardBaseGolden, - CreatedAssets: &[]model.Asset{}, - Assets: &[]model.AssetHolding{}, }, Address: poolAddr.String(), }, From c53cd4c786791c00d3d9014d084f05b1fd3d1d55 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 8 Feb 2023 09:57:56 -0500 Subject: [PATCH 35/81] AVM: Teal macros (#4737) Co-authored-by: Ilan Co-authored-by: itennenhouse Co-authored-by: iten-alg <85889519+iten-alg@users.noreply.github.com> Co-authored-by: Ben Guidarelli --- data/transactions/logic/assembler.go | 164 ++++++++++- data/transactions/logic/assembler_test.go | 334 +++++++++++++++++++++- data/transactions/logic/opcodes.go | 16 ++ 3 files changed, 497 insertions(+), 17 deletions(-) diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 45489918a7..f7979e6828 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -31,6 +31,7 @@ import ( "sort" "strconv" "strings" + "unicode" "github.com/algorand/avm-abi/abi" "github.com/algorand/go-algorand/data/basics" @@ -256,6 +257,8 @@ type OpStream struct { // Need new copy for each opstream versionedPseudoOps map[string]map[int]OpSpec + + macros map[string][]string } // newOpStream constructs OpStream instances ready to invoke assemble. A new @@ -266,6 +269,7 @@ func newOpStream(version uint64) OpStream { OffsetToLine: make(map[int]int), typeTracking: true, Version: version, + macros: make(map[string][]string), known: ProgramKnowledge{fp: -1}, } @@ -1845,9 +1849,17 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction } } -// splitTokens breaks tokens into two slices at the first semicolon. -func splitTokens(tokens []string) (current, rest []string) { - for i, token := range tokens { +// nextStatement breaks tokens into two slices at the first semicolon and expands macros along the way. +func nextStatement(ops *OpStream, tokens []string) (current, rest []string) { + for i := 0; i < len(tokens); i++ { + token := tokens[i] + replacement, ok := ops.macros[token] + if ok { + tokens = append(tokens[0:i], append(replacement, tokens[i+1:]...)...) 
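+ // For example, given `#define ==? ==; bnz`, the statement
+ //   pushint 1; pushint 2; ==? label1
+ // is spliced into
+ //   pushint 1; pushint 2; ==; bnz label1
+ // before the semicolon handling below sees it.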
+ // backup to handle potential re-expansion of the first token in the expansion + i-- + continue + } if token == ";" { return tokens[:i], tokens[i+1:] } @@ -1855,6 +1867,10 @@ func splitTokens(tokens []string) (current, rest []string) { return tokens, nil } +type directiveFunc func(*OpStream, []string) error + +var directives = map[string]directiveFunc{"pragma": pragma, "define": define} + // assemble reads text from an input and accumulates the program func (ops *OpStream) assemble(text string) error { if ops.Version > LogicVersion && ops.Version != assemblerNoVersion { @@ -1872,30 +1888,35 @@ func (ops *OpStream) assemble(text string) error { if len(tokens) > 0 { if first := tokens[0]; first[0] == '#' { directive := first[1:] - switch directive { - case "pragma": - ops.pragma(tokens) //nolint:errcheck // report bad pragma line error, but continue assembling - ops.trace("%3d: #pragma line\n", ops.sourceLine) - default: + if dFunc, ok := directives[directive]; ok { + _ = dFunc(ops, tokens) + ops.trace("%3d: %s line\n", ops.sourceLine, first) + } else { ops.errorf("Unknown directive: %s", directive) } continue } } - for current, next := splitTokens(tokens); len(current) > 0 || len(next) > 0; current, next = splitTokens(next) { + for current, next := nextStatement(ops, tokens); len(current) > 0 || len(next) > 0; current, next = nextStatement(ops, next) { if len(current) == 0 { continue } // we're about to begin processing opcodes, so settle the Version if ops.Version == assemblerNoVersion { ops.Version = AssemblerDefaultVersion + _ = ops.recheckMacroNames() } if ops.versionedPseudoOps == nil { ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version) } opstring := current[0] if opstring[len(opstring)-1] == ':' { - ops.createLabel(opstring[:len(opstring)-1]) + labelName := opstring[:len(opstring)-1] + if _, ok := ops.macros[labelName]; ok { + ops.errorf("Cannot create label with same name as macro: %s", labelName) + } else { + ops.createLabel(opstring[:len(opstring)-1]) + } current = current[1:] if len(current) == 0 { ops.trace("%3d: label only\n", ops.sourceLine) @@ -1973,7 +1994,121 @@ func (ops *OpStream) assemble(text string) error { return nil } -func (ops *OpStream) pragma(tokens []string) error { +func (ops *OpStream) cycle(macro string, previous ...string) bool { + replacement, ok := ops.macros[macro] + if !ok { + return false + } + if len(previous) > 0 && macro == previous[0] { + ops.errorf("Macro cycle discovered: %s", strings.Join(append(previous, macro), " -> ")) + return true + } + for _, token := range replacement { + if ops.cycle(token, append(previous, macro)...) { + return true + } + } + return false +} + +// recheckMacroNames goes through previously defined macros and ensures they +// don't use opcodes/fields from newly obtained version. Therefore it repeats +// some checks that don't need to be repeated, in the interest of simplicity. 
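+// It runs whenever the version becomes known: on a `#pragma version` line,
+// or when the first instruction settles the assembler default version.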
+func (ops *OpStream) recheckMacroNames() error { + errored := false + for macroName := range ops.macros { + err := checkMacroName(macroName, ops.Version, ops.labels) + if err != nil { + delete(ops.macros, macroName) + ops.error(err) + errored = true + } + } + if errored { + return errors.New("version is incompatible with defined macros") + } + return nil +} + +var otherAllowedChars = [256]bool{'+': true, '-': true, '*': true, '/': true, '^': true, '%': true, '&': true, '|': true, '~': true, '!': true, '>': true, '<': true, '=': true, '?': true, '_': true} + +func checkMacroName(macroName string, version uint64, labels map[string]int) error { + var firstRune rune + var secondRune rune + count := 0 + for _, r := range macroName { + if count == 0 { + firstRune = r + } else if count == 1 { + secondRune = r + } + if !unicode.IsLetter(r) && !unicode.IsDigit(r) && !otherAllowedChars[r] { + return fmt.Errorf("%s character not allowed in macro name", string(r)) + } + count++ + } + if unicode.IsDigit(firstRune) { + return fmt.Errorf("Cannot begin macro name with number: %s", macroName) + } + if len(macroName) > 1 && (firstRune == '-' || firstRune == '+') { + if unicode.IsDigit(secondRune) { + return fmt.Errorf("Cannot begin macro name with number: %s", macroName) + } + } + // Note parentheses are not allowed characters, so we don't have to check for b64(AAA) syntax + if macroName == "b64" || macroName == "base64" { + return fmt.Errorf("Cannot use %s as macro name", macroName) + } + if macroName == "b32" || macroName == "base32" { + return fmt.Errorf("Cannot use %s as macro name", macroName) + } + _, isTxnType := txnTypeMap[macroName] + _, isOnCompletion := onCompletionMap[macroName] + if isTxnType || isOnCompletion { + return fmt.Errorf("Named constants cannot be used as macro names: %s", macroName) + } + if _, ok := pseudoOps[macroName]; ok { + return fmt.Errorf("Macro names cannot be pseudo-ops: %s", macroName) + } + if version != assemblerNoVersion { + if _, ok := OpsByName[version][macroName]; ok { + return fmt.Errorf("Macro names cannot be opcodes: %s", macroName) + } + if fieldNames[version][macroName] { + return fmt.Errorf("Macro names cannot be field names: %s", macroName) + } + } + if _, ok := labels[macroName]; ok { + return fmt.Errorf("Labels cannot be used as macro names: %s", macroName) + } + return nil +} + +func define(ops *OpStream, tokens []string) error { + if tokens[0] != "#define" { + return ops.errorf("invalid syntax: %s", tokens[0]) + } + if len(tokens) < 3 { + return ops.errorf("define directive requires a name and body") + } + name := tokens[1] + err := checkMacroName(name, ops.Version, ops.labels) + if err != nil { + return ops.error(err) + } + saved, ok := ops.macros[name] + ops.macros[name] = tokens[2:len(tokens):len(tokens)] + if ops.cycle(tokens[1]) { + if ok { + ops.macros[tokens[1]] = saved + } else { + delete(ops.macros, tokens[1]) + } + } + return nil +} + +func pragma(ops *OpStream, tokens []string) error { if tokens[0] != "#pragma" { return ops.errorf("invalid syntax: %s", tokens[0]) } @@ -2004,11 +2139,12 @@ func (ops *OpStream) pragma(tokens []string) error { // version for v1. if ops.Version == assemblerNoVersion { ops.Version = ver - } else if ops.Version != ver { + return ops.recheckMacroNames() + } + if ops.Version != ver { return ops.errorf("version mismatch: assembling v%d with v%d assembler", ver, ops.Version) - } else { - // ops.Version is already correct, or needed to be upped. } + // ops.Version is already correct, or needed to be upped. 
return nil case "typetrack": if len(tokens) < 3 { diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index cc5cd97a44..9158d2f268 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -560,6 +560,7 @@ type Expect struct { func testMatch(t testing.TB, actual, expected string) (ok bool) { defer func() { + t.Helper() if !ok { t.Logf("'%s' does not match '%s'", actual, expected) } @@ -1335,13 +1336,16 @@ func TestFieldsFromLine(t *testing.T) { check(" ; ", ";") } -func TestSplitTokens(t *testing.T) { +func TestNextStatement(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + // this test ensures nextStatement splits tokens on semicolons properly + // macro testing should be handled in TestMacros + ops := newOpStream(AssemblerMaxVersion) check := func(tokens []string, left []string, right []string) { t.Helper() - current, next := splitTokens(tokens) + current, next := nextStatement(&ops, tokens) assert.Equal(t, left, current) assert.Equal(t, right, next) } @@ -2928,7 +2932,7 @@ int 1 switch %s extra %s `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n") - ops = testProg(t, source, AssemblerMaxVersion, Expect{3, "switch cannot take more than 255 labels"}) + testProg(t, source, AssemblerMaxVersion, Expect{3, "switch cannot take more than 255 labels"}) // allow duplicate label reference source = ` @@ -2939,6 +2943,330 @@ int 1 testProg(t, source, AssemblerMaxVersion) } +func TestMacros(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + checkSame(t, AssemblerMaxVersion, ` + pushint 0; pushint 1; +`, ` + #define none 0 + #define one 1 + pushint none; pushint one; +`, + ) + + checkSame(t, AssemblerMaxVersion, ` + pushint 1 + pushint 2 + == + bnz label1 + err + label1: + pushint 1`, ` + #define ==? ==; bnz + pushint 1; pushint 2; ==? 
label1 + err + label1: + pushint 1`, + ) + + // Test redefining macros with macro chaining works + checkSame(t, AssemblerMaxVersion, ` + pushbytes 0x100000000000; substring 3 5; substring 0 1`, ` + #define rowSize 3 + #define columnSize 5 + #define tableDimensions rowSize columnSize + pushbytes 0x100000000000; substring tableDimensions + #define rowSize 0 + #define columnSize 1 + substring tableDimensions`, + ) + + // Test more complicated macros like multi-token + checkSame(t, AssemblerMaxVersion, ` + int 3 + store 0 + int 4 + store 1 + load 0 + load 1 + <`, ` + #define &x 0 + #define x load &x; + #define &y 1 + #define y load &y; + #define -> ; store + int 3 -> &x; int 4 -> &y + x y <`, + ) + + checkSame(t, AssemblerMaxVersion, ` + pushbytes 0xddf2554d + txna ApplicationArgs 0 + == + bnz kickstart + pushbytes 0x903f4535 + txna ApplicationArgs 0 + == + bnz portal_transfer + kickstart: + pushint 1 + portal_transfer: + pushint 1 + `, ` + #define abi-route txna ApplicationArgs 0; ==; bnz + method "kickstart(account)void"; abi-route kickstart + method "portal_transfer(byte[])byte[]"; abi-route portal_transfer + kickstart: + pushint 1 + portal_transfer: + pushint 1 + `) + + checkSame(t, AssemblerMaxVersion, ` +method "echo(string)string" +txn ApplicationArgs 0 +== +bnz echo + +echo: + int 1 + dup + txnas ApplicationArgs + extract 2 0 + stores + + int 1 + loads + dup + len + itob + extract 6 0 + swap + concat + + pushbytes 0x151f7c75 + swap + concat + log + int 1 + return + +method "add(uint32,uint32)uint32" +txn ApplicationArgs 0 +== +bnz add + +add: + int 1 + dup + txnas ApplicationArgs + int 0 + extract_uint32 + stores + + int 2 + dup + txnas ApplicationArgs + int 0 + extract_uint32 + stores + + load 1; load 2; + + store 255 + + int 255 + loads + itob + extract 4 0 + pushbytes 0x151f7c75 + swap + concat + + log + int 1 + return + `, ` +// Library Methods + +// codecs +#define abi-encode-uint16 ;itob; extract 6 0; +#define abi-decode-uint16 ;extract_uint16; + +#define abi-decode-uint32 ;int 0; extract_uint32; +#define abi-encode-uint32 ;itob;extract 4 0; + +#define abi-encode-bytes ;dup; len; abi-encode-uint16; swap; concat; +#define abi-decode-bytes ;extract 2 0; + +// abi method handling +#define abi-route ;txna ApplicationArgs 0; ==; bnz +#define abi-return ;pushbytes 0x151f7c75; swap; concat; log; int 1; return; + +// stanza: "set $var from-{type}" +#define parse ; int +#define read_arg ;dup; txnas ApplicationArgs; +#define from-string ;read_arg; abi-decode-bytes; stores; +#define from-uint16 ;read_arg; abi-decode-uint16; stores; +#define from-uint32 ;read_arg; abi-decode-uint32; stores; + +// stanza: "reply $var as-{type} +#define returns ; int +#define as-uint32; loads; abi-encode-uint32; abi-return; +#define as-string; loads; abi-encode-bytes; abi-return; + +// Contract + +// echo handler +method "echo(string)string"; abi-route echo +echo: + #define msg 1 + parse msg from-string + + // cool things happen ... 
+ + returns msg as-string + + +// add handler +method "add(uint32,uint32)uint32"; abi-route add +add: + #define x 1 + parse x from-uint32 + + #define y 2 + parse y from-uint32 + + #define sum 255 + load x; load y; +; store sum + + returns sum as-uint32 + `) + + testProg(t, ` + #define x a d + #define d c a + #define hey wat's up x + #define c woah hey + int 1 + c`, + AssemblerMaxVersion, Expect{5, "Macro cycle discovered: c -> hey -> x -> d -> c"}, Expect{7, "unknown opcode: c"}, + ) + + testProg(t, ` + #define c + + #define x a c + #define d x + #define c d + int 1 + c`, + AssemblerMaxVersion, Expect{5, "Macro cycle discovered: c -> d -> x -> c"}, Expect{7, "+ expects..."}, + ) + + testProg(t, ` + #define X X + int 3`, + AssemblerMaxVersion, Expect{2, "Macro cycle discovered: X -> X"}, + ) + + // Check that macros names can't be things like named constants, opcodes, etc. + // If pragma is given, only macros that violate that version's stuff should be errored on + testProg(t, ` + #define return random + #define pay randomm + #define NoOp randommm + #define + randommmm + #pragma version 1 // now the versioned check should activate and check all previous macros + #define return hi // no error b/c return is after v1 + #define + hey // since versioned check is now online, we can error here + int 1`, + assemblerNoVersion, + Expect{3, "Named constants..."}, + Expect{4, "Named constants..."}, + Expect{6, "Macro names cannot be opcodes: +"}, + Expect{8, "Macro names cannot be opcodes: +"}, + ) + + // Same check, but this time since no version is given, the versioned check + // uses AssemblerDefaultVersion and activates on first instruction (int 1) + testProg(t, ` + #define return random + #define pay randomm + #define NoOp randommm + #define + randommmm + int 1 // versioned check activates here + #define return hi + #define + hey`, + assemblerNoVersion, + Expect{3, "Named constants..."}, + Expect{4, "Named constants..."}, + Expect{6, "Macro names cannot be opcodes: +"}, + Expect{8, "Macro names cannot be opcodes: +"}, + ) + + testProg(t, ` + #define Sender hello + #define ApplicationArgs hiya + #pragma version 1 + #define Sender helllooooo + #define ApplicationArgs heyyyyy // no error b/c ApplicationArgs is after v1 + int 1`, + assemblerNoVersion, + Expect{4, "Macro names cannot be field names: Sender"}, // error happens once version is known + ) + + // Same check but defaults to AssemblerDefaultVersion instead of pragma + testProg(t, ` + #define Sender hello + #define ApplicationArgs hiya + int 1 + #define Sender helllooooo + #define ApplicationArgs heyyyyy`, + assemblerNoVersion, + Expect{4, "Macro names cannot be field names: Sender"}, // error happens once version is auto-set + Expect{5, "Macro names cannot be field names: Sender"}, // and on following line + ) + // define needs name and body + testLine(t, "#define", AssemblerMaxVersion, "define directive requires a name and body") + testLine(t, "#define hello", AssemblerMaxVersion, "define directive requires a name and body") + // macro names cannot be directives + testLine(t, "#define #define 1", AssemblerMaxVersion, "# character not allowed in macro name") + testLine(t, "#define #pragma 1", AssemblerMaxVersion, "# character not allowed in macro name") + // macro names cannot begin with digits (including negative ones) + testLine(t, "#define 1hello one", AssemblerMaxVersion, "Cannot begin macro name with number: 1hello") + testLine(t, "#define -1hello negativeOne", AssemblerMaxVersion, "Cannot begin macro name with number: -1hello") + // 
macro names can't use base64/32 notation + testLine(t, "#define b64 AA", AssemblerMaxVersion, "Cannot use b64 as macro name") + testLine(t, "#define base64 AA", AssemblerMaxVersion, "Cannot use base64 as macro name") + testLine(t, "#define b32 AA", AssemblerMaxVersion, "Cannot use b32 as macro name") + testLine(t, "#define base32 AA", AssemblerMaxVersion, "Cannot use base32 as macro name") + // macro names can't use non-alphanumeric characters that aren't specifically allowed + testLine(t, "#define wh@t 1", AssemblerMaxVersion, "@ character not allowed in macro name") + // check both kinds of pseudo-ops to make sure they can't be used as macro names + testLine(t, "#define int 3", AssemblerMaxVersion, "Macro names cannot be pseudo-ops: int") + testLine(t, "#define extract 3", AssemblerMaxVersion, "Macro names cannot be pseudo-ops: extract") + // check labels to make sure they can't be used as macro names + testProg(t, ` + coolLabel: + int 1 + #define coolLabel 1`, + AssemblerMaxVersion, + Expect{4, "Labels cannot be used as macro names: coolLabel"}, + ) + testProg(t, ` + #define coolLabel 1 + coolLabel: + int 1`, + AssemblerMaxVersion, + Expect{3, "Cannot create label with same name as macro: coolLabel"}, + ) + // Admittedly these two tests are just for coverage + ops := newOpStream(AssemblerMaxVersion) + err := define(&ops, []string{"not#define"}) + require.EqualError(t, err, "0: invalid syntax: not#define") + err = pragma(&ops, []string{"not#pragma"}) + require.EqualError(t, err, "0: invalid syntax: not#pragma") +} + func TestAssembleMatch(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 8f47b57caf..5dd6f20d12 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -698,6 +698,9 @@ var opsByOpcode [LogicVersion + 1][256]OpSpec // OpsByName map for each version, mapping opcode name to OpSpec var OpsByName [LogicVersion + 1]map[string]OpSpec +// Keeps track of all field names accessible in each version +var fieldNames [LogicVersion + 1]map[string]bool + // Migration from v1 to v2. // v1 allowed execution of program with version 0. // With v2 opcode versions are introduced and they are bound to every opcode. @@ -741,4 +744,17 @@ func init() { } } } + + for v := 0; v <= LogicVersion; v++ { + fieldNames[v] = make(map[string]bool) + for _, spec := range OpsByName[v] { + for _, imm := range spec.Immediates { + if imm.Group != nil { + for _, fieldName := range imm.Group.Names { + fieldNames[v][fieldName] = true + } + } + } + } + } } From 6d25eeea23d578fe07cc039809ec4e58d6bf0cf3 Mon Sep 17 00:00:00 2001 From: Shant Karakashian <55754073+algonautshant@users.noreply.github.com> Date: Wed, 8 Feb 2023 13:42:37 -0500 Subject: [PATCH 36/81] txHandler: move streamverifer out of txn go (#5039) --- data/transactions/verify/streamverifier.go | 421 +++++++++ .../verify/streamverifier_test.go | 842 ++++++++++++++++++ data/transactions/verify/txn.go | 392 -------- data/transactions/verify/txn_test.go | 804 ----------------- 4 files changed, 1263 insertions(+), 1196 deletions(-) create mode 100644 data/transactions/verify/streamverifier.go create mode 100644 data/transactions/verify/streamverifier_test.go diff --git a/data/transactions/verify/streamverifier.go b/data/transactions/verify/streamverifier.go new file mode 100644 index 0000000000..0a3e075e91 --- /dev/null +++ b/data/transactions/verify/streamverifier.go @@ -0,0 +1,421 @@ +// Copyright (C) 2019-2023 Algorand, Inc. 
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package verify
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/algorand/go-algorand/crypto"
+	"github.com/algorand/go-algorand/data/basics"
+	"github.com/algorand/go-algorand/data/bookkeeping"
+	"github.com/algorand/go-algorand/data/transactions"
+	"github.com/algorand/go-algorand/data/transactions/logic"
+	"github.com/algorand/go-algorand/ledger/ledgercore"
+	"github.com/algorand/go-algorand/logging"
+	"github.com/algorand/go-algorand/util/execpool"
+)
+
+// batchSizeBlockLimit is the size beyond which the batch is added to the exec pool even if the pool is saturated;
+// the batch verifier will then block until the exec pool accepts the batch
+const batchSizeBlockLimit = 1024
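Aside for reviewers: the two constants in this file tune a flush-on-size-or-timeout policy. The standalone sketch below (illustrative only, not part of the patch; all names are invented) shows the same select-loop shape that batchingLoop implements with these constants.

package main

import (
	"fmt"
	"time"
)

func batchingSketch(in <-chan int, flush func([]int)) {
	const sizeLimit = 4                            // stands in for batchSizeBlockLimit
	ticker := time.NewTicker(2 * time.Millisecond) // stands in for waitForNextTxnDuration
	defer ticker.Stop()
	batch := make([]int, 0, sizeLimit)
	for {
		select {
		case v, ok := <-in:
			if !ok {
				if len(batch) > 0 {
					flush(batch) // flush whatever is left on shutdown
				}
				return
			}
			batch = append(batch, v)
			if len(batch) >= sizeLimit {
				flush(batch) // size-triggered flush favors throughput
				batch = make([]int, 0, sizeLimit)
			}
		case <-ticker.C:
			if len(batch) > 0 {
				flush(batch) // timeout-triggered flush caps per-item latency
				batch = make([]int, 0, sizeLimit)
			}
		}
	}
}

func main() {
	in := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			in <- i
		}
		close(in)
	}()
	batchingSketch(in, func(b []int) { fmt.Println("flush", b) })
}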
+// waitForNextTxnDuration is the time to wait before sending the batch to the exec pool
+// If the incoming txn rate is low, a txn in the batch may wait up to
+// waitForNextTxnDuration before it is sent for verification.
+// This can introduce a latency to the propagation of a transaction in the network,
+// since every relay will go through this wait time before broadcasting the txn.
+// However, when the incoming txn rate is high, the batch will fill up quickly and will be sent
+// for signature evaluation before waitForNextTxnDuration.
+const waitForNextTxnDuration = 2 * time.Millisecond
+
+// UnverifiedElement is the element passed to the Stream verifier
+// BacklogMessage is a *txBacklogMsg from data/txHandler.go which needs to be
+// passed back to that context
+type UnverifiedElement struct {
+	TxnGroup       []transactions.SignedTxn
+	BacklogMessage interface{}
+}
+
+// VerificationResult is the result of the txn group verification
+// BacklogMessage is the reference associated with the txn group which was
+// initially passed to the stream verifier
+type VerificationResult struct {
+	TxnGroup       []transactions.SignedTxn
+	BacklogMessage interface{}
+	Err            error
+}
+
+// StreamVerifier verifies txn groups received through the stxnChan channel, and returns the
+// results through the resultChan
+type StreamVerifier struct {
+	resultChan       chan<- *VerificationResult
+	droppedChan      chan<- *UnverifiedElement
+	stxnChan         <-chan *UnverifiedElement
+	verificationPool execpool.BacklogPool
+	ctx              context.Context
+	cache            VerifiedTransactionCache
+	activeLoopWg     sync.WaitGroup
+	nbw              *NewBlockWatcher
+	ledger           logic.LedgerForSignature
+}
+
+// NewBlockWatcher is a struct used to provide a new block header to the
+// stream verifier
+type NewBlockWatcher struct {
+	blkHeader atomic.Value
+}
+
+// MakeNewBlockWatcher constructs a new block watcher with the initial blkHdr
+func MakeNewBlockWatcher(blkHdr bookkeeping.BlockHeader) (nbw *NewBlockWatcher) {
+	nbw = &NewBlockWatcher{}
+	nbw.blkHeader.Store(&blkHdr)
+	return nbw
+}
+
+// OnNewBlock implements the interface to subscribe to new block notifications from the ledger
+func (nbw *NewBlockWatcher) OnNewBlock(block bookkeeping.Block, delta ledgercore.StateDelta) {
+	bh := nbw.blkHeader.Load().(*bookkeeping.BlockHeader)
+	if bh.Round >= block.BlockHeader.Round {
+		return
+	}
+	nbw.blkHeader.Store(&block.BlockHeader)
+}
+
+func (nbw *NewBlockWatcher) getBlockHeader() (bh *bookkeeping.BlockHeader) {
+	return nbw.blkHeader.Load().(*bookkeeping.BlockHeader)
+}
+
+type batchLoad struct {
+	txnGroups             [][]transactions.SignedTxn
+	groupCtxs             []*GroupContext
+	elementBacklogMessage []interface{}
+	messagesForTxn        []int
+}
+
+func makeBatchLoad(l int) (bl batchLoad) {
+	bl.txnGroups = make([][]transactions.SignedTxn, 0, l)
+	bl.groupCtxs = make([]*GroupContext, 0, l)
+	bl.elementBacklogMessage = make([]interface{}, 0, l)
+	bl.messagesForTxn = make([]int, 0, l)
+	return bl
+}
+
+func (bl *batchLoad) addLoad(txngrp []transactions.SignedTxn, gctx *GroupContext, backlogMsg interface{}, numBatchableSigs int) {
+	bl.txnGroups = append(bl.txnGroups, txngrp)
+	bl.groupCtxs = append(bl.groupCtxs, gctx)
+	bl.elementBacklogMessage = append(bl.elementBacklogMessage, backlogMsg)
+	bl.messagesForTxn = append(bl.messagesForTxn, numBatchableSigs)
+
+}
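Aside: NewBlockWatcher above publishes the latest header through an atomic.Value, so the single writer (the ledger's block notification) never blocks concurrent readers. A minimal standalone sketch of that pattern follows (invented names, not part of the patch):

package main

import (
	"fmt"
	"sync/atomic"
)

type header struct{ Round uint64 }

type watcher struct{ latest atomic.Value } // always holds a *header

// publish mirrors OnNewBlock: it assumes a single writer, ignores stale
// rounds, and swaps in the newer header; readers Load() without locks.
func (w *watcher) publish(h header) {
	cur := w.latest.Load().(*header)
	if cur.Round >= h.Round {
		return // stale or duplicate notification
	}
	w.latest.Store(&h)
}

func main() {
	w := &watcher{}
	w.latest.Store(&header{Round: 1})
	w.publish(header{Round: 2})
	w.publish(header{Round: 2})                  // duplicate is dropped
	fmt.Println(w.latest.Load().(*header).Round) // prints 2
}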
+// LedgerForStreamVerifier defines the ledger methods used by the StreamVerifier.
+type LedgerForStreamVerifier interface {
+	logic.LedgerForSignature
+	RegisterBlockListeners([]ledgercore.BlockListener)
+	Latest() basics.Round
+	BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
+}
+
+// MakeStreamVerifier creates a new stream verifier, which reads txn groups from stxnChan
+// and reports the txn signature verification results on resultChan
+func MakeStreamVerifier(stxnChan <-chan *UnverifiedElement, resultChan chan<- *VerificationResult,
+	droppedChan chan<- *UnverifiedElement, ledger LedgerForStreamVerifier,
+	verificationPool execpool.BacklogPool, cache VerifiedTransactionCache) (*StreamVerifier, error) {
+
+	latest := ledger.Latest()
+	latestHdr, err := ledger.BlockHdr(latest)
+	if err != nil {
+		return nil, errors.New("MakeStreamVerifier: Could not get header for previous block")
+	}
+
+	nbw := MakeNewBlockWatcher(latestHdr)
+	ledger.RegisterBlockListeners([]ledgercore.BlockListener{nbw})
+
+	return &StreamVerifier{
+		resultChan:       resultChan,
+		stxnChan:         stxnChan,
+		droppedChan:      droppedChan,
+		verificationPool: verificationPool,
+		cache:            cache,
+		nbw:              nbw,
+		ledger:           ledger,
+	}, nil
+}
+
+// Start is called when the verifier is created and whenever it needs to restart after
+// the ctx is canceled
+func (sv *StreamVerifier) Start(ctx context.Context) {
+	sv.ctx = ctx
+	sv.activeLoopWg.Add(1)
+	go sv.batchingLoop()
+}
+
+// WaitForStop waits until the batching loop terminates after the ctx is canceled
+func (sv *StreamVerifier) WaitForStop() {
+	sv.activeLoopWg.Wait()
+}
+
+func (sv *StreamVerifier) cleanup(pending []*UnverifiedElement) {
+	// report an error for the unchecked txns
+	// drop the messages without reporting if the receiver does not consume
+	for _, uel := range pending {
+		sv.sendResult(uel.TxnGroup, uel.BacklogMessage, errShuttingDownError)
+	}
+}
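Aside: the Start/WaitForStop contract above lets the verifier be stopped and later restarted with a fresh context. A self-contained sketch of that lifecycle (invented names, not part of the patch):

package main

import (
	"context"
	"fmt"
	"sync"
)

type loop struct{ wg sync.WaitGroup }

func (l *loop) Start(ctx context.Context) {
	l.wg.Add(1)
	go func() {
		defer l.wg.Done()
		<-ctx.Done() // the real loop also selects on its input channels
	}()
}

func (l *loop) WaitForStop() { l.wg.Wait() }

func main() {
	var l loop
	for i := 0; i < 2; i++ {
		ctx, cancel := context.WithCancel(context.Background())
		l.Start(ctx) // each restart gets a fresh context
		cancel()
		l.WaitForStop() // join the loop before restarting
		fmt.Println("stopped", i)
	}
}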
+func (sv *StreamVerifier) batchingLoop() {
+	defer sv.activeLoopWg.Done()
+	timer := time.NewTicker(waitForNextTxnDuration)
+	defer timer.Stop()
+	var added bool
+	var numberOfSigsInCurrent uint64
+	var numberOfBatchAttempts uint64
+	ue := make([]*UnverifiedElement, 0, 8)
+	defer func() { sv.cleanup(ue) }()
+	for {
+		select {
+		case stx := <-sv.stxnChan:
+			numberOfBatchableSigsInGroup, err := getNumberOfBatchableSigsInGroup(stx.TxnGroup)
+			if err != nil {
+				// wrong number of signatures
+				sv.sendResult(stx.TxnGroup, stx.BacklogMessage, err)
+				continue
+			}
+
+			// if no batchable signatures here, send this as a task of its own
+			if numberOfBatchableSigsInGroup == 0 {
+				err := sv.addVerificationTaskToThePoolNow([]*UnverifiedElement{stx})
+				if err != nil {
+					return
+				}
+				continue // stx is handled, continue
+			}
+
+			// add this txngrp to the list of batchable txn groups
+			numberOfSigsInCurrent = numberOfSigsInCurrent + numberOfBatchableSigsInGroup
+			ue = append(ue, stx)
+			if numberOfSigsInCurrent > txnPerWorksetThreshold {
+				// enough transactions in the batch to efficiently verify
+
+				if numberOfSigsInCurrent > batchSizeBlockLimit {
+					// do not consider adding more txns to this batch.
+					// bypass the exec pool situation and queue anyway
+					// this is to prevent creation of very large batches
+					err := sv.addVerificationTaskToThePoolNow(ue)
+					if err != nil {
+						return
+					}
+					added = true
+				} else {
+					added, err = sv.tryAddVerificationTaskToThePool(ue)
+					if err != nil {
+						return
+					}
+				}
+				if added {
+					numberOfSigsInCurrent = 0
+					ue = make([]*UnverifiedElement, 0, 8)
+					numberOfBatchAttempts = 0
+				} else {
+					// was not added because of the exec pool buffer length
+					numberOfBatchAttempts++
+				}
+			}
+		case <-timer.C:
+			// timer ticked. it is time to send the batch even if it is not full
+			if numberOfSigsInCurrent == 0 {
+				// nothing batched yet... wait some more
+				continue
+			}
+			var err error
+			if numberOfBatchAttempts > 1 {
+				// bypass the exec pool situation and queue anyway
+				// this is to prevent long delays in transaction propagation
+				// at least one transaction here has waited 3 x waitForNextTxnDuration
+				err = sv.addVerificationTaskToThePoolNow(ue)
+				added = true
+			} else {
+				added, err = sv.tryAddVerificationTaskToThePool(ue)
+			}
+			if err != nil {
+				return
+			}
+			if added {
+				numberOfSigsInCurrent = 0
+				ue = make([]*UnverifiedElement, 0, 8)
+				numberOfBatchAttempts = 0
+			} else {
+				// was not added because of the exec pool buffer length. wait for some more txns
+				numberOfBatchAttempts++
+			}
+		case <-sv.ctx.Done():
+			return
+		}
+	}
+}
+
+func (sv *StreamVerifier) sendResult(veTxnGroup []transactions.SignedTxn, veBacklogMessage interface{}, err error) {
+	// send the txn result out the pipe
+	select {
+	case sv.resultChan <- &VerificationResult{
+		TxnGroup:       veTxnGroup,
+		BacklogMessage: veBacklogMessage,
+		Err:            err,
+	}:
+	default:
+		// we failed to write to the output queue, since the queue was full.
+		sv.droppedChan <- &UnverifiedElement{veTxnGroup, veBacklogMessage}
+	}
+}
+
+func (sv *StreamVerifier) tryAddVerificationTaskToThePool(ue []*UnverifiedElement) (added bool, err error) {
+	// if the exec pool buffer is full, can go back and collect
+	// more signatures instead of waiting in the exec pool buffer
+	// more signatures to the batch do not harm performance but introduce latency when delayed (see crypto.BenchmarkBatchVerifierBig)
+
+	// if the buffer is full
+	if l, c := sv.verificationPool.BufferSize(); l == c {
+		return false, nil
+	}
+	err = sv.addVerificationTaskToThePoolNow(ue)
+	if err != nil {
+		// An error is returned when the context of the pool expires
+		return false, err
+	}
+	return true, nil
+}
+
+func (sv *StreamVerifier) addVerificationTaskToThePoolNow(ue []*UnverifiedElement) error {
+	// if the context is canceled when the task is in the queue, it should be canceled
+	// copy the ctx here so that when the StreamVerifier is started again, and a new context
+	// is created, this task still gets canceled due to the ctx at the time of this task
+	taskCtx := sv.ctx
+	function := func(arg interface{}) interface{} {
+		if taskCtx.Err() != nil {
+			// ctx is canceled.
the results will be returned + sv.cleanup(ue) + return nil + } + + ue := arg.([]*UnverifiedElement) + batchVerifier := crypto.MakeBatchVerifier() + + bl := makeBatchLoad(len(ue)) + // TODO: separate operations here, and get the sig verification inside the LogicSig to the batch here + blockHeader := sv.nbw.getBlockHeader() + for _, ue := range ue { + groupCtx, err := txnGroupBatchPrep(ue.TxnGroup, blockHeader, sv.ledger, batchVerifier, nil) + if err != nil { + // verification failed, no need to add the sig to the batch, report the error + sv.sendResult(ue.TxnGroup, ue.BacklogMessage, err) + continue + } + totalBatchCount := batchVerifier.GetNumberOfEnqueuedSignatures() + bl.addLoad(ue.TxnGroup, groupCtx, ue.BacklogMessage, totalBatchCount) + } + + failed, err := batchVerifier.VerifyWithFeedback() + // this error can only be crypto.ErrBatchHasFailedSigs + if err == nil { // success, all signatures verified + for i := range bl.txnGroups { + sv.sendResult(bl.txnGroups[i], bl.elementBacklogMessage[i], nil) + } + sv.cache.AddPayset(bl.txnGroups, bl.groupCtxs) + return nil + } + + verifiedTxnGroups := make([][]transactions.SignedTxn, 0, len(bl.txnGroups)) + verifiedGroupCtxs := make([]*GroupContext, 0, len(bl.groupCtxs)) + failedSigIdx := 0 + for txgIdx := range bl.txnGroups { + txGroupSigFailed := false + for failedSigIdx < bl.messagesForTxn[txgIdx] { + if failed[failedSigIdx] { + // if there is a failed sig check, then no need to check the rest of the + // sigs for this txnGroup + failedSigIdx = bl.messagesForTxn[txgIdx] + txGroupSigFailed = true + } else { + // proceed to check the next sig belonging to this txnGroup + failedSigIdx++ + } + } + var result error + if !txGroupSigFailed { + verifiedTxnGroups = append(verifiedTxnGroups, bl.txnGroups[txgIdx]) + verifiedGroupCtxs = append(verifiedGroupCtxs, bl.groupCtxs[txgIdx]) + } else { + result = err + } + sv.sendResult(bl.txnGroups[txgIdx], bl.elementBacklogMessage[txgIdx], result) + } + // loading them all at once by locking the cache once + sv.cache.AddPayset(verifiedTxnGroups, verifiedGroupCtxs) + return nil + } + + // EnqueueBacklog returns an error when the context is canceled + err := sv.verificationPool.EnqueueBacklog(sv.ctx, function, ue, nil) + if err != nil { + logging.Base().Infof("addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: %v", err) + } + return err +} + +func getNumberOfBatchableSigsInGroup(stxs []transactions.SignedTxn) (batchSigs uint64, err error) { + batchSigs = 0 + for i := range stxs { + count, err := getNumberOfBatchableSigsInTxn(&stxs[i]) + if err != nil { + return 0, err + } + batchSigs = batchSigs + count + } + return +} + +func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn) (uint64, error) { + sigType, err := checkTxnSigTypeCounts(stx) + if err != nil { + return 0, err + } + switch sigType { + case regularSig: + return 1, nil + case multiSig: + sig := stx.Msig + batchSigs := uint64(0) + for _, subsigi := range sig.Subsigs { + if (subsigi.Sig != crypto.Signature{}) { + batchSigs++ + } + } + return batchSigs, nil + case logicSig: + // Currently the sigs in here are not batched. Something to consider later. 
+ return 0, nil + case stateProofTxn: + return 0, nil + default: + // this case is impossible + return 0, nil + } +} diff --git a/data/transactions/verify/streamverifier_test.go b/data/transactions/verify/streamverifier_test.go new file mode 100644 index 0000000000..ea20f1d5fd --- /dev/null +++ b/data/transactions/verify/streamverifier_test.go @@ -0,0 +1,842 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package verify + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "math/rand" + "runtime" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/algorand/go-algorand/util/execpool" + "github.com/algorand/go-algorand/util/metrics" +) + +var droppedFromPool = metrics.MakeCounter(metrics.MetricName{Name: "test_streamVerifierTestCore_messages_dropped_pool", Description: "Test streamVerifierTestCore messages dropped from pool"}) + +func streamVerifierTestCore(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{}, + expectedError error, t *testing.T) (sv *StreamVerifier) { + + numOfTxnGroups := len(txnGroups) + verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t) + defer verificationPool.Shutdown() + + ctx, cancel := context.WithCancel(context.Background()) + cache := MakeVerifiedTransactionCache(50000) + + defer cancel() + + stxnChan := make(chan *UnverifiedElement) + resultChan := make(chan *VerificationResult, txBacklogSize) + droppedChan := make(chan *UnverifiedElement) + sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + require.NoError(t, err) + sv.Start(ctx) + + wg := sync.WaitGroup{} + + errChan := make(chan error) + var badSigResultCounter int + var goodSigResultCounter int + + wg.Add(1) + go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg) + + wg.Add(1) + // send txn groups to be verified + go func() { + defer wg.Done() + for _, tg := range txnGroups { + stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil} + } + }() + + for err := range errChan { + require.ErrorContains(t, err, expectedError.Error()) + } + + wg.Wait() + + verifyResults(txnGroups, badTxnGroups, cache, badSigResultCounter, goodSigResultCounter, t) + return sv +} + +func processResults(ctx context.Context, errChan chan<- error, resultChan <-chan 
 *VerificationResult,
+	numOfTxnGroups int, badTxnGroups map[uint64]struct{},
+	badSigResultCounter, goodSigResultCounter *int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer close(errChan)
+	// process the results
+	for x := 0; x < numOfTxnGroups; x++ {
+		select {
+		case <-ctx.Done():
+		case result := <-resultChan:
+			u, _ := binary.Uvarint(result.TxnGroup[0].Txn.Note)
+			if _, has := badTxnGroups[u]; has {
+				(*badSigResultCounter)++
+				if result.Err == nil {
+					err := fmt.Errorf("%dth (%d) transaction verified with a bad sig", x, u)
+					errChan <- err
+					return
+				}
+				// we expected an error, but it is not the general crypto error
+				if result.Err != crypto.ErrBatchHasFailedSigs {
+					errChan <- result.Err
+				}
+			} else {
+				(*goodSigResultCounter)++
+				if result.Err != nil {
+					errChan <- result.Err
+				}
+			}
+		}
+	}
+}
+
+func verifyResults(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{},
+	cache VerifiedTransactionCache,
+	badSigResultCounter, goodSigResultCounter int, t *testing.T) {
+	// check if all txns have been checked.
+	require.Equal(t, len(txnGroups), badSigResultCounter+goodSigResultCounter)
+	require.Equal(t, len(badTxnGroups), badSigResultCounter)
+
+	// check the cached transactions
+	// note that the result of each verified txn group is sent before the batch is added to the cache
+	// the test does not know if the batch is not added to the cache yet, so some elements might be missing from the cache
+	unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
+	require.GreaterOrEqual(t, len(unverifiedGroups), badSigResultCounter)
+	for _, txn := range unverifiedGroups {
+		u, _ := binary.Uvarint(txn[0].Txn.Note)
+		if _, has := badTxnGroups[u]; has {
+			delete(badTxnGroups, u)
+		}
+	}
+	require.Empty(t, badTxnGroups, "unverifiedGroups should have all the transactions with invalid sigs")
+}
+
+func getSignedTransactions(numOfTxns, maxGrpSize, noteOffset int, badTxnProb float32) (txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{}) {
+
+	_, signedTxn, secrets, addrs := generateTestObjects(numOfTxns, 20, noteOffset, 50)
+	txnGroups = generateTransactionGroups(maxGrpSize, signedTxn, secrets, addrs)
+
+	badTxnGroups = make(map[uint64]struct{})
+
+	for tgi := range txnGroups {
+		if rand.Float32() < badTxnProb {
+			// make a bad sig
+			t := rand.Intn(len(txnGroups[tgi]))
+			txnGroups[tgi][t].Sig[0] = txnGroups[tgi][t].Sig[0] + 1
+			u, _ := binary.Uvarint(txnGroups[tgi][0].Txn.Note)
+			badTxnGroups[u] = struct{}{}
+		}
+	}
+	return
+
+}
+
+// TestStreamVerifier tests the basic functionality
+func TestStreamVerifier(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	numOfTxns := 4000
+	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, protoMaxGroupSize, 0, 0.5)
+
+	sv := streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
+	sv.WaitForStop()
+}
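Aside: these tests track which groups were corrupted by stamping a varint counter into the first txn's Note field and decoding it again on the result side. A tiny standalone sketch of that round trip (not part of the patch):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	badTxnGroups := map[uint64]struct{}{7: {}}

	// generator side: stamp group #7's counter into the note field
	note := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(note, 7)
	note = note[:n]

	// result side: recover the counter and look the group up
	u, _ := binary.Uvarint(note)
	_, isBad := badTxnGroups[u]
	fmt.Println(u, isBad) // prints: 7 true
}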
+// TestStreamVerifierCases tests various valid and invalid transaction signature cases
+func TestStreamVerifierCases(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	numOfTxns := 10
+	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0)
+	mod := 1
+
+	// txn with 0 sigs
+	txnGroups[mod][0].Sig = crypto.Signature{}
+	u, _ := binary.Uvarint(txnGroups[mod][0].Txn.Note)
+	badTxnGroups[u] = struct{}{}
+	sv := streamVerifierTestCore(txnGroups, badTxnGroups, errTxnSigHasNoSig, t)
+	sv.WaitForStop()
+	mod++
+
+	_, signedTxns, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50)
+	txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs)
+	badTxnGroups = make(map[uint64]struct{})
+
+	// invalid stateproof txn
+	txnGroups[mod][0].Sig = crypto.Signature{}
+	txnGroups[mod][0].Txn.Type = protocol.StateProofTx
+	txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender
+	u, _ = binary.Uvarint(txnGroups[mod][0].Txn.Note)
+	badTxnGroups[u] = struct{}{}
+	errFeeMustBeZeroInStateproofTxn := errors.New("fee must be zero in state-proof transaction")
+	sv = streamVerifierTestCore(txnGroups, badTxnGroups, errFeeMustBeZeroInStateproofTxn, t)
+	sv.WaitForStop()
+	mod++
+
+	_, signedTxns, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50)
+	txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs)
+	badTxnGroups = make(map[uint64]struct{})
+
+	// acceptable stateproof txn
+	txnGroups[mod][0].Sig = crypto.Signature{}
+	txnGroups[mod][0].Txn.Note = nil
+	txnGroups[mod][0].Txn.Type = protocol.StateProofTx
+	txnGroups[mod][0].Txn.Header.Fee = basics.MicroAlgos{Raw: 0}
+	txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender
+	txnGroups[mod][0].Txn.PaymentTxnFields = transactions.PaymentTxnFields{}
+	sv = streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
+	sv.WaitForStop()
+	mod++
+
+	// multisig
+	_, mSigTxn, _, _ := generateMultiSigTxn(1, 6, 50, t)
+	txnGroups[mod] = mSigTxn
+	sv = streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
+	sv.WaitForStop()
+	mod++
+
+	_, signedTxn, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50)
+	txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs)
+	badTxnGroups = make(map[uint64]struct{})
+
+	// logicsig
+	// add a simple logic that verifies this condition:
+	// sha256(arg0) == base64decode(5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=)
+	op, err := logic.AssembleString(`arg 0
+sha256
+byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=
+==`)
+	require.NoError(t, err)
+	s := rand.Intn(len(secrets))
+	txnGroups[mod][0].Sig = crypto.Signature{}
+	txnGroups[mod][0].Txn.Sender = addrs[s]
+	txnGroups[mod][0].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
+	txnGroups[mod][0].Lsig.Logic = op.Program
+	program := logic.Program(op.Program)
+	txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program)
+	sv = streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
+	sv.WaitForStop()
+	mod++
+
+	// bad logicsig
+	s = rand.Intn(len(secrets))
+	txnGroups[mod][0].Sig = crypto.Signature{}
+	txnGroups[mod][0].Txn.Sender = addrs[s]
+	txnGroups[mod][0].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
+	txnGroups[mod][0].Lsig.Args[0][0]++
+	txnGroups[mod][0].Lsig.Logic = op.Program
+	txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program)
+	u, _ = binary.Uvarint(txnGroups[mod][0].Txn.Note)
+	badTxnGroups[u] = struct{}{}
+	sv = streamVerifierTestCore(txnGroups, badTxnGroups, errors.New("rejected by logic"), t)
+	sv.WaitForStop()
+	mod++
+
+	_, signedTxn, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50)
+	txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs)
+	badTxnGroups = make(map[uint64]struct{})
+
+	// txn with sig and msig
+	txnGroups[mod][0].Msig = mSigTxn[0].Msig
+	u, _ = binary.Uvarint(txnGroups[mod][0].Txn.Note)
+	badTxnGroups[u] = struct{}{}
+	sv = streamVerifierTestCore(txnGroups, badTxnGroups, errTxnSigNotWellFormed, t)
+	sv.WaitForStop()
+}
+
+// TestStreamVerifierIdle starts the verifier and sends nothing, to trigger the timer, then sends a txn
+func TestStreamVerifierIdle(t
*testing.T) { + partitiontest.PartitionTest(t) + + numOfTxns := 1 + txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, protoMaxGroupSize, 0, 0.5) + + sv := streamVerifierTestCore(txnGroups, badTxnGroups, nil, t) + sv.WaitForStop() +} + +func TestGetNumberOfBatchableSigsInGroup(t *testing.T) { + partitiontest.PartitionTest(t) + + numOfTxns := 10 + txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0) + mod := 1 + + // txn with 0 sigs + txnGroups[mod][0].Sig = crypto.Signature{} + batchSigs, err := getNumberOfBatchableSigsInGroup(txnGroups[mod]) + require.ErrorIs(t, err, errTxnSigHasNoSig) + mod++ + + _, signedTxns, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50) + txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs) + batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[0]) + require.NoError(t, err) + require.Equal(t, uint64(1), batchSigs) + + // stateproof txn + txnGroups[mod][0].Sig = crypto.Signature{} + txnGroups[mod][0].Txn.Type = protocol.StateProofTx + txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender + batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod]) + require.NoError(t, err) + require.Equal(t, uint64(0), batchSigs) + mod++ + + // multisig + _, mSigTxn, _, _ := generateMultiSigTxn(1, 6, 50, t) + batchSigs, err = getNumberOfBatchableSigsInGroup(mSigTxn) + require.NoError(t, err) + require.Equal(t, uint64(2), batchSigs) + mod++ + + _, signedTxn, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50) + txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs) + + // logicsig + op, err := logic.AssembleString(`arg 0 +sha256 +byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= +==`) + require.NoError(t, err) + s := rand.Intn(len(secrets)) + txnGroups[mod][0].Sig = crypto.Signature{} + txnGroups[mod][0].Txn.Sender = addrs[s] + txnGroups[mod][0].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")} + txnGroups[mod][0].Lsig.Logic = op.Program + program := logic.Program(op.Program) + txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program) + batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod]) + require.NoError(t, err) + require.Equal(t, uint64(0), batchSigs) + mod++ + + // txn with sig and msig + _, signedTxn, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50) + txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs) + txnGroups[mod][0].Msig = mSigTxn[0].Msig + batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod]) + require.ErrorIs(t, err, errTxnSigNotWellFormed) +} + +// TestStreamVerifierPoolShutdown tests what happens when the exec pool shuts down +func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger + partitiontest.PartitionTest(t) + + // only one transaction should be sufficient for the batch verifier + // to realize the pool is terminated and to shut down + numOfTxns := 1 + txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, protoMaxGroupSize, 0, 0.5) + + // check the logged information + var logBuffer bytes.Buffer + log := logging.Base() + log.SetOutput(&logBuffer) + log.SetLevel(logging.Info) + + // prepare the stream verifier + numOfTxnGroups := len(txnGroups) + verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t) + _, buffLen := verificationPool.BufferSize() + + // make sure the pool is shut down and the buffer is full + holdTasks := make(chan interface{}) + for x := 0; x < 
 buffLen+runtime.NumCPU(); x++ {
+		verificationPool.EnqueueBacklog(context.Background(),
+			func(arg interface{}) interface{} { <-holdTasks; return nil }, nil, nil)
+	}
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		// Shutdown will block until all tasks held by holdTasks are released
+		verificationPool.Shutdown()
+	}()
+	// Send more tasks to break the backlog worker after b.pool.Enqueue returns the error
+	for x := 0; x < 100; x++ {
+		verificationPool.EnqueueBacklog(context.Background(),
+			func(arg interface{}) interface{} { <-holdTasks; return nil }, nil, nil)
+	}
+	// release the tasks
+	close(holdTasks)
+
+	// make sure EnqueueBacklog is returning an error
+	for x := 0; x < 10; x++ {
+		err := verificationPool.EnqueueBacklog(context.Background(),
+			func(arg interface{}) interface{} { return nil }, nil, nil)
+		require.Error(t, err, fmt.Sprintf("x = %d", x))
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cache := MakeVerifiedTransactionCache(50000)
+
+	stxnChan := make(chan *UnverifiedElement)
+	resultChan := make(chan *VerificationResult, txBacklogSize)
+	droppedChan := make(chan *UnverifiedElement)
+	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	require.NoError(t, err)
+	sv.Start(ctx)
+
+	errChan := make(chan error)
+
+	var badSigResultCounter int
+	var goodSigResultCounter int
+
+	wg.Add(1)
+	go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
+
+	// When the exec pool shuts down, the batch verifier should gracefully stop
+	// cancel the context so that the test can terminate
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		sv.WaitForStop()
+		cancel()
+	}()
+
+	wg.Add(1)
+	// send txn groups to be verified
+	go func() {
+		defer wg.Done()
+		for _, tg := range txnGroups {
+			select {
+			case <-ctx.Done():
+				break
+			case stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}:
+			}
+		}
+	}()
+	for err := range errChan {
+		require.ErrorIs(t, err, errShuttingDownError)
+	}
+	require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled")
+}
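Aside: the shutdown test above asserts on captured log output. The same capture pattern, reduced to the standard library's log package (go-algorand's logging is swapped out here, so this is an approximation, not the patch's code):

package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"
)

func main() {
	var logBuffer bytes.Buffer
	log.SetOutput(&logBuffer) // redirect the logger into a buffer
	log.SetFlags(0)

	log.Print("EnqueueBacklog returned an error") // stand-in for the code under test

	// assertion side: inspect the captured output
	fmt.Println(strings.Contains(logBuffer.String(), "returned an error")) // prints: true
}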
+
+// TestStreamVerifierRestart tests what happens when the context is canceled
+func TestStreamVerifierRestart(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	numOfTxns := 1000
+	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5)
+
+	// prepare the stream verifier
+	numOfTxnGroups := len(txnGroups)
+	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
+	defer verificationPool.Shutdown()
+
+	cache := MakeVerifiedTransactionCache(50)
+
+	stxnChan := make(chan *UnverifiedElement)
+	resultChan := make(chan *VerificationResult, txBacklogSize)
+	droppedChan := make(chan *UnverifiedElement)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	require.NoError(t, err)
+	sv.Start(ctx)
+
+	errChan := make(chan error)
+
+	var badSigResultCounter int
+	var goodSigResultCounter int
+
+	ctx2, cancel2 := context.WithCancel(context.Background())
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	go processResults(ctx2, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
+
+	wg.Add(1)
+	// send txn groups to be verified
+	go func() {
+		defer wg.Done()
+		for i, tg := range txnGroups {
+			if (i+1)%10 == 0 {
+				cancel()
+				sv.WaitForStop()
+				ctx, cancel = context.WithCancel(context.Background())
+				sv.Start(ctx)
+			}
+			select {
+			case <-ctx2.Done():
+				break
+			case stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}:
+			}
+		}
+		cancel()
+	}()
+	for err := range errChan {
+		require.ErrorIs(t, err, errShuttingDownError)
+	}
+	wg.Wait()
+	sv.WaitForStop()
+	cancel2() // not necessary, but the golint will want to see this
+}
+
+// TestStreamVerifierBlockWatcher runs multiple goroutines to check the concurrency and correctness of the block watcher
+func TestStreamVerifierBlockWatcher(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	blkHdr := createDummyBlockHeader()
+	nbw := MakeNewBlockWatcher(blkHdr)
+	startingRound := blkHdr.Round
+
+	wg := sync.WaitGroup{}
+	count := 100
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for x := 0; x < 100; x++ {
+			blkHdr.Round++
+			nbw.OnNewBlock(bookkeeping.Block{BlockHeader: blkHdr}, ledgercore.StateDelta{})
+			time.Sleep(10 * time.Millisecond)
+			nbw.OnNewBlock(bookkeeping.Block{BlockHeader: blkHdr}, ledgercore.StateDelta{})
+		}
+	}()
+
+	bhStore := make(map[basics.Round]*bookkeeping.BlockHeader)
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			bh := nbw.getBlockHeader()
+			bhStore[bh.Round] = bh
+			if bh.Round == startingRound+10 {
+				break
+			}
+		}
+	}()
+	wg.Wait()
+	bh := nbw.getBlockHeader()
+	require.Equal(t, uint64(startingRound)+uint64(count), uint64(bh.Round))
+	// There should be no inconsistency after new blocks are added
+	for r, bh := range bhStore {
+		require.Equal(t, r, bh.Round)
+	}
+}
+
+func getSaturatedExecPool(t *testing.T) (execpool.BacklogPool, chan interface{}) {
+	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
+	_, buffLen := verificationPool.BufferSize()
+
+	// make the buffer full to control when the tasks get executed
+	holdTasks := make(chan interface{})
+	for x := 0; x < buffLen+runtime.NumCPU()+1; x++ {
+		verificationPool.EnqueueBacklog(context.Background(),
+			func(arg interface{}) interface{} {
+				<-holdTasks
+				return nil
+			}, nil, nil)
+	}
+	return verificationPool, holdTasks
+}
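Aside: getSaturatedExecPool fills the backlog with tasks that block on a "hold" channel, so nothing new can be scheduled until that channel is closed. A minimal standalone sketch of the same trick (invented names, not part of the patch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	holdTasks := make(chan struct{})
	queue := make(chan func(), 2) // tiny backlog standing in for the exec pool buffer

	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // a single worker
		defer wg.Done()
		for task := range queue {
			task()
		}
	}()

	for i := 0; i < 3; i++ { // one running + two queued: saturated
		queue <- func() { <-holdTasks }
	}

	select {
	case queue <- func() {}:
		fmt.Println("unexpectedly queued")
	default:
		fmt.Println("pool saturated") // the buffer is full, so this branch runs
	}

	close(holdTasks) // release the blocked tasks
	close(queue)
	wg.Wait()
}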
+
+// TestStreamVerifierCtxCancel tests the termination when the ctx is canceled
+// To make sure that the batchingLoop is still working on a batch when the
+// ctx is canceled, this test first saturates the exec pool buffer, then
+// sends a txn and immediately cancels the ctx so that the batch is not
+// passed to the exec pool yet, but is in batchingLoop
+func TestStreamVerifierCtxCancel(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	verificationPool, holdTasks := getSaturatedExecPool(t)
+	defer verificationPool.Shutdown()
+	ctx, cancel := context.WithCancel(context.Background())
+	cache := MakeVerifiedTransactionCache(50)
+	stxnChan := make(chan *UnverifiedElement)
+	resultChan := make(chan *VerificationResult, txBacklogSize)
+	droppedChan := make(chan *UnverifiedElement)
+	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	require.NoError(t, err)
+	sv.Start(ctx)
+
+	var result *VerificationResult
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		// no verification tasks should be executed
+		// one result should be returned
+		result = <-resultChan
+	}()
+
+	// send batchSizeBlockLimit after the exec pool buffer is full
+	numOfTxns := 1
+	txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0.5)
+	stxnChan <- &UnverifiedElement{TxnGroup: txnGroups[0], BacklogMessage: nil}
+	// cancel the ctx before the sig is sent to the exec pool
+	cancel()
+
+	// the main loop should stop after cancel()
+	sv.WaitForStop()
+
+	// release the tasks
+	close(holdTasks)
+
+	wg.Wait()
+	require.ErrorIs(t, result.Err, errShuttingDownError)
+}
+
+// TestStreamVerifierCtxCancelPoolQueue tests the termination when the ctx is canceled
+// To make sure that the batchingLoop is still working on a batch when the
+// ctx is canceled, this test first saturates the exec pool buffer, then
+// sends a txn and cancels the ctx after multiple waitForNextTxnDuration
+// so that the batch is sent to the pool. Since the pool is saturated,
+// the task will be stuck waiting to be queued when the context is canceled;
+// everything should be gracefully terminated
+func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger
+	partitiontest.PartitionTest(t)
+
+	verificationPool, holdTasks := getSaturatedExecPool(t)
+
+	// check the logged information
+	var logBuffer bytes.Buffer
+	log := logging.Base()
+	log.SetOutput(&logBuffer)
+	log.SetLevel(logging.Info)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cache := MakeVerifiedTransactionCache(50)
+	stxnChan := make(chan *UnverifiedElement)
+	resultChan := make(chan *VerificationResult, txBacklogSize)
+	droppedChan := make(chan *UnverifiedElement)
+	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	require.NoError(t, err)
+	sv.Start(ctx)
+
+	var result *VerificationResult
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			result = <-resultChan
+			// at least one errShuttingDownError is expected
+			if result.Err != errShuttingDownError {
+				continue
+			}
+			break
+		}
+	}()
+
+	// send batchSizeBlockLimit after the exec pool buffer is full
+	numOfTxns := 1
+	txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0.5)
+
+	wg.Add(1)
+	// run in separate goroutine because the exec pool is blocked here, and this will not advance
+	// until holdTasks is closed
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			// Normally, a single txn is sufficient, but the goroutines could be scheduled in such a way that
+			// the single transaction slips through and passes the batch verifier before the exec pool shuts down.
+			// This happens when close(holdTasks) runs and frees the exec pool, and lets the txns get verified, before
+			// verificationPool.Shutdown() executes.
+			case stxnChan <- &UnverifiedElement{TxnGroup: txnGroups[0], BacklogMessage: nil}:
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	// cancel the ctx as the sig is not yet sent to the exec pool
+	// the test might sporadically fail if between sending the txn above
+	// and the cancellation, 2 x waitForNextTxnDuration elapses (4ms)
+	time.Sleep(6 * waitForNextTxnDuration)
+	go func() {
+		// wait a bit before releasing the tasks, so that the verificationPool ctx first gets canceled
+		time.Sleep(20 * time.Millisecond)
+		close(holdTasks)
+	}()
+	verificationPool.Shutdown()
+
+	// the main loop should stop before calling cancel() when the exec pool shuts down and returns an error
+	sv.WaitForStop()
+	cancel()
+
+	wg.Wait()
+	require.ErrorIs(t, result.Err, errShuttingDownError)
+	require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled")
+}
+
+// TestStreamVerifierPostVBlocked tests the behavior when the return channel (result chan) of verified
+// transactions is blocked, and checks droppedFromPool counter to confirm the drops
+func TestStreamVerifierPostVBlocked(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	// prepare the stream verifier
+	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
+	defer verificationPool.Shutdown()
+	errChan := make(chan error)
+	var badSigResultCounter int
+	var goodSigResultCounter int
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	cache := MakeVerifiedTransactionCache(50)
+
+	txBacklogSizeMod := txBacklogSize / 20
+
+	stxnChan := make(chan *UnverifiedElement)
+	resultChan := make(chan *VerificationResult, txBacklogSizeMod)
+	droppedChan := make(chan *UnverifiedElement)
+	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	require.NoError(t, err)
+
+	defer close(droppedChan)
+	go func() {
+		for range droppedChan {
+			droppedFromPool.Inc(nil)
+		}
+	}()
+
+	// start the verifier
+	sv.Start(ctx)
+	overflow := 3
+	// send txBacklogSizeMod + 3 transactions to overflow the result buffer
+	numOfTxns := txBacklogSizeMod + overflow
+	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5)
+	numOfTxnGroups := len(txnGroups)
+	for _, tg := range txnGroups {
+		stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
+	}
+
+	var droppedPool uint64
+	// wait until overflow transactions are dropped
+	for w := 0; w < 100; w++ {
+		droppedPool = droppedFromPool.GetUint64Value()
+		if droppedPool >= uint64(overflow) {
+			break
+		}
+		time.Sleep(time.Millisecond * 20)
+	}
+
+	require.Equal(t, uint64(overflow), droppedPool)
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	// make sure the other results are fine
+	go processResults(ctx, errChan, resultChan, numOfTxnGroups-overflow, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
+
+	for err := range errChan {
+		require.ErrorIs(t, err, errShuttingDownError)
+		fmt.Println(badTxnGroups)
+	}
+
+	// check if more transactions can be accepted
+	errChan = make(chan error)
+
+	wg.Add(1)
+	// make sure the other results are fine
+	txnGroups, badTxnGroups2 := getSignedTransactions(numOfTxns, 1, numOfTxns, 0.5)
+	// need to combine these, since leftovers from the previous one could still come out
+	for b := range badTxnGroups2 {
+		badTxnGroups[b] = struct{}{}
+	}
+	go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
+
+	for _, tg := range txnGroups {
+		stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
+	}
+
+	for err := range errChan {
+		require.ErrorIs(t, err, errShuttingDownError)
+		fmt.Println(badTxnGroups)
+	}
+
+	wg.Wait()
+}
+
+func TestStreamVerifierMakeStreamVerifierErr(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	_, err := MakeStreamVerifier(nil, nil, nil, &DummyLedgerForSignature{badHdr: true}, nil, nil)
+	require.Error(t, err)
+}
+
+// TestStreamVerifierCancelWhenPooled tests the case where the ctx is canceled after the verification
+// task is queued to the exec pool and before the task is executed in the pool
+func TestStreamVerifierCancelWhenPooled(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	numOfTxns := 1000
+	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5)
+
+	// prepare the stream verifier
+	numOfTxnGroups := len(txnGroups)
+	execPool := execpool.MakePool(t)
+	defer execPool.Shutdown()
+	verificationPool := execpool.MakeBacklog(execPool, 64, execpool.LowPriority, t)
+	defer verificationPool.Shutdown()
+
+	cache := MakeVerifiedTransactionCache(50)
+
+	stxnChan := make(chan *UnverifiedElement)
+	resultChan := make(chan *VerificationResult, txBacklogSize)
+	droppedChan := make(chan *UnverifiedElement)
+	ctx, cancel := context.WithCancel(context.Background())
+	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	require.NoError(t, err)
+	sv.Start(ctx)
+
+	errChan := make(chan error)
+
+	var badSigResultCounter int
+	var goodSigResultCounter int
+
+	ctx2, cancel2 := context.WithCancel(context.Background())
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	go processResults(ctx2, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
+
+	wg.Add(1)
+	// send txn groups to be verified
+	go func() {
+		defer wg.Done()
+		for _, tg := range txnGroups {
+			stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
+		}
+		// cancel the ctx, and expect at least one task queued to the pool but not yet executed
+		cancel()
+	}()
+	for err := range errChan {
+		require.ErrorIs(t, err, errShuttingDownError)
+	}
+	wg.Wait()
+	sv.WaitForStop()
+	cancel2() // not necessary, but the golint will want to see this
+}
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 2edd69aa05..320874ee5e 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -21,9 +21,6 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
-	"sync"
-	"sync/atomic"
-	"time"
 
 	"github.com/algorand/go-algorand/config"
 	"github.com/algorand/go-algorand/crypto"
@@ -31,8 +28,6 @@ import (
 	"github.com/algorand/go-algorand/data/bookkeeping"
 	"github.com/algorand/go-algorand/data/transactions"
 	"github.com/algorand/go-algorand/data/transactions/logic"
-	"github.com/algorand/go-algorand/ledger/ledgercore"
-	"github.com/algorand/go-algorand/logging"
 	"github.com/algorand/go-algorand/protocol"
 	"github.com/algorand/go-algorand/util/execpool"
 	"github.com/algorand/go-algorand/util/metrics"
@@ -58,19 +53,6 @@ var errShuttingDownError = errors.New("not verified, verifier is shutting down")
 // show that these are realistic numbers )
 const txnPerWorksetThreshold = 32
 
-// batchSizeBlockLimit is the limit when the batch exceeds, will be added to the exec pool, even if the pool is saturated
-// and the batch verifier will block until the exec pool accepts the batch
-const batchSizeBlockLimit = 1024
-
-// waitForNextTxnDuration is the time to wait before sending the
batch to the exec pool -// If the incoming txn rate is low, a txn in the batch may wait no less than -// waitForNextTxnDuration before it is set for verification. -// This can introduce a latency to the propagation of a transaction in the network, -// since every relay will go through this wait time before broadcasting the txn. -// However, when the incoming txn rate is high, the batch will fill up quickly and will send -// for signature evaluation before waitForNextTxnDuration. -const waitForNextTxnDuration = 2 * time.Millisecond - // When the PaysetGroups is generating worksets, it enqueues up to concurrentWorksets entries to the execution pool. This serves several // purposes : // - if the verification task need to be aborted, there are only concurrentWorksets entries that are currently redundant on the execution pool queue. @@ -584,377 +566,3 @@ func (w *worksetBuilder) next() (txnGroups [][]transactions.SignedTxn) { func (w *worksetBuilder) completed() bool { return w.idx >= len(w.payset) } - -// UnverifiedElement is the element passed to the Stream verifier -// BacklogMessage is a *txBacklogMsg from data/txHandler.go which needs to be -// passed back to that context -type UnverifiedElement struct { - TxnGroup []transactions.SignedTxn - BacklogMessage interface{} -} - -// VerificationResult is the result of the txn group verification -// BacklogMessage is the reference associated with the txn group which was -// initially passed to the stream verifier -type VerificationResult struct { - TxnGroup []transactions.SignedTxn - BacklogMessage interface{} - Err error -} - -// StreamVerifier verifies txn groups received through the stxnChan channel, and returns the -// results through the resultChan -type StreamVerifier struct { - resultChan chan<- *VerificationResult - droppedChan chan<- *UnverifiedElement - stxnChan <-chan *UnverifiedElement - verificationPool execpool.BacklogPool - ctx context.Context - cache VerifiedTransactionCache - activeLoopWg sync.WaitGroup - nbw *NewBlockWatcher - ledger logic.LedgerForSignature -} - -// NewBlockWatcher is a struct used to provide a new block header to the -// stream verifier -type NewBlockWatcher struct { - blkHeader atomic.Value -} - -// MakeNewBlockWatcher construct a new block watcher with the initial blkHdr -func MakeNewBlockWatcher(blkHdr bookkeeping.BlockHeader) (nbw *NewBlockWatcher) { - nbw = &NewBlockWatcher{} - nbw.blkHeader.Store(&blkHdr) - return nbw -} - -// OnNewBlock implements the interface to subscribe to new block notifications from the ledger -func (nbw *NewBlockWatcher) OnNewBlock(block bookkeeping.Block, delta ledgercore.StateDelta) { - bh := nbw.blkHeader.Load().(*bookkeeping.BlockHeader) - if bh.Round >= block.BlockHeader.Round { - return - } - nbw.blkHeader.Store(&block.BlockHeader) -} - -func (nbw *NewBlockWatcher) getBlockHeader() (bh *bookkeeping.BlockHeader) { - return nbw.blkHeader.Load().(*bookkeeping.BlockHeader) -} - -type batchLoad struct { - txnGroups [][]transactions.SignedTxn - groupCtxs []*GroupContext - elementBacklogMessage []interface{} - messagesForTxn []int -} - -func makeBatchLoad(l int) (bl batchLoad) { - bl.txnGroups = make([][]transactions.SignedTxn, 0, l) - bl.groupCtxs = make([]*GroupContext, 0, l) - bl.elementBacklogMessage = make([]interface{}, 0, l) - bl.messagesForTxn = make([]int, 0, l) - return bl -} - -func (bl *batchLoad) addLoad(txngrp []transactions.SignedTxn, gctx *GroupContext, backlogMsg interface{}, numBatchableSigs int) { - bl.txnGroups = append(bl.txnGroups, txngrp) - bl.groupCtxs = 
append(bl.groupCtxs, gctx) - bl.elementBacklogMessage = append(bl.elementBacklogMessage, backlogMsg) - bl.messagesForTxn = append(bl.messagesForTxn, numBatchableSigs) - -} - -// LedgerForStreamVerifier defines the ledger methods used by the StreamVerifier. -type LedgerForStreamVerifier interface { - logic.LedgerForSignature - RegisterBlockListeners([]ledgercore.BlockListener) - Latest() basics.Round - BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) -} - -// MakeStreamVerifier creates a new stream verifier and returns the chans used to send txn groups -// to it and obtain the txn signature verification result from -func MakeStreamVerifier(stxnChan <-chan *UnverifiedElement, resultChan chan<- *VerificationResult, - droppedChan chan<- *UnverifiedElement, ledger LedgerForStreamVerifier, - verificationPool execpool.BacklogPool, cache VerifiedTransactionCache) (*StreamVerifier, error) { - - latest := ledger.Latest() - latestHdr, err := ledger.BlockHdr(latest) - if err != nil { - return nil, errors.New("MakeStreamVerifier: Could not get header for previous block") - } - - nbw := MakeNewBlockWatcher(latestHdr) - ledger.RegisterBlockListeners([]ledgercore.BlockListener{nbw}) - - return &StreamVerifier{ - resultChan: resultChan, - stxnChan: stxnChan, - droppedChan: droppedChan, - verificationPool: verificationPool, - cache: cache, - nbw: nbw, - ledger: ledger, - }, nil -} - -// Start is called when the verifier is created and whenever it needs to restart after -// the ctx is canceled -func (sv *StreamVerifier) Start(ctx context.Context) { - sv.ctx = ctx - sv.activeLoopWg.Add(1) - go sv.batchingLoop() -} - -// WaitForStop waits until the batching loop terminates afer the ctx is canceled -func (sv *StreamVerifier) WaitForStop() { - sv.activeLoopWg.Wait() -} - -func (sv *StreamVerifier) cleanup(pending []*UnverifiedElement) { - // report an error for the unchecked txns - // drop the messages without reporting if the receiver does not consume - for _, uel := range pending { - sv.sendResult(uel.TxnGroup, uel.BacklogMessage, errShuttingDownError) - } -} - -func (sv *StreamVerifier) batchingLoop() { - defer sv.activeLoopWg.Done() - timer := time.NewTicker(waitForNextTxnDuration) - defer timer.Stop() - var added bool - var numberOfSigsInCurrent uint64 - var numberOfBatchAttempts uint64 - ue := make([]*UnverifiedElement, 0, 8) - defer func() { sv.cleanup(ue) }() - for { - select { - case stx := <-sv.stxnChan: - numberOfBatchableSigsInGroup, err := getNumberOfBatchableSigsInGroup(stx.TxnGroup) - if err != nil { - // wrong number of signatures - sv.sendResult(stx.TxnGroup, stx.BacklogMessage, err) - continue - } - - // if no batchable signatures here, send this as a task of its own - if numberOfBatchableSigsInGroup == 0 { - err := sv.addVerificationTaskToThePoolNow([]*UnverifiedElement{stx}) - if err != nil { - return - } - continue // stx is handled, continue - } - - // add this txngrp to the list of batchable txn groups - numberOfSigsInCurrent = numberOfSigsInCurrent + numberOfBatchableSigsInGroup - ue = append(ue, stx) - if numberOfSigsInCurrent > txnPerWorksetThreshold { - // enough transaction in the batch to efficiently verify - - if numberOfSigsInCurrent > batchSizeBlockLimit { - // do not consider adding more txns to this batch. 
- // bypass the exec pool situation and queue anyway - // this is to prevent creation of very large batches - err := sv.addVerificationTaskToThePoolNow(ue) - if err != nil { - return - } - added = true - } else { - added, err = sv.tryAddVerificationTaskToThePool(ue) - if err != nil { - return - } - } - if added { - numberOfSigsInCurrent = 0 - ue = make([]*UnverifiedElement, 0, 8) - numberOfBatchAttempts = 0 - } else { - // was not added because of the exec pool buffer length - numberOfBatchAttempts++ - } - } - case <-timer.C: - // timer ticked. it is time to send the batch even if it is not full - if numberOfSigsInCurrent == 0 { - // nothing batched yet... wait some more - continue - } - var err error - if numberOfBatchAttempts > 1 { - // bypass the exec pool situation and queue anyway - // this is to prevent long delays in transaction propagation - // at least one transaction here has waited 3 x waitForNextTxnDuration - err = sv.addVerificationTaskToThePoolNow(ue) - added = true - } else { - added, err = sv.tryAddVerificationTaskToThePool(ue) - } - if err != nil { - return - } - if added { - numberOfSigsInCurrent = 0 - ue = make([]*UnverifiedElement, 0, 8) - numberOfBatchAttempts = 0 - } else { - // was not added because of the exec pool buffer length. wait for some more txns - numberOfBatchAttempts++ - } - case <-sv.ctx.Done(): - return - } - } -} - -func (sv *StreamVerifier) sendResult(veTxnGroup []transactions.SignedTxn, veBacklogMessage interface{}, err error) { - // send the txn result out the pipe - select { - case sv.resultChan <- &VerificationResult{ - TxnGroup: veTxnGroup, - BacklogMessage: veBacklogMessage, - Err: err, - }: - default: - // we failed to write to the output queue, since the queue was full. - sv.droppedChan <- &UnverifiedElement{veTxnGroup, veBacklogMessage} - } -} - -func (sv *StreamVerifier) tryAddVerificationTaskToThePool(ue []*UnverifiedElement) (added bool, err error) { - // if the exec pool buffer is full, can go back and collect - // more signatures instead of waiting in the exec pool buffer - // more signatures to the batch do not harm performance but introduce latency when delayed (see crypto.BenchmarkBatchVerifierBig) - - // if the buffer is full - if l, c := sv.verificationPool.BufferSize(); l == c { - return false, nil - } - err = sv.addVerificationTaskToThePoolNow(ue) - if err != nil { - // An error is returned when the context of the pool expires - return false, err - } - return true, nil -} - -func (sv *StreamVerifier) addVerificationTaskToThePoolNow(ue []*UnverifiedElement) error { - // if the context is canceled when the task is in the queue, it should be canceled - // copy the ctx here so that when the StreamVerifier is started again, and a new context - // is created, this task still gets canceled due to the ctx at the time of this task - taskCtx := sv.ctx - function := func(arg interface{}) interface{} { - if taskCtx.Err() != nil { - // ctx is canceled. 
the results will be returned - sv.cleanup(ue) - return nil - } - - ue := arg.([]*UnverifiedElement) - batchVerifier := crypto.MakeBatchVerifier() - - bl := makeBatchLoad(len(ue)) - // TODO: separate operations here, and get the sig verification inside the LogicSig to the batch here - blockHeader := sv.nbw.getBlockHeader() - for _, ue := range ue { - groupCtx, err := txnGroupBatchPrep(ue.TxnGroup, blockHeader, sv.ledger, batchVerifier, nil) - if err != nil { - // verification failed, no need to add the sig to the batch, report the error - sv.sendResult(ue.TxnGroup, ue.BacklogMessage, err) - continue - } - totalBatchCount := batchVerifier.GetNumberOfEnqueuedSignatures() - bl.addLoad(ue.TxnGroup, groupCtx, ue.BacklogMessage, totalBatchCount) - } - - failed, err := batchVerifier.VerifyWithFeedback() - // this error can only be crypto.ErrBatchHasFailedSigs - if err == nil { // success, all signatures verified - for i := range bl.txnGroups { - sv.sendResult(bl.txnGroups[i], bl.elementBacklogMessage[i], nil) - } - sv.cache.AddPayset(bl.txnGroups, bl.groupCtxs) - return nil - } - - verifiedTxnGroups := make([][]transactions.SignedTxn, 0, len(bl.txnGroups)) - verifiedGroupCtxs := make([]*GroupContext, 0, len(bl.groupCtxs)) - failedSigIdx := 0 - for txgIdx := range bl.txnGroups { - txGroupSigFailed := false - for failedSigIdx < bl.messagesForTxn[txgIdx] { - if failed[failedSigIdx] { - // if there is a failed sig check, then no need to check the rest of the - // sigs for this txnGroup - failedSigIdx = bl.messagesForTxn[txgIdx] - txGroupSigFailed = true - } else { - // proceed to check the next sig belonging to this txnGroup - failedSigIdx++ - } - } - var result error - if !txGroupSigFailed { - verifiedTxnGroups = append(verifiedTxnGroups, bl.txnGroups[txgIdx]) - verifiedGroupCtxs = append(verifiedGroupCtxs, bl.groupCtxs[txgIdx]) - } else { - result = err - } - sv.sendResult(bl.txnGroups[txgIdx], bl.elementBacklogMessage[txgIdx], result) - } - // loading them all at once by locking the cache once - sv.cache.AddPayset(verifiedTxnGroups, verifiedGroupCtxs) - return nil - } - - // EnqueueBacklog returns an error when the context is canceled - err := sv.verificationPool.EnqueueBacklog(sv.ctx, function, ue, nil) - if err != nil { - logging.Base().Infof("addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: %v", err) - } - return err -} - -func getNumberOfBatchableSigsInGroup(stxs []transactions.SignedTxn) (batchSigs uint64, err error) { - batchSigs = 0 - for i := range stxs { - count, err := getNumberOfBatchableSigsInTxn(&stxs[i]) - if err != nil { - return 0, err - } - batchSigs = batchSigs + count - } - return -} - -func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn) (uint64, error) { - sigType, err := checkTxnSigTypeCounts(stx) - if err != nil { - return 0, err - } - switch sigType { - case regularSig: - return 1, nil - case multiSig: - sig := stx.Msig - batchSigs := uint64(0) - for _, subsigi := range sig.Subsigs { - if (subsigi.Sig != crypto.Signature{}) { - batchSigs++ - } - } - return batchSigs, nil - case logicSig: - // Currently the sigs in here are not batched. Something to consider later. 
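-		// (descriptive note: the ed25519 signature carried inside the LogicSig is
-		// checked during logic evaluation rather than through this shared batch,
-		// hence the zero count; the TODO in addVerificationTaskToThePoolNow
-		// tracks moving it into the batch)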
-		return 0, nil
-	case stateProofTxn:
-		return 0, nil
-	default:
-		// this case is impossible
-		return 0, nil
-	}
-}
diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go
index eb03917fbd..b279cf4070 100644
--- a/data/transactions/verify/txn_test.go
+++ b/data/transactions/verify/txn_test.go
@@ -17,14 +17,10 @@ package verify
 import (
-	"bytes"
 	"context"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"math/rand"
-	"runtime"
-	"sync"
 	"testing"
 	"time"
 
@@ -39,11 +35,9 @@ import (
 	"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
 	"github.com/algorand/go-algorand/data/txntest"
 	"github.com/algorand/go-algorand/ledger/ledgercore"
-	"github.com/algorand/go-algorand/logging"
 	"github.com/algorand/go-algorand/protocol"
 	"github.com/algorand/go-algorand/test/partitiontest"
 	"github.com/algorand/go-algorand/util/execpool"
-	"github.com/algorand/go-algorand/util/metrics"
 )
 
 var feeSink = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
@@ -968,801 +962,3 @@ func BenchmarkTxn(b *testing.B) {
 	}
 	b.StopTimer()
 }
-
-var droppedFromPool = metrics.MakeCounter(metrics.MetricName{Name: "test_streamVerifierTestCore_messages_dropped_pool", Description: "Test streamVerifierTestCore messages dropped from pool"})
-
-func streamVerifierTestCore(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{},
-	expectedError error, t *testing.T) (sv *StreamVerifier) {
-
-	numOfTxnGroups := len(txnGroups)
-	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
-	defer verificationPool.Shutdown()
-
-	ctx, cancel := context.WithCancel(context.Background())
-	cache := MakeVerifiedTransactionCache(50000)
-
-	defer cancel()
-
-	stxnChan := make(chan *UnverifiedElement)
-	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
-	require.NoError(t, err)
-	sv.Start(ctx)
-
-	wg := sync.WaitGroup{}
-
-	errChan := make(chan error)
-	var badSigResultCounter int
-	var goodSigResultCounter int
-
-	wg.Add(1)
-	go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
-
-	wg.Add(1)
-	// send txn groups to be verified
-	go func() {
-		defer wg.Done()
-		for _, tg := range txnGroups {
-			stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
-		}
-	}()
-
-	for err := range errChan {
-		require.ErrorContains(t, err, expectedError.Error())
-	}
-
-	wg.Wait()
-
-	verifyResults(txnGroups, badTxnGroups, cache, badSigResultCounter, goodSigResultCounter, t)
-	return sv
-}
-
-func processResults(ctx context.Context, errChan chan<- error, resultChan <-chan *VerificationResult,
-	numOfTxnGroups int, badTxnGroups map[uint64]struct{},
-	badSigResultCounter, goodSigResultCounter *int, wg *sync.WaitGroup) {
-	defer wg.Done()
-	defer close(errChan)
-	// process the results
-	for x := 0; x < numOfTxnGroups; x++ {
-		select {
-		case <-ctx.Done():
-		case result := <-resultChan:
-			u, _ := binary.Uvarint(result.TxnGroup[0].Txn.Note)
-			if _, has := badTxnGroups[u]; has {
-				(*badSigResultCounter)++
-				if result.Err == nil {
-					err := fmt.Errorf("%dth (%d) transaction verified with a bad sig", x, u)
-					errChan <- err
-					return
-				}
-				// we expected an error, but it is not the general crypto error
-				if result.Err != crypto.ErrBatchHasFailedSigs {
-					errChan <- result.Err
-				}
-			} else {
-				(*goodSigResultCounter)++
-				if result.Err != nil {
-					errChan <- result.Err
-				}
-			}
-		}
-	}
-}
-
-func verifyResults(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{},
-	cache VerifiedTransactionCache,
-	badSigResultCounter, goodSigResultCounter int, t *testing.T) {
-	// check if all txns have been checked.
-	require.Equal(t, len(txnGroups), badSigResultCounter+goodSigResultCounter)
-	require.Equal(t, len(badTxnGroups), badSigResultCounter)
-
-	// check the cached transactions
-	// note that the result of each verified txn group is sent before the batch is added to the cache
-	// the test does not know whether the batch has been added to the cache yet, so some elements might be missing from the cache
-	unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
-	require.GreaterOrEqual(t, len(unverifiedGroups), badSigResultCounter)
-	for _, txn := range unverifiedGroups {
-		u, _ := binary.Uvarint(txn[0].Txn.Note)
-		if _, has := badTxnGroups[u]; has {
-			delete(badTxnGroups, u)
-		}
-	}
-	require.Empty(t, badTxnGroups, "unverifiedGroups should have all the transactions with invalid sigs")
-}
-
-func getSignedTransactions(numOfTxns, maxGrpSize, noteOffset int, badTxnProb float32) (txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{}) {
-
-	_, signedTxn, secrets, addrs := generateTestObjects(numOfTxns, 20, noteOffset, 50)
-	txnGroups = generateTransactionGroups(maxGrpSize, signedTxn, secrets, addrs)
-
-	badTxnGroups = make(map[uint64]struct{})
-
-	for tgi := range txnGroups {
-		if rand.Float32() < badTxnProb {
-			// make a bad sig
-			t := rand.Intn(len(txnGroups[tgi]))
-			txnGroups[tgi][t].Sig[0] = txnGroups[tgi][t].Sig[0] + 1
-			u, _ := binary.Uvarint(txnGroups[tgi][0].Txn.Note)
-			badTxnGroups[u] = struct{}{}
-		}
-	}
-	return
-
-}
-
-// TestStreamVerifier tests the basic functionality
-func TestStreamVerifier(t *testing.T) {
-	partitiontest.PartitionTest(t)
-
-	numOfTxns := 4000
-	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, protoMaxGroupSize, 0, 0.5)
-
-	sv := streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
-	sv.WaitForStop()
-}
-
-// TestStreamVerifierCases tests various valid and invalid transaction signature cases
-func TestStreamVerifierCases(t *testing.T) {
-	partitiontest.PartitionTest(t)
-
-	numOfTxns := 10
-	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0)
-	mod := 1
-
-	// txn with 0 sigs
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	u, _ := binary.Uvarint(txnGroups[mod][0].Txn.Note)
-	badTxnGroups[u] = struct{}{}
-	sv := streamVerifierTestCore(txnGroups, badTxnGroups, errTxnSigHasNoSig, t)
-	sv.WaitForStop()
-	mod++
-
-	_, signedTxns, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs)
-	badTxnGroups = make(map[uint64]struct{})
-
-	// invalid stateproof txn
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	txnGroups[mod][0].Txn.Type = protocol.StateProofTx
-	txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender
-	u, _ = binary.Uvarint(txnGroups[mod][0].Txn.Note)
-	badTxnGroups[u] = struct{}{}
-	errFeeMustBeZeroInStateproofTxn := errors.New("fee must be zero in state-proof transaction")
-	sv = streamVerifierTestCore(txnGroups, badTxnGroups, errFeeMustBeZeroInStateproofTxn, t)
-	sv.WaitForStop()
-	mod++
-
-	_, signedTxns, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs)
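-	// (descriptive note: a state proof txn carries no regular signature at all,
-	// so the cases around it exercise pure well-formedness rules, such as the
-	// zero-fee requirement, which run before any signature is enqueued for batching)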
-	badTxnGroups = make(map[uint64]struct{})
-
-	// acceptable stateproof txn
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	txnGroups[mod][0].Txn.Note = nil
-	txnGroups[mod][0].Txn.Type = protocol.StateProofTx
-	txnGroups[mod][0].Txn.Header.Fee = basics.MicroAlgos{Raw: 0}
-	txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender
-	txnGroups[mod][0].Txn.PaymentTxnFields = transactions.PaymentTxnFields{}
-	sv = streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
-	sv.WaitForStop()
-	mod++
-
-	// multisig
-	_, mSigTxn, _, _ := generateMultiSigTxn(1, 6, 50, t)
-	txnGroups[mod] = mSigTxn
-	sv = streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
-	sv.WaitForStop()
-	mod++
-
-	_, signedTxn, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs)
-	badTxnGroups = make(map[uint64]struct{})
-
-	// logicsig
-	// add a simple logic that verifies this condition:
-	// sha256(arg0) == base64decode(5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=)
-	op, err := logic.AssembleString(`arg 0
-sha256
-byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=
-==`)
-	require.NoError(t, err)
-	s := rand.Intn(len(secrets))
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	txnGroups[mod][0].Txn.Sender = addrs[s]
-	txnGroups[mod][0].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
-	txnGroups[mod][0].Lsig.Logic = op.Program
-	program := logic.Program(op.Program)
-	txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program)
-	sv = streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
-	sv.WaitForStop()
-	mod++
-
-	// bad logicsig
-	s = rand.Intn(len(secrets))
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	txnGroups[mod][0].Txn.Sender = addrs[s]
-	txnGroups[mod][0].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
-	txnGroups[mod][0].Lsig.Args[0][0]++
-	txnGroups[mod][0].Lsig.Logic = op.Program
-	txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program)
-	u, _ = binary.Uvarint(txnGroups[mod][0].Txn.Note)
-	badTxnGroups[u] = struct{}{}
-	sv = streamVerifierTestCore(txnGroups, badTxnGroups, errors.New("rejected by logic"), t)
-	sv.WaitForStop()
-	mod++
-
-	_, signedTxn, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs)
-	badTxnGroups = make(map[uint64]struct{})
-
-	// txn with sig and msig
-	txnGroups[mod][0].Msig = mSigTxn[0].Msig
-	u, _ = binary.Uvarint(txnGroups[mod][0].Txn.Note)
-	badTxnGroups[u] = struct{}{}
-	sv = streamVerifierTestCore(txnGroups, badTxnGroups, errTxnSigNotWellFormed, t)
-	sv.WaitForStop()
-}
-
-// TestStreamVerifierIdle starts the verifier and sends nothing, to trigger the timer, then sends a txn
-func TestStreamVerifierIdle(t *testing.T) {
-	partitiontest.PartitionTest(t)
-
-	numOfTxns := 1
-	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, protoMaxGroupSize, 0, 0.5)
-
-	sv := streamVerifierTestCore(txnGroups, badTxnGroups, nil, t)
-	sv.WaitForStop()
-}
-
-func TestGetNumberOfBatchableSigsInGroup(t *testing.T) {
-	partitiontest.PartitionTest(t)
-
-	numOfTxns := 10
-	txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0)
-	mod := 1
-
-	// txn with 0 sigs
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	batchSigs, err := getNumberOfBatchableSigsInGroup(txnGroups[mod])
-	require.ErrorIs(t, err, errTxnSigHasNoSig)
-	mod++
-
-	_, signedTxns, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs)
-	batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[0])
-	require.NoError(t, err)
-	require.Equal(t, uint64(1), batchSigs)
-
-	// stateproof txn
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	txnGroups[mod][0].Txn.Type = protocol.StateProofTx
-	txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender
-	batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod])
-	require.NoError(t, err)
-	require.Equal(t, uint64(0), batchSigs)
-	mod++
-
-	// multisig
-	_, mSigTxn, _, _ := generateMultiSigTxn(1, 6, 50, t)
-	batchSigs, err = getNumberOfBatchableSigsInGroup(mSigTxn)
-	require.NoError(t, err)
-	require.Equal(t, uint64(2), batchSigs)
-	mod++
-
-	_, signedTxn, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs)
-
-	// logicsig
-	op, err := logic.AssembleString(`arg 0
-sha256
-byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=
-==`)
-	require.NoError(t, err)
-	s := rand.Intn(len(secrets))
-	txnGroups[mod][0].Sig = crypto.Signature{}
-	txnGroups[mod][0].Txn.Sender = addrs[s]
-	txnGroups[mod][0].Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
-	txnGroups[mod][0].Lsig.Logic = op.Program
-	program := logic.Program(op.Program)
-	txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program)
-	batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod])
-	require.NoError(t, err)
-	require.Equal(t, uint64(0), batchSigs)
-	mod++
-
-	// txn with sig and msig
-	_, signedTxn, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50)
-	txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs)
-	txnGroups[mod][0].Msig = mSigTxn[0].Msig
-	batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod])
-	require.ErrorIs(t, err, errTxnSigNotWellFormed)
-}
-
-// TestStreamVerifierPoolShutdown tests what happens when the exec pool shuts down
-func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger
-	partitiontest.PartitionTest(t)
-
-	// only one transaction should be sufficient for the batch verifier
-	// to realize the pool is terminated and to shut down
-	numOfTxns := 1
-	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, protoMaxGroupSize, 0, 0.5)
-
-	// check the logged information
-	var logBuffer bytes.Buffer
-	log := logging.Base()
-	log.SetOutput(&logBuffer)
-	log.SetLevel(logging.Info)
-
-	// prepare the stream verifier
-	numOfTxnGroups := len(txnGroups)
-	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
-	_, buffLen := verificationPool.BufferSize()
-
-	// make sure the pool is shut down and the buffer is full
-	holdTasks := make(chan interface{})
-	for x := 0; x < buffLen+runtime.NumCPU(); x++ {
-		verificationPool.EnqueueBacklog(context.Background(),
-			func(arg interface{}) interface{} { <-holdTasks; return nil }, nil, nil)
-	}
-	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		// Shutdown will block until all tasks held by holdTasks are released
-		verificationPool.Shutdown()
-	}()
-	// Send more tasks to break the backlog worker after b.pool.Enqueue returns the error
-	for x := 0; x < 100; x++ {
-		verificationPool.EnqueueBacklog(context.Background(),
-			func(arg interface{}) interface{} { <-holdTasks; return nil }, nil, nil)
-	}
-	// release the tasks
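-	// (descriptive note: a receive from a closed channel returns immediately,
-	// so closing holdTasks unblocks every held task at once and lets Shutdown drain)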
-	close(holdTasks)
-
-	// make sure EnqueueBacklog is returning an error
-	for x := 0; x < 10; x++ {
-		err := verificationPool.EnqueueBacklog(context.Background(),
-			func(arg interface{}) interface{} { return nil }, nil, nil)
-		require.Error(t, err, fmt.Sprintf("x = %d", x))
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	cache := MakeVerifiedTransactionCache(50000)
-
-	stxnChan := make(chan *UnverifiedElement)
-	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
-	require.NoError(t, err)
-	sv.Start(ctx)
-
-	errChan := make(chan error)
-
-	var badSigResultCounter int
-	var goodSigResultCounter int
-
-	wg.Add(1)
-	go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
-
-	// When the exec pool shuts down, the batch verifier should gracefully stop
-	// cancel the context so that the test can terminate
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		sv.WaitForStop()
-		cancel()
-	}()
-
-	wg.Add(1)
-	// send txn groups to be verified
-	go func() {
-		defer wg.Done()
-		for _, tg := range txnGroups {
-			select {
-			case <-ctx.Done():
-				break
-			case stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}:
-			}
-		}
-	}()
-	for err := range errChan {
-		require.ErrorIs(t, err, errShuttingDownError)
-	}
-	require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled")
-}
-
-// TestStreamVerifierRestart tests what happens when the context is canceled
-func TestStreamVerifierRestart(t *testing.T) {
-	partitiontest.PartitionTest(t)
-
-	numOfTxns := 1000
-	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5)
-
-	// prepare the stream verifier
-	numOfTxnGroups := len(txnGroups)
-	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
-	defer verificationPool.Shutdown()
-
-	cache := MakeVerifiedTransactionCache(50)
-
-	stxnChan := make(chan *UnverifiedElement)
-	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
-	require.NoError(t, err)
-	sv.Start(ctx)
-
-	errChan := make(chan error)
-
-	var badSigResultCounter int
-	var goodSigResultCounter int
-
-	ctx2, cancel2 := context.WithCancel(context.Background())
-
-	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go processResults(ctx2, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
-
-	wg.Add(1)
-	// send txn groups to be verified
-	go func() {
-		defer wg.Done()
-		for i, tg := range txnGroups {
-			if (i+1)%10 == 0 {
-				cancel()
-				sv.WaitForStop()
-				ctx, cancel = context.WithCancel(context.Background())
-				sv.Start(ctx)
-			}
-			select {
-			case <-ctx2.Done():
-				break
-			case stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}:
-			}
-		}
-		cancel()
-	}()
-	for err := range errChan {
-		require.ErrorIs(t, err, errShuttingDownError)
-	}
-	wg.Wait()
-	sv.WaitForStop()
-	cancel2() // not necessary, but the golint will want to see this
-}
-
-// TestStreamVerifierBlockWatcher runs multiple goroutines to check the concurrency and correctness of the block watcher
-func TestStreamVerifierBlockWatcher(t *testing.T) {
-	partitiontest.PartitionTest(t)
-	blkHdr := createDummyBlockHeader()
-	nbw := MakeNewBlockWatcher(blkHdr)
-	startingRound := blkHdr.Round
-
-	wg := sync.WaitGroup{}
-	count := 100
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for x := 0; x < 100; x++ {
-			blkHdr.Round++
-			nbw.OnNewBlock(bookkeeping.Block{BlockHeader: blkHdr}, ledgercore.StateDelta{})
-			time.Sleep(10 * time.Millisecond)
-			nbw.OnNewBlock(bookkeeping.Block{BlockHeader: blkHdr}, ledgercore.StateDelta{})
-		}
-	}()
-
-	bhStore := make(map[basics.Round]*bookkeeping.BlockHeader)
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for {
-			bh := nbw.getBlockHeader()
-			bhStore[bh.Round] = bh
-			if bh.Round == startingRound+10 {
-				break
-			}
-		}
-	}()
-	wg.Wait()
-	bh := nbw.getBlockHeader()
-	require.Equal(t, uint64(startingRound)+uint64(count), uint64(bh.Round))
-	// There should be no inconsistency after new blocks are added
-	for r, bh := range bhStore {
-		require.Equal(t, r, bh.Round)
-	}
-}
-
-func getSaturatedExecPool(t *testing.T) (execpool.BacklogPool, chan interface{}) {
-	verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
-	_, buffLen := verificationPool.BufferSize()
-
-	// make the buffer full to control when the tasks get executed
-	holdTasks := make(chan interface{})
-	for x := 0; x < buffLen+runtime.NumCPU()+1; x++ {
-		verificationPool.EnqueueBacklog(context.Background(),
-			func(arg interface{}) interface{} {
-				<-holdTasks
-				return nil
-			}, nil, nil)
-	}
-	return verificationPool, holdTasks
-}
-
-// TestStreamVerifierCtxCancel tests the termination when the ctx is canceled
-// To make sure that the batchingLoop is still working on a batch when the
-// ctx is canceled, this test first saturates the exec pool buffer, then
-// sends a txn and immediately cancels the ctx so that the batch is not
-// passed to the exec pool yet, but is in batchingLoop
-func TestStreamVerifierCtxCancel(t *testing.T) {
-	partitiontest.PartitionTest(t)
-
-	verificationPool, holdTasks := getSaturatedExecPool(t)
-	defer verificationPool.Shutdown()
-	ctx, cancel := context.WithCancel(context.Background())
-	cache := MakeVerifiedTransactionCache(50)
-	stxnChan := make(chan *UnverifiedElement)
-	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
-	require.NoError(t, err)
-	sv.Start(ctx)
-
-	var result *VerificationResult
-	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		// no verification tasks should be executed
-		// one result should be returned
-		result = <-resultChan
-	}()
-
-	// send a txn group after the exec pool buffer is full
-	numOfTxns := 1
-	txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0.5)
-	stxnChan <- &UnverifiedElement{TxnGroup: txnGroups[0], BacklogMessage: nil}
-	// cancel the ctx before the sig is sent to the exec pool
-	cancel()
-
-	// the main loop should stop after cancel()
-	sv.WaitForStop()
-
-	// release the tasks
-	close(holdTasks)
-
-	wg.Wait()
-	require.ErrorIs(t, result.Err, errShuttingDownError)
-}
-
-// TestStreamVerifierCtxCancelPoolQueue tests the termination when the ctx is canceled
-// To make sure that the batchingLoop is still working on a batch when the
-// ctx is canceled, this test first saturates the exec pool buffer, then
-// sends a txn and cancels the ctx after multiple waitForNextTxnDuration
-// so that the batch is sent to the pool. Since the pool is saturated,
-// the task will be stuck waiting to be queued when the context is canceled;
-// everything should be gracefully terminated
-func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger
-	partitiontest.PartitionTest(t)
-
-	verificationPool, holdTasks := getSaturatedExecPool(t)
-
-	// check the logged information
-	var logBuffer bytes.Buffer
-	log := logging.Base()
-	log.SetOutput(&logBuffer)
-	log.SetLevel(logging.Info)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	cache := MakeVerifiedTransactionCache(50)
-	stxnChan := make(chan *UnverifiedElement)
-	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
-	require.NoError(t, err)
-	sv.Start(ctx)
-
-	var result *VerificationResult
-	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for {
-			result = <-resultChan
-			// at least one errShuttingDownError is expected
-			if result.Err != errShuttingDownError {
-				continue
-			}
-			break
-		}
-	}()
-
-	// send a txn group after the exec pool buffer is full
-	numOfTxns := 1
-	txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0.5)
-
-	wg.Add(1)
-	// run in separate goroutine because the exec pool is blocked here, and this will not advance
-	// until holdTasks are closed
-	go func() {
-		defer wg.Done()
-		for {
-			select {
-			// Normally, a single txn is sufficient, but the goroutines could be scheduled in such a way that
-			// the single transaction slips through and passes the batch verifier before the exec pool shuts down.
-			// this happens when close(holdTasks) runs and frees the exec pool, and lets the txns get verified, before
-			// verificationPool.Shutdown() executes.
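-			// (descriptive note: re-sending the same group in a loop, instead of
-			// sending it once, keeps the batching loop supplied with work until
-			// ctx.Done fires, whichever goroutine the scheduler runs first)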
- case stxnChan <- &UnverifiedElement{TxnGroup: txnGroups[0], BacklogMessage: nil}: - case <-ctx.Done(): - return - } - } - }() - // cancel the ctx as the sig is not yet sent to the exec pool - // the test might sporadically fail if between sending the txn above - // and the cancelation, 2 x waitForNextTxnDuration elapses (10ms) - time.Sleep(6 * waitForNextTxnDuration) - go func() { - // wait a bit before releasing the tasks, so that the verificationPool ctx first gets canceled - time.Sleep(20 * time.Millisecond) - close(holdTasks) - }() - verificationPool.Shutdown() - - // the main loop should stop before calling cancel() when the exec pool shuts down and returns an error - sv.WaitForStop() - cancel() - - wg.Wait() - require.ErrorIs(t, result.Err, errShuttingDownError) - require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled") -} - -// TestStreamVerifierPostVBlocked tests the behavior when the return channel (result chan) of verified -// transactions is blocked, and checks droppedFromPool counter to confirm the drops -func TestStreamVerifierPostVBlocked(t *testing.T) { - partitiontest.PartitionTest(t) - - // prepare the stream verifier - verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t) - defer verificationPool.Shutdown() - errChan := make(chan error) - var badSigResultCounter int - var goodSigResultCounter int - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - cache := MakeVerifiedTransactionCache(50) - - txBacklogSizeMod := txBacklogSize / 20 - - stxnChan := make(chan *UnverifiedElement) - resultChan := make(chan *VerificationResult, txBacklogSizeMod) - droppedChan := make(chan *UnverifiedElement) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) - require.NoError(t, err) - - defer close(droppedChan) - go func() { - for range droppedChan { - droppedFromPool.Inc(nil) - } - }() - - // start the verifier - sv.Start(ctx) - overflow := 3 - // send txBacklogSizeMod + 3 transactions to overflow the result buffer - numOfTxns := txBacklogSizeMod + overflow - txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5) - numOfTxnGroups := len(txnGroups) - for _, tg := range txnGroups { - stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil} - } - - var droppedPool uint64 - // wait until overflow transactions are dropped - for w := 0; w < 100; w++ { - droppedPool = droppedFromPool.GetUint64Value() - if droppedPool >= uint64(overflow) { - break - } - time.Sleep(time.Millisecond * 20) - } - - require.Equal(t, uint64(overflow), droppedPool) - - wg := sync.WaitGroup{} - wg.Add(1) - // make sure the other results are fine - go processResults(ctx, errChan, resultChan, numOfTxnGroups-overflow, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg) - - for err := range errChan { - require.ErrorIs(t, err, errShuttingDownError) - fmt.Println(badTxnGroups) - } - - // check if more transactions can be accepted - errChan = make(chan error) - - wg.Add(1) - // make sure the other results are fine - txnGroups, badTxnGroups2 := getSignedTransactions(numOfTxns, 1, numOfTxns, 0.5) - // need to combine these, since left overs from the previous one could still come out - for b := range badTxnGroups2 { - badTxnGroups[b] = struct{}{} - } - go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg) - - for _, 
tg := range txnGroups {
-		stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
-	}
-
-	for err := range errChan {
-		require.ErrorIs(t, err, errShuttingDownError)
-		fmt.Println(badTxnGroups)
-	}
-
-	wg.Wait()
-}
-
-func TestStreamVerifierMakeStreamVerifierErr(t *testing.T) {
-	partitiontest.PartitionTest(t)
-	_, err := MakeStreamVerifier(nil, nil, nil, &DummyLedgerForSignature{badHdr: true}, nil, nil)
-	require.Error(t, err)
-}
-
-// TestStreamVerifierCancelWhenPooled tests the case where the ctx is canceled after the verification
-// task is queued to the exec pool and before the task is executed in the pool
-func TestStreamVerifierCancelWhenPooled(t *testing.T) {
-	partitiontest.PartitionTest(t)
-	numOfTxns := 1000
-	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5)
-
-	// prepare the stream verifier
-	numOfTxnGroups := len(txnGroups)
-	execPool := execpool.MakePool(t)
-	defer execPool.Shutdown()
-	verificationPool := execpool.MakeBacklog(execPool, 64, execpool.LowPriority, t)
-	defer verificationPool.Shutdown()
-
-	cache := MakeVerifiedTransactionCache(50)
-
-	stxnChan := make(chan *UnverifiedElement)
-	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
-	ctx, cancel := context.WithCancel(context.Background())
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
-	require.NoError(t, err)
-	sv.Start(ctx)
-
-	errChan := make(chan error)
-
-	var badSigResultCounter int
-	var goodSigResultCounter int
-
-	ctx2, cancel2 := context.WithCancel(context.Background())
-
-	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go processResults(ctx2, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
-
-	wg.Add(1)
-	// send txn groups to be verified
-	go func() {
-		defer wg.Done()
-		for _, tg := range txnGroups {
-			stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
-		}
-		// cancel the ctx, and expect at least one task queued to the pool but not yet executed
-		cancel()
-	}()
-	for err := range errChan {
-		require.ErrorIs(t, err, errShuttingDownError)
-	}
-	wg.Wait()
-	sv.WaitForStop()
-	cancel2() // not necessary, but the golint will want to see this
-}

From 42a112a2582504bcb5622ef477a34eb37d5730d5 Mon Sep 17 00:00:00 2001
From: cce <51567+cce@users.noreply.github.com>
Date: Wed, 8 Feb 2023 13:59:51 -0500
Subject: [PATCH 37/81] lint: check for references to loop variables (#5105)

* use more strict gosec linter to find refs to loop variables (instead of
  exportloopref)

* fix loop variable pointer error (see the sketch below)
---
 .golangci-warnings.yml                    |  4 ++++
 .golangci.yml                             |  2 ++
 cmd/goal/clerk.go                         |  4 ++--
 cmd/opdoc/opdoc.go                        |  4 ++--
 daemon/algod/api/server/v2/utils.go       |  4 ++--
 data/pools/transactionPool.go             |  2 +-
 data/transactions/logic/assembler_test.go |  6 ++---
 data/transactions/verify/txn.go           |  2 +-
 ledger/store/accountsV2.go                |  4 ++--
 ledger/store/schema.go                    |  2 +-
 scripts/check_deps.sh                     | 29 ++++++++++++++++++-----
 11 files changed, 43 insertions(+), 20 deletions(-)
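The class of bug this commit fixes, as a minimal self-contained sketch (hypothetical names, not code from this patch): before Go 1.22, a range loop reuses a single loop variable across iterations, so taking its address makes every stored pointer alias the same variable, which is what gosec rule G601 flags.

	package main

	import "fmt"

	type record struct{ n int }

	func main() {
		records := []record{{1}, {2}, {3}}

		var bad []*record
		for _, r := range records {
			bad = append(bad, &r) // G601: &r aliases the single shared loop variable
		}
		fmt.Println(*bad[0], *bad[1], *bad[2]) // prints "{3} {3} {3}" under pre-1.22 loop semantics

		var good []*record
		for i := range records {
			good = append(good, &records[i]) // index into the slice, as the fixes in this patch do
		}
		fmt.Println(*good[0], *good[1], *good[2]) // prints "{1} {2} {3}"
	}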
diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml
index f8d2063475..440a07f7d2 100644
--- a/.golangci-warnings.yml
+++ b/.golangci-warnings.yml
@@ -6,12 +6,15 @@ linters:
   disable-all: true
   enable:
     - deadcode
+    - gosec
     - partitiontest
     - structcheck
     - varcheck
     - unused
 
 linters-settings:
+  gosec: # we are mostly only interested in G601
+    excludes: [G101, G103, G104, G107, G202, G301, G302, G303, G304, G306, G307, G404]
   custom:
     partitiontest:
       path: cmd/partitiontest_linter/plugin.so
@@ -51,6 +54,7 @@ issues:
     - path: _test\.go
       linters:
         - deadcode
+        - gosec
        - structcheck
        - varcheck
        - unused
diff --git a/.golangci.yml b/.golangci.yml
index 335da20685..46463c18bf 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -7,6 +7,7 @@ linters:
   disable-all: true
   enable:
     - errcheck
+    - exportloopref
     - gofmt
     - gosimple
     - govet
@@ -107,6 +108,7 @@ issues:
     - path: _test\.go
       linters:
         - errcheck
+        # - exportloopref
         # - gofmt
         - gosimple
         # - govet
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index ed226733d0..2147828947 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -221,8 +221,8 @@ func createSignedTransaction(client libgoal.Client, signTx bool, dataDir string,
 
 func writeSignedTxnsToFile(stxns []transactions.SignedTxn, filename string) error {
 	var outData []byte
-	for _, stxn := range stxns {
-		outData = append(outData, protocol.Encode(&stxn)...)
+	for i := range stxns {
+		outData = append(outData, protocol.Encode(&stxns[i])...)
 	}
 
 	return writeFile(filename, outData, 0600)
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index e0b69fa111..a7f1894018 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -225,8 +225,8 @@ func opsToMarkdown(out io.Writer) (err error) {
 	out.Write([]byte("# Opcodes\n\nOps have a 'cost' of 1 unless otherwise specified.\n\n"))
 	opSpecs := logic.OpcodesByVersion(uint64(docVersion))
 	written := make(map[string]bool)
-	for _, spec := range opSpecs {
-		err = opToMarkdown(out, &spec, written)
+	for i := range opSpecs {
+		err = opToMarkdown(out, &opSpecs[i], written)
 		if err != nil {
 			return
 		}
diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go
index 32f18ea7d4..cf2648e8d4 100644
--- a/daemon/algod/api/server/v2/utils.go
+++ b/daemon/algod/api/server/v2/utils.go
@@ -310,8 +310,8 @@ func convertLogs(txn node.TxnWithStatus) *[][]byte {
 
 func convertInners(txn *node.TxnWithStatus) *[]PreEncodedTxInfo {
 	inner := make([]PreEncodedTxInfo, len(txn.ApplyData.EvalDelta.InnerTxns))
-	for i, itxn := range txn.ApplyData.EvalDelta.InnerTxns {
-		inner[i] = convertInnerTxn(&itxn)
+	for i := range txn.ApplyData.EvalDelta.InnerTxns {
+		inner[i] = convertInnerTxn(&txn.ApplyData.EvalDelta.InnerTxns[i])
 	}
 	return &inner
 }
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index bed4e17e68..c99391ccc4 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -869,7 +869,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
 		}
 		stats.TotalLength += uint64(encodedLen)
 		if txib.Txn.Type == protocol.StateProofTx {
-			stats.StateProofStats = pool.getStateProofStats(&txib, encodedLen)
+			stats.StateProofStats = pool.getStateProofStats(&payset[i], encodedLen)
 		}
 	}
 	stats.AverageFee = totalFees / uint64(stats.IncludedCount)
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 9158d2f268..665741e96e 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -659,9 +659,9 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
 		}
 	} else {
 		var found *lineError
-		for _, err := range errors {
-			if err.Line == exp.l {
-				found = &err
+		for i := range errors {
+			if errors[i].Line == exp.l {
+				found = &errors[i]
 				break
 			}
 		}
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 320874ee5e..71f75dc0c0 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -208,7 
+208,7 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl minFeeCount := uint64(0) feesPaid := uint64(0) for i, stxn := range stxs { - prepErr := txnBatchPrep(&stxn, i, groupCtx, verifier, evalTracer) + prepErr := txnBatchPrep(&stxs[i], i, groupCtx, verifier, evalTracer) if prepErr != nil { // re-wrap the error with more details prepErr.err = fmt.Errorf("transaction %+v invalid : %w", stxn, prepErr.err) diff --git a/ledger/store/accountsV2.go b/ledger/store/accountsV2.go index cb2c1d34eb..ecaaf5970f 100644 --- a/ledger/store/accountsV2.go +++ b/ledger/store/accountsV2.go @@ -710,8 +710,8 @@ func (w *accountsV2Writer) AccountsPutOnlineRoundParams(onlineRoundParamsData [] return err } - for i, onlineRoundParams := range onlineRoundParamsData { - _, err = insertStmt.Exec(startRound+basics.Round(i), protocol.Encode(&onlineRoundParams)) + for i := range onlineRoundParamsData { + _, err = insertStmt.Exec(startRound+basics.Round(i), protocol.Encode(&onlineRoundParamsData[i])) if err != nil { return err } diff --git a/ledger/store/schema.go b/ledger/store/schema.go index acbd102b15..339c660a11 100644 --- a/ledger/store/schema.go +++ b/ledger/store/schema.go @@ -205,7 +205,7 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData for addr, data := range initAccounts { _, err = tx.Exec("INSERT INTO accountbase (address, data) VALUES (?, ?)", - addr[:], protocol.Encode(&data)) + addr[:], protocol.Encode(&data)) //nolint:gosec // Encode does not hold on to reference if err != nil { return true, err } diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh index a42405733d..95c7599f78 100755 --- a/scripts/check_deps.sh +++ b/scripts/check_deps.sh @@ -35,19 +35,36 @@ missing_dep() { } GO_DEPS=( - "$GO_BIN/stringer" - "$GO_BIN/msgp" - "$GO_BIN/golangci-lint" + "msgp" + "golangci-lint" + "oapi-codegen" + "swagger" ) +check_go_binary_version() { + binary_name=$1 + expected_version=$(grep "$binary_name" scripts/buildtools/versions | awk '{print $2}') + actual_version=$(go version -m "$GO_BIN/$binary_name" | awk 'NR==3 {print $3}') + + if [ "$expected_version" != "$actual_version" ]; then + echo "$YELLOW_FG[WARNING]$END_FG_COLOR $binary_name version mismatch, expected $expected_version, but got $actual_version" + fi +} + check_deps() { - for path in ${GO_DEPS[*]} + for dep in ${GO_DEPS[*]} do - if [ ! -f "$path" ] + if [ ! -f "$GO_BIN/$dep" ] then # Parameter expansion is faster than invoking another process. # https://www.linuxjournal.com/content/bash-parameter-expansion - missing_dep "${path##*/}" + missing_dep "${dep##*/}" + fi + + # go 1.17 on arm64 macs has an issue checking binaries with "go version", skip version check + if [[ "$(uname)" != "Darwin" ]] || [[ "$(uname -m)" != "arm64" ]] || ! 
[[ "$(go version | awk '{print $3}')" < "go1.17" ]] + then + check_go_binary_version "$dep" fi done From 4bc725070054b81816ff2c1da5f347dd152b879c Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Fri, 10 Feb 2023 10:25:07 -0800 Subject: [PATCH 38/81] Update state delta type, handler (#5123) --- daemon/algod/api/algod.oas2.json | 279 +---------- daemon/algod/api/algod.oas3.yml | 304 ++---------- daemon/algod/api/server/v2/account.go | 52 -- daemon/algod/api/server/v2/delta.go | 169 ------- daemon/algod/api/server/v2/delta_test.go | 163 ------- .../api/server/v2/generated/data/routes.go | 347 +++++++------- .../v2/generated/experimental/routes.go | 329 +++++++------ .../api/server/v2/generated/model/types.go | 170 +------ .../nonparticipating/private/routes.go | 331 +++++++------ .../nonparticipating/public/routes.go | 444 +++++++++--------- .../generated/participating/private/routes.go | 334 +++++++------ .../generated/participating/public/routes.go | 350 +++++++------- daemon/algod/api/server/v2/handlers.go | 15 +- .../server/v2/test/handlers_resources_test.go | 12 +- .../algod/api/server/v2/test/handlers_test.go | 52 +- daemon/algod/api/server/v2/test/helpers.go | 23 - tools/debug/vbconvert/main.go | 22 +- 17 files changed, 1159 insertions(+), 2237 deletions(-) delete mode 100644 daemon/algod/api/server/v2/delta.go delete mode 100644 daemon/algod/api/server/v2/delta_test.go diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index b2bd99e36c..014539ce6a 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -1438,7 +1438,8 @@ "data" ], "produces": [ - "application/json" + "application/json", + "application/msgpack" ], "schemes": [ "http" @@ -1453,6 +1454,9 @@ "in": "path", "required": true, "minimum": 0 + }, + { + "$ref": "#/parameters/format" } ], "responses": { @@ -2366,6 +2370,11 @@ } }, "definitions": { + "LedgerStateDelta": { + "description": "Ledger StateDelta object", + "type": "object", + "x-algorand-format": "StateDelta" + }, "Account": { "description": "Account information at a given round.\n\nDefinition:\ndata/basics/userBalance.go : AccountData\n", "type": "object", @@ -2499,274 +2508,6 @@ } } }, - "LedgerStateDelta": { - "description": "Contains ledger updates.", - "type": "object", - "required": [ - ], - "properties": { - "accts": { - "description": "AccountDeltas object", - "$ref": "#/definitions/AccountDeltas" - }, - "kv-mods": { - "description": "Array of KV Deltas", - "type": "array", - "items": { - "$ref": "#/definitions/KvDelta" - } - }, - "tx-leases": { - "description": "List of transaction leases", - "type": "array", - "items": { - "$ref": "#/definitions/TxLease" - } - }, - "modified-apps": { - "description": "List of modified Apps", - "type": "array", - "items": { - "$ref": "#/definitions/ModifiedApp" - } - }, - "modified-assets": { - "description": "List of modified Assets", - "type": "array", - "items": { - "$ref": "#/definitions/ModifiedAsset" - } - }, - "state-proof-next": { - "description": "Next round for which we expect a state proof", - "type": "integer" - }, - "prev-timestamp": { - "description": "Previous block timestamp", - "type": "integer" - }, - "totals": { - "description": "Account Totals", - "$ref": "#/definitions/AccountTotals" - } - } - }, - "AccountTotals": { - "description": "Total Algos in the system grouped by account status", - "type": "object", - "required": [ - "online", - "offline", - "not-participating", - "rewards-level" - ], - "properties": { - "online": { - "description": 
"Amount of stake in online accounts", - "type": "integer" - }, - "offline": { - "description": "Amount of stake in offline accounts", - "type": "integer" - }, - "not-participating": { - "description": "Amount of stake in non-participating accounts", - "type": "integer" - }, - "rewards-level": { - "description": "Total number of algos received per reward unit since genesis", - "type": "integer" - } - } - }, - "AccountDeltas": { - "description": "Exposes deltas for account based resources in a single round", - "type": "object", - "properties": { - "accounts": { - "description": "Array of Account updates for the round", - "type": "array", - "items": { - "$ref": "#/definitions/AccountBalanceRecord" - } - }, - "apps": { - "description": "Array of App updates for the round.", - "type": "array", - "items": { - "$ref": "#/definitions/AppResourceRecord" - } - }, - "assets": { - "description": "Array of Asset updates for the round.", - "type": "array", - "items": { - "$ref": "#/definitions/AssetResourceRecord" - } - } - } - }, - "TxLease": { - "description": "", - "type": "object", - "required": [ - "sender", - "lease", - "expiration" - ], - "properties": { - "sender": { - "description": "Address of the lease sender", - "type": "string" - }, - "lease": { - "description": "Lease data", - "type": "string", - "format": "byte" - }, - "expiration": { - "description": "Round that the lease expires", - "type": "integer" - } - } - }, - "ModifiedAsset": { - "description": "Asset which was created or deleted.", - "type": "object", - "required": [ - "id", - "created", - "creator" - ], - "properties": { - "id": { - "description": "Asset Id", - "type": "integer" - }, - "created": { - "description": "Created if true, deleted if false", - "type": "boolean" - }, - "creator": { - "description": "Address of the creator.", - "type": "string" - } - } - }, - "ModifiedApp": { - "description": "App which was created or deleted.", - "type": "object", - "required": [ - "id", - "created", - "creator" - ], - "properties": { - "id": { - "description": "App Id", - "type": "integer" - }, - "created": { - "description": "Created if true, deleted if false", - "type": "boolean" - }, - "creator": { - "description": "Address of the creator.", - "type": "string" - } - } - }, - "AccountBalanceRecord": { - "description": "Account and its address", - "type": "object", - "required": [ - "address", - "account-data" - ], - "properties": { - "address": { - "description": "Address of the updated account.", - "type": "string" - }, - "account-data": { - "description": "Updated account data.", - "$ref": "#/definitions/Account" - } - } - }, - "AppResourceRecord": { - "description": "Represents AppParams and AppLocalStateDelta in deltas", - "type": "object", - "required": [ - "app-index", - "address", - "app-deleted", - "app-local-state-deleted" - ], - "properties": { - "app-index": { - "description": "App index", - "type": "integer", - "x-algorand-format": "uint64" - }, - "address": { - "description": "App account address", - "type": "string" - }, - "app-deleted": { - "description": "Whether the app was deleted", - "type": "boolean" - }, - "app-local-state-deleted": { - "description": "Whether the app local state was deleted", - "type": "boolean" - }, - "app-params": { - "description": "App params", - "$ref": "#/definitions/ApplicationParams" - }, - "app-local-state": { - "description": "App local state", - "$ref": "#/definitions/ApplicationLocalState" - } - } - }, - "AssetResourceRecord": { - "description": "Represents AssetParams and 
AssetHolding in deltas", - "required": [ - "asset-index", - "address", - "asset-deleted", - "asset-holding-deleted" - ], - "properties": { - "asset-index": { - "description": "Index of the asset", - "type": "integer", - "x-algorand-format": "uint64" - }, - "address": { - "description": "Account address of the asset", - "type": "string" - }, - "asset-deleted": { - "description": "Whether the asset was deleted", - "type": "boolean" - }, - "asset-params": { - "description": "Asset params", - "$ref": "#/definitions/AssetParams" - }, - "asset-holding-deleted": { - "description": "Whether the asset holding was deleted", - "type": "boolean" - }, - "asset-holding": { - "description": "The asset holding", - "$ref": "#/definitions/AssetHolding" - } - } - }, "AccountParticipation": { "description": "AccountParticipation describes the parameters used by this account in consensus protocol.", "type": "object", diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index fa8d86f2ba..3e5947b61b 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -1055,50 +1055,6 @@ ], "type": "object" }, - "AccountBalanceRecord": { - "description": "Account and its address", - "properties": { - "account-data": { - "$ref": "#/components/schemas/Account" - }, - "address": { - "description": "Address of the updated account.", - "type": "string" - } - }, - "required": [ - "account-data", - "address" - ], - "type": "object" - }, - "AccountDeltas": { - "description": "Exposes deltas for account based resources in a single round", - "properties": { - "accounts": { - "description": "Array of Account updates for the round", - "items": { - "$ref": "#/components/schemas/AccountBalanceRecord" - }, - "type": "array" - }, - "apps": { - "description": "Array of App updates for the round.", - "items": { - "$ref": "#/components/schemas/AppResourceRecord" - }, - "type": "array" - }, - "assets": { - "description": "Array of Asset updates for the round.", - "items": { - "$ref": "#/components/schemas/AssetResourceRecord" - }, - "type": "array" - } - }, - "type": "object" - }, "AccountParticipation": { "description": "AccountParticipation describes the parameters used by this account in consensus protocol.", "properties": { @@ -1158,69 +1114,6 @@ ], "type": "object" }, - "AccountTotals": { - "description": "Total Algos in the system grouped by account status", - "properties": { - "not-participating": { - "description": "Amount of stake in non-participating accounts", - "type": "integer" - }, - "offline": { - "description": "Amount of stake in offline accounts", - "type": "integer" - }, - "online": { - "description": "Amount of stake in online accounts", - "type": "integer" - }, - "rewards-level": { - "description": "Total number of algos received per reward unit since genesis", - "type": "integer" - } - }, - "required": [ - "not-participating", - "offline", - "online", - "rewards-level" - ], - "type": "object" - }, - "AppResourceRecord": { - "description": "Represents AppParams and AppLocalStateDelta in deltas", - "properties": { - "address": { - "description": "App account address", - "type": "string" - }, - "app-deleted": { - "description": "Whether the app was deleted", - "type": "boolean" - }, - "app-index": { - "description": "App index", - "type": "integer", - "x-algorand-format": "uint64" - }, - "app-local-state": { - "$ref": "#/components/schemas/ApplicationLocalState" - }, - "app-local-state-deleted": { - "description": "Whether the app local state was deleted", - "type": 
"boolean" - }, - "app-params": { - "$ref": "#/components/schemas/ApplicationParams" - } - }, - "required": [ - "address", - "app-deleted", - "app-index", - "app-local-state-deleted" - ], - "type": "object" - }, "Application": { "description": "Application index and its parameters", "properties": { @@ -1443,41 +1336,6 @@ ], "type": "object" }, - "AssetResourceRecord": { - "description": "Represents AssetParams and AssetHolding in deltas", - "properties": { - "address": { - "description": "Account address of the asset", - "type": "string" - }, - "asset-deleted": { - "description": "Whether the asset was deleted", - "type": "boolean" - }, - "asset-holding": { - "$ref": "#/components/schemas/AssetHolding" - }, - "asset-holding-deleted": { - "description": "Whether the asset holding was deleted", - "type": "boolean" - }, - "asset-index": { - "description": "Index of the asset", - "type": "integer", - "x-algorand-format": "uint64" - }, - "asset-params": { - "$ref": "#/components/schemas/AssetParams" - } - }, - "required": [ - "address", - "asset-deleted", - "asset-holding-deleted", - "asset-index" - ], - "type": "object" - }, "Box": { "description": "Box name and its content.", "properties": { @@ -1812,52 +1670,9 @@ "type": "object" }, "LedgerStateDelta": { - "description": "Contains ledger updates.", - "properties": { - "accts": { - "$ref": "#/components/schemas/AccountDeltas" - }, - "kv-mods": { - "description": "Array of KV Deltas", - "items": { - "$ref": "#/components/schemas/KvDelta" - }, - "type": "array" - }, - "modified-apps": { - "description": "List of modified Apps", - "items": { - "$ref": "#/components/schemas/ModifiedApp" - }, - "type": "array" - }, - "modified-assets": { - "description": "List of modified Assets", - "items": { - "$ref": "#/components/schemas/ModifiedAsset" - }, - "type": "array" - }, - "prev-timestamp": { - "description": "Previous block timestamp", - "type": "integer" - }, - "state-proof-next": { - "description": "Next round for which we expect a state proof", - "type": "integer" - }, - "totals": { - "$ref": "#/components/schemas/AccountTotals" - }, - "tx-leases": { - "description": "List of transaction leases", - "items": { - "$ref": "#/components/schemas/TxLease" - }, - "type": "array" - } - }, - "type": "object" + "description": "Ledger StateDelta object", + "type": "object", + "x-algorand-format": "StateDelta" }, "LightBlockHeaderProof": { "description": "Proof of membership and position of a light block header.", @@ -1884,52 +1699,6 @@ ], "type": "object" }, - "ModifiedApp": { - "description": "App which was created or deleted.", - "properties": { - "created": { - "description": "Created if true, deleted if false", - "type": "boolean" - }, - "creator": { - "description": "Address of the creator.", - "type": "string" - }, - "id": { - "description": "App Id", - "type": "integer" - } - }, - "required": [ - "created", - "creator", - "id" - ], - "type": "object" - }, - "ModifiedAsset": { - "description": "Asset which was created or deleted.", - "properties": { - "created": { - "description": "Created if true, deleted if false", - "type": "boolean" - }, - "creator": { - "description": "Address of the creator.", - "type": "string" - }, - "id": { - "description": "Asset Id", - "type": "integer" - } - }, - "required": [ - "created", - "creator", - "id" - ], - "type": "object" - }, "ParticipationKey": { "description": "Represents a participation key used by the node.", "properties": { @@ -2166,31 +1935,6 @@ ], "type": "object" }, - "TxLease": { - "description": "", - 
"properties": { - "expiration": { - "description": "Round that the lease expires", - "type": "integer" - }, - "lease": { - "description": "Lease data", - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "sender": { - "description": "Address of the lease sender", - "type": "string" - } - }, - "required": [ - "expiration", - "lease", - "sender" - ], - "type": "object" - }, "Version": { "description": "algod version information.", "properties": { @@ -3889,6 +3633,18 @@ "minimum": 0, "type": "integer" } + }, + { + "description": "Configures whether the response object is JSON or MessagePack encoded.", + "in": "query", + "name": "format", + "schema": { + "enum": [ + "json", + "msgpack" + ], + "type": "string" + } } ], "responses": { @@ -3898,6 +3654,11 @@ "schema": { "$ref": "#/components/schemas/LedgerStateDelta" } + }, + "application/msgpack": { + "schema": { + "$ref": "#/components/schemas/LedgerStateDelta" + } } }, "description": "Contains ledger deltas" @@ -3908,6 +3669,11 @@ "schema": { "$ref": "#/components/schemas/ErrorResponse" } + }, + "application/msgpack": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } } }, "description": "Invalid API Token" @@ -3918,6 +3684,11 @@ "schema": { "$ref": "#/components/schemas/ErrorResponse" } + }, + "application/msgpack": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } } }, "description": "Could not find a delta for round" @@ -3928,6 +3699,11 @@ "schema": { "$ref": "#/components/schemas/ErrorResponse" } + }, + "application/msgpack": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } } }, "description": "timed out on request" @@ -3938,6 +3714,11 @@ "schema": { "$ref": "#/components/schemas/ErrorResponse" } + }, + "application/msgpack": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } } }, "description": "Internal Error" @@ -3948,6 +3729,11 @@ "schema": { "$ref": "#/components/schemas/ErrorResponse" } + }, + "application/msgpack": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } } }, "description": "Service Temporarily Unavailable" diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go index c722ab6825..9c25021b9c 100644 --- a/daemon/algod/api/server/v2/account.go +++ b/daemon/algod/api/server/v2/account.go @@ -26,7 +26,6 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/ledgercore" ) // AssetHolding converts between basics.AssetHolding and model.AssetHolding @@ -140,57 +139,6 @@ func AccountDataToAccount( }, nil } -// ledgercoreADToAccount converts a ledgercore.AccountData to model.Account -func ledgercoreADToAccount(addr string, amntWithoutPendingRewards uint64, rnd uint64, - consensus *config.ConsensusParams, ad ledgercore.AccountData) (model.Account, error) { - pendingRewards, overflowed := basics.OSubA(ad.MicroAlgos, basics.MicroAlgos{Raw: amntWithoutPendingRewards}) - if overflowed { - return model.Account{}, errors.New("overflow on pending reward calculation") - } - var apiParticipation *model.AccountParticipation - if ad.VoteID != (crypto.OneTimeSignatureVerifier{}) { - apiParticipation = &model.AccountParticipation{ - VoteParticipationKey: ad.VoteID[:], - SelectionParticipationKey: ad.SelectionID[:], - VoteFirstValid: uint64(ad.VoteFirstValid), - VoteLastValid: 
uint64(ad.VoteLastValid),
-			VoteKeyDilution:           ad.VoteKeyDilution,
-		}
-		if !ad.StateProofID.IsEmpty() {
-			tmp := ad.StateProofID[:]
-			apiParticipation.StateProofKey = &tmp
-		}
-	}
-	var authAddr *string = nil
-	if !ad.AuthAddr.IsZero() {
-		authAddr = strOrNil(ad.AuthAddr.String())
-	}
-	return model.Account{
-		Address:                     addr,
-		Amount:                      ad.MicroAlgos.Raw,
-		AmountWithoutPendingRewards: amntWithoutPendingRewards,
-		AppsTotalExtraPages:         numOrNil(uint64(ad.TotalExtraAppPages)),
-		AppsTotalSchema: &model.ApplicationStateSchema{
-			NumUint:      ad.TotalAppSchema.NumUint,
-			NumByteSlice: ad.TotalAppSchema.NumByteSlice,
-		},
-		AuthAddr:           authAddr,
-		MinBalance:         ad.MinBalance(consensus).Raw,
-		Participation:      apiParticipation,
-		PendingRewards:     pendingRewards.Raw,
-		RewardBase:         numOrNil(ad.RewardsBase),
-		Rewards:            ad.RewardedMicroAlgos.Raw,
-		Round:              rnd,
-		Status:             ad.Status.String(),
-		TotalAppsOptedIn:   ad.TotalAppLocalStates,
-		TotalAssetsOptedIn: ad.TotalAssets,
-		TotalBoxBytes:      numOrNil(ad.TotalBoxBytes),
-		TotalBoxes:         numOrNil(ad.TotalBoxes),
-		TotalCreatedApps:   ad.TotalAppParams,
-		TotalCreatedAssets: ad.TotalAssetParams,
-	}, nil
-}
-
 func convertTKVToGenerated(tkv *basics.TealKeyValue) *model.TealKeyValueStore {
 	if tkv == nil || len(*tkv) == 0 {
 		return nil
diff --git a/daemon/algod/api/server/v2/delta.go b/daemon/algod/api/server/v2/delta.go
deleted file mode 100644
index 4c4058fcde..0000000000
--- a/daemon/algod/api/server/v2/delta.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
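The conversion layer being deleted in this patch existed only to hand-translate ledgercore.StateDelta into the generated model types; the spec changes above instead expose the delta as an opaque object ("x-algorand-format": "StateDelta") and add a "format" query parameter selecting JSON or MessagePack. A minimal client-side sketch of the new behavior, standard library only: fetchStateDelta is a hypothetical helper name, while the /v2/deltas/{round} path, the format parameter, and the X-Algo-API-Token header come from the spec.

package deltaclient

import (
	"fmt"
	"io"
	"net/http"
)

// fetchStateDelta requests the state delta for a round from algod and
// returns the raw response body. With format=msgpack the body is the
// canonical msgpack encoding of ledgercore.StateDelta; with format=json
// (the default) it is JSON.
func fetchStateDelta(server, token string, round uint64) ([]byte, error) {
	url := fmt.Sprintf("%s/v2/deltas/%d?format=msgpack", server, round)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-Algo-API-Token", token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET %s: %s", url, resp.Status)
	}
	return io.ReadAll(resp.Body)
}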
- -package v2 - -import ( - "errors" - "fmt" - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/ledgercore" -) - -// convertAppResourceRecordToGenerated takes ledgercore.AppResourceRecord and converts it to v2.model.AppResourceRecord -func convertAppResourceRecordToGenerated(app ledgercore.AppResourceRecord) model.AppResourceRecord { - var appLocalState *model.ApplicationLocalState = nil - if app.State.LocalState != nil { - s := AppLocalState(*app.State.LocalState, app.Aidx) - appLocalState = &s - } - var appParams *model.ApplicationParams = nil - if app.Params.Params != nil { - p := AppParamsToApplication(app.Addr.String(), app.Aidx, app.Params.Params).Params - appParams = &p - } - return model.AppResourceRecord{ - Address: app.Addr.String(), - AppIndex: uint64(app.Aidx), - AppDeleted: app.Params.Deleted, - AppParams: appParams, - AppLocalStateDeleted: app.State.Deleted, - AppLocalState: appLocalState, - } -} - -// convertAssetResourceRecordToGenerated takes ledgercore.AppResourceRecord and converts it to v2.model.AppResourceRecord -func convertAssetResourceRecordToGenerated(asset ledgercore.AssetResourceRecord) model.AssetResourceRecord { - var assetHolding *model.AssetHolding = nil - if asset.Holding.Holding != nil { - a := AssetHolding(*asset.Holding.Holding, asset.Aidx) - assetHolding = &a - } - var assetParams *model.AssetParams = nil - if asset.Params.Params != nil { - a := AssetParamsToAsset(asset.Addr.String(), asset.Aidx, asset.Params.Params) - assetParams = &a.Params - } - return model.AssetResourceRecord{ - Address: asset.Addr.String(), - AssetIndex: uint64(asset.Aidx), - AssetHoldingDeleted: asset.Holding.Deleted, - AssetHolding: assetHolding, - AssetParams: assetParams, - AssetDeleted: asset.Params.Deleted, - } -} - -// StateDeltaToLedgerDelta converts ledgercore.StateDelta to v2.model.LedgerStateDelta -func StateDeltaToLedgerDelta(sDelta ledgercore.StateDelta, consensus config.ConsensusParams) (response model.LedgerStateDelta, err error) { - rewardsLevel := sDelta.Hdr.RewardsLevel - round := sDelta.Hdr.Round - - var accts []model.AccountBalanceRecord - var apps []model.AppResourceRecord - var assets []model.AssetResourceRecord - var keyValues []model.KvDelta - var modifiedApps []model.ModifiedApp - var modifiedAssets []model.ModifiedAsset - var txLeases []model.TxLease - - for key, kvDelta := range sDelta.KvMods { - var keyBytes = []byte(key) - var valueBytes = kvDelta.Data - keyValues = append(keyValues, model.KvDelta{ - Key: &keyBytes, - Value: &valueBytes, - }) - } - - for _, record := range sDelta.Accts.Accts { - var ot basics.OverflowTracker - pendingRewards := basics.PendingRewards(&ot, consensus, record.MicroAlgos, record.RewardsBase, rewardsLevel) - - amountWithoutPendingRewards, overflowed := basics.OSubA(record.MicroAlgos, pendingRewards) - if overflowed { - return response, errors.New("overflow on pending reward calculation") - } - - a, err := ledgercoreADToAccount(record.Addr.String(), amountWithoutPendingRewards.Raw, uint64(round), &consensus, record.AccountData) - if err != nil { - return response, err - } - - accts = append(accts, model.AccountBalanceRecord{ - AccountData: a, - Address: record.Addr.String(), - }) - } - - for _, app := range sDelta.Accts.GetAllAppResources() { - apps = append(apps, convertAppResourceRecordToGenerated(app)) - } - - for _, asset := range 
sDelta.Accts.GetAllAssetResources() {
-		assets = append(assets, convertAssetResourceRecordToGenerated(asset))
-	}
-
-	for createIdx, mod := range sDelta.Creatables {
-		switch mod.Ctype {
-		case basics.AppCreatable:
-			modifiedApps = append(modifiedApps, model.ModifiedApp{
-				Created: mod.Created,
-				Creator: mod.Creator.String(),
-				Id:      uint64(createIdx),
-			})
-		case basics.AssetCreatable:
-			modifiedAssets = append(modifiedAssets, model.ModifiedAsset{
-				Created: mod.Created,
-				Creator: mod.Creator.String(),
-				Id:      uint64(createIdx),
-			})
-		default:
-			return response, fmt.Errorf("unable to determine type of creatable for modified creatable with index %d", createIdx)
-		}
-	}
-
-	for lease, expRnd := range sDelta.Txleases {
-		txLeases = append(txLeases, model.TxLease{
-			Expiration: uint64(expRnd),
-			Lease:      lease.Lease[:],
-			Sender:     lease.Sender.String(),
-		})
-	}
-
-	response = model.LedgerStateDeltaResponse{
-		Accts: &model.AccountDeltas{
-			Accounts: &accts,
-			Apps:     &apps,
-			Assets:   &assets,
-		},
-		ModifiedApps:   &modifiedApps,
-		ModifiedAssets: &modifiedAssets,
-		KvMods:         &keyValues,
-		PrevTimestamp:  numOrNil(uint64(sDelta.PrevTimestamp)),
-		StateProofNext: numOrNil(uint64(sDelta.StateProofNext)),
-		Totals: &model.AccountTotals{
-			NotParticipating: sDelta.Totals.NotParticipating.Money.Raw,
-			Offline:          sDelta.Totals.Offline.Money.Raw,
-			Online:           sDelta.Totals.Online.Money.Raw,
-			RewardsLevel:     sDelta.Totals.RewardsLevel,
-		},
-		TxLeases: &txLeases,
-	}
-	return
-}
diff --git a/daemon/algod/api/server/v2/delta_test.go b/daemon/algod/api/server/v2/delta_test.go
deleted file mode 100644
index 061912ac61..0000000000
--- a/daemon/algod/api/server/v2/delta_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
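With StateDeltaToLedgerDelta gone, the handler can serve the ledgercore.StateDelta it already holds and let the "format" parameter pick the wire encoding. A sketch of that pattern, assuming Echo and go-algorand's protocol.EncodeReflect and protocol.EncodeJSON helpers (both exist in this codebase); writeDeltaResponse is a hypothetical name, not the actual handler introduced by this patch.

package v2

import (
	"net/http"

	"github.com/labstack/echo/v4"

	"github.com/algorand/go-algorand/ledger/ledgercore"
	"github.com/algorand/go-algorand/protocol"
)

// writeDeltaResponse (illustrative only) returns the raw state delta in
// the encoding chosen by the new "format" query parameter.
func writeDeltaResponse(ctx echo.Context, sDelta ledgercore.StateDelta, format string) error {
	switch format {
	case "msgpack":
		// protocol.EncodeReflect produces canonical msgpack via reflection.
		return ctx.Blob(http.StatusOK, "application/msgpack", protocol.EncodeReflect(&sDelta))
	default: // "json" or unset
		return ctx.Blob(http.StatusOK, "application/json", protocol.EncodeJSON(&sDelta))
	}
}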
- -package v2 - -import ( - "bytes" - "sort" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/test/partitiontest" -) - -var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} -var txLease = [32]byte{} - -func TestDelta(t *testing.T) { - partitiontest.PartitionTest(t) - original := ledgercore.StateDelta{ - Accts: ledgercore.AccountDeltas{ - Accts: []ledgercore.BalanceRecord{ - { - Addr: poolAddr, - AccountData: ledgercore.AccountData{ - AccountBaseData: ledgercore.AccountBaseData{ - Status: 0, - MicroAlgos: basics.MicroAlgos{Raw: 5000}, - RewardsBase: 2, - RewardedMicroAlgos: basics.MicroAlgos{Raw: 0}, - TotalExtraAppPages: 1, - TotalAppParams: 2, - TotalAppLocalStates: 3, - TotalAssetParams: 4, - TotalAssets: 5, - TotalBoxes: 6, - TotalBoxBytes: 7, - }, - }, - }, - }, - AppResources: []ledgercore.AppResourceRecord{ - { - Aidx: basics.AppIndex(2), - Addr: poolAddr, - Params: ledgercore.AppParamsDelta{ - Params: &basics.AppParams{ - ApprovalProgram: []byte("1"), - ClearStateProgram: []byte("2"), - GlobalState: basics.TealKeyValue{}, - StateSchemas: basics.StateSchemas{}, - ExtraProgramPages: 2, - }, - Deleted: false, - }, - }, - }, - AssetResources: []ledgercore.AssetResourceRecord{ - { - Aidx: basics.AssetIndex(1), - Addr: poolAddr, - Params: ledgercore.AssetParamsDelta{ - Params: nil, - Deleted: true, - }, - }, - }, - }, - KvMods: map[string]ledgercore.KvValueDelta{ - "box1": { - Data: []byte("foobar"), - OldData: []byte("barfoo"), - }, - "box2": { - Data: []byte("alpha"), - }, - "box3": { - Data: []byte("beta"), - }, - }, - Txleases: map[ledgercore.Txlease]basics.Round{ - {Sender: poolAddr, Lease: txLease}: 600, - }, - Creatables: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{}, - Hdr: &bookkeeping.BlockHeader{ - Round: 4, - TimeStamp: 0, - RewardsState: bookkeeping.RewardsState{ - FeeSink: basics.Address{}, - RewardsPool: basics.Address{}, - RewardsLevel: 500, - RewardsRate: 510, - RewardsResidue: 0, - RewardsRecalculationRound: 0, - }, - }, - PrevTimestamp: 10, - Totals: ledgercore.AccountTotals{}, - } - - converted, err := StateDeltaToLedgerDelta(original, config.Consensus[protocol.ConsensusCurrentVersion]) - require.NoError(t, err) - require.Equal(t, original.Accts.Len(), len(*converted.Accts.Accounts)) - expAccDelta := original.Accts.Accts[0] - actAccDelta := (*converted.Accts.Accounts)[0] - require.Equal(t, expAccDelta.Addr.String(), actAccDelta.Address) - require.Equal(t, expAccDelta.Status.String(), actAccDelta.AccountData.Status) - require.Equal(t, expAccDelta.TotalAppLocalStates, actAccDelta.AccountData.TotalAppsOptedIn) - require.Equal(t, expAccDelta.TotalAppParams, actAccDelta.AccountData.TotalCreatedApps) - require.Equal(t, expAccDelta.TotalAssetParams, actAccDelta.AccountData.TotalCreatedAssets) - require.Equal(t, expAccDelta.TotalAssets, actAccDelta.AccountData.TotalAssetsOptedIn) - require.Equal(t, uint64(0), actAccDelta.AccountData.PendingRewards) - require.Equal(t, len(original.Accts.AssetResources), len(*converted.Accts.Assets)) - 
expAssetDelta := original.Accts.AssetResources[0]
-	actAssetDelta := (*converted.Accts.Assets)[0]
-	require.Equal(t, uint64(expAssetDelta.Aidx), actAssetDelta.AssetIndex)
-	require.Equal(t, expAssetDelta.Addr.String(), actAssetDelta.Address)
-	require.Equal(t, expAssetDelta.Params.Deleted, actAssetDelta.AssetDeleted)
-	require.Equal(t, expAssetDelta.Holding.Deleted, actAssetDelta.AssetHoldingDeleted)
-	require.Equal(t, len(original.Accts.AppResources), len(*converted.Accts.Apps))
-	expAppDelta := original.Accts.AppResources[0]
-	actAppDelta := (*converted.Accts.Apps)[0]
-	require.Equal(t, uint64(expAppDelta.Aidx), actAppDelta.AppIndex)
-	require.Equal(t, expAppDelta.Addr.String(), actAppDelta.Address)
-	require.Equal(t, expAppDelta.Params.Deleted, actAppDelta.AppDeleted)
-	require.Equal(t, len(original.KvMods), len(*converted.KvMods))
-	// sort the result so we have deterministic order
-	sort.Slice(*converted.KvMods, func(i, j int) bool {
-		return bytes.Compare(*(*converted.KvMods)[i].Key, *(*converted.KvMods)[j].Key) < 0
-	})
-	require.Equal(t, []uint8("box1"), *(*converted.KvMods)[0].Key)
-	require.Equal(t, original.KvMods["box1"].Data, *(*converted.KvMods)[0].Value)
-	require.Equal(t, []uint8("box2"), *(*converted.KvMods)[1].Key)
-	require.Equal(t, original.KvMods["box2"].Data, *(*converted.KvMods)[1].Value)
-	require.Equal(t, []uint8("box3"), *(*converted.KvMods)[2].Key)
-	require.Equal(t, original.KvMods["box3"].Data, *(*converted.KvMods)[2].Value)
-	require.Equal(t, txLease[:], (*converted.TxLeases)[0].Lease)
-	require.Equal(t, poolAddr.String(), (*converted.TxLeases)[0].Sender)
-	require.Equal(t, uint64(600), (*converted.TxLeases)[0].Expiration)
-	require.Nil(t, converted.StateProofNext)
-	require.Equal(t, uint64(10), *converted.PrevTimestamp)
-	require.Equal(t, model.AccountTotals{}, *converted.Totals)
-}
diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go
index 8b842d8bbb..b4512779fb 100644
--- a/daemon/algod/api/server/v2/generated/data/routes.go
+++ b/daemon/algod/api/server/v2/generated/data/routes.go
@@ -23,7 +23,7 @@ import (
 type ServerInterface interface {
 	// Get a LedgerStateDelta object for a given round
 	// (GET /v2/deltas/{round})
-	GetLedgerStateDelta(ctx echo.Context, round uint64) error
+	GetLedgerStateDelta(ctx echo.Context, round uint64, params GetLedgerStateDeltaParams) error
 	// Removes minimum sync round restriction from the ledger.
// (DELETE /v2/ledger/sync) UnsetSyncRound(ctx echo.Context) error @@ -53,8 +53,17 @@ func (w *ServerInterfaceWrapper) GetLedgerStateDelta(ctx echo.Context) error { ctx.Set(Api_keyScopes, []string{""}) + // Parameter object where we will unmarshal all parameters from the context + var params GetLedgerStateDeltaParams + // ------------- Optional query parameter "format" ------------- + + err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), ¶ms.Format) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) + } + // Invoke the callback with all the unmarshalled arguments - err = w.Handler.GetLedgerStateDelta(ctx, round) + err = w.Handler.GetLedgerStateDelta(ctx, round, params) return err } @@ -136,178 +145,168 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3PcNrLoX0HNOVV+3KEkP5Jdqyp1rmInWd3YWZelZO85lm+CIXtmsOIAXACUZuLr", - "/34KDYAESYBDPeLsnsonW0M8Go1Go9HPj7NcbCrBgWs1O/44q6ikG9Ag8S+a56LmOmOF+asAlUtWaSb4", - "7Nh/I0pLxlez+YyZXyuq17P5jNMNtG1M//lMwj9qJqGYHWtZw3ym8jVsqBlY7yrTuhlpm61E5oY4sUOc", - "vpp9GvlAi0KCUkMo/8rLHWE8L+sCiJaUK5qbT4pcM70mes0UcZ0J40RwIGJJ9LrTmCwZlIU68Iv8Rw1y", - "F6zSTZ5e0qcWxEyKEoZwvhSbBePgoYIGqGZDiBakgCU2WlNNzAwGVt9QC6KAynxNlkLuAdUCEcILvN7M", - "jt/PFPACJO5WDuwK/7uUAL9CpqlcgZ59mMcWt9QgM802kaWdOuxLUHWpFcG2uMYVuwJOTK8D8qZWmiyA", - "UE7effuSPHv27IVZyIZqDYUjsuSq2tnDNdnus+NZQTX4z0Nao+VKSMqLrGn/7tuXOP+ZW+DUVlQpiB+W", - "E/OFnL5KLcB3jJAQ4xpWuA8d6jc9Ioei/XkBSyFh4p7Yxve6KeH8v+uu5FTn60owriP7QvArsZ+jPCzo", - "PsbDGgA67SuDKWkGfX+Uvfjw8cn8ydGnf3t/kv2X+/OLZ58mLv9lM+4eDEQb5rWUwPNdtpJA8bSsKR/i", - "452jB7UWdVmQNb3CzacbZPWuLzF9Leu8omVt6ITlUpyUK6EIdWRUwJLWpSZ+YlLz0rApM5qjdsIUqaS4", - "YgUUc8N9r9csX5OcKjsEtiPXrCwNDdYKihStxVc3cpg+hSgxcN0KH7igf15ktOvagwnYIjfI8lIoyLTY", - "cz35G4fygoQXSntXqZtdVuR8DQQnNx/sZYu444amy3JHNO5rQagilPiraU7YkuxETa5xc0p2if3dagzW", - "NsQgDTenc4+aw5tC3wAZEeQthCiBckSeP3dDlPElW9USFLleg167O0+CqgRXQMTi75Brs+3/5+yvPxAh", - "yRtQiq7gLc0vCfBcFOk9dpPGbvC/K2E2fKNWFc0v49d1yTYsAvIbumWbekN4vVmANPvl7wctiARdS54C", - "yI64h842dDuc9FzWPMfNbaftCGqGlJiqSro7IKdLsqHbr47mDhxFaFmSCnjB+IroLU8KaWbu/eBlUtS8", - "mCDDaLNhwa2pKsjZkkFBmlFGIHHT7IOH8ZvB00pWATh+kCQ4zSx7wOGwjdCMObrmC6noCgKSOSA/Os6F", - "X7W4BN4wOLLY4adKwhUTtWo6JWDEqcfFay40ZJWEJYvQ2JlDh+Eeto1jrxsn4OSCa8o4FIbzItBCg+VE", - "SZiCCccfM8MrekEVfPk8dYG3Xyfu/lL0d310xyftNjbK7JGM3IvmqzuwcbGp03/C4y+cW7FVZn8ebCRb", - "nZurZMlKvGb+bvbPo6FWyAQ6iPAXj2IrTnUt4fiCPzZ/kYycacoLKgvzy8b+9KYuNTtjK/NTaX96LVYs", - "P2OrBDIbWKOvKey2sf+Y8eLsWG+jj4bXQlzWVbigvPMqXezI6avUJtsxb0qYJ81TNnxVnG/9S+OmPfS2", - "2cgEkEncVdQ0vISdBAMtzZf4z3aJ9ESX8lfzT1WVpreuljHUGjp29y3qBpzO4KSqSpZTg8R37rP5apgA", - "2FcCbVsc4oV6/DEAsZKiAqmZHZRWVVaKnJaZ0lTjSP8uYTk7nv3bYatcObTd1WEw+WvT6ww7GXnUyjgZ", - "raobjPHWyDVqhFkYBo2fkE1YtocSEeN2Ew0pMcOCS7iiXB+075EOP2gO8Hs3U4tvK8pYfPfeV0mEE9tw", - "AcqKt7bhA0UC1BNEK0G0orS5KsWi+eHhSVW1GMTvJ1Vl8YGiITCUumDLlFaPcPm0PUnhPKevDsh34dgo", - "Zwte7szlYEUNczcs3a3lbrFGceTW0I74QBHcTiEPzNZ4NBgZ/j4oDt8Ma1EaqWcvrZjGf3FtQzIzv0/q", - "/K9BYiFu08SFryiHOfuAwV+Cl8vDHuUMCcfpcg7ISb/v7cjGjBInmFvRyuh+2nFH8Nig8FrSygLovti7", - "lHF8gdlGFtY7ctOJjC4Kc3CGA1pDqG591vaehygkSAo9GL4uRX75F6rW93DmF36s4fHDacgaaAGSrKla", - "H8xiUkZ4vNrRphwx0xBf72QRTHXQLPG+lrdnaQXVNFiagzculljUYz9keiAjb5e/4n9oScxnc7YN67fD", - "HpBzZGDKHmdnQSjMU94+EOxMpgGqGATZ2Nc7Ma/uG0H5sp08vk+T9ugbqzBwO+QWgTsktvd+DL4W2xgM", - 
"X4vt4AiILaj7oA8zDoqRGjZqAnyvHGQC99+hj0pJd0Mk49hTkGwWaERXhaeBhze+maXVvJ4shLwd9+mx", - "FU5afTKhZtSA+c57SMKmdZU5UozopGyD3kCtCW+cafSHj2Gsg4UzTX8DLCgz6n1goTvQfWNBbCpWwj2Q", - "/jrK9BdUwbOn5OwvJ188efrz0y++NCRZSbGSdEMWOw2KPHRvM6L0roRHw5Xh66gudXz0L597LWR33Ng4", - "StQyhw2thkNZ7aYVgWwzYtoNsdZFM666AXDK4TwHw8kt2olV3BvQXjFlJKzN4l42I4Wwop2lIA6SAvYS", - "002X106zC5cod7K+j6csSClkRL+GR0yLXJTZFUjFRMRU8ta1IK6FF2+r/u8WWnJNFTFzo+q35ihQRChL", - "b/l0vm+HPt/yFjejnN+uN7I6N++Ufeki32sSFalAZnrLSQGLetV5CS2l2BBKCuyId/R3oM92PEet2n0Q", - "afqZtmEcVfxqx/PgzWY2qoRi1dmEu7/N+ljx+jk71QMVAceg4zV+xmf9Kyg1vXf5pT9BDPaXfiMtsKQw", - "DfEV/Jqt1joQMN9KIZb3D2Nslhig+MGK56XpMxTSfxAFmMXW6h4u43awltbNnoYUThei1oQSLgpAjUqt", - "4td0wiyP9kA0Y+rw5tdrK3EvwBBSTmuz2roiaKQbcI62Y0ZzS70ZokYlrBiN+cm2stNZk28pgRbmVQ+c", - "iIUzFTgjBi6SooVR+4vOCQmRs9SBq5IiB6WgyJyKYi9ovp1lInoETwg4AtzMQpQgSyrvDOzl1V44L2GX", - "oT1ckYff/6Qe/Q7waqFpuQex2CaG3ubB5+xBQ6inTT9GcP3JQ7KjEojnueZ1aRhECRpSKLwRTpL714do", - "sIt3R8sVSLTM/KYU7ye5GwE1oP7G9H5XaOsq4eXlHjrnbIN6O065UJALXqjoYCVVOtvHlk2jzmvMrCDg", - "hDFOjAMnhJLXVGlrTWS8QCWIvU5wHiugmCnSACcFUjPyT14WHY6dm3uQq1o1gqmqq0pIDUVsDRy2I3P9", - "ANtmLrEMxm6kXy1IrWDfyCksBeM7ZNmVWARR3Sjdnbl9uDhUTZt7fhdFZQeIFhFjgJz5VgF2Q0+XBCBM", - "tYi2hMNUj3Ia95r5TGlRVYZb6KzmTb8Ums5s6xP9Y9t2SFxUt/d2IcDMrj1MDvJri1nr47Sm5gmNI5MN", - "vTSyBz6IrdlzCLM5jJliPIdsjPLNsTwzrcIjsPeQ1tVK0gKyAkq6Gw76o/1M7OexAXDH24eP0JBZf5b4", - "preU7N0HRoYWOJ6KCY8Ev5DcHEHz8mgJxPXeM3IBOHaMOTk6etAMhXNFt8iPh8u2Wx0ZEW/DK6HNjlty", - "QIgdQ58CbwINzci3xwR2ztpnWX+K/wTlJmjEiJtPsgOVWkI7/o0WkFCmOTfg4Lj0uHuPAUe5ZpKL7WEj", - "qROb0Oy9pVKznFX41Pkedvf+8utPELU3kQI0ZSUUJPhgX4FV2J9YR4z+mLd7CU5SwgzBH2hhIsspmUKJ", - "pwv8Jezwyf3WevidB36B9/CUjYxqrifKCQLq/YaMBB42gS3Ndbkzcppew45cgwSi6sWGaW1dNrsvXS2q", - "LBwgquAemdFZc6x3nN+BKealMxwqWN5wK+Yz+yQYh++89y7ooMM9BSohygnKowEyohBMMvyTSphdZ85D", - "2LuRekrqAOmYNprymtv/geqgGVdA/lPUJKccX1y1hkakERLlBJQfzQxGAmvmdCb+FkNQwgbsQxK/PH7c", - "X/jjx27PmSJLuPZu9aZhHx2PH6Ma561QunO47kFVaI7baeT6QM0/3nvOeaHHU/abmN3IU3bybW/wxlxg", - "zpRSjnDN8u/MAHoncztl7SGNTDOv47iTlPrB0LF1476fsU1d3teGLykrawlp69jFxfvl5uLiA/nWtvSG", - "7bkn8hAd121YxNLdRrVE1xpSMvO+lYIWRkCI6vZxkXyVNc6ZKgrORhlw/ubOIeW7XiDfVBjIAnJaW69k", - "x7UdBK17qDqIyIu93e2jMLqQierxutT20g6xupKirohqtt1SgaYafhtVczt0DMrhxIFvUPsx5R5knonl", - "7h5uazsQkVBJUMhbQ/WKsl/FMoy/ccxX7ZSGzVADbbv+nHifvUu+cwQvGYdsIzjsoiGnjMMb/Bjrbfl7", - "ojPetKm+feG5A38PrO48U6jxrvjF3Q4Y2tvGL+4eNr8/bs/4EEYeoXINyopQkpcMVW+CKy3rXF9wio/7", - "4LBF/Af8Myat7nnpm8T1SxH1jxvqglP0HWme/FG+uIQIX/4WwGt9VL1agdI9KXEJcMFdK8ZJzZnGuTZm", - "vzK7YRVINOIf2JYbuiNLWqJ26leQgixq3WWuGCChNCtLZwkx0xCxvOBUkxIMV33D+PkWh/OWRE8zHPS1", - "kJcNFg6i52EFHBRTWdzP4Tv7FV3Q3PLXzh0No1XtZ6s7N+O3URQ7fPu3EZj/7+F/HL8/yf6LZr8eZS/+", - "1+GHj88/PXo8+PHpp6+++v/dn559+urRf/x7bKc87DH3fQf56Sv3pjh9hYJjqzwfwP7ZFKcbxrMokYUm", - "4h5tkYdG/PUE9KirVtBruOB6yw0hXdGSFVTfjhz6LG5wFu3p6FFNZyN6agS/1huKY3fgMiTCZHqs8dbX", - "+NA1KB4og9YcF/uC52VZc7uVtXIWJfQD9y4aYjlvgqFsEoRjgpEya+r9i9yfT7/4cjZvI1ya77P5zH39", - "EKFkVmxjcUwFbGNStjsgeDAeKFLRnQId5x4Ie9QbxRrFw2E3YJ5nas2qz88plGaLOIfz3rXutb7lp9y6", - "vZrzg7ahnVM5i+Xnh1tLgAIqvY4FR3ckBWzV7iZAz15fSXEFfE7YARz0X8vFCpT3iymBLjFIF+0bYkq0", - "QHMOLKF5qgiwHi5k0pM0Rj8o3Dpu/Wk+c5e/und53A0cg6s/Z2MI8n9rQR589805OXQMUz2wIXV26CAI", - "KqKFcn7+HU8Ow81sSggbU3jBL/grWDLOzPfjC15QTQ8XVLFcHdYK5Ne0pDyHg5Ugxz504BXV9IIPJK1k", - "1pYgaINU9aJkObkMJeKWPG0kfvTZSMuVMA/HvlF7KL+6qaL8xU6QXTO9FrXOXKhxJuGaypjRQDWhpjiy", - "TRQwNuucuLEtK3ahzG78OM+jVaX6IWfD5VdVaZYfkKFyAVVmy4jSQnpZxAgoFhrc3x+EuxgkvfZx6rUC", - "RX7Z0Oo94/oDyS7qo6NnQDoxWL+4K9/Q5K6Cjr7yViFxfV0lLty+a2CrJc0qukooDTTQCncf5eUNPrLL", - 
"kmC3TuyX923FodoFeHykN8DCceM4Flzcme3lc8bEl4CfcAuxjRE3WovpbfcriAa79Xb1IsoGu1TrdWbO", - "dnRVypC435kmlcTKCFnejK3YCl0FXdaNBZB8DfklFJgAADaV3s073b2nhBM0PetgyibKsLEcGM2Nqt0F", - "kLoqqBPFewolg2EFWntfxXdwCbtz0QaD3ySOthvWqVIHFSk1kC4NsYbH1o3R33znjoO6rqry0ZEYJuPJ", - "4rihC98nfZCtyHsPhzhGFJ2wwxQiqIwgwhJ/AgW3WKgZ706kH1ueeWUs7M0XyavheT9xTdrHk/OcCVeD", - "0ZT2+wYw6464VmRBjdwuXMIYG7oYcLFa0RUkJORQuz4xQLCjkcdB9t170ZtOLPsX2uC+iYJsG2dmzVFK", - "AfPFkAo+Znr+Un4ma8CxClSCeeAcwhYlikmNY5llOlR2rBw2sVUKtDgBg+StwOHB6GIklGzWVPlcNpjy", - "x5/lSTLAbxiKO5aA4TRw9Qny+jSKb89z++d08Lp0aRh87gWfcCF8Wk5InmAkfPQujm2H4CgAFVDCyi7c", - "NvaE0oYFtxtk4PjrclkyDiSLeQ1RpUTObDKi9ppxc4CRjx8TYlXAZPIIMTIOwEbDJA5MfhDh2eSrmwDJ", - "XVgz9WOjSTP4G+IRGNaP1og8ojIsnPGEx7bnANS5mjX3V8/hEYchjM+JYXNXtDRszr342kEGeQBQbO1F", - "/TvT+KOUODuigbcXy43WZK+i26wmlJk80HGBbgTihdhmNgQrKvEutgtD71HXYgwIix1Mm3HhgSILsUV3", - "C7xarCvrHljScHgwghf+limkV+yXus0tMGPTjktTMSpUSDJOndeQS0qcmDJ1QoJJkcvDIInCrQDoKTva", - "dKPu8bv3kdoVT4aXeXurzdvkQD5qI3b8U0couksJ/A21ME3aA6dCeAe5kEVaT2EIlekmf+tQveCyzxq+", - "MTkxwkgu2ZPua8M/IYY7l/AK6MDTzjOCiFc25mgAyTfbShjp1sYk2QQVDilWTpRgQy2V1VkpxlclNJ6b", - "UTTFFux9kjzG7ZLbhFN+wGmyc2xzE4/8MViqKg7HTV4q7xx+RqBInPIWDpTD7wiJS1IxCsunNH287Yv2", - "0YPSda/ppkYJ3lqx28GQz9CaObSZKigBX89Z57WRXcZs3BcX7xWgaHbmuwVaPkzAQvnuUeCzJWHFlIbW", - "2mQkWI/pz63Hp5j3TYhlenW6kkuzvndCNPKcTSyEHTvL/OwrQJ/nJZNKZ2iqiy7BNPpWofbpW9M0/qjo", - "eoXZFKisiF+iOO0l7LKClXWcXt28378y0/7QyA6qXqBgwjgBmq/JAlP2Rn1FR6a27sSjC35tF/ya3tt6", - "p50G09RMLA25dOf4FzkXvZtujB1ECDBGHMNdS6J05AINQnyH3DF4YNjDidfpwZiZYnCYCj/2Xv8qH2ic", - "EubsSCNrQdegpHNuxCHH+pFZpt5m648G43Khs47yI4KuRsGjNL20AWXdDearRqcSd5uy7+pJQ7u2ewbk", - "08fj+4dzQnBWwhWU+52gKWLcK3DQM8KOgK43BMMJvI/Hfql+uAMtwpqV9mGMUstAuhkz3LZPI5c/r31b", - "I8Ea3LnI98nWOyOheXpr6XtouquqrIASonFmfwsCyWhVYbYI3zgW0GMGY7yAbRwc+2key6k/VN7XjGub", - "f/W+Ujv2xpm+7DAB4hQUVDZV383TR6bfmMEuhWhOLypBlI1xYJQR4+DNyy6oRtKnvsQ1TquKFdue3dOO", - "mtSO3wvG8IJyg+3BQEAbsQhGCaqb+LJV5tn06528UweTMHPeTU8ZyjThVEz54iFDRDURzvtwdQ60/B52", - "P5m2uJzZp/nsbmbSGK7diHtw/bbZ3iie0Q3Pms06Xg83RDmtKimuaJk5Y3KKNKW4cqSJzb3t+TNLa3Gu", - "d/7Nyeu3DvxP81leApVZ89pJrgrbVf8yq7I5NhMHxBcnWFPd6OfsazjY/CYxYGiAvl6DSwQfPKgHGWtb", - "54LgKDqD9DLuDbzXvOz8IOwSR/whoGrcIVpTnfWG6HpA0CvKSm8j89AmPHdxcdPuxihXCAe4sydFeBfd", - "K7sZnO746Wipaw9PCucaSVW/sdUYFBG87y5nXsFoekNS3VDMN2stIEPmxOsNWg0yVbI8bk/lCwyx4dZP", - "xjQm2DjxnjYj1izhdsVrFoxlmqkJSu0ekMEcUWT63MUp3C2EK6NVc/aPGggrgGvzSeKp7B1U1J86y/rw", - "Oo1LlW5ga41vh7+LjBHmWu7feE7mGhMwQq+cAbivGq2fX2hjfTI/BO4HN3DuC2ccXIkjjnmOPhw120CF", - "dde7ZrKEvrfklte/uaTPiTmiJbSYypZS/ApxVRVq+CLRoT67NEOP1l+BTwgpay05bSWwdvbkdqekm9Di", - "1HVITFA97nzggoNpbr01mnK71baiTcevPU4wYQTJoR2/JRgH8yDqpqTXCxrLAWyEDANTYH7p2M21IL6z", - "x72z0TCX8PuABH5jTVtmE39UINvA7WESsVsKDHbayaJCKxkg1YYywdz6+pRKRIap+TXltjASWiPwKLne", - "5oHvFULXQmLaHhU38ReQs01UuXRx8b7Ih+bcgq2YLQtUKwjqzriBbD01S0Wudo91p2tRc7okR/OgspXb", - "jYJdMcUWJWCLJ7bFgiqwShXvueG7mOUB12uFzZ9OaL6ueSGh0GtlEasEaYQ6fN40jioL0NcAnBxhuycv", - "yEN00VHsCh4ZLLr7eXb85AUaWO0fR7ELwNX/GuMmxTIMco3TMfoo2TEM43ajHkS1AbZoY5pxjZwm23XK", - "WcKWjtftP0sbyukK4l6hmz0w2b64m2gL6OGFF7bimNJS7AhLhBuDpoY/JSLNDPuzYJBcbDZMb5wjhxIb", - "Q09tURk7qR/Oli9z+cA9XP4j+kNV3h2k94j8vHYfe7/FVo1eaz/QDXTROifU5moqWeup6KsUkFOfCg4T", - "pDd50S1uzFxm6SjmoOPiklSScY0Pi1ovsz+TfE0lzQ37O0iBmy2+fB5JCt9NTsxvBvhnx7sEBfIqjnqZ", - "IHsvQ7i+5CEXPNsYjlI8aiM7g1OZdNyKu+ik/ITGh54qlJlRsiS51R1yowGnvhPh8ZEB70iKzXpuRI83", - "Xtlnp8xaxsmD1maHfnz32kkZGyFj+V3b4+4kDglaMrhCP/34Jpkx77gXspy0C3eB/vc1nnqRMxDL/FlO", - "PgRuYvEJ3gZo8wk9E29j7elaejoyV9Tsgy+caRYQW/N0n93jLtWQOp1vApXn0NOgSygROgGwPYzd7AV8", - 
"dxVDYPLp7FAKR92lxSjzaxFZsi+h0dh4XMRkRG+VukDMB8OgFm6oOemWK/j8HjXeLDL07DBfPKz4Rx/Y", - "35nZIJL9ChKbGJRSiW5n0XwPnMso+Vpsp25qj3f7jf0nQE0UJTUri5/a3CC9SjWS8nwddRZZmI4/tzU1", - "m8XZwxxN8LumnFtvhKFuAl8pP/vXTOS99XcxdZ4N4xPb9ovn2OX2FtcC3gXTA+UnNOhlujQThFjtpl1o", - "wvrKlSgIztNmk23v9WHRpaA0xj9qUDp2L+IHG1qAGvWloWJboQJ4gXqMA/KdrYm/BtLJFYj6A5ulCQpf", - "J8CaeuqqFLSYEzPO+Tcnr4md1faxleFsZYiVvXY7q0j7597E0XbMt/Y+IvrMqpXG1J1K000VS1FiWpz7", - "BpgHJbQu4cM6xM4BeWV1Gsq/mO0khh6WTG6gIM10TqpGmjD/0Zrma1QWdFhqmuSnlzTxVKmCMsJNOcAm", - "ezSeOwO3q2pii5rMiTCSwzVTthQ6XEE3K0qTIsiJAT5LSnd5subcUkpUKh5LYXUbtHvgrBekN0BFIesh", - "/obSi3NTv2GFlzPsFc1m2S8XM6gfbHNsNGXe3vgK0JQLznLMJRm7ml1Z9SnW2QlpN+ORAc7fRs0ihyta", - "pKYJ1nBYTJat8YzQIW5oHgq+mk211GH/1Fi/e001WYFWjrNBMfe1lpyGmnEFLhs4VtgP+KSQHYs3csio", - "E0UrJ9+QjDA4O6Fy+NZ8+8EppDBq8ZJxfHr6GAkbIGl1yFj1WZv3KtNkJTCCwh2KcE3vTZ8DTNZSwPbD", - "ga8SjWNYg7FZtvWOGA514n0lnG+CafvStLUJ9dqfO3FwdtKTqnKTpitxReUBveVJBEds3o2jV4DcZvxw", - "tBFyG3VywvvUEBpcoYsEVMSFxiSqUvWCYIzQaikKWxDrHx3NoxV1E33NOLQ1zCMXRB69EnBj8Lwm+qlc", - "Um1FwEk87RxoiX4RMYamtDOK3XWo3gY7f9Iqn/k50tvYFtRKMI6mQSu4Ub5rSqcb6g6EiZe0bJyEIuWx", - "UKpyQpQLrukWzIoxDsO4fULO7gUwPAZDmch215Lak3OTmyiVqmRRFyvQGS2KmD7ha/xK8KtPVwpbyOsm", - "i3dVkRwz83VTFQ6pzU2UC67qzchcvsEdpwsq0EWoIayC53cYHa8XO/w3lsI6vTPOPejGPvbeF6howudu", - "Ijd3RxpIvYamM8VW2XRM4J1yd3S0U9+O0Nv+90rppVh1AfnMCcrGuFy4RzH+9o25OML8XYO87PZqadJr", - "oTuo8HWD8dnYJIbpciUfdTqYM8i8PK6ASFcYnePll4hrCXS91N6v1q6dim7Jk8FYVLv8CZqSURaUjEm3", - "fmU2+hyhiOv0U75k1pXMfB70niYZDuRsHHsUod5JcQjQ994DmlSUOaeNllkMMevCvdLqwrFD125wfxEu", - "iCqpsfv+KhXw5OOAbWRHrybjJbikSpWEKyZq7w7h/eX8k9D+6mriB3HFyfUP/WZwqt9XDZpU2p67+j92", - "me5N/v1P1ruSANdy90+gwh1s+qCiZSxncaeepROuovomPfWufNUUxby8yjaiGAuY/v4n8srblibdO56Q", - "Y+mWROGqyEWDxV+7EhC+mZE+J0/7xnU6qarxqRMR4sPJbcObTp9KNWXO55jW7a0/v7YOaKhCiLxVgnBm", - "DludKP7Uj4a9BgLbCjDXbRDYnM6eMZWgXJAjvlazEqiCEQyHWdtc24lIPt++Nu2nBdvHK7GmU862aWaR", - "eVZCsbY4T6xE60SX43OsshpYDIdjeX+/K8g1VmRq/ZgkwE0S6JrJgvLff6SeTShKGs9sT/8jaWbns5C3", - "RAMV3fGibYoctKqhyTWSqt62iTB715mZQ1LD3A9hfljSUsWroiWdXXuZTwKHlUii5/jCTosJ2b7dcuaB", - "DwQrxhEZjwSwzt//M5Fp/drvF52Dml3jr4pB4oUgeYgtrXRwAweSxosaJUPcrxVwVxl+GUPN/qio5RJy", - "za72JLr42xp4kERh7jXBCMsyyHvBmigbTCh6cztHC9BYHopReILE/ncGJxUjegm7B4p0qCFa62nuhfvb", - "5JJEDOCtZQSPSqiYl6I1XTnHMaYaykAseK9g2x3arNzJKrGBnHPLuTxJdiWekSnjZSonzWW63igTGAaM", - "pHJhDMvcpTUer7CqoGoquPtclKFekJxGCkG5XJaYlqSx1vqslqD8bz4HkZ2lZJcQ1rFF2zimUHAtospe", - "r0fORuSkQfR3tHoV5s7yM7M2hmMY7xvJAY3eT3kpsPJTKtypGzbRuHk9UNY5FMUUrESFcC1BunrfeDOU", - "QkGmhXetG4NjDBXWA/ZWSFDJugsWuGQ21HdtulesP2OTZVDn+BoukEjYUAOdDJKypuccQ/ZL+90HuPqc", - "XHt12g29ZnuzqvroHaYGSAypfkncbbk/cPY26m3GOcjM27r7PoXcoDK0v1ZSFHXuEsEEB6MxAUxOWDbC", - "SqKa4Xy4yoGSr8Rs4K+DNASXsDu0+pd8TfkqSK8WQm9Fe7uGIHNZb7fvVfMfV3KWK7uA1b3A+Xtqz+ez", - "SogySxhcT4eJZvtn4JLll0bMrlu/90ShTfIQ7XyNR831eucTq1YVcCgeHRBywm2kkXeu6VY66k3OH+ix", - "+bc4a1Hb3M9OsX9wweMhG5jUR96Rv/lhxrmaAsP87jiVHWRPGtNtIsmtpNeRsrNDf7rJ7i79UqAtUVko", - "YlLKLVN1TTrfQ+V+hPSDKojjr58wk1/rxSytjQilpbYyZFd4edOafqbVY/Qd9oAXKmuCioyeGzlwfmdX", - "4zcNUoKlJCmhs/x9+h+3wJYvBVukMGrSLNMmILZuat19CZR76mWjM4vjeahaw7R9gmPO36FKTqHN0KZh", - "DQjHnEt5RcvPr1bDfI4niA8o3qUFnvD9GyLZolLdzt/vNZ00d/DWvb+p+VtUA/4NzB5Fjb1uKGf8aSph", - "ehMZprinJSlFWxcZhyTXOKa1Dj/5kixcFF0lIWeK9QKMr31Vk+a5h0W+nI/lVu95X+5b509C34GM3QNB", - "VOSHtkKCFng/tBC2R/R3ZiqJkxul8hj1Dcgigr8YjwrT2ey5Li47ZmNbcabnDykk3LP5OHAEu6H5eJio", - "Z+ryrInUXDq1guE6J9/WHdxGLup2bVN9H4bIHUujP8VlIV4dw3RHnwmLECwtQxBU8suTX4iEJdaOFOTx", - "Y5zg8eO5a/rL0+5nc5wfP46KcZ/NW8LiyI3h5o1SjDOmDUJhYFsxmUj6984xd3dho/mOYAeIZ+csIVoN", - 
"Bqf2fqOfORU0ytx7Ffx2aa7xPn4WoMwvuZkohvufUrEL1j8/ESbTOws1K4t9h7IT9NRWvsWwnp9dQO7v", - "Unv3Z6vLHrJJV//wJj5y/QOAiImstTN5MFUQzjQhksl1i8QtIXHltWR6h3nCvOqT/Rz1qfmusZY4K3CT", - "WcbJHVpcQpNprrWt1MpLNt8JWqIsYN4z6KGohSgPyDdbuqlKcEzqqweLP8GzPz8vjp49+dPiz0dfHOXw", - "/IsXR0f0xXP65MWzJ/D0z188P4Inyy9fLJ4WT58/XTx/+vzLL17kz54/WTz/8sWfHpg7wIBsAZ35rBSz", - "/4sFqrOTt6fZuQG2xQmt2Pews7UwDRn7Kps0Ry4IG8rK2bH/6X977naQi007vP915oLeZ2utK3V8eHh9", - "fX0QdjlcoTI106LO14d+nkEZzpO3p014mPWFwh21kT+GFHBTHSmc4Ld335ydk5O3pwctwcyOZ0cHRwdP", - "MJdxBZxWbHY8e4Y/4elZ474f+iTCxx8/zWeHa6Al2sTNHxvQkuX+k7qmqxXIA1du1Px09fTQi3GHH50i", - "+dPYt8Owcs/hx46+vdjTEx1dDj/6JFbjrTtZopydIegwEYqxZocLjECe2hRU0Di9FHzcqcOP+DxJ/n7o", - "wjLjH/GZaM/AoTdKxVt2sPRRbw2svR451fm6rg4/4n+QJgOwrBN0AO5sFbOYfwfae4aFVUVa376Gtk8L", - "23zgcubS09l8vcfvp5UmAz+deaUXoJjLYYhcwhyB9hD7aKeWRaM5PsgtO5aF6dMHTMWCymo8Vk+Pju6t", - "Yu8AF5HSvX0HvKLxnXt+9OTeIOl6NEfAOOVofDasiFhWixA8/3wQvMT3LxeaLBkvbPkxTZEq7BYjQH/+", - "fABptvFKY46lF0Ehz//iHilkwr4YWYmWBFva6Z99vunPQF6xHMg5bCohqWTljvzIm7jRIIvZkHf8yC+5", - "uOYeciO91JsNlTvHVyjpnw9fpdbymKC+tLk26Uqh1hhLX8zm1pP+wyfHz+zpOcQkOruWzfmfd9xFbZUQ", - "M7//yBX4F4cN197xPMXksPHZjufvGs4z4B9Iq5+RTM4aePEEoX32n4KF/HFY7n5Y3sFGXIEi7h4LiJNI", - "MI8Wa+xCb8WWhg9GDs08eds7zflwJm81aAcfXP17zsT0Xeg+REes75Pg3OMuY4efUv2/qa7fi5GwUz2I", - "bdDsD0bwByO4R0aga8mTRzS4v9CFDCqXvCun+RoOpl+iO56HL4NKxJKknI0wC5caIsUrzrq84l/wffC5", - "j/VLyv157uy49VmgsmQgGyqgfJit4w8u8D9Hdka52L3B50RDWarw7GuBZ99q0Z1nMLfuCBP5QL82fOzn", - "w4/dkmsdZYha17oQ10FfNF5ay/tQR9JU6+78fXhNmc6WQjqvYMwnPeysgZaHLulI79c2znfwBYOXgx8D", - "fUr818Mml170Y19RFfvqFDWJRj5llP/cKqpDxS9yyEbl+/6D4U+YDNYxz1aPeXx4iJ52a6H04ezT/GNP", - "xxl+/NCQhM/FNqsku8LQ7g+f/jsAAP//TBVyWA7OAAA=", + "H4sIAAAAAAAC/+x9a3PctpLoX0HNbpUfdziSXzmxqlJ7FTvJ0cZxXJaSc3dt3wRD9szgiAQYAJRm4qv/", + "fgsNgARJkEM9Yidb55OtIR6NRqPRb3ycpaIoBQeu1ezo46ykkhagQeJfNE1FxXXCMvNXBiqVrNRM8NmR", + "/0aUloyvZ/MZM7+WVG9m8xmnBTRtTP/5TMJvFZOQzY60rGA+U+kGCmoG1rvStK5H2iZrkbghju0QJy9n", + "VyMfaJZJUKoP5Y883xHG07zKgGhJuaKp+aTIJdMbojdMEdeZME4EByJWRG9ajcmKQZ6phV/kbxXIXbBK", + "N/nwkq4aEBMpcujD+UIUS8bBQwU1UPWGEC1IBitstKGamBkMrL6hFkQBlemGrITcA6oFIoQXeFXMjt7N", + "FPAMJO5WCuwC/7uSAL9Doqlcg559mMcWt9IgE82KyNJOHPYlqCrXimBbXOOaXQAnpteC/FApTZZAKCdv", + "v31Bnjx58twspKBaQ+aIbHBVzezhmmz32dEsoxr85z6t0XwtJOVZUrd/++0LnP/ULXBqK6oUxA/LsflC", + "Tl4OLcB3jJAQ4xrWuA8t6jc9Ioei+XkJKyFh4p7Yxne6KeH8n3VXUqrTTSkY15F9IfiV2M9RHhZ0H+Nh", + "NQCt9qXBlDSDvjtMnn/4+Gj+6PDq394dJ//t/nz25Gri8l/U4+7BQLRhWkkJPN0lawkUT8uG8j4+3jp6", + "UBtR5RnZ0AvcfFogq3d9ielrWecFzStDJyyV4jhfC0WoI6MMVrTKNfETk4rnhk2Z0Ry1E6ZIKcUFyyCb", + "G+57uWHphqRU2SGwHblkeW5osFKQDdFafHUjh+kqRImB60b4wAX9eZHRrGsPJmCL3CBJc6Eg0WLP9eRv", + "HMozEl4ozV2lrndZkbMNEJzcfLCXLeKOG5rO8x3RuK8ZoYpQ4q+mOWErshMVucTNydk59nerMVgriEEa", + "bk7rHjWHdwh9PWREkLcUIgfKEXn+3PVRxldsXUlQ5HIDeuPuPAmqFFwBEct/QqrNtv/n6Y+viZDkB1CK", + "ruENTc8J8FRkw3vsJo3d4P9Uwmx4odYlTc/j13XOChYB+Qe6ZUVVEF4VS5Bmv/z9oAWRoCvJhwCyI+6h", + "s4Ju+5OeyYqnuLnNtC1BzZASU2VOdwtysiIF3X51OHfgKELznJTAM8bXRG/5oJBm5t4PXiJFxbMJMow2", + "GxbcmqqElK0YZKQeZQQSN80+eBi/HjyNZBWA4wcZBKeeZQ84HLYRmjFH13whJV1DQDIL8pPjXPhVi3Pg", + "NYMjyx1+KiVcMFGputMAjDj1uHjNhYaklLBiERo7degw3MO2cey1cAJOKrimjENmOC8CLTRYTjQIUzDh", + "uDLTv6KXVMEXT4cu8ObrxN1fie6uj+74pN3GRok9kpF70Xx1BzYuNrX6T1D+wrkVWyf2595GsvWZuUpW", + "LMdr5p9m/zwaKoVMoIUIf/EotuZUVxKO3vOH5i+SkFNNeUZlZn4p7E8/VLlmp2xtfsrtT6/EmqWnbD2A", + "zBrWqDaF3Qr7jxkvzo71Nqo0vBLivCrDBaUtrXS5IycvhzbZjnldwjyuVdlQqzjbek3juj30tt7IASAH", + "cVdS0/AcdhIMtDRd4T/bFdITXcnfzT9lmZveulzFUGvo2N23aBtwNoPjssxZSg0S37rP5qthAmC1BNq0", + 
"OMAL9ehjAGIpRQlSMzsoLcskFynNE6WpxpH+XcJqdjT7t4PGuHJgu6uDYPJXptcpdjLyqJVxElqW1xjj", + "jZFr1AizMAwaPyGbsGwPJSLG7SYaUmKGBedwQbleNPpIix/UB/idm6nBtxVlLL47+tUgwoltuARlxVvb", + "8J4iAeoJopUgWlHaXOdiWf9w/7gsGwzi9+OytPhA0RAYSl2wZUqrB7h82pykcJ6TlwvyXTg2ytmC5ztz", + "OVhRw9wNK3druVusNhy5NTQj3lMEt1PIhdkajwYjw98FxaHOsBG5kXr20opp/HfXNiQz8/ukzn8NEgtx", + "O0xcqEU5zFkFBn8JNJf7HcrpE46z5SzIcbfvzcjGjBInmBvRyuh+2nFH8Fij8FLS0gLovti7lHHUwGwj", + "C+stuelERheFOTjDAa0hVDc+a3vPQxQSJIUODF/nIj3/O1WbOzjzSz9W//jhNGQDNANJNlRtFrOYlBEe", + "r2a0KUfMNETtnSyDqRb1Eu9qeXuWllFNg6U5eONiiUU99kOmBzKiu/yI/6E5MZ/N2Tas3w67IGfIwJQ9", + "zs6DkBlV3ioIdibTAE0MghRWeydG674WlC+ayeP7NGmPvrEGA7dDbhG4Q2J758fga7GNwfC12PaOgNiC", + "ugv6MOOgGKmhUBPge+kgE7j/Dn1USrrrIxnHnoJks0Ajuio8DTy88c0sjeX1eCnkzbhPh61w0tiTCTWj", + "Bsx33kESNq3KxJFixCZlG3QGalx440yjO3wMYy0snGr6B2BBmVHvAgvtge4aC6IoWQ53QPqbKNNfUgVP", + "HpPTvx8/e/T4l8fPvjAkWUqxlrQgy50GRe473YwovcvhQX9lqB1VuY6P/sVTb4VsjxsbR4lKplDQsj+U", + "tW5aEcg2I6ZdH2ttNOOqawCnHM4zMJzcop1Yw70B7SVTRsIqlneyGUMIy5pZMuIgyWAvMV13ec00u3CJ", + "cieru1BlQUohI/Y1PGJapCJPLkAqJiKukjeuBXEtvHhbdn+30JJLqoiZG02/FUeBIkJZesun83079NmW", + "N7gZ5fx2vZHVuXmn7Esb+d6SqEgJMtFbTjJYVuuWJrSSoiCUZNgR7+jvQJ/ueIpWtbsg0mE1rWAcTfxq", + "x9NAZzMblUO2bm3C7XWzLla8fc5OdU9FwDHoeIWfUa1/Cbmmdy6/dCeIwf7Cb6QFlmSmIWrBr9h6owMB", + "840UYnX3MMZmiQGKH6x4nps+fSH9tcjALLZSd3AZN4M1tG72NKRwuhSVJpRwkQFaVCoVv6YH3PLoD0Q3", + "pg5vfr2xEvcSDCGltDKrrUqCTroe52g6JjS11JsgatSAF6N2P9lWdjrr8s0l0Mxo9cCJWDpXgXNi4CIp", + "ehi1v+ickBA5Sy24SilSUAqyxJko9oLm21kmokfwhIAjwPUsRAmyovLWwJ5f7IXzHHYJ+sMVuf/9z+rB", + "Z4BXC03zPYjFNjH01gqf8wf1oZ42/RjBdScPyY5KIJ7nGu3SMIgcNAyh8Fo4Gdy/LkS9Xbw9Wi5Aomfm", + "D6V4P8ntCKgG9Q+m99tCW5UDUV5O0TljBdrtOOVCQSp4pqKD5VTpZB9bNo1a2phZQcAJY5wYBx4QSl5R", + "pa03kfEMjSD2OsF5rIBiphgGeFAgNSP/7GXR/tipuQe5qlQtmKqqLIXUkMXWwGE7Mtdr2NZziVUwdi39", + "akEqBftGHsJSML5Dll2JRRDVtdHdudv7i0PTtLnnd1FUtoBoEDEGyKlvFWA3jHQZAISpBtGWcJjqUE4d", + "XjOfKS3K0nALnVS87jeEplPb+lj/1LTtExfVzb2dCTCzaw+Tg/zSYtbGOG2oUaFxZFLQcyN7oEJs3Z59", + "mM1hTBTjKSRjlG+O5alpFR6BvYe0KteSZpBkkNNdf9Cf7GdiP48NgDveKD5CQ2LjWeKb3lCyDx8YGVrg", + "eComPBL8QlJzBI3m0RCI671n5Axw7BhzcnR0rx4K54pukR8Pl223OjIi3oYXQpsdt+SAEDuGPgXeATTU", + "I98cE9g5adSy7hT/BcpNUIsR159kB2poCc3411rAgDHNhQEHx6XD3TsMOMo1B7nYHjYydGIHLHtvqNQs", + "ZSWqOt/D7s41v+4EUX8TyUBTlkNGgg9WCyzD/sQGYnTHvJkmOMkI0we/Z4WJLCdnCiWeNvDnsEOV+42N", + "8DsL4gLvQJWNjGquJ8oJAurjhowEHjaBLU11vjNymt7AjlyCBKKqZcG0tiGbbU1XizIJB4gauEdmdN4c", + "Gx3nd2CKe+kUhwqW19+K+cyqBOPwnXX0ghY6nCpQCpFPMB71kBGFYJLjn5TC7DpzEcI+jNRTUgtIx7TR", + "lVff/vdUC824AvJfoiIp5ahxVRpqkUZIlBNQfjQzGAmsntO5+BsMQQ4FWEUSvzx82F34w4duz5kiK7j0", + "YfWmYRcdDx+iGeeNULp1uO7AVGiO20nk+kDLP957Lnihw1P2u5jdyFN28k1n8NpdYM6UUo5wzfJvzQA6", + "J3M7Ze0hjUxzr+O4k4z6wdCxdeO+n7Kiyu9qw1eU5ZWEYe/Y+/fvVsX79x/It7ald2zPPZGH6Lhs0iJW", + "7jaqJIbWkJwZ/VYKmhkBIWrbx0XydVIHZ6ooOIUy4PzDnUPKd51EvqkwkCWktLJRyY5rOwia8FC1iMiL", + "nd3tojC6kInm8SrX9tIOsbqWoiqJqrfdUoGmGv4YU3MzdAzK/sRBbFDzcSg8yKiJ+e4Obms7EJFQSlDI", + "W0PzirJfxSrMv3HMV+2UhqJvgbZdfxnQz94O6jmC54xDUggOu2jKKePwA36M9bb8faAz3rRDfbvCcwv+", + "DljteaZQ423xi7sdMLQ3dVzcHWx+d9yO8yHMPELjGuQloSTNGZreBFdaVql+zykq98Fhi8QPeDVm2Nzz", + "wjeJ25ci5h831HtOMXakVvmjfHEFEb78LYC3+qhqvQalO1LiCuA9d60YJxVnGucqzH4ldsNKkOjEX9iW", + "Bd2RFc3ROvU7SEGWlW4zV0yQUJrlufOEmGmIWL3nVJMcDFf9gfGzLQ7nPYmeZjjoSyHPaywsoudhDRwU", + "U0k8zuE7+xVD0NzyNy4cDbNV7WdrOzfjN1kUO9T9mwzM/3v/P47eHSf/TZPfD5Pn/+vgw8enVw8e9n58", + "fPXVV/+v/dOTq68e/Me/x3bKwx4L33eQn7x0OsXJSxQcG+N5D/ZPZjgtGE+iRBa6iDu0Re4b8dcT0IO2", + "WUFv4D3XW24I6YLmLKP6ZuTQZXG9s2hPR4dqWhvRMSP4tV5THLsFlyERJtNhjTe+xvuhQfFEGfTmuNwX", + 
"PC+ritutrJTzKGEcuA/REKt5nQxliyAcEcyU2VAfX+T+fPzsi9m8yXCpv8/mM/f1Q4SSWbaN5TFlsI1J", + "2e6A4MG4p0hJdwp0nHsg7NFoFOsUD4ctwKhnasPKT88plGbLOIfz0bVOW9/yE27DXs35Qd/QzpmcxerT", + "w60lQAal3sSSo1uSArZqdhOg468vpbgAPidsAYuutpytQfm4mBzoCpN00b8hpmQL1OfAEpqnigDr4UIm", + "qaQx+kHh1nHrq/nMXf7qzuVxN3AMru6ctSPI/60FuffdN2fkwDFMdc+m1NmhgySoiBXKxfm3IjkMN7Ml", + "IWxO4Xv+nr+EFePMfD96zzOq6cGSKpaqg0qB/JrmlKewWAty5FMHXlJN3/OepDVYtSVI2iBltcxZSs5D", + "ibghT5uJH1Ubab4WRnHsOrX78qubKspf7ATJJdMbUenEpRonEi6pjDkNVJ1qiiPbQgFjs86JG9uyYpfK", + "7MaP8zxalqqbctZfflnmZvkBGSqXUGW2jCgtpJdFjIBiocH9fS3cxSDppc9TrxQo8mtBy3eM6w8keV8d", + "Hj4B0srB+tVd+YYmdyW07JU3Sonr2ipx4Vavga2WNCnpesBooIGWuPsoLxeoZOc5wW6t3C8f24pDNQvw", + "+BjeAAvHtfNYcHGntpevGRNfAn7CLcQ2RtxoPKY33a8gG+zG29XJKOvtUqU3iTnb0VUpQ+J+Z+pSEmsj", + "ZHk3tmJrDBV0VTeWQNINpOeQYQEAKEq9m7e6+0gJJ2h61sGULZRhczkwmxtNu0sgVZlRJ4p3DEoGwwq0", + "9rGKb+EcdmeiSQa/Th5tO61TDR1UpNRAujTEGh5bN0Z38104Dtq6ytJnR2KajCeLo5oufJ/hg2xF3js4", + "xDGiaKUdDiGCyggiLPEPoOAGCzXj3Yr0Y8szWsbS3nyRuhqe9xPXpFGeXORMuBrMprTfC8CqO+JSkSU1", + "crtwBWNs6mLAxSpF1zAgIYfW9YkJgi2LPA6y796L3nRi1b3QevdNFGTbODFrjlIKmC+GVFCZ6cRL+Zms", + "A8caUAnWgXMIW+YoJtWBZZbpUNnyctjCVkOgxQkYJG8EDg9GGyOhZLOhyteywZI//ixPkgH+wFTcsQIM", + "J0GoT1DXpzZ8e57bPac97dKVYfC1F3zBhVC1nFA8wUj4GF0c2w7BUQDKIIe1Xbht7AmlSQtuNsjA8eNq", + "lTMOJIlFDVGlRMpsMaLmmnFzgJGPHxJiTcBk8ggxMg7ARsckDkxei/Bs8vV1gOQurZn6sdGlGfwN8QwM", + "G0drRB5RGhbO+EDEtucA1IWa1fdXJ+ARhyGMz4lhcxc0N2zOaXzNIL06ACi2drL+nWv8wZA4O2KBtxfL", + "tdZkr6KbrCaUmTzQcYFuBOKl2CY2BSsq8S63S0Pv0dBiTAiLHUxbceGeIkuxxXALvFpsKOseWIbh8GAE", + "Gv6WKaRX7Dd0m1tgxqYdl6ZiVKiQZJw5ryaXIXFiytQDEswQudwPiijcCICOsaMpN+qU371Kals86V/m", + "za02b4oD+ayN2PEfOkLRXRrAX98KU5c9eNOVWKJ2inbUQLviQyBCxojesIm+k6bvClKQAyoFSUuISs5j", + "rjuj2wDeOKe+W2C8wLoSlO8eBKEoEtZMaWiM6OZi9l6hT22epFjOSojV8Op0KVdmfW+FqK8pWy8FO7aW", + "+clXgKGcKyaVTtADEV2CafStQqX6W9M0Liu1g11sZUeWxXkDTnsOuyRjeRWnVzfv9y/NtK9rlqiqJfJb", + "xgnQdEOWWIk0GgI3MrWNkhxd8Cu74Ff0ztY77TSYpmZiacilPcdf5Fx0OO8YO4gQYIw4+rs2iNIRBhlk", + "Lva5YyA32cOJmYuLMetr7zBlfuy9YSM+f3LojrIjRdcSGAxGV8HQTWTEEqaDQp79lMKBM0DLkmXbji3U", + "jjqoMdNrGTx8haQOFnB33WB7MBDYPWNZDRJUuxhWI+DbkqytWhSLSZg5a5esChlCOBVTvqB4H1F11tM+", + "XJ0Bzb+H3c+mLS5ndjWf3c50GsO1G3EPrt/U2xvFM7rmrSmt5Qm5JsppWUpxQfPEGZiHSFOKC0ea2Nzb", + "oz8xq4ubMc++OX71xoF/NZ+lOVCZ1KLC4KqwXfmXWZWtuzVwQHzBYqPzeZndipLB5tfFgkKj9OUGXHHY", + "QBrtVbFrHA7BUXRG6lU8Qmivydn5RuwSR3wkUNYuksZ8Zz0kba8IvaAs93YzD+1ANA8ublopxChXCAe4", + "tXclcJIld8pueqc7fjoa6trDk8K5RsrXFrZCsyKCd13oRoREcxySakGxBp21ivSZE68KtCQkKmdp3MbK", + "lxh2y63vzDQm2HhAGDUjVmzAFcsrFoxlmqkJim4HyGCOKDJ9PcMh3C2Fe1qj4uy3CgjLgGvzSeKp7BxU", + "LPrnrO3969TIDv253MDWQt8MfxsZI6y/2L3xEIhxASP01PXAfVmrzH6htUXK/BC4JK7h8A9n7F2JI856", + "Rx+Omm3w4qbtcQtfwujzP0MYtmry/mc4vPLqCkEOzBF9VoOpZCXF7xDX81A9jmSM+IqTDKNcfgc+Icy8", + "se40r4M0sw9u95B0E1qh2kEKA1SPOx+45bD0nbdQU2632la5b8W6xQkmjCo9sOM3BONg7kXi5vRySWN1", + "AY2QYWA6bhzALVu6FsR39rh3Zn/mioAuSOBLrtsymwxcgmySufqFRW4oMNhpJ4sKjWSAVBvKBHPr/8uV", + "iAxT8UvK7WMJpp89Sq63Amv8Mr0uhcRUfhU3+2eQsoLmcckhS/sm3oytmX0qoFIQ1KJ3A9k3ViwVuXr+", + "1sXeoOZkRQ7nwWsXbjcydsEUW+aALR7ZFkuqkJPXhqi6i1kecL1R2PzxhOabimcSMr1RFrFKkFqoQ/Wm", + "dl4tQV8CcHKI7R49J/fRbafYBTwwWHT38+zo0XM0uto/DmMXgHsTZIybZKsw8SVOx+i3tGMYxu1GXUSz", + "nu1DTsOMa+Q02a5TzhK2dLxu/1kqKKdriEeKFHtgsn1xN9GQ1sELz+wrJEpLsSNsIAUJNDX8aSD63LA/", + "CwZJRVEwXTjnjhKFoaem0Lyd1A9nnzRxNUI9XP4j+khL7yLqKJGf1mhq77fYqtGT/ZoW0EbrnFBbvyFn", + "TfSCr1xMTnx5GCyaWtdKtbgxc5mlo5iDwQwrUkrGNSoWlV4lX5J0QyVNDftbDIGbLL94GikU2y5YyK8H", + "+CfHuwQF8iKOejlA9l6GcH3JfS54UhiOkj1osj2CUznozI277YZ8h+NDTxXKzCjJILlVLXKjAae+FeHx", + 
"kQFvSYr1eq5Fj9de2SenzErGyYNWZod+evvKSRmFkLGab81xdxKHBC0ZXGDsXnyTzJi33AuZT9qF20D/", + "eT0PXuQMxDJ/lmOKwNciop364sW1Jd3FqkesA0PH1HwwZLB0Q81Ju1Dsp3f6eeNz3/lkvnhY8Y8usJ95", + "SxHJfgUDmxgUsY5uZ1Z/D/zflHwttlM3tXNC/Mb+CVATRUnF8uznJiuzUyNcUp5uov6spen4S/OaUb04", + "ez9FS6ttKOeQR4ezsuAvXmaMSLX/FFPnKRif2LZbttwut7O4BvA2mB4oP6FBL9O5mSDEajvhrQ6oztci", + "IzhPU8er4Z79cvdBUeLfKlA6ljyEH2xQF9otjb5ra+IS4BlqiwvynX2NdAOkVaUFtTSbHw+Zr9BqDepV", + "mQuazYkZ5+yb41fEzmr72Dc5bE3eNSop7VV07FVBicJp4cH+eY146sL0ccZjqc2qlcaiSUrToowlh5oW", + "Z74BZqCGNnxUX0LsLMhLqzkqr5fYSQw9rJgsjMZVj2ZlF6QJ8x+tabpBlazFUodJfnoxaU+VKnjArX6I", + "pa7bh+fOwO3qSdty0nMijN58yZR9hBIuoJ2PWidnO5OAz09tL09WnFtKicoeY8UDboJ2D5wN1PBm/ihk", + "HcRfUyC3tdivW1v7FHtF6wh1C3X3Xm6z2Y31Axv+ceGUcsFZilV8Yleze9Byig9sQsGjrpHVH3F3QiOH", + "K1oevA6Tc1gcLBjuGaFDXN8IH3w1m2qpw/6p8eXEDdVkDVo5zgbZ3Fe5d3ZAxhW4Ooz4tmnAJ4Vs+RWR", + "Q0Zd1Unt0rgmGWFazIBi96359tqp/Rgvfs44CvgObS403Vrq8L09bbQCpslagHLraecGq3emzwLTZDPY", + "flj49/lwDOuWM8u2Puj+UMfeI+08wKbtC9PWljJpfm5FINtJj8vSTTr8BkJUHtBbPojgiGcx8a6dALn1", + "+OFoI+Q2GkqC96khNLhARzSUeA/3CKN+D6Dz1owRWi1FYQtiQ7iiFQwYj4DxinFoXo+MXBBp9ErAjcHz", + "OtBPpZJqKwJO4mlnQHP0PscYmtLO9XDboTobjCjBNfo5hrexecpggHHUDRrBjfJd/Wiloe5AmHiBr+U6", + "RPYfJkCpyglRGWYUdJ4qiDEOw7h9KaT2BdA/Bn2ZyHbXktqTc52baChJdFlla9AJzbJYXcyv8SvBr75Q", + "FGwhrer6iWVJUqyJ0i4S06c2N1EquKqKkbl8g1tOF7z9EaGG8P0Rv8OYhLLc4b+x4oHDO+OCMK4dBugj", + "LtxjCdeUm9sj9aReQ9OJYutkOibwTrk9Opqpb0boTf87pfRcrNuAfOLSEGNcLtyjGH/7xlwcYeWEXkVM", + "e7XUhQ0w6E74F9tQbaxTcttcCa+yXolMdPbUNe/GDRDDbzvN8fIbCL0NCmJQe79a7+FQAG46GC9Otctc", + "05SMsqDBbCAbvWPzfhCKuOV0KGLHBuyYz73e0yTDnpyNY48i1IeC9QH63seZkpIy5xpvmEUfsy4ifdhc", + "OHbomg3uLsLFeQ9a7L6/GIrJJorxdQ4Ev3dfwzkHl85eP4du1+qjkrxKaH91r5Ha8eqo+Oj6+9EJONXn", + "NYMOGm3PXOV1u0ynk3//s41hI8C13P0JTLi9Te+9JdSXdq15qmlC6qq9k6r4tm7F+LNAw/WPmppHSE+l", + "UKypFB17L2hirNsZPvkT1G/qj+UDTS4g1VgevHGgS4DrVHMykwVv0f2rDtKA7liHBLryR2M1j/o1wfdc", + "aL20pCC1ztZTXkyv8HNch0khU8LX4NbA3XNw7YSDyWHPqxWkml3sSQP7xwZ4kGI090YI+6xrkBXG6jBa", + "rCJyfRNbA9BYltYoPEE1v1uDM5QEcg67e4q0qCFa4Hnu75WbFJBADCB3SAyJCBULQ7BWU+cZZqqmDMSC", + "D/ux3aEpxTX4NEyQ1HjDuTxJmhu3SXQcmTL+NsWkuUzXa6X/YkToUKZYv7b9sLD9Ep8SUPWzbb4ARaiS", + "kpNI9WdXwAKT9mpHgS9lAcr/5jN07Sw5O4fw8Rp0y1xSmfkWUTuDN2EkI/dRL70rWrKaKhtE6fzgdZBm", + "P6EnUvgJQ3HTXGC556F45nZcZPjGO0Z/4HWA5acRrhVI98gXCnu5UJBo4YM6x+AYQ4V7j/wmSFCDxRYt", + "cIMlUN42NV6w6CzFkifURbaECyQSCmqgk0ElluE5x5D9wn73GSy+6Ohec0pNr8neUio+PJepHhJDql8R", + "d1vuz4y5iWWFcW6fFFWxsizcoDI0/ZdSZFVqL+jwYNTWp8lFj0ZYSdQokfZX2dMvcywB9irIMzyH3YEV", + "/dMN5U0ttvaxtiKUXUOQ19/Z7Ts1OsX163xtF7C+Ezg/p+FmPiuFyJMBW/9Jv7pM9wycs/QcMmLuDh/Y", + "NvC6BrmPJubamXu52flqKmUJHLIHC0KOuQ0l9n7ddnnjzuT8nh6bf4uzZpUt+ORsSov3PB6TiaWY5C35", + "mx9mnKspMMzvllPZQfbULtkOVLaR9DLy1sxiqlLa97R23/9oiMpCEZNSbpjIPul89+1KEdIPnj4Y137C", + "OhdNAJ205kmUlprnINrCyw+N1XHaIwy+wx7wQqU4eIbBcyMHzmeOcvuhRkqwlEFKaC1/n57tFtjwpWCL", + "FKZFmGXaqkM2QqK9L4ERRb2obRNxPPdNGFjUQnAs9NM3fSg0V2O94JBwzLmUFzT/9OYLrHZyjPhwTyLG", + "FxrqvyGSLSrVzUJNXtFJcwe67t1Nzd+gueUfYPYo6mdwQzm7Y/38hbfOYl07mpNcNI8h4ZDkEse0jolH", + "X5ClC5MvJaRMsU4G0aUvZVqre1jZu3kpc1y/3LfOn4W+BRk7BUGU5HVTFlELvB8aCJsj+pmZysDJjVJ5", + "jPp6ZBHBX4xHhfnqe66L85bHwpaZ7YTiCAl37LkIYhCu6bnoZ+JPXZ61zptLp1LQX+fk27qF28hF3axt", + "qtutj9yx2nlTvGXxkpimO7rrLEKwnixBUMmvj34lElb4YIQgDx/iBA8fzl3TXx+3P5vj/PBh/EXOT+Wo", + "szhyY7h5YxTz81Dopg1PHIgS7uxHxfJsH2G0Yr6bJ1cwqvkXl/XxWR59+cXaU/tH1RXev06IQHcTEDGR", + "tbYmD6YKorknBHK7bpGwbdRM0koyvcNiFN78xn6JuhS/qy32zuNTpy+7u0+Lc6jLmTT2/Ur52/U7QXO8", + "j4xMjQEaGl9h/GZLizIHd1C+urf8Gzz58ml2+OTR35ZfHj47TOHps+eHh/T5U/ro+ZNH8PjLZ08P4dHq", + 
"i+fLx9njp4+XTx8//eLZ8/TJ00fLp188/9s9w4cMyBbQmU99nP0ffBkpOX5zkpwZYBuc0JLVj68aMvbP", + "O9AUTyIUlOWzI//T//YnbJGKohne/zpzmVWzjdalOjo4uLy8XIRdDtZo0Eu0qNLNgZ+n/+jlm5M6Ot66", + "gnFHbeCzIQXcVEcKx/jt7TenZ+T4zcmiIZjZ0exwcbh4hI+ZlcBpyWZHsyf4E56eDe77gSO22dHHq/ns", + "YAM0R/+X+aMALVnqP6lLul6DXLh3LsxPF48PvChx8NEZM6/Gvh2EJWMPPrZsvtmenlhS8uCjr5Qw3rpV", + "isDZuoMOE6EYa3awxASsqU1BBY2Hl4IKhjr4iCLy4O8HLisl/hFVFXsGDrxjJN6yhaWPemtg7fRwrzcf", + "fMT/IE0GYNkYsADc2TpWoOU70D6Bw/ZwEQ51aENN2yeZbd7zuLsaKLYo3NG7aTWxwU9nNMUMFHOFcpBL", + "mCPQHGIf7N2waC0rCAuYjaX6X80jz8et2LqSnQfz61An94QDU+Q/T398TYQkTid+Q9PzMPYBYf2tArlr", + "gHV3YQidr6jtIukLtS7bYae1nP0BU5MRCuQAjw8P7+xVm962XdkQ0Ho4D9dtRuwxzBf+GmzRmGF8Tw8f", + "3dna2tFst15Yd7jeqk44OpMNWyf22sIFPf3LLugFqsdcaLJiPLMlyTXFA2tPH67vy7/s+jQrvIma4+sO", + "oPB2f3aHB+zTE6ERsmlOsKVdzZO/7GpOQV6wFMgZFKWQVLJ8R37idfpWULKlf4f9xM+5uOQeEUaKroqC", + "yp273yjpsirP4+1dFzywZcQ3ulZoQcciubO5DWj9cOXuVcvIDuyDrM1163/ecZc8kUMsFOEnrsC9Yu6y", + "Jnc8HbpssfHpjqdv6xuwdzngwfzjiLi/TzW8yC7QV/0Hc/Np7PfZp8TCpz17n+ywvIVCXICq3yBtiNNI", + "R0bjsM+RSlEENLwYOTTzQanTeRH6M3kPSjN4TwTdcyZu+p7nSCTCJDj3hA7Z4Se+TG5F0U6osp3qXmyD", + "Zv9iBP9iBHfICHQl+eARDe4vDKeD0kYKkJSmG1hMv0R3PA011FLEahWcjjALl6E9xCtO27ziT62nfvhT", + "3O8vKPfnubXjNn6DypyBrKmA8n7S/L+4wP8YLmCrfzhb0JxoyHMVnn0t8OzbEAAXJc1taMZEPtB9HC/2", + "88HH9uMMLaOc2lQ6E5dBX3Tk2iiEvq2ufq6s9ffBJWU6WQnpIqSxeGa/swaaH7jc/86vTbpd7wvmEAY/", + "Bna9+K8HdW3i6MeuwTT21RkMBxr5yi3+c+MwCR0QyCFr18O7D4Y/YeU7xzwbe/rRwQFGHW6E0gezq/nH", + "jq09/PihJglfEmlWSnaBGZYfrv5/AAAA//9vX8MwD78AAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index 105fee6226..40f6cae52b 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -75,177 +75,164 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9/XPcNrLgv4Ka96oc+4aSv5Jdq2rrnWInWV1sr8tSsvee5ctiyJ4ZrDgAFwClmfj8", - "v1+hAZAgCXA4kuLsXr2fbA3x0Wg0Go3+/DTLxaYSHLhWs5NPs4pKugENEv+ieS5qrjNWmL8KULlklWaC", - "z078N6K0ZHw1m8+Y+bWiej2bzzjdQNvG9J/PJPyjZhKK2YmWNcxnKl/DhpqB9a4yrZuRttlKZG6IUzvE", - "2avZ55EPtCgkKDWE8i+83BHG87IugGhJuaK5+aTIDdNrotdMEdeZME4EByKWRK87jcmSQVmoI7/If9Qg", - "d8Eq3eTpJX1uQcykKGEI50uxWTAOHipogGo2hGhBClhiozXVxMxgYPUNtSAKqMzXZCnkHlAtECG8wOvN", - "7OTDTAEvQOJu5cCu8b9LCfArZJrKFejZx3lscUsNMtNsE1namcO+BFWXWhFsi2tcsWvgxPQ6Im9qpckC", - "COXk/fcvybNnz16YhWyo1lA4Ikuuqp09XJPtPjuZFVSD/zykNVquhKS8yJr2779/ifOfuwVObUWVgvhh", - "OTVfyNmr1AJ8xwgJMa5hhfvQoX7TI3Io2p8XsBQSJu6JbXyvmxLO/7vuSk51vq4E4zqyLwS/Evs5ysOC", - "7mM8rAGg074ymJJm0A+PsxcfPz2ZP3n8+d8+nGb/5f78+tnnict/2Yy7BwPRhnktJfB8l60kUDwta8qH", - "+Hjv6EGtRV0WZE2vcfPpBlm960tMX8s6r2lZGzphuRSn5UooQh0ZFbCkdamJn5jUvDRsyozmqJ0wRSop", - "rlkBxdxw35s1y9ckp8oOge3IDStLQ4O1giJFa/HVjRymzyFKDFy3wgcu6J8XGe269mACtsgNsrwUCjIt", - "9lxP/sahvCDhhdLeVeqwy4pcrIHg5OaDvWwRd9zQdFnuiMZ9LQhVhBJ/Nc0JW5KdqMkNbk7JrrC/W43B", - "2oYYpOHmdO5Rc3hT6BsgI4K8hRAlUI7I8+duiDK+ZKtagiI3a9Brd+dJUJXgCohY/B1ybbb9f53/5S0R", - "krwBpegK3tH8igDPRZHeYzdp7Ab/uxJmwzdqVdH8Kn5dl2zDIiC/oVu2qTeE15sFSLNf/n7QgkjQteQp", - "gOyIe+hsQ7fDSS9kzXPc3HbajqBmSImpqqS7I3K2JBu6/dPjuQNHEVqWpAJeML4iesuTQpqZez94mRQ1", - "LybIMNpsWHBrqgpytmRQkGaUEUjcNPvgYfwweFrJKgDHD5IEp5llDzgcthGaMUfXfCEVXUFAMkfkJ8e5", - "8KsWV8AbBkcWO/xUSbhmolZNpwSMOPW4eM2FhqySsGQRGjt36DDcw7Zx7HXjBJxccE0Zh8JwXgRaaLCc", - 
"KAlTMOH4Y2Z4RS+ogm+epy7w9uvE3V+K/q6P7vik3cZGmT2SkXvRfHUHNi42dfpPePyFcyu2yuzPg41k", - "qwtzlSxZidfM383+eTTUCplABxH+4lFsxamuJZxc8kfmL5KRc015QWVhftnYn97UpWbnbGV+Ku1Pr8WK", - "5edslUBmA2v0NYXdNvYfM16cHett9NHwWoirugoXlHdepYsdOXuV2mQ75qGEedo8ZcNXxcXWvzQO7aG3", - "zUYmgEzirqKm4RXsJBhoab7Ef7ZLpCe6lL+af6qqNL11tYyh1tCxu29RN+B0BqdVVbKcGiS+d5/NV8ME", - "wL4SaNviGC/Uk08BiJUUFUjN7KC0qrJS5LTMlKYaR/p3CcvZyezfjlvlyrHtro6DyV+bXufYycijVsbJ", - "aFUdMMY7I9eoEWZhGDR+QjZh2R5KRIzbTTSkxAwLLuGacn3Uvkc6/KA5wB/cTC2+rShj8d17XyURTmzD", - "BSgr3tqGDxQJUE8QrQTRitLmqhSL5oevTquqxSB+P60qiw8UDYGh1AVbprR6iMun7UkK5zl7dUR+CMdG", - "OVvwcmcuBytqmLth6W4td4s1iiO3hnbEB4rgdgp5ZLbGo8HI8PdBcfhmWIvSSD17acU0/rNrG5KZ+X1S", - "538NEgtxmyYufEU5zNkHDP4SvFy+6lHOkHCcLueInPb73o5szChxgrkVrYzupx13BI8NCm8krSyA7ou9", - "SxnHF5htZGG9IzedyOiiMAdnOKA1hOrWZ23veYhCgqTQg+HbUuRXf6ZqfQ9nfuHHGh4/nIasgRYgyZqq", - "9dEsJmWEx6sdbcoRMw3x9U4WwVRHzRLva3l7llZQTYOlOXjjYolFPfZDpgcy8nb5C/6HlsR8NmfbsH47", - "7BG5QAam7HF2FoTCPOXtA8HOZBqgikGQjX29E/PqPgjKl+3k8X2atEffWYWB2yG3CNwhsb33Y/Ct2MZg", - "+FZsB0dAbEHdB32YcVCM1LBRE+B75SATuP8OfVRKuhsiGceegmSzQCO6KjwNPLzxzSyt5vV0IeTtuE+P", - "rXDS6pMJNaMGzHfeQxI2ravMkWJEJ2Ub9AZqTXjjTKM/fAxjHSyca/obYEGZUe8DC92B7hsLYlOxEu6B", - "9NdRpr+gCp49Jed/Pv36ydNfnn79jSHJSoqVpBuy2GlQ5Cv3NiNK70p4OFwZvo7qUsdH/+a510J2x42N", - "o0Qtc9jQajiU1W5aEcg2I6bdEGtdNOOqGwCnHM4LMJzcop1Yxb0B7RVTRsLaLO5lM1IIK9pZCuIgKWAv", - "MR26vHaaXbhEuZP1fTxlQUohI/o1PGJa5KLMrkEqJiKmkneuBXEtvHhb9X+30JIbqoiZG1W/NUeBIkJZ", - "esun83079MWWt7gZ5fx2vZHVuXmn7EsX+V6TqEgFMtNbTgpY1KvOS2gpxYZQUmBHvKN/AH2+4zlq1e6D", - "SNPPtA3jqOJXO54HbzazUSUUq84m3P1t1seK18/ZqR6oCDgGHa/xMz7rX0Gp6b3LL/0JYrC/9BtpgSWF", - "aYiv4NdstdaBgPlOCrG8fxhjs8QAxQ9WPC9Nn6GQ/lYUYBZbq3u4jNvBWlo3expSOF2IWhNKuCgANSq1", - "il/TCbM82gPRjKnDm1+vrcS9AENIOa3NauuKoJFuwDnajhnNLfVmiBqVsGI05ifbyk5nTb6lBFqYVz1w", - "IhbOVOCMGLhIihZG7S86JyREzlIHrkqKHJSCInMqir2g+XaWiegRPCHgCHAzC1GCLKm8M7BX13vhvIJd", - "hvZwRb768Wf18HeAVwtNyz2IxTYx9DYPPmcPGkI9bfoxgutPHpIdlUA8zzWvS8MgStCQQuFBOEnuXx+i", - "wS7eHS3XINEy85tSvJ/kbgTUgPob0/tdoa2rhJeXe+hcsA3q7TjlQkEueKGig5VU6WwfWzaNOq8xs4KA", - "E8Y4MQ6cEEpeU6WtNZHxApUg9jrBeayAYqZIA5wUSM3IP3tZdDh2bu5BrmrVCKaqriohNRSxNXDYjsz1", - "FrbNXGIZjN1Iv1qQWsG+kVNYCsZ3yLIrsQiiulG6O3P7cHGomjb3/C6Kyg4QLSLGADn3rQLshp4uCUCY", - "ahFtCYepHuU07jXzmdKiqgy30FnNm34pNJ3b1qf6p7btkLiobu/tQoCZXXuYHOQ3FrPWx2lNzRMaRyYb", - "emVkD3wQW7PnEGZzGDPFeA7ZGOWbY3luWoVHYO8hrauVpAVkBZR0Nxz0J/uZ2M9jA+COtw8foSGz/izx", - "TW8p2bsPjAwtcDwVEx4JfiG5OYLm5dESiOu9Z+QCcOwYc3J09KAZCueKbpEfD5dttzoyIt6G10KbHbfk", - "gBA7hj4F3gQampFvjwnsnLXPsv4U/wnKTdCIEYdPsgOVWkI7/kELSCjTnBtwcFx63L3HgKNcM8nF9rCR", - "1IlNaPbeUalZzip86vwIu3t/+fUniNqbSAGashIKEnywr8Aq7E+sI0Z/zNu9BCcpYYbgD7QwkeWUTKHE", - "0wX+Cnb45H5nPfwuAr/Ae3jKRkY11xPlBAH1fkNGAg+bwJbmutwZOU2vYUduQAJR9WLDtLYum92XrhZV", - "Fg4QVXCPzOisOdY7zu/AFPPSOQ4VLG+4FfOZfRKMw3fRexd00OGeApUQ5QTl0QAZUQgmGf5JJcyuM+ch", - "7N1IPSV1gHRMG015ze3/QHXQjCsg/ylqklOOL65aQyPSCIlyAsqPZgYjgTVzOhN/iyEoYQP2IYlfHj3q", - "L/zRI7fnTJEl3Hi3etOwj45Hj1CN804o3Tlc96AqNMftLHJ9oOYf7z3nvNDjKftNzG7kKTv5rjd4Yy4w", - "Z0opR7hm+XdmAL2TuZ2y9pBGppnXcdxJSv1g6Ni6cd/P2aYu72vDl5SVtYS0dezy8sNyc3n5kXxvW3rD", - "9twTeYiOmzYsYuluo1qiaw0pmXnfSkELIyBEdfu4SL7KGudMFQVnoww4f3XnkPJdL5BvKgxkATmtrVey", - "49oOgtY9VB1F5MXe7vZRGF3IRPV4XWp7aYdYXUlRV0Q1226pQFMNv42quR06BuVw4sA3qP2Ycg8yz8Ry", - "dw+3tR2ISKgkKOStoXpF2a9iGcbfOOardkrDZqiBtl1/SbzP3iffOYKXjEO2ERx20ZBTxuENfoz1tvw9", - "0Rlv2lTfvvDcgb8HVneeKdR4V/zibgcM7V3jF3cPm98ft2d8CCOPULkGZUUoyUuGqjfBlZZ1ri85xcd9", - "cNgi/gP+GZNW97z0TeL6pYj6xw11ySn6jjRP/ihfXEKEL38P4LU+ql6tQOmelLgEuOSuFeOk5kzjXBuz", - 
"X5ndsAokGvGPbMsN3ZElLVE79StIQRa17jJXDJBQmpWls4SYaYhYXnKqSQmGq75h/GKLw3lLoqcZDvpG", - "yKsGC0fR87ACDoqpLO7n8IP9ii5obvlr546G0ar2s9Wdm/HbKIodvv3bCMz/89V/nHw4zf6LZr8+zl78", - "j+OPn55/fvho8OPTz3/60//t/vTs858e/se/x3bKwx5z33eQn71yb4qzVyg4tsrzAexfTHG6YTyLEllo", - "Iu7RFvnKiL+egB521Qp6DZdcb7khpGtasoLq25FDn8UNzqI9HT2q6WxET43g13qgOHYHLkMiTKbHGm99", - "jQ9dg+KBMmjNcbEveF6WNbdbWStnUUI/cO+iIZbzJhjKJkE4IRgps6bev8j9+fTrb2bzNsKl+T6bz9zX", - "jxFKZsU2FsdUwDYmZbsDggfjgSIV3SnQce6BsEe9UaxRPBx2A+Z5ptas+vKcQmm2iHM4713rXutbfsat", - "26s5P2gb2jmVs1h+ebi1BCig0utYcHRHUsBW7W4C9Oz1lRTXwOeEHcFR/7VcrEB5v5gS6BKDdNG+IaZE", - "CzTnwBKap4oA6+FCJj1JY/SDwq3j1p/nM3f5q3uXx93AMbj6czaGIP+3FuTBD99dkGPHMNUDG1Jnhw6C", - "oCJaKOfn3/HkMNzMpoSwMYWX/JK/giXjzHw/ueQF1fR4QRXL1XGtQH5LS8pzOFoJcuJDB15RTS/5QNJK", - "Zm0JgjZIVS9KlpOrUCJuydNG4kefjbRcCfNw7Bu1h/KrmyrKX+wE2Q3Ta1HrzIUaZxJuqIwZDVQTaooj", - "20QBY7POiRvbsmIXyuzGj/M8WlWqH3I2XH5VlWb5ARkqF1BltowoLaSXRYyAYqHB/X0r3MUg6Y2PU68V", - "KPK3Da0+MK4/kuyyfvz4GZBODNbf3JVvaHJXQUdfeauQuL6uEhdu3zWw1ZJmFV0llAYaaIW7j/LyBh/Z", - "ZUmwWyf2y/u24lDtAjw+0htg4Tg4jgUXd257+Zwx8SXgJ9xCbGPEjdZietv9CqLBbr1dvYiywS7Vep2Z", - "sx1dlTIk7nemSSWxMkKWN2MrtkJXQZd1YwEkX0N+BQUmAIBNpXfzTnfvKeEETc86mLKJMmwsB0Zzo2p3", - "AaSuCupE8Z5CyWBYgdbeV/E9XMHuQrTB4IfE0XbDOlXqoCKlBtKlIdbw2Lox+pvv3HFQ11VVPjoSw2Q8", - "WZw0dOH7pA+yFXnv4RDHiKITdphCBJURRFjiT6DgFgs1492J9GPLM6+Mhb35Ink1PO8nrkn7eHKeM+Fq", - "MJrSft8AZt0RN4osqJHbhUsYY0MXAy5WK7qChIQcatcnBgh2NPI4yL57L3rTiWX/QhvcN1GQbePMrDlK", - "KWC+GFLBx0zPX8rPZA04VoFKMA+cQ9iiRDGpcSyzTIfKjpXDJrZKgRYnYJC8FTg8GF2MhJLNmiqfywZT", - "/vizPEkG+A1DcccSMJwFrj5BXp9G8e15bv+cDl6XLg2Dz73gEy6ET8sJyROMhI/exbHtEBwFoAJKWNmF", - "28aeUNqw4HaDDBx/WS5LxoFkMa8hqpTImU1G1F4zbg4w8vEjQqwKmEweIUbGAdhomMSByVsRnk2+OgRI", - "7sKaqR8bTZrB3xCPwLB+tEbkEZVh4YwnPLY9B6DO1ay5v3oOjzgMYXxODJu7pqVhc+7F1w4yyAOAYmsv", - "6t+Zxh+mxNkRDby9WA5ak72KbrOaUGbyQMcFuhGIF2Kb2RCsqMS72C4MvUddizEgLHYwbcaFB4osxBbd", - "LfBqsa6se2BJw+HBCF74W6aQXrFf6ja3wIxNOy5NxahQIck4dV5DLilxYsrUCQkmRS5fBUkUbgVAT9nR", - "pht1j9+9j9SueDK8zNtbbd4mB/JRG7HjnzpC0V1K4G+ohWnSHjgVwnvIhSzSegpDqEw3+VuH6gWXfdbw", - "jcmJEUZyyZ52Xxv+CTHcuYRXQAeedp4RRLyyMUcDSL7bVsJItzYmySaocEixcqIEG2qprM5KMb4qofHc", - "jKIptmDvk+QxbpfcJpzyA06TnWObm3jkj8FSVXE4DnmpvHf4GYEiccpbOFAOvyMkLknFKCyf0/Txri/a", - "Rw9K172mmxoleGvFbgdDPkNr5tBmqqAEfD1nnddGdhWzcV9eflCAotm57xZo+TABC+W7h4HPloQVUxpa", - "a5ORYD2mv7Qen2LeNyGW6dXpSi7N+t4L0chzNrEQduws84uvAH2el0wqnaGpLroE0+h7hdqn703T+KOi", - "6xVmU6CyIn6J4rRXsMsKVtZxenXz/vjKTPu2kR1UvUDBhHECNF+TBabsjfqKjkxt3YlHF/zaLvg1vbf1", - "TjsNpqmZWBpy6c7xL3IuejfdGDuIEGCMOIa7lkTpyAUahPgOuWPwwLCHE6/TozEzxeAwFX7svf5VPtA4", - "JczZkUbWgq5BSefciEOO9SOzTL3N1h8NxuVCZx3lRwRdjYJHaXplA8q6G8xXjU4l7jZl39WThnZt9wzI", - "p4/H9w/nhOCshGso9ztBU8S4V+CgZ4QdAV1vCIYTeB+P/VL9cAdahDUr7cMYpZaBdDNmuG2fRi5/Xvu2", - "RoI1uHOR75Otd0ZC8/TW0vfQdFdVWQElROPM/hoEktGqwmwRvnEsoMcMxngB2zg49tM8llN/qLyvGdc2", - "/+p9pXbsjTN92WECxCkoqGyqvsPTR6bfmMEuhWhOLypBlI1xYJQR4+DNyy6oRtKnvsQ1TquKFdue3dOO", - "mtSO3wvG8IJyg+3BQEAbsQhGCaqb+LJV5tn06528U0eTMHPRTU8ZyjThVEz54iFDRDURzvtwdQG0/BF2", - "P5u2uJzZ5/nsbmbSGK7diHtw/a7Z3iie0Q3Pms06Xg8HopxWlRTXtMycMTlFmlJcO9LE5t72/IWltTjX", - "u/ju9PU7B/7n+Swvgcqsee0kV4Xtqn+ZVdkcm4kD4osTrKlu9HP2NRxsfpMYMDRA36zBJYIPHtSDjLWt", - "c0FwFJ1Behn3Bt5rXnZ+EHaJI/4QUDXuEK2pznpDdD0g6DVlpbeReWgTnru4uGl3Y5QrhAPc2ZMivIvu", - "ld0MTnf8dLTUtYcnhXONpKrf2GoMigjed5czr2A0vSGpbijmm7UWkCFz4vUGrQaZKlket6fyBYbYcOsn", - "YxoTbJx4T5sRa5Zwu+I1C8YyzdQEpXYPyGCOKDJ97uIU7hbCldGqOftHDYQVwLX5JPFU9g4q6k+dZX14", - "ncalSjewtca3w99FxghzLfdvPCdzjQkYoVfOANxXjdbPL7SxPpkfAveDA5z7whkHV+KIY56jD0fNNlBh", - 
"3fWumSyh7y255fVvLulzYo5oCS2msqUUv0JcVYUavkh0qM8uzdCj9VfgE0LKWktOWwmsnT253SnpJrQ4", - "dR0SE1SPOx+44GCaW2+Nptxuta1o0/FrjxNMGEFybMdvCcbBPIi6KenNgsZyABshw8AUmF86dnMtiO/s", - "ce9sNMwl/D4igd9Y05bZxB8VyDZwe5hE7JYCg512sqjQSgZItaFMMLe+PqUSkWFqfkO5LYyE1gg8Sq63", - "eeB7hdCNkJi2R8VN/AXkbBNVLl1efijyoTm3YCtmywLVCoK6M24gW0/NUpGr3WPd6VrUnC3J43lQ2crt", - "RsGumWKLErDFE9tiQRVYpYr33PBdzPKA67XC5k8nNF/XvJBQ6LWyiFWCNEIdPm8aR5UF6BsATh5juycv", - "yFfooqPYNTw0WHT38+zkyQs0sNo/HscuAFf/a4ybFMswyDVOx+ijZMcwjNuNehTVBtiijWnGNXKabNcp", - "ZwlbOl63/yxtKKcriHuFbvbAZPvibqItoIcXXtiKY0pLsSMsEW4Mmhr+lIg0M+zPgkFysdkwvXGOHEps", - "DD21RWXspH44W77M5QP3cPmP6A9VeXeQ3iPyy9p97P0WWzV6rb2lG+iidU6ozdVUstZT0VcpIGc+FRwm", - "SG/yolvcmLnM0lHMQcfFJakk4xofFrVeZn8k+ZpKmhv2d5QCN1t88zySFL6bnJgfBvgXx7sEBfI6jnqZ", - "IHsvQ7i+5CsueLYxHKV42EZ2Bqcy6bgVd9FJ+QmNDz1VKDOjZElyqzvkRgNOfSfC4yMD3pEUm/UcRI8H", - "r+yLU2Yt4+RBa7NDP71/7aSMjZCx/K7tcXcShwQtGVyjn358k8yYd9wLWU7ahbtA//saT73IGYhl/iwn", - "HwKHWHyCtwHafELPxNtYe7qWno7MFTX74AtnmgXE1jzdZ/e4SzWkTudDoPIcehp0CSVCJwC2h7HDXsB3", - "VzEEJp/ODqVw1F1ajDK/FZEl+xIajY3HRUxG9FapC8R8MAxq4Yaak265gi/vUePNIkPPDvPFw4p/9IH9", - "nZkNItmvILGJQSmV6HYWzffAuYySb8V26qb2eLff2H8C1ERRUrOy+LnNDdKrVCMpz9dRZ5GF6fhLW1Oz", - "WZw9zNEEv2vKufVGGOom8JXyi3/NRN5bfxdT59kwPrFtv3iOXW5vcS3gXTA9UH5Cg16mSzNBiNVu2oUm", - "rK9ciYLgPG022fZeHxZdCkpj/KMGpWP3In6woQWoUV8aKrYVKoAXqMc4Ij/YmvhrIJ1cgag/sFmaoPB1", - "Aqypp65KQYs5MeNcfHf6mthZbR9bGc5WhljZa7ezirR/7iGOtmO+tfcR0WdWrTSm7lSabqpYihLT4sI3", - "wDwooXUJH9Yhdo7IK6vTUP7FbCcx9LBkcgMFaaZzUjXShPmP1jRfo7Kgw1LTJD+9pImnShWUEW7KATbZ", - "o/HcGbhdVRNb1GROhJEcbpiypdDhGrpZUZoUQU4M8FlSusuTNeeWUqJS8VgKq9ug3QNnvSC9ASoKWQ/x", - "B0ovzk39wAov59grms2yXy5mUD/Y5thoyry98RWgKRec5ZhLMnY1u7LqU6yzE9JuxiMDnL+NmkUOV7RI", - "TROs4bCYLFvjGaFD3NA8FHw1m2qpw/6psX73mmqyAq0cZ4Ni7mstOQ014wpcNnCssB/wSSE7Fm/kkFEn", - "ilZOPpCMMDg7oXL43nx76xRSGLV4xTg+PX2MhA2QtDpkrPqszXuVabISGEHhDkW4pg+mzxEmaylg+/HI", - "V4nGMazB2CzbekcMhzr1vhLON8G0fWna2oR67c+dODg76WlVuUnTlbii8oDe8iSCIzbvxtErQG4zfjja", - "CLmNOjnhfWoIDa7RRQIq4kJjElWpekEwRmi1FIUtiPWPjubRirqJvmYc2hrmkQsij14JuDF4XhP9VC6p", - "tiLgJJ52AbREv4gYQ1PaGcXuOlRvg50/aZXP/BzpbWwLaiUYR9OgFdwo3zWl0w11B8LES1o2TkKR8lgo", - "VTkhygXXdAtmxRiHYdw+IWf3Ahgeg6FMZLtrSe3JOeQmSqUqWdTFCnRGiyKmT/gWvxL86tOVwhbyusni", - "XVUkx8x83VSFQ2pzE+WCq3ozMpdvcMfpggp0EWoIq+D5HUbH68UO/42lsE7vjHMPOtjH3vsCFU343CFy", - "c3ekgdRraDpTbJVNxwTeKXdHRzv17Qi97X+vlF6KVReQL5ygbIzLhXsU42/fmYsjzN81yMtur5YmvRa6", - "gwpfNxifjU1imC5X8lGngzmDzMvjCoh0hdE5Xn6JuJZA10vt/Wrt2qnoljwZjEW1y5+gKRllQcmYdOtX", - "ZqPPEYq4Tj/lS2ZdycznQe9pkuFAzsaxRxHqnRSHAP3oPaBJRZlz2miZxRCzLtwrrS4cO3TtBvcX4YKo", - "khq7H69TAU8+DthGdvRqMl6BS6pUSbhmovbuEN5fzj8J7a+uJn4QV5xc/9BvBqf6fdWgSaXthav/Y5fp", - "3uQ//my9KwlwLXf/BCrcwaYPKlrGchZ36lk64Sqqb9JT78pXTVHMq+tsI4qxgOkffyavvG1p0r3jCTmW", - "bkkUropcNFj8tSsB4ZsZ6XPytG9cp9OqGp86ESE+nNw2PHT6VKopcz7HtG7v/Pm1dUBDFULkrRKEM3PY", - "6kTxp3407A0Q2FaAuW6DwOZ09oypBOWCHPG1mpVAFYxgOMza5tpORPLF9rVpPy3YPl6JNZ1ytk0zi8yz", - "Eoq1xXliJVonuhxfYJXVwGI4HMv7+11DrrEiU+vHJAEOSaBrJgvKf/936tmEoqTxzPb0P5Jmdj4LeUs0", - "UNEdL9qmyEGrGppcI6nqbZsIs3edmTkkNcz9EOaHJS1VvCpa0tm1l/kkcFiJJHqOL+ysmJDt2y1nHvhA", - "sGIckfFIAOv8/f8nMq1f+/2ic1Cza/xVMUi8ECQPsaWVjg5wIGm8qFEyxP1aAXeV4Zcx1OyPilouIdfs", - "ek+ii7+ugQdJFOZeE4ywLIO8F6yJssGEoofbOVqAxvJQjMITJPa/MzipGNEr2D1QpEMN0VpPcy/c3yaX", - "JGIAby0jeFRCxbwUrenKOY4x1VAGYsF7Bdvu0GblTlaJDeScW87lSbIr8YxMGS9TOWku0/WgTGAYMJLK", - "hTEsc5fWeLzCqoKqqeDuc1GGekFyFikE5XJZYlqSxlrrs1qC8r/5HER2lpJdQVjHFm3jmELBtYgqe70e", - "ORuRkwbR39HqVZg7y8/M2hiOYbxvJAc0ej/lpcDKT6lwp27YROPm9UBZ51AUU7ASFcK1BOnqfePNUAoF", - 
"mRbetW4MjjFUWA/YWyFBJesuWOCS2VDft+lesf6MTZZBneNruEAiYUMNdDJIypqecwzZL+13H+Dqc3Lt", - "1Wk39Jrtzarqo3eYGiAxpPolcbfl/sDZ26i3GecgM2/r7vsUcoPK0P5aSVHUuUsEExyMxgQwOWHZCCuJ", - "aobz4SoHSr4Ss4G/DtIQXMHu2Opf8jXlqyC9Wgi9Fe3tGoLMZb3dvlfNf1zJWa7sAlb3AufvqT2fzyoh", - "yixhcD0bJprtn4Erll8ZMbtu/d4ThTbJV2jnazxqbtY7n1i1qoBD8fCIkFNuI428c0230lFvcv5Aj82/", - "xVmL2uZ+dor9o0seD9nApD7yjvzNDzPO1RQY5nfHqewge9KYbhNJbiW9iZSdHfrTTXZ36ZcCbYnKQhGT", - "Um6ZqmvS+R4q9yOkH1RBHH/9hJn8Wi9maW1EKC21lSG7wsub1vQzrR6j77AHvFBZE1Rk9NzIgfM7uxq/", - "aZASLCVJCZ3l79P/uAW2fCnYIoVRk2aZNgGxdVPr7kug3FMvG51ZHM9D1Rqm7RMcc/4OVXIKbYY2DWtA", - "OOZcymtafnm1GuZzPEV8QPE+LfCE798QyRaV6nb+fq/ppLmDt+79Tc3foRrwr2D2KGrsdUM5409TCdOb", - "yDDFPS1JKdq6yDgkucExrXX4yTdk4aLoKgk5U6wXYHzjq5o0zz0s8uV8LLd6z/ty3zp/FvoOZOweCKIi", - "b9sKCVrg/dBC2B7R35mpJE5ulMpj1Dcgiwj+YjwqTGez57q46piNbcWZnj+kkHDP5uPAEexA8/EwUc/U", - "5VkTqbl0agXDdU6+rTu4jVzU7dqm+j4MkTuWRn+Ky0K8Oobpjj4TFiFYWoYgqORvT/5GJCyxdqQgjx7h", - "BI8ezV3Tvz3tfjbH+dGjqBj3xbwlLI7cGG7eKMU4Y9ogFAa2FZOJpH/vHXN3Fzaa7wh2gHh2zhKi1WBw", - "au83+oVTQaPMvVfBb5fmGu/jZwHK/JKbiWK4/zkVu2D98xNhMr2zULOy2HcoO0FPbeVbDOv5xQXk/i61", - "d3+xuuwhm3T1Dw/xkesfAERMZK2dyYOpgnCmCZFMrlskbgmJK68l0zvME+ZVn+yXqE/ND421xFmBm8wy", - "Tu7Q4gqaTHOtbaVWXrL5QdASZQHznkEPRS1EeUS+29JNVYJjUn96sPgDPPvj8+Lxsyd/WPzx8dePc3j+", - "9YvHj+mL5/TJi2dP4Okfv37+GJ4sv3mxeFo8ff508fzp82++fpE/e/5k8fybF394YO4AA7IFdOazUsz+", - "Nxaozk7fnWUXBtgWJ7RiP8LO1sI0ZOyrbNIcuSBsKCtnJ/6n/+m521EuNu3w/teZC3qfrbWu1Mnx8c3N", - "zVHY5XiFytRMizpfH/t5BmU4T9+dNeFh1hcKd9RG/hhSwE11pHCK395/d35BTt+dHbUEMzuZPT56fPQE", - "cxlXwGnFZiezZ/gTnp417vuxTyJ88unzfHa8BlqiTdz8sQEtWe4/qRu6WoE8cuVGzU/XT4+9GHf8ySmS", - "P499Ow4r9xx/6ujbiz090dHl+JNPYjXeupMlytkZgg4ToRhrdrzACOSpTUEFjdNLwcedOv6Ez5Pk78cu", - "LDP+EZ+J9gwce6NUvGUHS5/01sDa65FTna/r6vgT/gdpMgDLOkEPwbVuYMe2rv/w5x3Poz8OB+rXl4v9", - "fPypm7a9g1C1rnUhboK++ACyr/fhfE3Fr87fxzeUaSPSOMsi5qQadtZAy2MXuNT7tfUVHnxBB+jgx2BP", - "4r8eN/H40Y99Yo99dZudaOTDTlHoEja0teE+ZwWq5WyLUDFnrzdQ+ltR7EbKFW+zBeNU7roli9vr3X4c", - "yjLDguprsOkkvXYq1Nniq9ItI7x4tazB5s1BywLywKePH4/Au1GryoXJpEqlLykrawnZJqVSurz8sMT0", - "T9/bll7pMI8azfAJjzXAzMBtVAUlJbsGspCCFjlViURUTKEdq6maF3+JbFSYEqxXplRNh4EsIKfmSabX", - "sLPWRgdBW7dPTUhS2EdhdCFTimu7WCH0hwuxivUKPE34svnPD9/5UVVwx+M/Aty3tCA+LDwjb2hpyB4K", - "LLAg0Rm6G9j5/PGTLwrfGUfnESNKECsqfZ7Pvv7CSDrjRrCnJcGWFoJnXxSCc5DXLAdyAZtKSCpZuSM/", - "8SbOOci6NzxbP/ErLm64B95I2/Vmg/yuYZuKULSLhPQpZIRcqSJMtzo9sFGJ0I+aPiJ/PX3/9uztDydW", - "JG+kR/P/bQWSbYBrWqJFoXbGHG3OcQHXUIrKfMZUcxJQo80FWdVUUq4BXCJEucFH57LmuQ1QYXpngF7W", - "WJrTXPVCWpZEVwotM1heZjafhSCYM7zNDL9eAc/cjZEtRLHzOVIlvdFbyyCOg3dW+G6ZnXwIXiwfPn7+", - "aL5J0xo/tWL4yfExGorXQunj2ef5p56IHn782IDuU4nMKsmuMTLp4+f/FwAA//+fi696zcQAAA==", + "H4sIAAAAAAAC/+x9a5PcNpLgX0HUboQeV6xuPewZdYRjryXZnj5LGoW67bldSWejyKwqTJMABwC7qqzT", + "f79AAiBBEmSxH5ZmLvaT1EU8EolEIt/4NEtFUQoOXKvZyadZSSUtQIPEv2iaiorrhGXmrwxUKlmpmeCz", + "E/+NKC0ZX8/mM2Z+LanezOYzTgto2pj+85mEf1RMQjY70bKC+UylGyioGVjvS9O6HmmXrEXihji1Q5y9", + "nH0e+UCzTIJSfSj/yvM9YTzNqwyIlpQrmppPimyZ3hC9YYq4zoRxIjgQsSJ602pMVgzyTC38Iv9RgdwH", + "q3STDy/pcwNiIkUOfThfiGLJOHiooAaq3hCiBclghY02VBMzg4HVN9SCKKAy3ZCVkAdAtUCE8AKvitnJ", + "+5kCnoHE3UqBXeF/VxLgd0g0lWvQs4/z2OJWGmSiWRFZ2pnDvgRV5VoRbItrXLMr4MT0WpDXldJkCYRy", + "8u6HF+TJkyfPzEIKqjVkjsgGV9XMHq7Jdp+dzDKqwX/u0xrN10JSniV1+3c/vMD5z90Cp7aiSkH8sJya", + "L+Ts5dACfMcICTGuYY370KJ+0yNyKJqfl7ASEibuiW18p5sSzv9VdyWlOt2UgnEd2ReCX4n9HOVhQfcx", + "HlYD0GpfGkxJM+j74+TZx0+P5o+OP//b+9Pkv9yf3zz5PHH5L+pxD2Ag2jCtpASe7pO1BIqnZUN5Hx/v", + "HD2ojajyjGzoFW4+LZDVu77E9LWs84rmlaETlkpxmq+FItSRUQYrWuWa+IlJxXPDpsxojtoJU6SU4opl", + 
"kM0N991uWLohKVV2CGxHtizPDQ1WCrIhWouvbuQwfQ5RYuC6ET5wQf+8yGjWdQATsENukKS5UJBoceB6", + "8jcO5RkJL5TmrlLXu6zIxQYITm4+2MsWcccNTef5nmjc14xQRSjxV9OcsBXZi4pscXNydon93WoM1gpi", + "kIab07pHzeEdQl8PGRHkLYXIgXJEnj93fZTxFVtXEhTZbkBv3J0nQZWCKyBi+XdItdn2/3X+1zdESPIa", + "lKJreEvTSwI8FdnwHrtJYzf435UwG16odUnTy/h1nbOCRUB+TXesqArCq2IJ0uyXvx+0IBJ0JfkQQHbE", + "A3RW0F1/0gtZ8RQ3t5m2JagZUmKqzOl+Qc5WpKC7747nDhxFaJ6TEnjG+JroHR8U0szch8FLpKh4NkGG", + "0WbDgltTlZCyFYOM1KOMQOKmOQQP49eDp5GsAnD8IIPg1LMcAIfDLkIz5uiaL6SkawhIZkF+dpwLv2px", + "CbxmcGS5x0+lhCsmKlV3GoARpx4Xr7nQkJQSVixCY+cOHYZ72DaOvRZOwEkF15RxyAznRaCFBsuJBmEK", + "JhxXZvpX9JIq+Pbp0AXefJ24+yvR3fXRHZ+029gosUcyci+ar+7AxsWmVv8Jyl84t2LrxP7c20i2vjBX", + "yYrleM383eyfR0OlkAm0EOEvHsXWnOpKwskH/tD8RRJyrinPqMzML4X96XWVa3bO1uan3P70SqxZes7W", + "A8isYY1qU9itsP+Y8eLsWO+iSsMrIS6rMlxQ2tJKl3ty9nJok+2Y1yXM01qVDbWKi53XNK7bQ+/qjRwA", + "chB3JTUNL2EvwUBL0xX+s1shPdGV/N38U5a56a3LVQy1ho7dfYu2AWczOC3LnKXUIPGd+2y+GiYAVkug", + "TYsjvFBPPgUgllKUIDWzg9KyTHKR0jxRmmoc6d8lrGYns387aowrR7a7Ogomf2V6nWMnI49aGSehZXmN", + "Md4auUaNMAvDoPETsgnL9lAiYtxuoiElZlhwDleU60Wjj7T4QX2A37uZGnxbUcbiu6NfDSKc2IZLUFa8", + "tQ3vKRKgniBaCaIVpc11Lpb1D/dPy7LBIH4/LUuLDxQNgaHUBTumtHqAy6fNSQrnOXu5ID+GY6OcLXi+", + "N5eDFTXM3bByt5a7xWrDkVtDM+I9RXA7hVyYrfFoMDL8XVAc6gwbkRup5yCtmMZ/cW1DMjO/T+r8r0Fi", + "IW6HiQu1KIc5q8DgL4Hmcr9DOX3CcbacBTnt9r0Z2ZhR4gRzI1oZ3U877ggeaxRuJS0tgO6LvUsZRw3M", + "NrKw3pKbTmR0UZiDMxzQGkJ147N28DxEIUFS6MDwPBfp5V+o2tzBmV/6sfrHD6chG6AZSLKharOYxaSM", + "8Hg1o005YqYhau9kGUy1qJd4V8s7sLSMahoszcEbF0ss6rEfMj2QEd3lr/gfmhPz2Zxtw/rtsAtygQxM", + "2ePsPAiZUeWtgmBnMg3QxCBIYbV3YrTua0H5opk8vk+T9uh7azBwO+QWgTskdnd+DJ6LXQyG52LXOwJi", + "B+ou6MOMg2KkhkJNgO+lg0zg/jv0USnpvo9kHHsKks0Cjeiq8DTw8MY3szSW19OlkDfjPh22wkljTybU", + "jBow33kHSdi0KhNHihGblG3QGahx4Y0zje7wMYy1sHCu6R+ABWVGvQsstAe6ayyIomQ53AHpb6JMf0kV", + "PHlMzv9y+s2jx78+/uZbQ5KlFGtJC7Lca1DkvtPNiNL7HB70V4baUZXr+OjfPvVWyPa4sXGUqGQKBS37", + "Q1nrphWBbDNi2vWx1kYzrroGcMrhvADDyS3aiTXcG9BeMmUkrGJ5J5sxhLCsmSUjDpIMDhLTdZfXTLMP", + "lyj3sroLVRakFDJiX8MjpkUq8uQKpGIi4ip561oQ18KLt2X3dwst2VJFzNxo+q04ChQRytI7Pp3v26Ev", + "drzBzSjnt+uNrM7NO2Vf2sj3lkRFSpCJ3nGSwbJatzShlRQFoSTDjnhH/wj6fM9TtKrdBZEOq2kF42ji", + "V3ueBjqb2agcsnVrE26vm3Wx4u1zdqp7KgKOQccr/Ixq/UvINb1z+aU7QQz2F34jLbAkMw1RC37F1hsd", + "CJhvpRCru4cxNksMUPxgxfPc9OkL6W9EBmaxlbqDy7gZrKF1s6chhdOlqDShhIsM0KJSqfg1PeCWR38g", + "ujF1ePPrjZW4l2AIKaWVWW1VEnTS9ThH0zGhqaXeBFGjBrwYtfvJtrLTWZdvLoFmRqsHTsTSuQqcEwMX", + "SdHDqP1F54SEyFlqwVVKkYJSkCXORHEQNN/OMhE9gicEHAGuZyFKkBWVtwb28uognJewT9Afrsj9n35R", + "D74CvFpomh9ALLaJobdW+Jw/qA/1tOnHCK47eUh2VALxPNdol4ZB5KBhCIXXwsng/nUh6u3i7dFyBRI9", + "M38oxftJbkdANah/ML3fFtqqHIjycorOBSvQbscpFwpSwTMVHSynSieH2LJp1NLGzAoCThjjxDjwgFDy", + "iiptvYmMZ2gEsdcJzmMFFDPFMMCDAqkZ+Rcvi/bHTs09yFWlasFUVWUppIYstgYOu5G53sCunkusgrFr", + "6VcLUik4NPIQloLxHbLsSiyCqK6N7s7d3l8cmqbNPb+PorIFRIOIMUDOfasAu2GkywAgTDWItoTDVIdy", + "6vCa+UxpUZaGW+ik4nW/ITSd29an+uembZ+4qG7u7UyAmV17mBzkW4tZG+O0oUaFxpFJQS+N7IEKsXV7", + "9mE2hzFRjKeQjFG+OZbnplV4BA4e0qpcS5pBkkFO9/1Bf7afif08NgDueKP4CA2JjWeJb3pDyT58YGRo", + "geOpmPBI8AtJzRE0mkdDIK73gZEzwLFjzMnR0b16KJwrukV+PFy23erIiHgbXgltdtySA0LsGPoUeAfQ", + "UI98c0xg56RRy7pT/CcoN0EtRlx/kj2ooSU0419rAQPGNBcGHByXDnfvMOAo1xzkYgfYyNCJHbDsvaVS", + "s5SVqOr8BPs71/y6E0T9TSQDTVkOGQk+WC2wDPsTG4jRHfNmmuAkI0wf/J4VJrKcnCmUeNrAX8IeVe63", + "NsLvIogLvANVNjKquZ4oJwiojxsyEnjYBHY01fneyGl6A3uyBQlEVcuCaW1DNtuarhZlEg4QNXCPzOi8", + "OTY6zu/AFPfSOQ4VLK+/FfOZVQnG4bvo6AUtdDhVoBQin2A86iEjCsEkxz8phdl15iKEfRipp6QWkI5p", + "oyuvvv3vqRaacQXkP0VFUspR46o01CKNkCgnoPxoZjASWD2nc/E3GIIcCrCKJH55+LC78IcP3Z4zRVaw", + 
"9WH1pmEXHQ8fohnnrVC6dbjuwFRojttZ5PpAyz/eey54ocNTDruY3chTdvJtZ/DaXWDOlFKOcM3yb80A", + "OidzN2XtIY1Mc6/juJOM+sHQsXXjvp+zosrvasNXlOWVhGHv2IcP71fFhw8fyQ+2pXdszz2Rh+jYNmkR", + "K3cbVRJDa0jOjH4rBc2MgBC17eMi+TqpgzNVFJxCGXD+5s4h5ftOIt9UGMgSUlrZqGTHtR0ETXioWkTk", + "xc7udlEYXchE83iVa3tph1hdS1GVRNXbbqlAUw1/jKm5GToGZX/iIDao+TgUHmTUxHx/B7e1HYhIKCUo", + "5K2heUXZr2IV5t845qv2SkPRt0Dbrr8O6GfvBvUcwXPGISkEh3005ZRxeI0fY70tfx/ojDftUN+u8NyC", + "vwNWe54p1Hhb/OJuBwztbR0Xdweb3x2343wIM4/QuAZ5SShJc4amN8GVllWqP3CKyn1w2CLxA16NGTb3", + "vPBN4valiPnHDfWBU4wdqVX+KF9cQYQv/wDgrT6qWq9B6Y6UuAL4wF0rxknFmca5CrNfid2wEiQ68Re2", + "ZUH3ZEVztE79DlKQZaXbzBUTJJRmee48IWYaIlYfONUkB8NVXzN+scPhvCfR0wwHvRXyssbCInoe1sBB", + "MZXE4xx+tF8xBM0tf+PC0TBb1X62tnMzfpNFsUfdv8nA/D/3/+Pk/WnyXzT5/Th59j+OPn56+vnBw96P", + "jz9/993/bf/05PN3D/7j32M75WGPhe87yM9eOp3i7CUKjo3xvAf7FzOcFownUSILXcQd2iL3jfjrCehB", + "26ygN/CB6x03hHRFc5ZRfTNy6LK43lm0p6NDNa2N6JgR/FqvKY7dgsuQCJPpsMYbX+P90KB4ogx6c1zu", + "C56XVcXtVlbKeZQwDtyHaIjVvE6GskUQTghmymyojy9yfz7+5tvZvMlwqb/P5jP39WOEklm2i+UxZbCL", + "SdnugODBuKdISfcKdJx7IOzRaBTrFA+HLcCoZ2rDyi/PKZRmyziH89G1Tlvf8TNuw17N+UHf0N6ZnMXq", + "y8OtJUAGpd7EkqNbkgK2anYToOOvL6W4Aj4nbAGLrracrUH5uJgc6AqTdNG/IaZkC9TnwBKap4oA6+FC", + "JqmkMfpB4dZx68/zmbv81Z3L427gGFzdOWtHkP9bC3Lvx+8vyJFjmOqeTamzQwdJUBErlIvzb0VyGG5m", + "S0LYnMIP/AN/CSvGmfl+8oFnVNOjJVUsVUeVAvmc5pSnsFgLcuJTB15STT/wnqQ1WLUlSNogZbXMWUou", + "Q4m4IU+biR9VG2m+FkZx7Dq1+/KrmyrKX+wEyZbpjah04lKNEwlbKmNOA1WnmuLItlDA2Kxz4sa2rNil", + "Mrvx4zyPlqXqppz1l1+WuVl+QIbKJVSZLSNKC+llESOgWGhwf98IdzFIuvV56pUCRX4raPmecf2RJB+q", + "4+MnQFo5WL+5K9/Q5L6Elr3yRilxXVslLtzqNbDTkiYlXQ8YDTTQEncf5eUClew8J9itlfvlY1txqGYB", + "Hh/DG2DhuHYeCy7u3PbyNWPiS8BPuIXYxogbjcf0pvsVZIPdeLs6GWW9Xar0JjFnO7oqZUjc70xdSmJt", + "hCzvxlZsjaGCrurGEki6gfQSMiwAAEWp9/NWdx8p4QRNzzqYsoUybC4HZnOjaXcJpCoz6kTxjkHJYFiB", + "1j5W8R1cwv5CNMng18mjbad1qqGDipQaSJeGWMNj68bobr4Lx0FbV1n67EhMk/FkcVLThe8zfJCtyHsH", + "hzhGFK20wyFEUBlBhCX+ARTcYKFmvFuRfmx5RstY2psvUlfD837imjTKk4ucCVeD2ZT2ewFYdUdsFVlS", + "I7cLVzDGpi4GXKxSdA0DEnJoXZ+YINiyyOMgh+696E0nVt0LrXffREG2jROz5iilgPliSAWVmU68lJ/J", + "OnCsAZVgHTiHsGWOYlIdWGaZDpUtL4ctbDUEWpyAQfJG4PBgtDESSjYbqnwtGyz548/yJBngD0zFHSvA", + "cBaE+gR1fWrDt+e53XPa0y5dGQZfe8EXXAhVywnFE4yEj9HFse0QHAWgDHJY24Xbxp5QmrTgZoMMHH9d", + "rXLGgSSxqCGqlEiZLUbUXDNuDjDy8UNCrAmYTB4hRsYB2OiYxIHJGxGeTb6+DpDcpTVTPza6NIO/IZ6B", + "YeNojcgjSsPCGR+I2PYcgLpQs/r+6gQ84jCE8TkxbO6K5obNOY2vGaRXBwDF1k7Wv3ONPxgSZ0cs8PZi", + "udaa7FV0k9WEMpMHOi7QjUC8FLvEpmBFJd7lbmnoPRpajAlhsYNpKy7cU2QpdhhugVeLDWU9AMswHB6M", + "QMPfMYX0iv2GbnMLzNi049JUjAoVkowz59XkMiROTJl6QIIZIpf7QRGFGwHQMXY05Uad8ntQSW2LJ/3L", + "vLnV5k1xIJ+1ETv+Q0couksD+OtbYeqyB2+7EkvUTtGOGmhXfAhEyBjRGzbRd9L0XUEKckClIGkJUcll", + "zHVndBvAG+fcdwuMF1hXgvL9gyAURcKaKQ2NEd1czN4r9KXNkxTLWQmxGl6dLuXKrO+dEPU1ZeulYMfW", + "Mr/4CjCUc8Wk0gl6IKJLMI1+UKhU/2CaxmWldrCLrezIsjhvwGkvYZ9kLK/i9Orm/emlmfZNzRJVtUR+", + "yzgBmm7IEiuRRkPgRqa2UZKjC35lF/yK3tl6p50G09RMLA25tOf4FzkXHc47xg4iBBgjjv6uDaJ0hEEG", + "mYt97hjITfZwYubiYsz62jtMmR/7YNiIz58cuqPsSNG1BAaD0VUwdBMZsYTpoJBnP6Vw4AzQsmTZrmML", + "taMOasz0WgYPXyGpgwXcXTfYAQwEds9YVoME1S6G1Qj4tiRrqxbFYhJmLtolq0KGEE7FlC8o3kdUnfV0", + "CFcXQPOfYP+LaYvLmX2ez25nOo3h2o14ANdv6+2N4hld89aU1vKEXBPltCyluKJ54gzMQ6QpxZUjTWzu", + "7dFfmNXFzZgX35++euvA/zyfpTlQmdSiwuCqsF35L7MqW3dr4ID4gsVG5/MyuxUlg82viwWFRuntBlxx", + "2EAa7VWxaxwOwVF0RupVPELooMnZ+UbsEkd8JFDWLpLGfGc9JG2vCL2iLPd2Mw/tQDQPLm5aKcQoVwgH", + "uLV3JXCSJXfKbnqnO346Guo6wJPCuUbK1xa2QrMigndd6EaERHMckmpBsQadtYr0mROvCrQkJCpnadzG", + "ypcYdsut78w0Jth4QBg1I1ZswBXLKxaMZZqpCYpuB8hgjigyfT3DIdwthXtao+LsHxUQlgHX5pPEU9k5", + 
"qFj0z1nb+9epkR36c7mBrYW+Gf42MkZYf7F74yEQ4wJG6KnrgfuyVpn9QmuLlPkhcElcw+Efzti7Ekec", + "9Y4+HDXb4MVN2+MWvoTR53+GMGzV5MPPcHjl1RWCHJgj+qwGU8lKit8hruehehzJGPEVJxlGufwOfEKY", + "eWPdaV4HaWYf3O4h6Sa0QrWDFAaoHnc+cMth6TtvoabcbrWtct+KdYsTTBhVemTHbwjGwdyLxM3pdklj", + "dQGNkGFgOm0cwC1buhbEd/a4d2Z/5oqALkjgS67bMpsMXIJskrn6hUVuKDDYaSeLCo1kgFQbygRz6//L", + "lYgMU/Et5faxBNPPHiXXW4E1fpleWyExlV/Fzf4ZpKygeVxyyNK+iTdja2afCqgUBLXo3UD2jRVLRa6e", + "v3WxN6g5W5HjefDahduNjF0xxZY5YItHtsWSKuTktSGq7mKWB1xvFDZ/PKH5puKZhExvlEWsEqQW6lC9", + "qZ1XS9BbAE6Osd2jZ+Q+uu0Uu4IHBovufp6dPHqGRlf7x3HsAnBvgoxxk2wVJr7E6Rj9lnYMw7jdqIto", + "1rN9yGmYcY2cJtt1ylnClo7XHT5LBeV0DfFIkeIATLYv7iYa0jp44Zl9hURpKfaEDaQggaaGPw1Enxv2", + "Z8EgqSgKpgvn3FGiMPTUFJq3k/rh7JMmrkaoh8t/RB9p6V1EHSXyyxpN7f0WWzV6st/QAtponRNq6zfk", + "rIle8JWLyZkvD4NFU+taqRY3Zi6zdBRzMJhhRUrJuEbFotKr5M8k3VBJU8P+FkPgJstvn0YKxbYLFvLr", + "Af7F8S5BgbyKo14OkL2XIVxfcp8LnhSGo2QPmmyP4FQOOnPjbrsh3+H40FOFMjNKMkhuVYvcaMCpb0V4", + "fGTAW5JivZ5r0eO1V/bFKbOScfKgldmhn9+9clJGIWSs5ltz3J3EIUFLBlcYuxffJDPmLfdC5pN24TbQ", + "f13Pgxc5A7HMn+WYIvBcRLRTX7y4tqS7WPWIdWDomJoPhgyWbqg5aReK/fJOP2987jufzBcPK/7RBfYr", + "byki2a9gYBODItbR7czq74H/m5LnYjd1UzsnxG/sPwFqoiipWJ790mRldmqES8rTTdSftTQdf21eM6oX", + "Z++naGm1DeUc8uhwVhb81cuMEan272LqPAXjE9t2y5bb5XYW1wDeBtMD5Sc06GU6NxOEWG0nvNUB1fla", + "ZATnaep4NdyzX+4+KEr8jwqUjiUP4Qcb1IV2S6Pv2pq4BHiG2uKC/GhfI90AaVVpQS3N5sdD5iu0WoN6", + "VeaCZnNixrn4/vQVsbPaPvZNDluTd41KSnsVHXtVUKJwWniwf14jnrowfZzxWGqzaqWxaJLStChjyaGm", + "xYVvgBmooQ0f1ZcQOwvy0mqOyusldhJDDysmC6Nx1aNZ2QVpwvxHa5puUCVrsdRhkp9eTNpTpQoecKsf", + "Yqnr9uG5M3C7etK2nPScCKM3b5myj1DCFbTzUevkbGcS8Pmp7eXJinNLKVHZY6x4wE3Q7oGzgRrezB+F", + "rIP4awrkthb7dWtrn2OvaB2hbqHu3sttNruxfmDDPy6cUi44S7GKT+xqdg9aTvGBTSh41DWy+iPuTmjk", + "cEXLg9dhcg6LgwXDPSN0iOsb4YOvZlMtddg/Nb6cuKGarEErx9kgm/sq984OyLgCV4cR3zYN+KSQLb8i", + "csioqzqpXRrXJCNMixlQ7H4w3944tR/jxS8ZRwHfoc2FpltLHb63p41WwDRZC1BuPe3cYPXe9FlgmmwG", + "u48L/z4fjmHdcmbZ1gfdH+rUe6SdB9i0fWHa2lImzc+tCGQ76WlZukmH30CIygN6xwcRHPEsJt61EyC3", + "Hj8cbYTcRkNJ8D41hAZX6IiGEu/hHmHU7wF03poxQqulKGxBbAhXtIIB4xEwXjEOzeuRkQsijV4JuDF4", + "Xgf6qVRSbUXASTztAmiO3ucYQ1PauR5uO1RngxEluEY/x/A2Nk8ZDDCOukEjuFG+rx+tNNQdCBMv8LVc", + "h8j+wwQoVTkhKsOMgs5TBTHGYRi3L4XUvgD6x6AvE9nuWlJ7cq5zEw0liS6rbA06oVkWq4v5HL8S/OoL", + "RcEO0qqun1iWJMWaKO0iMX1qcxOlgquqGJnLN7jldMHbHxFqCN8f8TuMSSjLPf4bKx44vDMuCOPaYYA+", + "4sI9lnBNubk9Uk/qNTSdKLZOpmMC75Tbo6OZ+maE3vS/U0rPxboNyBcuDTHG5cI9ivG3783FEVZO6FXE", + "tFdLXdgAg+6Ef7EN1cY6JbfNlfAq65XIRGdPXfNu3AAx/LbTHC+/gdDboCAGtfer9R4OBeCmg/HiVLvM", + "NU3JKAsazAay0Ts27wehiFtOhyJ2bMCO+dzrPU0y7MnZOPYoQn0oWB+gn3ycKSkpc67xhln0Mesi0ofN", + "hWOHrtng7iJcnPegxe6nq6GYbKIYX+dA8Hv3NZxLcOns9XPodq0+KsmrhPZX9xqpHa+Oio+uvx+dgFN9", + "XTPooNH2wlVet8t0OvlPv9gYNgJcy/0/gQm3t+m9t4T60q41TzVNSF21d1IV39atGH8WaLj+UVPzCOmp", + "FIo1laJj7wVNjHW7wCd/gvpN/bF8oMkVpBrLgzcOdAlwnWpOZrLgLbr/roM0oDvWIYGu/NFYzaN+TfAD", + "F1ovLSlIrbP1lBfTK/yc1mFSyJTwNbg1cPccXDvhYHLY82oFqWZXB9LA/rYBHqQYzb0Rwj7rGmSFsTqM", + "FquIXN/E1gA0lqU1Ck9Qze/W4AwlgVzC/p4iLWqIFnie+3vlJgUkEAPIHRJDIkLFwhCs1dR5hpmqKQOx", + "4MN+bHdoSnENPg0TJDXecC5PkubGbRIdR6aMv00xaS7T9VrpvxgROpQp1q9tPyxsv8SnBFT9bJsvQBGq", + "pOQsUv3ZFbDApL3aUeBLWYDyv/kMXTtLzi4hfLwG3TJbKjPfImpn8CaMZOQ+6qV3RUtWU2WDKJ0fvA7S", + "7Cf0RAo/YShumgss9zwUz9yOiwzfeMfoD7wOsPw0wrUC6R75QmEvFwoSLXxQ5xgcY6hw75HfBAlqsNii", + "BW6wBMq7psYLFp2lWPKEusiWcIFEQkENdDKoxDI85xiyX9jvPoPFFx09aE6p6TU5WErFh+cy1UNiSPUr", + "4m7Lw5kxN7GsMM7tk6IqVpaFG1SGpv9SiqxK7QUdHoza+jS56NEIK4kaJdL+Knv6ZY4lwF4FeYaXsD+y", + "on+6obypxdY+1laEsmsI8vo7u32nRqe4fp2v7QLWdwLn1zTczGelEHkyYOs/61eX6Z6BS5ZeQkbM3eED", + 
"2wZe1yD30cRcO3O3m72vplKWwCF7sCDklNtQYu/XbZc37kzO7+mx+Xc4a1bZgk/OprT4wOMxmViKSd6S", + "v/lhxrmaAsP8bjmVHeRA7ZLdQGUbSbeRt2YWU5XSvqe1+/5HQ1QWipiUcsNE9knnu29XipB+8PTBuPYT", + "1rloAuikNU+itNQ8B9EWXl43VsdpjzD4DgfAC5Xi4BkGz40cOF85yu11jZRgKYOU0Fr+IT3bLbDhS8EW", + "KUyLMMu0VYdshER7XwIjinpR2ybieO6bMLCoheBY6Kdv+lBorsZ6wSHhmHMpr2j+5c0XWO3kFPHhnkSM", + "LzTUf0MkW1Sqm4WavKKT5g503bubmr9Fc8vfwOxR1M/ghnJ2x/r5C2+dxbp2NCe5aB5DwiHJFse0jolH", + "35KlC5MvJaRMsU4G0daXMq3VPazs3byUOa5fHlrnL0LfgoydgiBK8qYpi6gF3g8NhM0R/cpMZeDkRqk8", + "Rn09sojgL8ajwnz1A9fFZctjYcvMdkJxhIQ79lwEMQjX9Fz0M/GnLs9a582lUynor3Pybd3CbeSibtY2", + "1e3WR+5Y7bwp3rJ4SUzTHd11FiFYT5YgqOS3R78RCSt8MEKQhw9xgocP567pb4/bn81xfvgw/iLnl3LU", + "WRy5Mdy8MYr5ZSh004YnDkQJd/ajYnl2iDBaMd/NkysY1fyry/r4Ko++/Grtqf2j6grvXydEoLsJiJjI", + "WluTB1MF0dwTArldt0jYNmomaSWZ3mMxCm9+Y79GXYo/1hZ75/Gp05fd3afFJdTlTBr7fqX87fqjoDne", + "R0amxgANja8wfr+jRZmDOyjf3Vv+CZ78+Wl2/OTRn5Z/Pv7mOIWn3zw7PqbPntJHz548gsd//ubpMTxa", + "ffts+Th7/PTx8unjp99+8yx98vTR8um3z/50z/AhA7IFdOZTH2f/G19GSk7fniUXBtgGJ7Rk9eOrhoz9", + "8w40xZMIBWX57MT/9D/9CVukomiG97/OXGbVbKN1qU6Ojrbb7SLscrRGg16iRZVujvw8/Ucv357V0fHW", + "FYw7agOfDSngpjpSOMVv774/vyCnb88WDcHMTmbHi+PFI3zMrAROSzY7mT3Bn/D0bHDfjxyxzU4+fZ7P", + "jjZAc/R/mT8K0JKl/pPa0vUa5MK9c2F+unp85EWJo0/OmPl57NtRWDL26FPL5psd6IklJY8++UoJ461b", + "pQicrTvoMBGKsWZHS0zAmtoUVNB4eCmoYKijTygiD/5+5LJS4h9RVbFn4Mg7RuItW1j6pHcG1k4P93rz", + "0Sf8D9JkAJaNAeuDa7M1juyDcv2f9zyN/tgfqFvYPPbz0ad2Yb0WQtWm0pnYBn1RCLcaZH++utR06++j", + "LWXaXKvOu4WFD/qdNdD8yMVtd35tQqV6XzD+K/ix/Upv5Nejuq5M9GOX2GNf3WYPNPJZN5h+JWxmT819", + "zjI0DdkWoXHIXm+g9HOR7UfeydklS8ap3Lffymmud/uxrx/0X/LagK1Z5C0kod0QNRu3jPDi1bICm5yN", + "1m3kgY+Pj0fgLdS6dFHC//3g6v9nD64+vf7Oj5ojWwGPEeCe04z4rLiEvKa5IXvIyKmTEkKILXyPvih8", + "ZxwDGIwoQayo9Hk+++YLI+mMG8Ge5gRbWgiefFEIzkFesRTIBRSlkFSyfE9+5nWaV1DapX+2fuaXXGy5", + "B95I21VRIL+r2aZRYiXdth+AlxFypYow3diVwCZlQDdpbEH+dvruzdmbH0+sSF5Lj+b/uxIkK4BrmqNV", + "u3IOBW3OcQZXkIvSfMZ6JhLQqsoFWVdUUq4BXLUdWaDly78xSHOm9wboVYVvQpirXkjLkuhaoXcACwDP", + "5rMQBHOGd4nh12vgibsxkqXI9r4Ql6RbvbMM4ijQs0K9ZXbyPtBY3n/8/NF8k6Y1fmrE8JOjI3RWboTS", + "R7PP808dET38+LEG3WdSz0rJrjAw++Pn/xcAAP//LCP3WkazAAA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go index e3fd2d22ff..273bff101c 100644 --- a/daemon/algod/api/server/v2/generated/model/types.go +++ b/daemon/algod/api/server/v2/generated/model/types.go @@ -106,6 +106,12 @@ const ( GetTransactionProofParamsFormatMsgpack GetTransactionProofParamsFormat = "msgpack" ) +// Defines values for GetLedgerStateDeltaParamsFormat. +const ( + GetLedgerStateDeltaParamsFormatJson GetLedgerStateDeltaParamsFormat = "json" + GetLedgerStateDeltaParamsFormatMsgpack GetLedgerStateDeltaParamsFormat = "msgpack" +) + // Defines values for GetPendingTransactionsParamsFormat. const ( GetPendingTransactionsParamsFormatJson GetPendingTransactionsParamsFormat = "json" @@ -218,30 +224,6 @@ type Account struct { // * lsig type AccountSigType string -// AccountBalanceRecord Account and its address -type AccountBalanceRecord struct { - // AccountData Account information at a given round. - // - // Definition: - // data/basics/userBalance.go : AccountData - AccountData Account `json:"account-data"` - - // Address Address of the updated account. 
- Address string `json:"address"` -} - -// AccountDeltas Exposes deltas for account based resources in a single round -type AccountDeltas struct { - // Accounts Array of Account updates for the round - Accounts *[]AccountBalanceRecord `json:"accounts,omitempty"` - - // Apps Array of App updates for the round. - Apps *[]AppResourceRecord `json:"apps,omitempty"` - - // Assets Array of Asset updates for the round. - Assets *[]AssetResourceRecord `json:"assets,omitempty"` -} - // AccountParticipation AccountParticipation describes the parameters used by this account in consensus protocol. type AccountParticipation struct { // SelectionParticipationKey \[sel\] Selection public key (if any) currently registered for this round. @@ -271,42 +253,6 @@ type AccountStateDelta struct { Delta StateDelta `json:"delta"` } -// AccountTotals Total Algos in the system grouped by account status -type AccountTotals struct { - // NotParticipating Amount of stake in non-participating accounts - NotParticipating uint64 `json:"not-participating"` - - // Offline Amount of stake in offline accounts - Offline uint64 `json:"offline"` - - // Online Amount of stake in online accounts - Online uint64 `json:"online"` - - // RewardsLevel Total number of algos received per reward unit since genesis - RewardsLevel uint64 `json:"rewards-level"` -} - -// AppResourceRecord Represents AppParams and AppLocalStateDelta in deltas -type AppResourceRecord struct { - // Address App account address - Address string `json:"address"` - - // AppDeleted Whether the app was deleted - AppDeleted bool `json:"app-deleted"` - - // AppIndex App index - AppIndex uint64 `json:"app-index"` - - // AppLocalState Stores local state associated with an application. - AppLocalState *ApplicationLocalState `json:"app-local-state,omitempty"` - - // AppLocalStateDeleted Whether the app local state was deleted - AppLocalStateDeleted bool `json:"app-local-state-deleted"` - - // AppParams Stores the global information associated with an application. - AppParams *ApplicationParams `json:"app-params,omitempty"` -} - // Application Application index and its parameters type Application struct { // Id \[appidx\] application index. @@ -443,35 +389,6 @@ type AssetParams struct { UrlB64 *[]byte `json:"url-b64,omitempty"` } -// AssetResourceRecord Represents AssetParams and AssetHolding in deltas -type AssetResourceRecord struct { - // Address Account address of the asset - Address string `json:"address"` - - // AssetDeleted Whether the asset was deleted - AssetDeleted bool `json:"asset-deleted"` - - // AssetHolding Describes an asset held by an account. - // - // Definition: - // data/basics/userBalance.go : AssetHolding - AssetHolding *AssetHolding `json:"asset-holding,omitempty"` - - // AssetHoldingDeleted Whether the asset holding was deleted - AssetHoldingDeleted bool `json:"asset-holding-deleted"` - - // AssetIndex Index of the asset - AssetIndex uint64 `json:"asset-index"` - - // AssetParams AssetParams specifies the parameters for an asset. - // - // \[apar\] when part of an AssetConfig transaction. - // - // Definition: - // data/transactions/asset.go : AssetParams - AssetParams *AssetParams `json:"asset-params,omitempty"` -} - // Box Box name and its content. type Box struct { // Name \[name\] box name, base64 encoded @@ -598,32 +515,8 @@ type KvDelta struct { Value *[]byte `json:"value,omitempty"` } -// LedgerStateDelta Contains ledger updates. 
-type LedgerStateDelta struct { - // Accts Exposes deltas for account based resources in a single round - Accts *AccountDeltas `json:"accts,omitempty"` - - // KvMods Array of KV Deltas - KvMods *[]KvDelta `json:"kv-mods,omitempty"` - - // ModifiedApps List of modified Apps - ModifiedApps *[]ModifiedApp `json:"modified-apps,omitempty"` - - // ModifiedAssets List of modified Assets - ModifiedAssets *[]ModifiedAsset `json:"modified-assets,omitempty"` - - // PrevTimestamp Previous block timestamp - PrevTimestamp *uint64 `json:"prev-timestamp,omitempty"` - - // StateProofNext Next round for which we expect a state proof - StateProofNext *uint64 `json:"state-proof-next,omitempty"` - - // Totals Total Algos in the system grouped by account status - Totals *AccountTotals `json:"totals,omitempty"` - - // TxLeases List of transaction leases - TxLeases *[]TxLease `json:"tx-leases,omitempty"` -} +// LedgerStateDelta Ledger StateDelta object +type LedgerStateDelta = map[string]interface{} // LightBlockHeaderProof Proof of membership and position of a light block header. type LightBlockHeaderProof struct { @@ -637,30 +530,6 @@ type LightBlockHeaderProof struct { Treedepth uint64 `json:"treedepth"` } -// ModifiedApp App which was created or deleted. -type ModifiedApp struct { - // Created Created if true, deleted if false - Created bool `json:"created"` - - // Creator Address of the creator. - Creator string `json:"creator"` - - // Id App Id - Id uint64 `json:"id"` -} - -// ModifiedAsset Asset which was created or deleted. -type ModifiedAsset struct { - // Created Created if true, deleted if false - Created bool `json:"created"` - - // Creator Address of the creator. - Creator string `json:"creator"` - - // Id Asset Id - Id uint64 `json:"id"` -} - // ParticipationKey Represents a participation key used by the node. type ParticipationKey struct { // Address Address the key was generated for. @@ -786,18 +655,6 @@ type TealValue struct { Uint uint64 `json:"uint"` } -// TxLease defines model for TxLease. -type TxLease struct { - // Expiration Round that the lease expires - Expiration uint64 `json:"expiration"` - - // Lease Lease data - Lease []byte `json:"lease"` - - // Sender Address of the lease sender - Sender string `json:"sender"` -} - // Version algod version information. type Version struct { Build BuildVersion `json:"build"` @@ -984,7 +841,7 @@ type GetSyncRoundResponse struct { Round uint64 `json:"round"` } -// LedgerStateDeltaResponse Contains ledger updates. +// LedgerStateDeltaResponse Ledger StateDelta object type LedgerStateDeltaResponse = LedgerStateDelta // LightBlockHeaderProofResponse Proof of membership and position of a light block header. @@ -1260,6 +1117,15 @@ type GetTransactionProofParamsHashtype string // GetTransactionProofParamsFormat defines parameters for GetTransactionProof. type GetTransactionProofParamsFormat string +// GetLedgerStateDeltaParams defines parameters for GetLedgerStateDelta. +type GetLedgerStateDeltaParams struct { + // Format Configures whether the response object is JSON or MessagePack encoded. + Format *GetLedgerStateDeltaParamsFormat `form:"format,omitempty" json:"format,omitempty"` +} + +// GetLedgerStateDeltaParamsFormat defines parameters for GetLedgerStateDelta. +type GetLedgerStateDeltaParamsFormat string + // ShutdownNodeParams defines parameters for ShutdownNode. 
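// NOTE: with the typed model replaced by the alias
// `type LedgerStateDelta = map[string]interface{}` above, callers decode the
// response generically and type-assert the fields they need. A minimal
// sketch, assuming `b []byte` holds a JSON-encoded response (the "accts" key
// mirrors the json tag of the removed struct and may differ under the new
// encoder):
//
//	var delta model.LedgerStateDelta
//	if err := json.Unmarshal(b, &delta); err != nil {
//		log.Fatal(err)
//	}
//	if accts, ok := delta["accts"].(map[string]interface{}); ok {
//		fmt.Println("account deltas:", accts)
//	}
//
// Requesting the msgpack encoding instead goes through the Format parameter
// added just above:
//
//	f := model.GetLedgerStateDeltaParamsFormatMsgpack
//	params := model.GetLedgerStateDeltaParams{Format: &f}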
type ShutdownNodeParams struct { Timeout *uint64 `form:"timeout,omitempty" json:"timeout,omitempty"` diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go index bc7cdf5d5b..1b790e2ba0 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go @@ -130,177 +130,166 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9/XPcNrLgv4KafVX+uKEkf2XXqkq9U6wkq4vtuCwle+9ZvgRD9sxgxQEYApRm4tP/", - "foUGQIIkwOFIE3tztT/ZGgKNRqPRaPQXPk1SsSoEB67k5PjTpKAlXYGCEv+iaSoqrhKW6b8ykGnJCsUE", - "nxy7b0SqkvHFZDph+teCquVkOuF0BU0b3X86KeG3ipWQTY5VWcF0ItMlrKgGrDaFbl1DWicLkVgQJwbE", - "2enkduADzbISpOxj+SPPN4TxNK8yIKqkXNJUf5LkhqklUUsmie1MGCeCAxFzopatxmTOIM/kgZvkbxWU", - "G2+WdvD4lG4bFJNS5NDH85VYzRgHhxXUSNULQpQgGcyx0ZIqokfQuLqGShAJtEyXZC7KLagaJHx8gVer", - "yfGHiQSeQYmrlQK7xv/OS4DfIVG0XICafJyGJjdXUCaKrQJTO7PUL0FWuZIE2+IcF+waONG9DsibSioy", - "A0I5ef/dK/Ls2bOXeiIrqhRklsmis2pG9+dkuk+OJxlV4D73eY3mC1FSniV1+/ffvcLxz+0Ex7aiUkJ4", - "s5zoL+TsNDYB1zHAQowrWOA6tLhf9whsiubnGcxFCSPXxDTe66L443/RVUmpSpeFYFwF1oXgV2I+B2WY", - "131IhtUItNoXmlKlBvrhKHn58dOT6ZOj2798OEn+2/754tntyOm/quFuoUCwYVqVJfB0kyxKoLhblpT3", - "6fHe8oNciirPyJJe4+LTFYp625fovkZ0XtO80nzC0lKc5AshCbVslMGcVrkibmBS8VyLKQ3NcjthkhSl", - "uGYZZFMtfW+WLF2SlEoDAtuRG5bnmgcrCVmM18KzG9hMtz5JNF53ogdO6F+XGM28tlAC1igNkjQXEhIl", - "thxP7sShPCP+gdKcVXK3w4pcLIHg4PqDOWyRdlzzdJ5viMJ1zQiVhBJ3NE0Jm5ONqMgNLk7OrrC/nY2m", - "2opoouHitM5RvXlj5OsRI0C8mRA5UI7Ec/uuTzI+Z4uqBElulqCW9swrQRaCSyBi9k9IlV72/3X+41si", - "SvIGpKQLeEfTKwI8FVl8je2goRP8n1LoBV/JRUHTq/BxnbMVC6D8hq7ZqloRXq1mUOr1cueDEqQEVZU8", - "hpCBuIXPVnTdH/SirHiKi9sM21LUNCsxWeR0c0DO5mRF118fTS06ktA8JwXwjPEFUWseVdL02NvRS0pR", - "8WyEDqP0gnmnpiwgZXMGGamhDGBih9mGD+O74dNoVh46DkgUnXqULehwWAd4Rm9d/YUUdAEeyxyQn6zk", - "wq9KXAGvBRyZbfBTUcI1E5WsO0VwxKGH1WsuFCRFCXMW4LFzSw4tPUwbK15XVsFJBVeUcci05EWkhQIj", - "iaI4eQMOX2b6R/SMSvjqeewAb76OXP256K764IqPWm1slJgtGTgX9Ve7YcNqU6v/iMufP7Zki8T83FtI", - "trjQR8mc5XjM/FOvnyNDJVEItAjhDh7JFpyqqoTjS/5Y/0UScq4oz2iZ6V9W5qc3Va7YOVvon3Lz02ux", - "YOk5W0SIWeMavE1ht5X5R8MLi2O1Dl4aXgtxVRX+hNLWrXS2IWensUU2MHdlzJP6KuvfKi7W7qaxaw+1", - "rhcygmSUdgXVDa9gU4LGlqZz/Gc9R36i8/J3/U9R5Lq3KuYh0mo+tuct2gaszeCkKHKWUk3E9/az/qqF", - "AJhbAm1aHOKBevzJQ7EoRQGlYgYoLYokFynNE6moQkj/UcJ8cjz5y2FjXDk03eWhN/hr3escO2l91Og4", - "CS2KHWC803qNHBAWWkDjJxQTRuyhRsS4WUTNSkyL4ByuKVcHzX2kJQ/qDfzBjtTQ26gyht6d+1WU4MQ0", - "nIE06q1p+EASj/QEyUqQrKhtLnIxq394eFIUDQXx+0lRGHqgaggMtS5YM6nkI5w+bXaSP87Z6QH53oeN", - "erbg+UYfDkbV0GfD3J5a9hSrDUd2Dg3EB5LgcoryQC+NI4PW4ffBcXhnWIpcaz1beUU3/rtt67OZ/n1U", - "5z8Hi/m0jTMX3qIs5cwFBn/xbi4PO5zTZxxryzkgJ92+d2MbDSXMMHfilcH1NHAH6FiT8KakhUHQfjFn", - "KeN4AzONDK73lKYjBV0QZ28Pe7yGWN15r23dD0FMkBU6OHyTi/Tq71Qu97DnZw5Wf/vhMGQJNIOSLKlc", - "HkxCWoa/vRpoY7aYboi3dzLzhjqop7iv6W2ZWkYV9aZm8Q2rJYb02A+FHpSBu8uP+B+aE/1Z720t+g3Y", - "A3KBAkya7Ww9CJm+ypsLghlJN0ATgyArc3sn+ta9E5avmsHD6zRqjb41BgO7QnYSuEJivfdt8I1Yh3D4", - "Rqx7W0CsQe6DPzQcVCMVrOQI/E4tZgLX35KPliXd9ImMsMcQWU9Qq64SdwP3T3w9SmN5PZmJ8m7SpyNW", - "OGnsyYRqqJ7wnXaIhE2rIrGsGLBJmQYdQI0Lb1hodMGHKNaiwrmifwAVpIa6Dyq0Ae2bCmJVsBz2wPrL", - "oNCfUQnPnpLzv5+8ePL0l6cvvtIsWZRiUdIVmW0USPLQ3s2IVJscHvVnhrejKldh6F89d1bINtwQHCmq", - "MoUVLfqgjHXTqECmGdHt+lRrkxlnXSM4ZnNegJbkhuzEGO41aqdMag1rNdvLYsQIljWjZMRiksFWZtp1", - 
"es0wG3+K5aas9nGVhbIUZcC+hltMiVTkyTWUkomAq+SdbUFsC6feFt3fDbbkhkqix0bTb8VRoQhwllrz", - "8XLfgL5Y84Y2g5LfzDcwOzvumHVpE99ZEiUpoEzUmpMMZtWidROal2JFKMmwI57R34M63/AUrWr7YNL4", - "NW3FOJr45Yan3p1NL1QO2aK1CPe/m3Wp4uxzZqgHMoCOJsdr/IzX+lPIFd27/tIdIIT7K7eQBlmS6YZ4", - "C37NFkvlKZjvSiHm+8cxNEoIUfxg1PNc9+kr6W9FBnqyldzDYdwAa3hdr6nP4XQmKkUo4SIDtKhUMnxM", - "R9zy6A9EN6byT361NBr3DDQjpbTSs60Kgk66nuRoOiY0NdybIGlkxItRu59MKzOccfnmJdBM3+qBEzGz", - "rgLrxMBJUvQwKnfQWSUhsJdaeBWlSEFKyBJrotiKmmtnhIgaoBMijgjXoxApyJyW90b26nornlewSdAf", - "LsnDH36Wj74Avkoomm8hLLYJkbe+8Fl/UB/rccMPMVx3cJ/taAnEyVx9u9QCIgcFMRLuRJPo+nUx6q3i", - "/clyDSV6Zv5QjneD3I+BalT/YH6/L7ZVEYnyshedC7ZCux2nXEhIBc9kEFhOpUq2iWXdqHUb0zPwJGFI", - "EiPgiFLymkplvImMZ2gEMccJjmMUFD1EHOGoQqoh/+x00T7sVJ+DXFayVkxlVRSiVJCF5sBhPTDWW1jX", - "Y4m5B7vWfpUglYRtkGNU8uBbYpmZGAJRVRvdrbu9Pzk0TetzfhMkZQuJhhBDiJy7Vh51/UiXCCJMNoQ2", - "jMNkh3Pq8JrpRCpRFFpaqKTidb8Ymc5N6xP1U9O2z1xUNed2JkCPrhxOFvMbQ1kT47Sk+gqNkMmKXmnd", - "Ay/Exu3Zx1lvxkQynkIyxPl6W57rVv4W2LpJq2JR0gySDHK66QP9yXwm5vMQAFzx5uIjFCQmniW86A0n", - "u/CBAdAC4cmQ8kjwC0n1FtQ3j4ZBbO8tkDNA2CHhZPnoQQ0KxwoukYOH0zZLHYCIp+G1UHrFDTsgxlag", - "j8E3QoYa8t0pgZ2T5lrWHeK/QNoBajVi90E2IGNTaODvNIGIMc2GAXvbpSPdOwI4KDWjUmyLGInt2Ihl", - "7x0tFUtZgVedH2Cz95tfd4Cgv4lkoCjLISPeB3MLLPz+xARidGHe7SY4ygjTR79nhQlMJ2cSNZ428lew", - "wSv3OxPhd+HFBe7hKhuAqo8nygki6uKGtAbuN4E1TVW+0XqaWsKG3EAJRFazFVPKhGy2b7pKFIkPIGjg", - "HhjRenNMdJxbgTHupXME5U2vvxTTibkSDON30bkXtMhhrwKFEPkI41GPGEEMRjn+SSH0qjMbIezCSB0n", - "tZC0QhtdefXp/0C2yIwzIP8lKpJSjjeuSkGt0ogS9QTUH/UIWgOrx7Qu/oZCkMMKzEUSvzx+3J3448d2", - "zZkkc7hxYfW6YZccjx+jGeedkKq1ufZgKtTb7SxwfKDlH889G7zQkSnbXcwW8piVfNcBXrsL9J6S0jKu", - "nv69BUBnZ67HzN3nkXHudYQ7yqjvgQ7NG9f9nK2qfF8LPqcsr0qIe8cuLz/MV5eXH8l3pqVzbE8dk/vk", - "uGnSIub2NKpKDK0hOdP321LQTCsIQds+TpIvkjo4UwbRWUmNzj/sPqR800nkG4sDmUFKKxOVbKW2xaAJ", - "D5UHAX2xs7pdEgYnMtI8XuXKHNo+VRelqAoi62U3XKCogj/G1NyADmHZH9iLDWo+xsKD9DUx3+zhtDaA", - "SAlFCRJlq29ekearmPv5N1b4yo1UsOpboE3XXyL3s/fRe47gOeOQrASHTTDllHF4gx9DvY18j3TGkzbW", - "t6s8t/DvoNUeZww33pe+uNqeQHtXx8XtYfG7cDvOBz/zCI1rkBeEkjRnaHoTXKqyStUlp3i59zZbIH7A", - "XWPi5p5XrknYvhQw/1hQl5xi7Eh95Q/KxTkE5PJ3AM7qI6vFAqTqaIlzgEtuWzFOKs4UjrXS65WYBSug", - "RCf+gWm5ohsypzlap36HUpBZpdrCFRMkpGJ5bj0hehgi5pecKpKDlqpvGL9YIzjnSXQ8w0HdiPKqpsJB", - "cD8sgINkMgnHOXxvvmIImp3+0oajYbaq+Wxs5xp+k0Wxwbt/k4H5fx7+5/GHk+S/afL7UfLyfxx+/PT8", - "9tHj3o9Pb7/++v+2f3p2+/Wj//yP0Eo53EPh+xbzs1N7pzg7RcWxMZ73cP9shtMV40mQyXwXcYe3yEOt", - "/joGetQ2K6glXHK15pqRrmnOMqruxg5dEdfbi2Z3dLimtRAdM4Kb647q2D2kDAkImY5ovPMx3g8NCifK", - "oDfH5r7gfplX3CxlJa1HCePAXYiGmE/rZChTBOGYYKbMkrr4Ivvn0xdfTaZNhkv9fTKd2K8fA5zMsnUo", - "jymDdUjLthsEN8YDSQq6kaDC0gNxD0ajGKe4D3YF+noml6z4/JJCKjYLSzgXXWtv62t+xk3Yq94/6Bva", - "WJOzmH9+vFUJkEGhlqHk6JamgK2a1QTo+OuLUlwDnxJ2AAfd23K2AOniYnKgc0zSRf+GGJMtUO8Dw2iO", - "Kzyq+xMZdSUN8Q8qt1Za304n9vCXe9fHLeAQXt0xa0eQ+1sJ8uD7by/IoRWY8oFJqTOgvSSogBXKxvm3", - "Ijm0NDMlIUxO4SW/5KcwZ5zp78eXPKOKHs6oZKk8rCSU39Cc8hQOFoIcu9SBU6roJe9pWtGqLV7SBimq", - "Wc5ScuVrxA17mkz84LWR5guhL45dp3Zff7VDBeWLGSC5YWopKpXYVOOkhBtahpwGsk41RcimUMDQqFNi", - "YRtRbFOZLfywzKNFIbspZ/3pF0Wup++xobQJVXrJiFSidLqIVlAMNri+b4U9GEp64/LUKwmS/LqixQfG", - "1UeSXFZHR8+AtHKwfrVHvubJTQEte+WdUuK6tkqcuLnXwFqVNCnoImI0UEALXH3Ul1d4yc5zgt1auV8u", - "thVBNRNw9IgvgMFj5zwWnNy56eVqxoSngJ9wCbGNVjcaj+ld18vLBrvzcnUyynqrVKllovd2cFZSs7hb", - "mbqUxEIrWc6NLdkCQwVt1Y0ZkHQJ6RVkWAAAVoXaTFvdXaSEVTSd6GDSFMowuRyYzY2m3RmQqsioVcU7", - "BiVNYQlKuVjF93AFmwvRJIPvkkfbTuuUsY2KnOppl5pZ/W1rYXQX34bjoK2rKFx2JKbJOLY4rvnC9Ylv", - "ZKPy7mETh5iilXYYIwQtA4QwzB8hwR0mquHdi/VD09O3jJk5+QJ1NZzsJ7ZJc3mykTP+bDCb0nxfAVbd", - 
"ETeSzKjW24UtGGNSFz0pVkm6gIiG7FvXRyYItizyCGTbuRc86cS8e6D1zpsgyqZxoucc5BTQXzSr4GWm", - "Ey/lRjIOHGNAJVgHzhJslqOaVAeWGaFDy5aXwxS2iqEWZmAoeaNwODTaFPE1myWVrpYNlvxxe3mUDvAH", - "puIOFWA480J9vLo+teHbydzuPu3dLm0ZBld7wRVc8K+WI4onaA0fo4tDyyE4KkAZ5LAwEzeNHaM0acHN", - "Amk8fpzPc8aBJKGoISqlSJkpRtQcM3YM0PrxY0KMCZiMhhBiYw9tdEwiYPJW+HuTL3ZBktu0Zupgo0vT", - "+xvCGRgmjlarPKLQIpzxSMS2kwDUhprV51cn4BHBEManRIu5a5prMWdvfA2QXh0AVFs7Wf/WNf4ops4O", - "WODNwbLTnMxRdJfZ+DqTQzqs0A1gPBPrxKRgBTXe2Xqm+T0YWowJYaGNaSouPJBkJtYYboFHiwll3YJL", - "HA+HhnfDXzOJ/Ir9Yqe5QWZo2GFtKsSFElnGmvNqdompE2OGjmgwMXZ56BVRuBMCHWNHU27UXn63XlLb", - "6kn/MG9OtWlTHMhlbYS2f2wLBVcpQr++FaYue2BNCO8hFWUWt1NoRmWqrt/aNy/Y6rNabowujDBQS/ak", - "fdtwV4j+ykWiAlr4NOMMEOLU5Bz1MPl2XQit3ZqcJFOgwhLF6IklmFRLaWxWkvFFDnXkZpBMoQm7mCRH", - "cTPlpuCUAzhOdw4tbuSSP4RLUYTx2OWm8t7SZwCLyC5v8EA9/J6Y2CIVg7jcxvnjXVe1D26UdnhNuzSK", - "d9cKnQ6affrezL7PVEIOeHtOWreN5Crk4768/CABVbNz182z8mEBFso3j7yYrRIWTCpovE1ag3WU/tx2", - "fIp134SYx2eninKu5/deiFqfM4WFsGNrmp99BhjzPGelVAm66oJT0I2+k2h9+k43DV8q2lFhpgQqy8KH", - "KA57BZskY3kV5lc77g+neti3te4gqxkqJowToOmSzLBkbzBWdGBoE048OOHXZsKv6d7mO2436KZ64FKz", - "S3uMP8m+6Jx0Q+IgwIAh5uivWpSkAweol+Lbl47eBcNsTjxOD4bcFL3NlDnYW+OrXKJxTJkzkAbmgqFB", - "0eDcQECOiSMzQr2p1h9MxuVCJS3jR4BctYFHKnplEsraC8wXtU0lHDZl7tWjQNu2WwDy8fD4dnBWCU5y", - "uIZ8exA0RYo7Aw5GRhgIGHpDMJ3AxXhs1+r7K9AQrJ5pF8cgt/S0myHHbXM1svXzmrs1Mqymnc18H+29", - "0xqa47eGv/uuu6JIMsghmGf2Dy+RjBYFVotwjUMJPRoY4xmsw+iYT9NQTf2+8b5iXJn6q/sq7diBM37a", - "fgHEMSQoTKm+3ctHxu+Y3ir5ZI5PKsKUtXNgUBAj8Ppm571G0uW+yDFOi4Jl647f00CNWsf3QjE8oCyw", - "LRTweCOUwViCbBe+bIx5pvx6q+7UwSjKXLTLU/o6jT8Uk+7xkD6h6gznbbS6AJr/AJufdVuczuR2Ormf", - "mzREawtxC63f1csbpDOG4Rm3WSvqYUeS06IoxTXNE+tMjrFmKa4ta2Jz53v+zNpaWOpdfHvy+p1F/3Y6", - "SXOgZVLfdqKzwnbFn2ZWpsZmZIO4xwmWVNX2OXMb9ha/LgzoO6BvlmALwXsX6l7F2ia4wNuK1iE9D0cD", - "b3Uv2zgIM8WBeAgo6nCIxlVnoiHaERD0mrLc+cgctpHIXZzcuLMxKBV8APeOpPDPor2Km97uDu+Ohru2", - "yCR/rIFS9SvzGoMkgnfD5fQtGF1vyKorivVmjQekL5x4tUKvQSJzlob9qXyGKTbcxMnoxgQbR+7TGmLF", - "ImFXvGIeLN1MjjBqd5D0xggS09UujtFuJuwzWhVnv1VAWAZc6U8l7srORkX7qfWs94/TsFZpARtvfAP+", - "PjqGX2u5e+JZnWtIwfCjcnrontZWPzfR2vukf/DCD3YI7vNH7B2JA4F5lj8sN5tEhWU7uma0hr71yS1n", - "f7NFnyNjBJ/QYjKZl+J3CJuq0MIXyA511aUZRrT+DnxESlnjyWleAmtGjy53TLvxPU7tgMQI1+PKeyE4", - "WObWeaMpN0ttXrRpxbWHGcbPIDk08BuGsTj3sm5yejOjoRrAWsnQOHnul5bfXAniOjvaWx8NswW/D4gX", - "N1a3ZabwRwFlk7jdLyJ2R4XBDDtaVWg0A+RaXyeYmlifXIoAmIrfUG4eRkJvBG4l21tf8J1B6EaUWLZH", - "hl38GaRsFTQuXV5+yNK+OzdjC2aeBaokeO/OWEDmPTXDRfbtHhNO15DmbE6Opt7LVnY1MnbNJJvlgC2e", - "mBYzKsEYVVzkhuuipwdcLSU2fzqi+bLiWQmZWkpDWClIrdTh9aYOVJmBugHg5AjbPXlJHmKIjmTX8EhT", - "0Z7Pk+MnL9HBav44Ch0A9v2vIWmSzf0k1zAfY4ySgaEFt4V6ELQGmEcb44JrYDeZrmP2Era0sm77XlpR", - "ThcQjgpdbcHJ9MXVRF9Ahy48My+OSVWKDWGRdGNQVMunSKaZFn8GDZKK1YqplQ3kkGKl+al5VMYM6sCZ", - "58tsPXCHl/uI8VCFCwfpXCI/r9/HnG+hWWPU2lu6gjZZp4SaWk05ayIV3SsF5MyVgsMC6XVddEMbPZae", - "Oqo5GLg4J0XJuMKLRaXmyd9IuqQlTbX4O4ihm8y+eh4oCt8uTsx3Q/yz070ECeV1mPRlhO2dDmH7kodc", - "8GSlJUr2qMns9HZlNHArHKITixMaBj1WKdNQkii7VS12o56kvhfj8QGA92TFej478ePOM/vsnFmVYfag", - "lV6hn96/tlrGSpSh+q7NdrcaRwmqZHCNcfrhRdIw77kWZT5qFe6D/Zd1njqV01PL3F6OXgR28fh4dwP0", - "+fiRiXfx9rQ9PS2dK+j2wRvOOA+IefN0m9/jPq8htTrvgpWT0OOwixgRWgmwHYrtdgO+v4nBc/m0VihG", - "o/bUQpz5jQhM2T2hUft4bMZkwG4VO0D0By2gZhbUlLSfK/j8ETXOLdKP7NBfHK74RxfZLyxskMhuBpFF", - "9J5SCS5nVn/3gsso+Uasxy5qR3a7hf0XIE2QJBXLs5+b2iCdl2pKytNlMFhkpjv+0rypWU/ObOZggd8l", - "5dxEI/RtE3hL+cXdZgL3rX+KseOsGB/Ztvt4jpluZ3IN4m00HVJuQE1epnI9gE/VdtmFOq0vX4iM4DhN", - "NdnmXO8/uuQ9jfFbBVKFzkX8YFIL0KI+11xsXqgAnqEd44B8b97EXwJp1QpE+4Gp0gSZeyfAuHqqIhc0", - 
"mxIN5+Lbk9fEjGr6mJfhzMsQC3PstmYRj8/dJdB2KLZ2Hxl9etZSYelOqeiqCJUo0S0uXAOsg+J7l/Bi", - "7VPngJwam4Z0N2YziOaHOStXkJF6OKtVI0/o/yhF0yUaC1oiNc7y4580cVwpvWeE6+cA6+rRuO803vZV", - "E/OoyZQIrTncMGmeQodraFdFqUsEWTXAVUlpT6+sODecEtSKh0pY3YXsDjkTBekcUEHMOoTfUXuxYeo7", - "vvByjr2C1Sy7z8X03g82NTbqZ97euBegKRecpVhLMnQ022fVx3hnR5TdDGcG2HgbOQlsruAjNXWyhqVi", - "9NkaJwgt4fruIe+rXlTDHeZPhe93L6kiC1DSSjbIpu6tJWuhZlyCrQaOL+x7clKULY83SshgEEWjJ+/I", - "RpicHTE5fKe/vbUGKcxavGIcr54uR8IkSBobMr76rPR9lSmyEJhBYTeFP6cPus8BFmvJYP3xwL0SjTCM", - "w1hP20RH9EGduFgJG5ug277SbU1BvebnVh6cGfSkKOyg8Ze4gvqAWvMogQM+7zrQyyNuDd+HNsBug0FO", - "eJ5qRoNrDJGAgtjUmMirVJ0kGK20Go7CFsTERwfraAXDRF8zDs0b5oEDIg0eCbgwuF8j/WRaUmVUwFEy", - "7QJojnERIYEmlXWK3RdUZ4FtPGmRTtwY8WVsHtSKCI66QaO4Ub6pn07X3O0pE69oXgcJBZ7HQq3KKlE2", - "uab9YFZIcGjB7Qpytg+A/jbo60Smuyqp2Tm7nESxUiWzKluASmiWhewJ3+BXgl9duVJYQ1rVVbyLgqRY", - "ma9dqrDPbXagVHBZrQbGcg3uOZz3Al2AG/xX8NwKY+D1bIP/hkpYx1fGhgftHGPvYoGyOn1uF725Damn", - "9WqeTiRbJOMpgWfK/cnRDH03Rm/675XTc7FoI/KZC5QNSTl/jULy7Vt9cPj1u3p12c3RUpfXwnBQ4d4N", - "xmtjXRimLZVc1mlvTK/y8rABIv7C6BQPv0hei2frpeZ8NX7tWHZLGk3GosrWT1CUDIqgaE66iSsz2eeI", - "RdimH4slM6Fk+nOv9zjNsKdnI+xBgrogxT5CP7gIaFJQZoM2GmHRp6xN94qbC4c2XbPA3UnYJKqoxe6H", - "61jCk8sDNpkdnTcZr8AWVSpKuGaicuEQLl7OXQnNr/ZNfC+vODr/ftwMDvVlzaBRo+2Fff/HTNPeyX/4", - "2URXEuCq3PwLmHB7i9570TJUs7j1nqVVroL2JjX2rDytH8W8uk5WIhtKmP7hZ3LqfEujzh3HyKFySyKz", - "r8gFk8Vf2ycgXDOtfY4e9o3tdFIUw0NHMsT7g5uGuw4fKzWl9+eQ1e2d27/mHVDfhBC4q3jpzBzWKvL4", - "Uzcb9gYIrAvAWrdeYnO8esZYhrJJjnhbTXKgEgYo7Fdts21HEvli/Vq3H5dsH36JNV5ytikzi8KzEJI1", - "j/OEnmgdGXJ8ga+seh7DPiwX73cNqcIXmZo4phJglwK6ejDv+e9/l56NGErqyGzH/wNlZqcTX7YEExXt", - "9qJNiRz0qqHLNVCq3rQJCHvbmelNUsHUgdA/zGkuw6+iRYNdO5VPvICVQKHn8MTOshHVvu10pl4MBMuG", - "CRnOBDDB3/9/EtPEte+XnL03u4ZvFb3CC17xEPO00sEOASR1FDVqhrheC+D2Zfh5iDTbs6Lmc0gVu95S", - "6OIfS+BeEYWpswQjLnOv7gWrs2ywoOjufo4GoaE6FIP4eIX9741OLEf0CjYPJGlxQ/Ctp6lT7u9SSxIp", - "gKeWVjwKIUNRisZ1ZQPHmKw5A6ngooJNd2iqckdfifX0nDuO5ViyrfEMDBl+pnLUWLrrTpXAMGEkVguj", - "/8xd3OJxiq8KyvoFd1eL0rcLkrPAQ1C2liWWJam9ta6qJUj3m6tBZEbJ2RX479iibxxLKNgWQWOvsyMn", - "A3pSL/s7+HoV1s5yI7Mmh6Of7xuoAY3RT2ku8OWnWLpTO22iDvN6IE1wKKop+BIV4jWH0r73jSdDLiQk", - "SrjQuiE8hkhhImDvRAQZfXfBIBethvq+KfeK78+YYhnUBr76EyQlrKjGrvSKssbHHCL2K/PdJbi6mlxb", - "bdo1vyZbq6q67B0me0T0uX5O7Gm5PXH2LuZtxjmUifN1d2MKuSal738tSpFVqS0E422M2gUwumDZgCgJ", - "WobT/ix7Rr4cq4G/9soQXMHm0Nhf0iXlC6+8mo+9Ue3NHLzKZZ3V3qvlP2zkzBdmAou94PklrefTSSFE", - "nkQcrmf9QrPdPXDF0iutZldN3HvkoU3yEP18dUTNzXLjCqsWBXDIHh0QcsJNppELrmm/dNQZnD9QQ+Ov", - "cdSsMrWfrWH/4JKHUzawqE95T/nmwAxLNQla+N1zKANkSxnTdaTIbUlvAs/O9uPpRoe7dJ8CbZjKYBHS", - "Uu5YqmvU/u4b9wOs772COHz78Sv5NVHMpfERobbUvAzZVl7eNK6fce8xug5b0PONNd6LjE4aWXS+cKjx", - "m5oo3lSinNCa/jb7j51gI5e8JZKYNamnaQoQmzC19rp4xj35qraZhencN61h2T7BseZv3yQn0WdoyrB6", - "jKP3ZXlN889vVsN6jidID8jexxUe//7rE9mQUt4t3u81HTW2d9fd39D8HZoB/wF6jYLOXgvKOn/qlzCd", - "iwxL3NOc5KJ5FxlBkhuEabzDT74iM5tFV5SQMsk6CcY37lWT+rqHj3zZGMu12nK/3DbPn4W6BxvbC4Io", - "yNvmhQQl8HxoMGy26BcWKpGdG+TyEPf12CJAv5CM8svZbDkurlpuY/PiTCceUpSwZ/exFwi2o/u4X6hn", - "7PSMi1QfOpWE/jxHn9Yt2gYO6mZuY2Mf+sQdKqM/JmQh/DqG7o4xE4Yg+LQMQVTJr09+JSXM8e1IQR4/", - "xgEeP57apr8+bX/W2/nx46Aa99miJQyNLAw7bpBjrDOtlwoD64KVkaJ/761wtwc2uu8IdoBwdc4cgq/B", - "4NAubvQzl4JGnXurgd9MzTbeJs88krkp1wOFaP9zLHfBxOdH0mQ6e6FiebZtU7aSnpqXbzGt5xebkPtF", - "3t79xdiy+2LSvn+4S4xcdwMgYQJzbQ3uDeWlM43IZLLdAnlLyFxpVTK1wTphzvTJfgnG1Hxfe0usF7iu", - "LGP1DiWuoK401/hWKuk0m+8FzVEX0PcZjFBUQuQH5Ns1XRU5WCH19YPZX+HZ355nR8+e/HX2t6MXRyk8", - "f/Hy6Ii+fE6fvHz2BJ7+7cXzI3gy/+rl7Gn29PnT2fOnz7968TJ99vzJ7PlXL//6QJ8BGmWD6MRVpZj8", - 
"b3ygOjl5d5ZcaGQbmtCC/QAb8xamZmP3yiZNUQrCirJ8cux++p9Ouh2kYtWAd79ObNL7ZKlUIY8PD29u", - "bg78LocLNKYmSlTp8tCN03uG8+TdWZ0eZmKhcEVN5o9mBVxUywon+O39t+cX5OTd2UHDMJPjydHB0cET", - "rGVcAKcFmxxPnuFPuHuWuO6Hrojw8afb6eRwCTRHn7j+YwWqZKn7JG/oYgHlgX1uVP90/fTQqXGHn6wh", - "+Xbo26H/cs/hp5a9PdvSEwNdDj+5IlbDrVtVoqyfweswEouhZoczzEAe2xSk1zg+FbzcycNPeD2J/n5o", - "0zLDH/GaaPbAoXNKhVu2qPRJrTWunR4pVemyKg4/4X+QJ2+NkMgh5IIy2YyUNM2nhClCZ6LE6lEqXWq5", - "4MrWMOm1nCCnGiY/yzRz616vDAauQJ2p2Hv8oR+AiICIg4SSQLN5s1FbIzWyGP3uXhHZ+qRptW/Omw9H", - "ycuPn55Mnxzd/kWfJ/bPF89uR/qSX9VwyXl9WIxs+BFrvqBVHPfv06OjnZ4G7l1Lm0maRarDkQNBDGYl", - "klXMcmKXqgOI1MTYUpuiAz70lPLtdPJ8xxkP2u5aIdqBJ5G/oRlxCb449pPPN/YZR0++luvEnFu308mL", - "zzn7M65ZnuYEW3rFxvpL/xO/4uKGu5ZayahWK1pu3DaWLaFA7GLjUUYXEi25JbumqNtxwdvl6j+i9yCU", - "ZB2RN1LRO8ibc93r3/Lmc8kbXKR9yJs2oD3Lm6c77vk//4z/LWH/bBL23Ii7e0lYq/CZvLa+Bmoi+w+x", - "vtim//OGp8Ef+4C6TwaHfj781H6Jp6Ujy2WlMnFjyqMEDwWs1UxzW9gRDdD1hUoJ4gA0AYXkR5t1lW/Q", - "6s4yIBSj20Wlmhuv7uzcxI15SUNonhNfMI4DoGEfRzEVTKkXqiMhFdw8vts5gCxmb0UG/QMIj5jfKig3", - "zRljcZxMWxLIslCgXui9BXpfYNzuxmDogDDesz5z1C/utv4+vKFM6WPKRvYhRfudFdD80BYO6Pza5Or1", - "vmACovejdycK/3pY18MKfuxeNkNf7WUr0siVfXGfG2OTb7xBlqjNNh8+6pXFgo6WWxpbxPHhIUbLLIVU", - "h5Pb6aeOncL/+LFeTFdPqV7U24+3/y8AAP//k2KIE9LJAAA=", + "H4sIAAAAAAAC/+x9a3PcNrLoX0HNnio/7nAkP3etqtS5ip1kdeM4LkvJ3nNs3wRD9sxgRQIMAEoz8dV/", + "P4UGQIIkyKEesTdV+8nWEGg0Go1Go1/4NEtFUQoOXKvZ0adZSSUtQIPEv2iaiorrhGXmrwxUKlmpmeCz", + "I/+NKC0ZX8/mM2Z+LanezOYzTgto2pj+85mE3yomIZsdaVnBfKbSDRTUANa70rSuIW2TtUgciGML4uTV", + "7GrkA80yCUr1sfyR5zvCeJpXGRAtKVc0NZ8UuWR6Q/SGKeI6E8aJ4EDEiuhNqzFZMcgztfCT/K0CuQtm", + "6QYfntJVg2IiRQ59PF+KYsk4eKygRqpeEKIFyWCFjTZUEzOCwdU31IIooDLdkJWQe1C1SIT4Aq+K2dH7", + "mQKegcTVSoFd4H9XEuB3SDSVa9Czj/PY5FYaZKJZEZnaiaO+BFXlWhFsi3NcswvgxPRakB8qpckSCOXk", + "3bcvyZMnT16YiRRUa8gckw3Oqhk9nJPtPjuaZVSD/9znNZqvhaQ8S+r27759ieOfuglObUWVgvhmOTZf", + "yMmroQn4jhEWYlzDGtehxf2mR2RTND8vYSUkTFwT2/hOFyUc/4uuSkp1uikF4zqyLgS/Evs5KsOC7mMy", + "rEag1b40lJIG6PvD5MXHT4/mjw6v/vL+OPlv9+ezJ1cTp/+yhruHAtGGaSUl8HSXrCVQ3C0byvv0eOf4", + "QW1ElWdkQy9w8WmBot71JaavFZ0XNK8Mn7BUiuN8LRShjo0yWNEq18QPTCqeGzFloDluJ0yRUooLlkE2", + "N9L3csPSDUmpsiCwHblkeW54sFKQDfFafHYjm+kqJInB60b0wAn96xKjmdceSsAWpUGS5kJBosWe48mf", + "OJRnJDxQmrNKXe+wImcbIDi4+WAPW6QdNzyd5zuicV0zQhWhxB9Nc8JWZCcqcomLk7Nz7O9mY6hWEEM0", + "XJzWOWo27xD5esSIEG8pRA6UI/H8vuuTjK/YupKgyOUG9MadeRJUKbgCIpb/hFSbZf8/pz++IUKSH0Ap", + "uoa3ND0nwFORDa+xGzR2gv9TCbPghVqXND2PH9c5K1gE5R/olhVVQXhVLEGa9fLngxZEgq4kH0LIQtzD", + "ZwXd9gc9kxVPcXGbYVuKmmElpsqc7hbkZEUKuv3qcO7QUYTmOSmBZ4yvid7yQSXNjL0fvUSKimcTdBht", + "Fiw4NVUJKVsxyEgNZQQTN8w+fBi/Hj6NZhWg44EMolOPsgcdDtsIz5ita76Qkq4hYJkF+clJLvyqxTnw", + "WsCR5Q4/lRIumKhU3WkARxx6XL3mQkNSSlixCI+dOnIY6WHbOPFaOAUnFVxTxiEzkheRFhqsJBrEKRhw", + "/DLTP6KXVMHzp0MHePN14uqvRHfVR1d80mpjo8Ruyci5aL66DRtXm1r9J1z+wrEVWyf2595CsvWZOUpW", + "LMdj5p9m/TwZKoVCoEUIf/AotuZUVxKOPvCH5i+SkFNNeUZlZn4p7E8/VLlmp2xtfsrtT6/FmqWnbD1A", + "zBrX6G0KuxX2HwMvLo71NnppeC3EeVWGE0pbt9Lljpy8GlpkC/O6jHlcX2XDW8XZ1t80rttDb+uFHEBy", + "kHYlNQ3PYSfBYEvTFf6zXSE/0ZX83fxTlrnprctVjLSGj915i7YBZzM4LsucpdQQ8Z37bL4aIQD2lkCb", + "Fgd4oB59ClAspShBamaB0rJMcpHSPFGaaoT0HxJWs6PZXw4a48qB7a4OgsFfm16n2Mnoo1bHSWhZXgPG", + "W6PXqBFhYQQ0fkIxYcUeakSM20U0rMSMCM7hgnK9aO4jLXlQb+D3bqSG3laVsfTu3K8GCU5swyUoq97a", + "hvcUCUhPkKwEyYra5joXy/qH+8dl2VAQvx+XpaUHqobAUOuCLVNaPcDp02YnheOcvFqQ70LYqGcLnu/M", + "4WBVDXM2rNyp5U6x2nDk5tBAvKcILqeQC7M0ngxGh78LjsM7w0bkRuvZyyum8d9d25DNzO+TOv85WCyk", + "7TBz4S3KUc5eYPCX4OZyv8M5fcZxtpwFOe72vRnbGChxhrkRr4yup4U7QseahJeSlhZB98WepYzjDcw2", + 
"srjeUppOFHRRnIM9HPAaYnXjvbZ3P0QxQVbo4PB1LtLzv1O1uYM9v/Sw+tsPhyEboBlIsqFqs5jFtIxw", + "ezXQpmwx0xBv72QZDLWop3hX09sztYxqGkzN4RtXSyzpsR8KPZCRu8uP+B+aE/PZ7G0j+i3YBTlDAabs", + "dnYehMxc5e0FwY5kGqCJQZDC3t6JuXVfC8uXzeDxdZq0Rt9Yg4FbITcJXCGxvfNt8LXYxnD4Wmx7W0Bs", + "Qd0Ffxg4qEZqKNQE/F45zASuvyMflZLu+kRG2FOIbCZoVFeFu4GHJ74ZpbG8Hi+FvJn06YgVThp7MqEG", + "aiB85x0iYdOqTBwrRmxStkEHUOPCGxcaXfAxirWocKrpH0AFZaDeBRXagO6aCqIoWQ53wPqbqNBfUgVP", + "HpPTvx8/e/T4l8fPnhuWLKVYS1qQ5U6DIvfd3YwovcvhQX9meDuqch2H/vypt0K24cbgKFHJFApa9kFZ", + "66ZVgWwzYtr1qdYmM866RnDK5jwDI8kt2Yk13BvUXjFlNKxieSeLMUSwrBklIw6TDPYy03Wn1wyzC6co", + "d7K6i6ssSClkxL6GW0yLVOTJBUjFRMRV8ta1IK6FV2/L7u8WW3JJFTFjo+m34qhQRDhLb/l0uW9Bn215", + "Q5tRyW/nG5mdG3fKurSJ7y2JipQgE73lJINltW7dhFZSFISSDDviGf0d6NMdT9GqdhdMOnxNKxhHE7/a", + "8TS4s5mFyiFbtxbh9nezLlW8fc4OdU9F0DHkeI2f8Vr/CnJN71x/6Q4Qw/2lX0iLLMlMQ7wFv2brjQ4U", + "zLdSiNXd4xgbJYYofrDqeW769JX0NyIDM9lK3cFh3ABreN2sacjhdCkqTSjhIgO0qFQqfkwPuOXRH4hu", + "TB2e/HpjNe4lGEZKaWVmW5UEnXQ9ydF0TGhquTdB0qgBL0btfrKt7HDW5ZtLoJm51QMnYulcBc6JgZOk", + "6GHU/qBzSkJkL7XwKqVIQSnIEmei2Iuab2eFiB6hEyKOCNejECXIispbI3t+sRfPc9gl6A9X5P73P6sH", + "XwBfLTTN9xAW28TIW1/4nD+oj/W04ccYrjt4yHZUAvEy19wujYDIQcMQCa9Fk8H162LUW8Xbk+UCJHpm", + "/lCO94PcjoFqVP9gfr8ttlU5EOXlLjpnrEC7HadcKEgFz1QUWE6VTvaJZdOodRszMwgkYUwSI+ABpeQ1", + "Vdp6ExnP0AhijxMcxyooZohhhAcVUgP5Z6+L9mGn5hzkqlK1YqqqshRSQxabA4ftyFhvYFuPJVYB7Fr7", + "1YJUCvZBHqJSAN8Ry87EEojq2uju3O39yaFp2pzzuygpW0g0hBhD5NS3CqgbRroMIMJUQ2jLOEx1OKcO", + "r5nPlBZlaaSFTipe9xsi06ltfax/atr2mYvq5tzOBJjRtcfJYX5pKWtjnDbUXKERMinoudE98EJs3Z59", + "nM1mTBTjKSRjnG+25alpFW6BvZu0KteSZpBkkNNdH+hP9jOxn8cA4Io3Fx+hIbHxLPFFbzjZhw+MgBYI", + "T8WUR4JfSGq2oLl5NAzieu+BnAHCjgknx0f3alA4VnSJPDyctl3qCEQ8DS+ENitu2QExdgJ9Cr4DZKgh", + "35wS2DlprmXdIf4LlBugViOuP8gO1NAUGvjXmsCAMc2FAQfbpSPdOwI4KjUHpdgeMTK0Ywcse2+p1Cxl", + "JV51vofdnd/8ugNE/U0kA01ZDhkJPthbYBn2JzYQowvzZjfBSUaYPvo9K0xkOjlTqPG0kT+HHV6539oI", + "v7MgLvAOrrIRqOZ4opwgoj5uyGjgYRPY0lTnO6On6Q3syCVIIKpaFkxrG7LZvulqUSYhgKiBe2RE582x", + "0XF+Baa4l04RVDC9/lLMZ/ZKMI7fWede0CKHuwqUQuQTjEc9YkQxmOT4J6Uwq85chLAPI/Wc1ELSCW10", + "5dWn/z3VIjPOgPyXqEhKOd64Kg21SiMk6gmoP5oRjAZWj+lc/A2FIIcC7EUSvzx82J34w4duzZkiK7j0", + "YfWmYZccDx+iGeetULq1ue7AVGi220nk+EDLP557LnihI1P2u5gd5Ckr+bYDvHYXmD2llGNcM/1bC4DO", + "ztxOmXvII9Pc6wh3klE/AB2bN677KSuq/K4WfEVZXkkY9o59+PB+VXz48JF8a1t6x/bcM3lIjssmLWLl", + "TqNKYmgNyZm530pBM6MgRG37OEm+TurgTBVFp1AGnX+4fUj5rpPINxUHsoSUVjYq2Ulth0ETHqoWEX2x", + "s7pdEkYnMtE8XuXaHtohVddSVCVR9bJbLtBUwx9jam5Ax7DsDxzEBjUfh8KDzDUx393BaW0BEQmlBIWy", + "NTSvKPtVrML8Gyd81U5pKPoWaNv1l4H72bvBe47gOeOQFILDLppyyjj8gB9jva18H+iMJ+1Q367y3MK/", + "g1Z7nCnceFv64moHAu1tHRd3B4vfhdtxPoSZR2hcg7wklKQ5Q9Ob4ErLKtUfOMXLfbDZIvED/hozbO55", + "6ZvE7UsR848D9YFTjB2pr/xRubiCiFz+FsBbfVS1XoPSHS1xBfCBu1aMk4ozjWMVZr0Su2AlSHTiL2zL", + "gu7IiuZonfodpCDLSreFKyZIKM3y3HlCzDBErD5wqkkORqr+wPjZFsF5T6LnGQ76UsjzmgqL6H5YAwfF", + "VBKPc/jOfsUQNDf9jQtHw2xV+9nazg38Jotih3f/JgPz/93/z6P3x8l/0+T3w+TF/zr4+Onp1YOHvR8f", + "X3311f9v//Tk6qsH//kfsZXyuMfC9x3mJ6/cneLkFSqOjfG8h/tnM5wWjCdRJgtdxB3eIveN+usZ6EHb", + "rKA38IHrLTeMdEFzllF9M3boirjeXrS7o8M1rYXomBH8XK+pjt1CypCIkOmIxhsf4/3QoHiiDHpzXO4L", + "7pdVxe1SVsp5lDAO3IdoiNW8ToayRRCOCGbKbKiPL3J/Pn72fDZvMlzq77P5zH39GOFklm1jeUwZbGNa", + "ttsguDHuKVLSnQIdlx6IezQaxTrFQ7AFmOuZ2rDy80sKpdkyLuF8dK27rW/5Cbdhr2b/oG9o50zOYvX5", + "8dYSIINSb2LJ0S1NAVs1qwnQ8deXUlwAnxO2gEX3tpytQfm4mBzoCpN00b8hpmQL1PvAMprnioDq4UQm", + "XUlj/IPKrZPWV/OZO/zVnevjDnAMr+6YtSPI/60FuffdN2fkwAlMdc+m1FnQQRJUxArl4vxbkRxGmtmS", + "EDan8AP/wF/BinFmvh994BnV9GBJFUvVQaVAfk1zylNYrAU58qkDr6imH3hP0xqs2hIkbZCyWuYsJeeh", + 
"Rtywp83Ej14bab4W5uLYdWr39Vc3VFS+2AGSS6Y3otKJSzVOJFxSGXMaqDrVFCHbQgFjo86Jg21FsUtl", + "dvDjMo+WpeqmnPWnX5a5mX7AhsolVJklI0oL6XURo6BYbHB93wh3MEh66fPUKwWK/FrQ8j3j+iNJPlSH", + "h0+AtHKwfnVHvuHJXQkte+WNUuK6tkqcuL3XwFZLmpR0PWA00EBLXH3Ulwu8ZOc5wW6t3C8f24qgmgl4", + "egwvgMXj2nksOLlT28vXjIlPAT/hEmIbo240HtObrleQDXbj5epklPVWqdKbxOzt6KyUYXG/MnUpibVR", + "srwbW7E1hgq6qhtLIOkG0nPIsAAAFKXezVvdfaSEUzS96GDKFsqwuRyYzY2m3SWQqsyoU8U7BiVDYQVa", + "+1jFd3AOuzPRJINfJ4+2ndaphjYqcmqgXRpmDbetg9FdfBeOg7ausvTZkZgm49niqOYL32d4I1uV9w42", + "cYwpWmmHQ4SgMkIIy/wDJLjBRA28W7F+bHrmlrG0J1+kroaX/cQ1aS5PLnImnA1mU9rvBWDVHXGpyJIa", + "vV24gjE2dTGQYpWiaxjQkEPr+sQEwZZFHoHsO/eiJ51YdQ+03nkTRdk2Tsyco5wC5othFbzMdOKl/EjW", + "gWMNqATrwDmCLXNUk+rAMit0qGx5OWxhqyHU4gwMkjcKh0ejTZFQs9lQ5WvZYMkfv5cn6QB/YCruWAGG", + "kyDUJ6jrUxu+vczt7tPe7dKVYfC1F3zBhfBqOaF4gtHwMbo4thyCowKUQQ5rO3Hb2DNKkxbcLJDB48fV", + "KmccSBKLGqJKiZTZYkTNMePGAKMfPyTEmoDJZAgxNg7QRsckAiZvRLg3+fo6SHKX1kw9bHRpBn9DPAPD", + "xtEalUeURoQzPhCx7SUAdaFm9fnVCXhEMITxOTFi7oLmRsy5G18DpFcHANXWTta/c40/GFJnRyzw9mC5", + "1pzsUXST2YQ6k0c6rtCNYLwU28SmYEU13uV2afg9GlqMCWGxjWkrLtxTZCm2GG6BR4sNZd2DyzAeHo3g", + "hr9lCvkV+w2d5haZsWHHtakYFypkGWfOq9llSJ2YMvSABjPELveDIgo3QqBj7GjKjbrL795Lals96R/m", + "zak2b4oD+ayN2PYf2kLRVRqgX98KU5c9eNvVWKJ2inbUQLviQ6BCxpjeiIm+k6bvClKQA14KkpYSlZzH", + "XHfmbgN44pz6boHxAutKUL57EISiSFgzpaExopuD2XuFPrd5kmI5KyFWw7PTpVyZ+b0Toj6mbL0U7Nia", + "5mefAYZyrphUOkEPRHQKptG3Ci/V35qmcV2pHexiKzuyLC4bcNhz2CUZy6s4v7pxv39lhn1Ti0RVLVHe", + "Mk6AphuyxEqk0RC4kaFtlOTohF/bCb+mdzbfabvBNDUDS8Mu7TH+JPuiI3nHxEGEAWPM0V+1QZKOCMgg", + "c7EvHQO9yW5OzFxcjFlfe5sp87D3ho34/MmhM8pCis4lMBiMzoKhm8ioJUwHhTz7KYUDe4CWJcu2HVuo", + "hTp4Y6bXMnj4CkkdKuDqOmB7KBDYPWNZDRJUuxhWo+DbkqytWhSLSZQ5a5esCgVCOBRTvqB4n1B11tM+", + "Wp0Bzb+H3c+mLU5ndjWf3c50GqO1g7iH1m/r5Y3SGV3z1pTW8oRck+S0LKW4oHniDMxDrCnFhWNNbO7t", + "0Z9Z1MXNmGffHL9+69C/ms/SHKhMalVhcFbYrvzTzMrW3RrYIL5gsbnzeZ3dqpLB4tfFgkKj9OUGXHHY", + "QBvtVbFrHA7BVnRG6lU8Qmivydn5RuwUR3wkUNYuksZ8Zz0kba8IvaAs93Yzj+1ANA9OblopxKhUCAHc", + "2rsSOMmSOxU3vd0d3x0Nd+2RSeFYI+VrC1uhWRHBuy50o0KiOQ5ZtaBYg85aRfrCiVcFWhISlbM0bmPl", + "Swy75dZ3ZhoTbDygjBqIFRtwxfKKBbBMMzXhottBMhgjSkxfz3CIdkvhntaoOPutAsIy4Np8krgrOxsV", + "i/45a3v/ODW6Q38sB9ha6Bvwt9ExwvqL3RMPkRhXMEJPXQ/dV/WV2U+0tkiZHwKXxDUc/uGIvSNxxFnv", + "+MNxsw1e3LQ9buFLGH35ZxjDVk3e/wyHv7y6QpADY0Sf1WAqWUnxO8TveXg9jmSM+IqTDKNcfgc+Icy8", + "se40r4M0ow8u95B2E1qh2kEKA1yPKx+45bD0nbdQU26X2la5b8W6xRkmjCo9sPAbhnE49yJxc3q5pLG6", + "gEbJMDgdNw7gli1dC+I7e9o7sz9zRUAXJPAl122ZTQYuQTbJXP3CIjdUGOywk1WFRjNArg11grn1/+VK", + "RMBU/JJy+1iC6We3kuutwBq/TK9LITGVX8XN/hmkrKB5XHPI0r6JN2NrZp8KqBQEtegdIPvGiuUiV8/f", + "utgb0pysyOE8eO3CrUbGLphiyxywxSPbYkkVSvLaEFV3MdMDrjcKmz+e0HxT8UxCpjfKElYJUit1eL2p", + "nVdL0JcAnBxiu0cvyH102yl2AQ8MFd35PDt69AKNrvaPw9gB4N4EGZMm2SpMfInzMfotLQwjuB3URTTr", + "2T7kNCy4RnaT7TplL2FLJ+v276WCcrqGeKRIsQcn2xdXEw1pHbrwzL5CorQUO8IGUpBAUyOfBqLPjfiz", + "aJBUFAXThXPuKFEYfmoKzdtBPTj7pImrEerx8h/RR1p6F1HnEvl5jab2fIvNGj3Zb2gBbbLOCbX1G3LW", + "RC/4ysXkxJeHwaKpda1USxszlpk6qjkYzLAipWRc48Wi0qvkbyTdUElTI/4WQ+gmy+dPI4Vi2wUL+fUQ", + "/+x0l6BAXsRJLwfY3usQri+5zwVPCiNRsgdNtkewKweduXG33ZDvcBz0VKXMQEkG2a1qsRsNJPWtGI+P", + "ALwlK9bzuRY/Xntmn50zKxlnD1qZFfrp3WunZRRCxmq+NdvdaRwStGRwgbF78UUyMG+5FjKftAq3wf7L", + "eh68yhmoZX4vxy4CX4vI7dQXL64t6S5WPWIdGNqm5oNhg6UDNSftQrGf3+nnjc9955P54nHFP7rIfuEl", + "RSL7GQwsYlDEOrqcWf098H9T8rXYTl3Uzg7xC/svQJooSSqWZz83WZmdGuGS8nQT9WctTcdfmteM6snZ", + "8ylaWm1DOYc8Cs7qgr94nTGi1f5TTB2nYHxi227ZcjvdzuQaxNtoeqT8gIa8TOdmgJCq7YS3OqA6X4uM", + "4DhNHa9GevbL3QdFiX+rQOlY8hB+sEFdaLc0911bE5cAz/C2uCDf2ddIN0BaVVrwlmbz4yHzFVqtQb0q", + 
"c0GzOTFwzr45fk3sqLaPfZPD1uRd4yWlPYuOvSooUTgtPNg/rxFPXZgOZzyW2sxaaSyapDQtylhyqGlx", + "5htgBmpow8frS0idBXllb47K30vsIIYfVkwW5sZVQ7O6C/KE+Y/WNN3glawlUodZfnoxac+VKnjArX6I", + "pa7bh/vO4O3qSdty0nMizL35kin7CCVcQDsftU7OdiYBn5/anp6sOLecEtU9xooH3ITsHjkbqOHN/FHM", + "OoS/pkJua7Fft7b2KfaK1hHqFuruvdxmsxvrBzb848Ip5YKzFKv4xI5m96DlFB/YhIJHXSOr3+Juh0Y2", + "V7Q8eB0m56g4WDDcC0JHuL4RPvhqFtVyh/1T48uJG6rJGrRykg2yua9y7+yAjCtwdRjxbdNATgrZ8iui", + "hIy6qpPapXFNNsK0mIGL3bfm2xt37cd48XPGUcF3ZHOh6dZSh+/taXMrYJqsBSg3n3ZusHpv+iwwTTaD", + "7ceFf58PYVi3nJm29UH3QR17j7TzAJu2L01bW8qk+bkVgWwHPS5LN+jwGwhRfUBv+SCBI57FxLt2AuLW", + "8ENoI+w2GkqC56lhNLhARzSUeA73GKN+D6Dz1oxRWi1HYQtiQ7iiFQwYj6DxmnFoXo+MHBBp9EjAhcH9", + "OtBPpZJqqwJOkmlnQHP0PscEmtLO9XBbUJ0FRpLgHP0Yw8vYPGUwIDjqBo3iRvmufrTScHegTLzE13Id", + "IfsPE6BW5ZSoDDMKOk8VxASHEdy+FFL7AOhvg75OZLtrSe3Ouc5JNJQkuqyyNeiEZlmsLubX+JXgV18o", + "CraQVnX9xLIkKdZEaReJ6XObGygVXFXFyFi+wS2HC97+iHBD+P6IX2FMQlnu8N9Y8cDhlXFBGNcOA/QR", + "F+6xhGvqzW1IPa3X8HSi2DqZTgk8U25PjmbomzF60/9OOT0X6zYin7k0xJiUC9coJt++MQdHWDmhVxHT", + "Hi11YQMMuhP+xTa8NtYpuW2phEdZr0QmOnvqmnfjBojht53mePgNhN4GBTGoPV+t93AoADcdjBen2mWu", + "aUpGRdBgNpCN3rF5P4hF3HI6FLFjA3bM517vaZphT89G2KME9aFgfYS+93GmpKTMucYbYdGnrItIHzYX", + "jm26ZoG7k3Bx3oMWu+8vhmKyiWJ8nQPB793XcM7BpbPXz6HbufqoJH8ltL+610gtvDoqPjr/fnQCDvVl", + "zaCDRtszV3ndTtPdyb//2cawEeBa7v4FTLi9Re+9JdTXdq15qmlC6qq9k6r4tk7F+LNAw/WPmppHyE+l", + "UKypFB17L2hirNsZPvkT1G/qw/KBJheQaiwP3jjQJcB1qjmZwYK36P5dB2ng7liHBLryR2M1j/o1wfcc", + "aL20pCC1ztZTXkyv8HNch0mhUMLX4NbA3XNw7YSDyWHPqxWkml3sSQP7xwZ4kGI090YI+6xrkBXG6jBa", + "rCJyfRNbg9BYltYoPkE1v1ujM5QEcg67e4q0uCFa4Hnuz5WbFJBACqB0SAyLCBULQ7BWU+cZZqrmDKSC", + "D/ux3aEpxTX4NEyQ1HjDsTxLmhO3SXQcGTL+NsWksUzXa6X/YkToUKZYv7b9sLL9Cp8SUPWzbb4ARXgl", + "JSeR6s+ugAUm7dWOAl/KApT/zWfo2lFydg7h4zXolrmkMvMtonYGb8JIRs6jXnpXtGQ1VTaI0vnB6yDN", + "fkJPpPAThuKmucByz0PxzO24yPCNd4z+wOMAy08jXiuQ7pEvVPZyoSDRwgd1juExRgr3HvlNiKAGiy1a", + "5AZLoLxrarxg0VmKJU+oi2wJJ0gkFNRgJ4NKLMNjjhH7pf3uM1h80dG95pSaX5O9pVR8eC5TPSKGXL8i", + "7rTcnxlzE8sK49w+KapiZVm4IWVo+i+lyKrUHtDhxqitT5OLHo2IkqhRIu3Psne/zLEE2Osgz/AcdgdW", + "9U83lDe12Nrb2qpQdg5BXn9nte/U6BS/X+drO4H1neD5JQ0381kpRJ4M2PpP+tVlunvgnKXnkBFzdvjA", + "toHXNch9NDHXztzLzc5XUylL4JA9WBByzG0osffrtssbdwbn9/TY+FscNatswSdnU1p84PGYTCzFJG8p", + "3zyYcammwAi/Ww5lgeypXbIdqGwj6WXkrZnF1Etp39Paff+jYSqLRUxLuWEi+6T93bcrRVg/ePpg/PYT", + "1rloAuikNU+ittQ8B9FWXn5orI7THmHwHfagF16Kg2cYvDRy6HzhKLcfaqIEUxnkhNb0992z3QQbuRQs", + "kcK0CDNNW3XIRki01yUwoqiXtW0iTue+CQOLWgiOhX76pg+F5mqsFxwyjtmX8oLmn998gdVOjpEe7knE", + "+ETD+29IZEtKdbNQk9d00tjBXffuhuZv0dzyDzBrFPUzOFDO7lg/f+Gts1jXjuYkF81jSAiSXCJM65h4", + "9JwsXZh8KSFlinUyiC59KdP6uoeVvZuXMsfvl/vm+bPQt2Bjd0EQJXnTlEXUAs+HBsNmi35hoTKwc6Nc", + "HuO+HltE6BeTUWG++p7j4rzlsbBlZjuhOELCHXsughiEa3ou+pn4U6dnrfPm0KkU9Oc5+bRu0TZyUDdz", + "m+p26xN3rHbeFG9ZvCSm6Y7uOksQrCdLEFXy66NfiYQVPhghyMOHOMDDh3PX9NfH7c9mOz98GH+R83M5", + "6iyNHAw3boxjfh4K3bThiQNRwp31qFie7WOMVsx38+QKRjX/4rI+vsijL79Ye2p/q7rC+9cJEeguAhIm", + "MtfW4MFQQTT3hEBu1y0Sto03k7SSTO+wGIU3v7Ffoi7F72qLvfP41OnL7uzT4hzqciaNfb9S/nT9TtAc", + "zyOjU2OAhsZXGL/Z0qLMwW2Ur+4t/wpP/vY0O3zy6K/Lvx0+O0zh6bMXh4f0xVP66MWTR/D4b8+eHsKj", + "1fMXy8fZ46ePl08fP33+7EX65Omj5dPnL/56z8ghg7JFdOZTH2f/F19GSo7fniRnBtmGJrRk9eOrho39", + "8w40xZ0IBWX57Mj/9L/9DlukomjA+19nLrNqttG6VEcHB5eXl4uwy8EaDXqJFlW6OfDj9B+9fHtSR8db", + "VzCuqA18NqyAi+pY4Ri/vfvm9Iwcvz1ZNAwzO5odLg4Xj/AxsxI4LdnsaPYEf8Lds8F1P3DMNjv6dDWf", + "HWyA5uj/Mn8UoCVL/Sd1SddrkAv3zoX56eLxgVclDj45Y+bV2LeDsGTswaeWzTfb0xNLSh588pUSxlu3", + "ShE4W3fQYSIWY80OlpiANbUpqKDx8FTwgqEOPqGKPPj7gctKiX/Eq4rdAwfeMRJv2aLSJ701uHZ6uNeb", + 
"Dz7hf5Anr6yQyCHmBrHJHJQ0zeeEaUKXQmKJAp1ujFzwudFMBS1nyKmWyU8yw9ym10uLga+CYsvCHb3v", + "x18gIOIhoSQwbN5s1NZIjSzWsoKwUll90rTaN+fN+8PkxcdPj+aPDq/+Ys4T9+ezJ1cT/Zkva7jktD4s", + "Jjb8iInFaJnF/fv48PAWT+4d84D8dpGClx17tTrcS96D74q6peoAIjUx9iRAdsAPvM399JozHrUftSLU", + "Im/xfE0z4vObcOxHn2/sE47eZCPXiT23ruazZ59z9ifcsDzNCbYMKlr0l/4nfs7FJfctjZJRFQWVO7+N", + "VUsoELfYeJTRtUJromQXFHU7LnirTP/sI1qwYzlmA/JGaXoDeXNqev1b3nwueYOLdBfypg3ojuXN42vu", + "+T//jP8tYf9sEvbUirtbSVin8Nmw/r4GahNwD+wbwf2fdzyN/tgH1H2rJvbzwad2reSWjqw2lc7Epc0O", + "jx4KWBCQ5q56EBpB6wuVFsQDaILayI8u6DzfoeWXZUAoZsOKSjc3XtPZuyobn4SB0LxjtWYcB0DjMo5i", + "y2TRIFxEQSq4ffWlcwA5zN6IDPoHEB4xv1Ugd80Z43CczVsSyLFQpCjVrQV6X2BcXY/B0AhuPTh95qif", + "emn9fXBJmTbHlIsuQ4r2O2ug+YHLm+z82qQq9L5g/kXwY3Aniv96UNd1jH7sXjZjX91la6CRz3r3nxtj", + "U2i8QZaozTbvP5qVxapBjlsaW8TRwQFGbGyE0gezq/mnjp0i/PixXkxfTqJe1KuPV/8TAAD//xceSpBL", + "uAAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index dbfb7a17ca..4df6570b5c 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -548,234 +548,222 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9+3PcNpLwv4Kauyo/bjjyM7dRVeo+2U6yuthel61kby/yl2DInhmsOAAXAKWZ+PP/", - "/hUaAAmSIIcjyXKc6CdbQzwajUaj0c8Pk1SsC8GBazU5/DApqKRr0CDxL5qmouQ6YZn5KwOVSlZoJvjk", - "0H8jSkvGl5PphJlfC6pXk+mE0zXUbUz/6UTCv0omIZscalnCdKLSFaypGVhvC9O6GmmTLEXihjiyQxy/", - "mHwc+ECzTIJSXSj/xvMtYTzNywyIlpQrmppPilwwvSJ6xRRxnQnjRHAgYkH0qtGYLBjkmZr5Rf6rBLkN", - "Vukm71/SxxrERIocunA+F+s54+ChggqoakOIFiSDBTZaUU3MDAZW31ALooDKdEUWQu4A1QIRwgu8XE8O", - "f54o4BlI3K0U2Dn+dyEBfoNEU7kEPXk/jS1uoUEmmq0jSzt22Jegylwrgm1xjUt2DpyYXjPyqlSazIFQ", - "Tt5+95w8fvz4a7OQNdUaMkdkvauqZw/XZLtPDicZ1eA/d2mN5kshKc+Sqv3b757j/O/cAse2okpB/LAc", - "mS/k+EXfAnzHCAkxrmGJ+9CgftMjcijqn+ewEBJG7oltfK2bEs7/WXclpTpdFYJxHdkXgl+J/RzlYUH3", - "IR5WAdBoXxhMSTPozw+Sr99/eDh9+ODjv/18lPyv+/Pp448jl/+8GncHBqIN01JK4Ok2WUqgeFpWlHfx", - "8dbRg1qJMs/Iip7j5tM1snrXl5i+lnWe07w0dMJSKY7ypVCEOjLKYEHLXBM/MSl5btiUGc1RO2GKFFKc", - "swyyqeG+FyuWrkhKlR0C25ELlueGBksFWR+txVc3cJg+higxcF0KH7ig3y8y6nXtwARskBskaS4UJFrs", - "uJ78jUN5RsILpb6r1H6XFTlZAcHJzQd72SLuuKHpPN8SjfuaEaoIJf5qmhK2IFtRkgvcnJydYX+3GoO1", - "NTFIw81p3KPm8Pahr4OMCPLmQuRAOSLPn7suyviCLUsJilysQK/cnSdBFYIrIGL+T0i12fb/fve310RI", - "8gqUokt4Q9MzAjwVWf8eu0ljN/g/lTAbvlbLgqZn8es6Z2sWAfkV3bB1uSa8XM9Bmv3y94MWRIIuJe8D", - "yI64g87WdNOd9ESWPMXNradtCGqGlJgqcrqdkeMFWdPNNw+mDhxFaJ6TAnjG+JLoDe8V0szcu8FLpCh5", - "NkKG0WbDgltTFZCyBYOMVKMMQOKm2QUP4/vBU0tWATh+kF5wqll2gMNhE6EZc3TNF1LQJQQkMyM/Os6F", - "X7U4A14xODLf4qdCwjkTpao69cCIUw+L11xoSAoJCxahsXcOHYZ72DaOva6dgJMKrinjkBnOi0ALDZYT", - "9cIUTDj8mOle0XOq4KsnfRd4/XXk7i9Ee9cHd3zUbmOjxB7JyL1ovroDGxebGv1HPP7CuRVbJvbnzkay", - "5Ym5ShYsx2vmn2b/PBpKhUyggQh/8Si25FSXEg5P+X3zF0nIO015RmVmflnbn16VuWbv2NL8lNufXool", - "S9+xZQ8yK1ijrynstrb/mPHi7Fhvoo+Gl0KclUW4oLTxKp1vyfGLvk22Y+5LmEfVUzZ8VZxs/Etj3x56", - "U21kD5C9uCuoaXgGWwkGWpou8J/NAumJLuRv5p+iyE1vXSxiqDV07O5b1A04ncFRUeQspQaJb91n89Uw", - "AbCvBFq3OMAL9fBDAGIhRQFSMzsoLYokFynNE6WpxpH+XcJicjj5t4NauXJgu6uDYPKXptc77GTkUSvj", - "JLQo9hjjjZFr1ACzMAwaPyGbsGwPJSLG7SYaUmKGBedwTrme1e+RBj+oDvDPbqYa31aUsfhuva96EU5s", - "wzkoK97ahncUCVBPEK0E0YrS5jIX8+qHu0dFUWMQvx8VhcUHiobAUOqCDVNa3cPl0/okhfMcv5iR78Ox", - 
"Uc4WPN+ay8GKGuZuWLhby91ileLIraEe8Y4iuJ1CzszWeDQYGf46KA7fDCuRG6lnJ62Yxn91bUMyM7+P", - "6vxlkFiI237iwleUw5x9wOAvwcvlbotyuoTjdDkzctTuezmyMaPECeZStDK4n3bcATxWKLyQtLAAui/2", - "LmUcX2C2kYX1itx0JKOLwhyc4YDWEKpLn7Wd5yEKCZJCC4ZnuUjP/krV6hrO/NyP1T1+OA1ZAc1AkhVV", - "q9kkJmWEx6sebcwRMw3x9U7mwVSzaonXtbwdS8uopsHSHLxxscSiHvsh0wMZebv8Df9Dc2I+m7NtWL8d", - "dkZOkIEpe5ydBSEzT3n7QLAzmQaoYhBkbV/vxLy694LyeT15fJ9G7dG3VmHgdsgtAndIbK79GDwTmxgM", - "z8SmcwTEBtR10IcZB8VIDWs1Ar4XDjKB++/QR6Wk2y6ScewxSDYLNKKrwtPAwxvfzFJrXo/mQl6O+7TY", - "Cie1PplQM2rAfKctJGHTskgcKUZ0UrZBa6DahDfMNNrDxzDWwMI7TT8BFpQZ9Tqw0BzourEg1gXL4RpI", - "fxVl+nOq4PEj8u6vR08fPvrl0dOvDEkWUiwlXZP5VoMid93bjCi9zeFed2X4OipzHR/9qydeC9kcNzaO", - "EqVMYU2L7lBWu2lFINuMmHZdrDXRjKuuABxzOE/AcHKLdmIV9wa0F0wZCWs9v5bN6ENYVs+SEQdJBjuJ", - "ad/l1dNswyXKrSyv4ykLUgoZ0a/hEdMiFXlyDlIxETGVvHEtiGvhxdui/buFllxQRczcqPotOQoUEcrS", - "Gz6e79uhTza8xs0g57frjazOzTtmX5rI95pERQqQid5wksG8XDZeQgsp1oSSDDviHf096HdbnqJW7TqI", - "tP+ZtmYcVfxqy9PgzWY2Kods2diEq7/N2ljx+jk71R0VAceg4yV+xmf9C8g1vXb5pT1BDPbnfiMtsCQz", - "DfEV/JItVzoQMN9IIRbXD2Nslhig+MGK57np0xXSX4sMzGJLdQ2XcT1YTetmT0MKp3NRakIJFxmgRqVU", - "8Wu6xyyP9kA0Y+rw5tcrK3HPwRBSSkuz2rIgaKTrcI66Y0JTS70Jokb1WDEq85NtZaezJt9cAs3Mqx44", - "EXNnKnBGDFwkRQuj9hedExIiZ6kBVyFFCkpBljgVxU7QfDvLRPQAnhBwBLiahShBFlReGdiz851wnsE2", - "QXu4Ind/+End+wzwaqFpvgOx2CaG3urB5+xBXajHTT9EcO3JQ7KjEojnueZ1aRhEDhr6ULgXTnr3rw1R", - "ZxevjpZzkGiZ+aQU7ye5GgFVoH5ier8qtGXR4+XlHjonbI16O065UJAKnqnoYDlVOtnFlk2jxmvMrCDg", - "hDFOjAP3CCUvqdLWmsh4hkoQe53gPFZAMVP0A9wrkJqRf/KyaHfs1NyDXJWqEkxVWRRCashia+CwGZjr", - "NWyqucQiGLuSfrUgpYJdI/dhKRjfIcuuxCKI6krp7szt3cWhatrc89soKhtA1IgYAuSdbxVgN/R06QGE", - "qRrRlnCYalFO5V4znSgtisJwC52UvOrXh6Z3tvWR/rFu2yUuqut7OxNgZtceJgf5hcWs9XFaUfOExpHJ", - "mp4Z2QMfxNbs2YXZHMZEMZ5CMkT55li+M63CI7DzkJbFUtIMkgxyuu0O+qP9TOznoQFwx+uHj9CQWH+W", - "+KbXlOzdBwaGFjieigmPBL+Q1BxB8/KoCcT13jFyBjh2jDk5OrpTDYVzRbfIj4fLtlsdGRFvw3OhzY5b", - "ckCIHUMfA28PGqqRL48J7JzUz7L2FP8A5SaoxIj9J9mC6ltCPf5eC+hRpjk34OC4tLh7iwFHuWYvF9vB", - "RvpObI9m7w2VmqWswKfOD7C99pdfe4KovYlkoCnLISPBB/sKLML+xDpitMe83EtwlBKmC35HCxNZTs4U", - "SjxN4M9gi0/uN9bD7yTwC7yGp2xkVHM9UU4QUO83ZCTwsAlsaKrzrZHT9Aq25AIkEFXO10xr67LZfOlq", - "USThAFEF98CMzppjveP8DowxL73DoYLldbdiOrFPgmH4TlrvggY63FOgECIfoTzqICMKwSjDPymE2XXm", - "PIS9G6mnpAaQjmmjKa+6/e+oBppxBeQfoiQp5fjiKjVUIo2QKCeg/GhmMBJYNacz8dcYghzWYB+S+OX+", - "/fbC7993e84UWcCFd6s3DdvouH8f1ThvhNKNw3UNqkJz3I4j1wdq/vHec84LLZ6y28TsRh6zk29ag1fm", - "AnOmlHKEa5Z/ZQbQOpmbMWsPaWSceR3HHaXUD4aOrRv3/R1bl/l1bfiCsryU0G8dOz39ebE+PX1PvrMt", - "vWF76ok8RMdFHRaxcLdRKdG1huTMvG+loJkREKK6fVwkXyaVc6aKgrNWBpy/u3NI+bYVyDcWBjKHlJbW", - "K9lxbQdB7R6qZhF5sbW7bRRGFzJSPV7m2l7aIVaXUpQFUdW2WyrQVMOnUTXXQ8eg7E4c+AbVH/vcg8wz", - "Md9ew21tByISCgkKeWuoXlH2q1iE8TeO+aqt0rDuaqBt11963mdve985gueMQ7IWHLbRkFPG4RV+jPW2", - "/L2nM960fX3bwnMD/hZYzXnGUONV8Yu7HTC0N5Vf3DVsfnvclvEhjDxC5RrkBaEkzRmq3gRXWpapPuUU", - "H/fBYYv4D/hnTL+657lvEtcvRdQ/bqhTTtF3pHryR/niAiJ8+TsAr/VR5XIJSrekxAXAKXetGCclZxrn", - "Wpv9SuyGFSDRiD+zLdd0SxY0R+3UbyAFmZe6yVwxQEJplufOEmKmIWJxyqkmORiu+orxkw0O5y2JnmY4", - "6AshzyoszKLnYQkcFFNJ3M/he/sVXdDc8lfOHQ2jVe1nqzs349dRFFt8+9cRmP/37n8d/nyU/C9NfnuQ", - "fP0fB+8/PPl4737nx0cfv/nm/zV/evzxm3v/9e+xnfKwx9z3HeTHL9yb4vgFCo618rwD+40pTteMJ1Ei", - "C03ELdoid4346wnoXlOtoFdwyvWGG0I6pznLqL4cObRZXOcs2tPRoprGRrTUCH6te4pjV+AyJMJkWqzx", - "0td41zUoHiiD1hwX+4LnZVFyu5WlchYl9AP3LhpiMa2CoWwShEOCkTIr6v2L3J+Pnn41mdYRLtX3yXTi", - "vr6PUDLLNrE4pgw2MSnbHRA8GHcUKehWgY5zD4Q96o1ijeLhsGswzzO1YsXNcwql2TzO4bx3rXutb/gx", - "t26v5vygbWjrVM5icfNwawmQQaFXseDohqSArerdBGjZ6wspzoFPCZvBrP1azpagvF9MDnSBQbpo3xBj", - 
"ogWqc2AJzVNFgPVwIaOepDH6QeHWceuP04m7/NW1y+Nu4Bhc7TkrQ5D/Wwty5/tvT8iBY5jqjg2ps0MH", - "QVARLZTz8294chhuZlNC2JjCU37KX8CCcWa+H57yjGp6MKeKpeqgVCCf0ZzyFGZLQQ596MALqukp70ha", - "vVlbgqANUpTznKXkLJSIa/K0kfjRZyPNl8I8HNtG7a786qaK8hc7QXLB9EqUOnGhxomECypjRgNVhZri", - "yDZRwNCsU+LGtqzYhTK78eM8jxaFaoecdZdfFLlZfkCGygVUmS0jSgvpZREjoFhocH9fC3cxSHrh49RL", - "BYr8uqbFz4zr9yQ5LR88eAykEYP1q7vyDU1uC2joKy8VEtfWVeLC7bsGNlrSpKDLHqWBBlrg7qO8vMZH", - "dp4T7NaI/fK+rThUvQCPj/4NsHDsHceCi3tne/mcMfEl4CfcQmxjxI3aYnrZ/QqiwS69Xa2Iss4ulXqV", - "mLMdXZUyJO53pkolsTRCljdjK7ZEV0GXdWMOJF1BegYZJgCAdaG300Z37ynhBE3POpiyiTJsLAdGc6Nq", - "dw6kLDLqRPGWQslgWIHW3lfxLZzB9kTUweD7xNE2wzpV30FFSg2kS0Os4bF1Y7Q337njoK6rKHx0JIbJ", - "eLI4rOjC9+k/yFbkvYZDHCOKRthhHyKojCDCEn8PCi6xUDPelUg/tjzzypjbmy+SV8PzfuKa1I8n5zkT", - "rgajKe33NWDWHXGhyJwauV24hDE2dDHgYqWiS+iRkEPt+sgAwYZGHgfZde9FbzqxaF9onfsmCrJtnJg1", - "RykFzBdDKviYaflL+ZmsAccqUAnmgXMIm+coJlWOZZbpUNmwctjEVn2gxQkYJK8FDg9GEyOhZLOiyuey", - "wZQ//iyPkgE+YSjuUAKG48DVJ8jrUym+Pc9tn9PO69KlYfC5F3zChfBpOSJ5gpHw0bs4th2CowCUQQ5L", - "u3Db2BNKHRZcb5CB42+LRc44kCTmNUSVEimzyYjqa8bNAUY+vk+IVQGT0SPEyDgAGw2TODB5LcKzyZf7", - "AMldWDP1Y6NJM/gb4hEY1o/WiDyiMCyc8R6Pbc8BqHM1q+6vlsMjDkMYnxLD5s5pbtice/HVg3TyAKDY", - "2or6d6bxe33i7IAG3l4se63JXkWXWU0oM3mg4wLdAMRzsUlsCFZU4p1v5obeo67FGBAWO5g248IdReZi", - "g+4WeLVYV9YdsPTD4cEIXvgbppBesV/fbW6BGZp2WJqKUaFCknHqvIpc+sSJMVP3SDB95HI3SKJwKQBa", - "yo463ah7/O58pDbFk+5lXt9q0zo5kI/aiB3/viMU3aUe/HW1MFXaA6dCeAupkFm/nsIQKtNV/tauesFl", - "nzV8Y3RihIFcskfN14Z/QnR3rscroAFPPc8AIl7YmKMOJN9uCmGkWxuTZBNUOKRYOVGCDbVUVmelGF/m", - "UHluRtEUW7D3SfIYt0uuE075AcfJzrHN7XnkD8FSFHE49nmpvHX4GYCi55TXcKAcfkVIXJKKQVg+9tPH", - "m7ZoHz0oTfeaZmqU4K0Vux0M+XStmV2bqYIc8PWcNF4byVnMxn16+rMCFM3e+W6Blg8TsFC+vRf4bElY", - "MqWhtjYZCdZj+qb1+BTzvgmx6F+dLuTCrO+tEJU8ZxMLYcfGMm98BejzvGBS6QRNddElmEbfKdQ+fWea", - "xh8VTa8wmwKVZfFLFKc9g22SsbyM06ub94cXZtrXleygyjkKJowToOmKzDFlb9RXdGBq6048uOCXdsEv", - "6bWtd9xpME3NxNKQS3OOL+RctG66IXYQIcAYcXR3rRelAxdoEOLb5Y7BA8MeTrxOZ0Nmis5hyvzYO/2r", - "fKBxnzBnRxpYC7oG9TrnRhxyrB+ZZep1tv5oMC4XOmkoPyLoqhQ8StMzG1DW3GC+rHQqcbcp+64eNbRr", - "u2NAPn48vns4JwQnOZxDvtsJmiLGvQIHPSPsCOh6QzCcwPt47JbquztQI6xaaRvGKLV0pJshw239NHL5", - "8+q3NRKswZ2LfB9tvTMSmqe3mr67pruiSDLIIRpn9vcgkIwWBWaL8I1jAT1mMMYz2MTBsZ+msZz6XeV9", - "ybi2+VevK7Vja5zxyw4TII5BQWFT9e2fPrL/jRnsUojm/kX1EGVlHBhkxDh49bILqpG0qa/nGqdFwbJN", - "y+5pR+3Vjl8LxvCCcoPtwEBAG7EIRgmqmfiyVubZ9OuNvFOzUZg5aaanDGWacCqmfPGQLqKqCOdduDoB", - "mv8A259MW1zO5ON0cjUzaQzXbsQduH5TbW8Uz+iGZ81mDa+HPVFOi0KKc5onzpjcR5pSnDvSxObe9nzD", - "0lqc6518e/TyjQP/43SS5kBlUr12eleF7YovZlU2x2bPAfHFCVZUV/o5+xoONr9KDBgaoC9W4BLBBw/q", - "Tsba2rkgOIrOIL2IewPvNC87Pwi7xAF/CCgqd4jaVGe9IZoeEPScstzbyDy0PZ67uLhxd2OUK4QDXNmT", - "IryLrpXddE53/HTU1LWDJ4VzDaSqX9tqDIoI3naXM69gNL0hqa4p5pu1FpAuc+LlGq0GicpZGren8jmG", - "2HDrJ2MaE2zc8542I5asx+2KlywYyzRTI5TaLSCDOaLI9LmL+3A3F66MVsnZv0ogLAOuzSeJp7J1UFF/", - "6izr3es0LlW6ga01vh7+KjJGmGu5feM5mWtIwAi9cjrgvqi0fn6hlfXJ/BC4H+zh3BfO2LkSBxzzHH04", - "araBCqumd81oCX1nyS2vf3NJn3vmiJbQYipZSPEbxFVVqOGLRIf67NIMPVp/Az4ipKy25NSVwOrZe7e7", - "T7oJLU5Nh8QeqsedD1xwMM2tt0ZTbrfaVrRp+LXHCSaMIDmw49cE42DuRN3k9GJOYzmAjZBhYArMLw27", - "uRbEd/a4dzYa5hJ+z0jgN1a1ZTbxRwGyDtzuJhG7pMBgpx0tKtSSAVJtKBNMra9PrkRkmJJfUG4LI6E1", - "Ao+S620e+F4hdCEkpu1RcRN/BilbR5VLp6c/Z2nXnJuxJbNlgUoFQd0ZN5Ctp2apyNXuse50NWqOF+TB", - "NKhs5XYjY+dMsXkO2OKhbTGnCqxSxXtu+C5mecD1SmHzRyOar0qeScj0SlnEKkEqoQ6fN5Wjyhz0BQAn", - "D7Ddw6/JXXTRUewc7hksuvt5cvjwazSw2j8exC4AV/9riJtkizDINU7H6KNkxzCM2406i2oDbNHGfsY1", - "cJps1zFnCVs6Xrf7LK0pp0uIe4Wud8Bk++Juoi2ghRee2YpjSkuxJawn3Bg0NfypJ9LMsD8LBknFes30", - 
"2jlyKLE29FQXlbGT+uFs+TKXD9zD5T+iP1Th3UFaj8ibtfvY+y22avRae03X0ETrlFCbqylntaeir1JA", - "jn0qOEyQXuVFt7gxc5mlo5iDjosLUkjGNT4sSr1I/kLSFZU0Nexv1gduMv/qSSQpfDM5Md8P8BvHuwQF", - "8jyOetlD9l6GcH3JXS54sjYcJbtXR3YGp7LXcSvuotPnJzQ89FihzIyS9JJb2SA3GnDqKxEeHxjwiqRY", - "rWcvetx7ZTdOmaWMkwctzQ79+PalkzLWQsbyu9bH3UkcErRkcI5++vFNMmNecS9kPmoXrgL95zWeepEz", - "EMv8We59COxj8QneBmjzCT0TL2PtaVp6GjJX1OyDL5xxFhBb83SX3eMq1ZAanfeBynPocdD1KBEaAbAt", - "jO33Ar66iiEw+TR2qA9HzaXFKPOZiCzZl9CobDwuYjKit+q7QMwHw6DmbqgpaZYruHmPGm8W6Xp2mC8e", - "VvyjDexnZjaIZL+Cnk0MSqlEtzOrvgfOZZQ8E5uxm9ri3X5jfweoiaKkZHn2U50bpFWpRlKerqLOInPT", - "8Ze6pma1OHuYowl+V5Rz643Q1U3gK+UX/5qJvLf+KcbOs2Z8ZNt28Ry73NbiasCbYHqg/IQGvUznZoIQ", - "q820C1VYX74UGcF56myy9b3eLboUlMb4VwlKx+5F/GBDC1CjvjBUbCtUAM9QjzEj39ua+CsgjVyBqD+w", - "WZog83UCrKmnLHJBsykx45x8e/SS2FltH1sZzlaGWNprt7GKfv/cfRxth3xrryOiz6xaaUzdqTRdF7EU", - "JabFiW+AeVBC6xI+rEPszMgLq9NQ/sVsJzH0sGByDRmppnNSNdKE+Y/WNF2hsqDBUvtJfnxJE0+VKigj", - "XJUDrLJH47kzcLuqJraoyZQIIzlcMGVLocM5NLOiVCmCnBjgs6Q0lydLzi2lRKXioRRWl0G7B856QXoD", - "VBSyFuL3lF6cm/qeFV7eYa9oNst2uZhO/WCbY6Mq8/bKV4CmXHCWYi7J2NXsyqqPsc6OSLsZjwxw/jZq", - "Ejlc0SI1VbCGw2Jv2RrPCB3iuuah4KvZVEsd9k+N9btXVJMlaOU4G2RTX2vJaagZV+CygWOF/YBPCtmw", - "eCOHjDpR1HLynmSEwdk9KofvzLfXTiGFUYtnjOPT08dI2ABJq0PGqs/avFeZJkuBERTuUIRr+tn0mWGy", - "lgw272e+SjSOYQ3GZtnWO6I71JH3lXC+Cabtc9PWJtSrf27EwdlJj4rCTdpfiSsqD+gN70VwxOZdOXoF", - "yK3GD0cbILdBJye8Tw2hwTm6SEBBXGhMT1WqVhCMEVotRWELYv2jo3m0om6iLxmHuoZ55IJIo1cCbgye", - "155+KpVUWxFwFE87AZqjX0SMoSntjGJXHaq1wc6ftEgnfo7+bawLavUwjqpBLbhRvq1KpxvqDoSJ5zSv", - "nIQi5bFQqnJClAuuaRbMijEOw7h9Qs7mBdA9Bl2ZyHbXktqTs89N1JeqZF5mS9AJzbKYPuEZfiX41acr", - "hQ2kZZXFuyhIipn5mqkKu9TmJkoFV+V6YC7f4IrTBRXoItQQVsHzO4yO1/Mt/htLYd2/M849aG8fe+8L", - "lFXhc/vIzc2ROlKvoelEsWUyHhN4p1wdHfXUlyP0uv+1Unoulk1AbjhB2RCXC/coxt++NRdHmL+rk5fd", - "Xi1Vei10BxW+bjA+G6vEME2u5KNOO3MGmZeHFRD9FUanePn1xLUEul5q71dr1+6Lbkl7g7GodvkTNCWD", - "LKg3Jt36ldnoc4QirtPv8yWzrmTmc6f3OMmwI2fj2IMI9U6KXYB+8B7QpKDMOW3UzKKLWRfu1a8uHDp0", - "9Qa3F+GCqHo1dj+c9wU8+ThgG9nRqsl4Bi6pUiHhnInSu0N4fzn/JLS/upr4QVxx7/q7fjM41edVg/Yq", - "bU9c/R+7TPcm/+En611JgGu5/R2ocDub3qloGctZ3Khn6YSrqL5Jj70rX1RFMc/Ok7XIhgKmf/iJvPC2", - "pVH3jifkWLolkbkqctFg8ZeuBIRvZqTP0dO+cp2OimJ46p4I8e7ktuG+0/elmjLnc0jr9safX1sHNFQh", - "RN4qQTgzh43uKf7Ujoa9AAKbAjDXbRDY3J89YyxBuSBHfK0mOVAFAxgOs7a5tiORfLJ5adqPC7aPV2Lt", - "Tzlbp5lF5lkIxeriPLESrSNdjk+wympgMeyO5f39ziHVWJGp9mOSAPsk0DWTBeW/b1PP9ihKKs9sT/8D", - "aWank5C3RAMV3fGidYoctKqhyTWSqt62iTB715mZQ1LC1A9hfljQXMWrovU6u7YynwQOK5FEz/GFHWcj", - "sn275UwDHwiWDSMyHglgnb//mMi0fu3Xi85Oza7hV0Un8UKQPMSWVprt4UBSeVGjZIj7tQTuKsMvYqjZ", - "HRW1WECq2fmORBd/XwEPkihMvSYYYVkEeS9YFWWDCUX3t3PUAA3loRiEJ0jsf2Vw+mJEz2B7R5EGNURr", - "PU29cH+ZXJKIAby1jOBRCBXzUrSmK+c4xlRFGYgF7xVsu0Odlbu3Smwg51xyLk+STYlnYMp4mcpRc5mu", - "e2UCw4CRvlwY3TJ3/RqPF1hVUFUV3H0uylAvSI4jhaBcLktMS1JZa31WS1D+N5+DyM6SszMI69iibRxT", - "KLgWUWWv1yMnA3JSJ/o7Wr0Kc2f5mVkdw9GN943kgEbvpzQXWPmpL9ypGTZRuXndUdY5FMUUrESFcC1A", - "unrfeDPkQkGihXetG4JjCBXWA/ZSSFC9dRcscL3ZUN/W6V6x/oxNlkGd42u4QCJhTQ10MkjK2j/nELKf", - "2+8+wNXn5Nqp067oNdmZVdVH7zDVQWJI9QvibsvdgbOXUW8zzkEm3tbd9inkBpWh/bWQIitTlwgmOBiV", - "CWB0wrIBVhLVDKfdVXaUfDlmA38ZpCE4g+2B1b+kK8qXQXq1EHor2ts1BJnLWrt9rZr/uJIzX9oFLK8F", - "zs+pPZ9OCiHypMfgetxNNNs+A2csPTNidln7vfcU2iR30c5XedRcrLY+sWpRAIfs3oyQI24jjbxzTbPS", - "UWtyfkcPzb/BWbPS5n52iv3ZKY+HbGBSH3lF/uaHGeZqCgzzu+JUdpAdaUw3PUluJb2IlJ3t+tONdndp", - "lwKticpCEZNSLpmqa9T57ir3I6QfVEEcfv2EmfxqL2ZpbUQoLdWVIZvCy6va9DOuHqPvsAO8UFkTVGT0", - "3MiB85ldjV9VSAmW0ksJjeXv0v+4BdZ8KdgihVGTZpk2AbF1U2vuS6DcU88rnVkcz13VGqbtExxz/nZV", - 
"cgpthjYNa0A45lzKc5rfvFoN8zkeIT4ge9sv8ITv3xDJFpXqcv5+L+mouYO37vVNzd+gGvDvYPYoaux1", - "QznjT1UJ05vIMMU9zUku6rrIOCS5wDGtdfjhV2TuougKCSlTrBVgfOGrmlTPPSzy5XwsN3rH+3LXOn8S", - "+gpk7B4IoiCv6woJWuD9UENYH9HPzFR6Tm6UymPU1yGLCP5iPCpMZ7PjujhrmI1txZmWP6SQcM3m48AR", - "bE/zcTdRz9jlWROpuXRKBd11jr6tG7iNXNT12sb6PnSRO5RGf4zLQrw6humOPhMWIVhahiCo5NeHvxIJ", - "C6wdKcj9+zjB/ftT1/TXR83P5jjfvx8V427MW8LiyI3h5o1SjDOmdUJhYFMw2ZP0761j7u7CRvMdwQ4Q", - "z86ZQ7QaDE7t/UZvOBU0ytw7Ffx2aa7xLn4WoMwvuZoohvuf+mIXrH9+T5hM6yyULM92HcpG0FNd+RbD", - "en5xAbmfpfbuL1aX3WWTrv7hPj5y7QOAiImstTF5MFUQzjQiksl1i8QtIXGlpWR6i3nCvOqT/RL1qfm+", - "spY4K3CVWcbJHVqcQZVprratlMpLNt8LmqMsYN4z6KGohchn5NsNXRc5OCb1zZ35f8LjvzzJHjx++J/z", - "vzx4+iCFJ0+/fvCAfv2EPvz68UN49JenTx7Aw8VXX88fZY+ePJo/efTkq6dfp4+fPJw/+err/7xj7gAD", - "sgV04rNSTP4HC1QnR2+OkxMDbI0TWrAfYGtrYRoy9lU2aYpcENaU5ZND/9P/8dxtlop1Pbz/deKC3icr", - "rQt1eHBwcXExC7scLFGZmmhRpqsDP0+nDOfRm+MqPMz6QuGO2sgfQwq4qY4UjvDb22/fnZCjN8ezmmAm", - "h5MHswezh5jLuABOCzY5nDzGn/D0rHDfD3wS4cMPH6eTgxXQHG3i5o81aMlS/0ld0OUS5MyVGzU/nT86", - "8GLcwQenSP5oRl3G7KY20C2IbupW4XRGKfQWtoFsjapWyqWYnla1zpyeh2cYf2R1s4bFV8g6zuow8uOa", - "Ufl0Zzb/6+HPEYemBVuWEpVHdXh25arpCiEyRf773d9eEyGJe06+oelZ6LuFBPmvEuS2JhjHysLEpb4u", - "lYsEWqtl0XSbr1l65GkRLWeKM5t9Dii1sunUnAitzmER6IqvGl75IPn6/Yenf/k4GQEIGhgVYFqbX2me", - "/0ouGFbFRCtNM7RdTSM1mPBpMq1tBNih3qYp+v1XX8Mym1WbZrTZr1xw+LVvGxxg0X2geW4aCg6xPXiP", - "iVeQEvAQPXrw4Nrq81YBljZ6oBrFk8QlBupyGPupqvN7IWlhD5pPuIDhqqhX8AvFqsRPrnGhTffoKy+3", - "PVxn0c9ohqUPQWm7lIdf7FKOOdr4Dccn9kb7OJ08/YL35pgbnkNzgi2DrGbdW+RHfsbFBfctjTRTrtdU", - "blFWCeqztoK36VKhuhhZpD3bzZT47z/2XmkHYcG5gw8NM3F2pQuvU2vz+MWOO/CO6uOc3ZzArXp2Lgu/", - "zdGBhkRXtA8LqKl7M/J92Bu5N6bYsQlsSsmdo5LTTbHM8GH3IPGZCGvY7qjQ/yh6Iwe699vL+ZNezkdN", - "tVAjqWwMmAaJD8LU8SO56u3YDcC7jjIJQdm4SyTk/6Q1UVsvQzvT+9jDbScXvsVdD+76ZKAA3kocalYx", - "+/R81we8VNdE4z74hFz5C5foXtHc0Emw3FYyAJtp+VbS+9NIepVr4dKKXq64wNVkP4ywOfjgs2dfg7zn", - "soePkPQa6eDqvkF257stdnJv5qpHBm0uxzOcL+FOGQ5zmt9Kb59aeusWA4iBUad4/3wS21VyJjYK+e6V", - "cvALFdH+xMjqlclc1tEd0tgleGNH0nKc+JPxzD+khOWQditb/allq8p9/0rSVaOchwsICaxLV9K7tfVq", - "TFdiVjOEI+BsGFJiGIo7wtO69JhhMZhzy6dbUVP/7EPLpn0R2s2adh6FXfnpewhfn8+2xy92iU5fkBJn", - "dO7HyC0Q35tPzUujBoO3N2MwGMebnjx4cnMQhLvwWmjyHd7in5hDflKWFierfVnYEEc6mNus1ENcibfY", - "EjKKOtt0wKOw3EyY0do6Stx1dcrDLCH3ZsTnvlZVjRkXrr8UhkH5HFxULm0nw+MMEsgd/+chjn9nRr4T", - "kjCu1RR97bQrQELuMK4PHz56/MQ1kfTCurK1282/enJ49M03rlmdg9++bzrNlZaHK8hz4Tq4u6E7rvlw", - "+D//+N/ZbHZnJzsVm2fb1zat4O+Fp3afdeHG9+3WF75JsVe6S/e4E3U3YnB/JjZR7i82t7fPZ7t9DPb/", - "ELfOvElG7gFaqScbYcDXeAvZY7LPPTT1mcMN36kukxl5LVxGhjKnkgiZgXRFuZYllZRrgGzmKZUsMPQa", - "I9DTnAHX5sGIZYZkolgGNpB1WUrISM7WWIdbwjmGCOD0+JZvQLCb0aNT7++Wyb+imyBKe15d01q4JWPM", - "+5pufKEzLOUjJP70zTfkwbR+teS5GSCpEBNjrmu6mdygtq8itlHu982KDzt9ZHHsMZqjWvqxNSVpM738", - "n5tzf7ESuyV3t7HXxDn3tubU1ppQf+DyHgxqDqxgZ8ugYV2uLaniko2U50WoOIszM4xVCvyObQM7VdLR", - "x2cbvbeH+PbxfyVW0iaoPdkGBt2qgw9oywh5RufcYtDgH8gGGhiEpFh7i5AgC9DpygUjt/Aa4T2+mEQ/", - "4xkqcnvdIgtuUTeXeZjrEIuvjkxSEMSJolUOZIRC/+bzOpvPbIGpJqpCIb6WM9qbmC9vWFU2dPVfmfLu", - "9T5m2eziXlA+ryfvSluIluswat4ieD8Edzjft75YGWLMLeKP4IDv34kJeS3qkHhXJ+OPaE/8lNf2p17Q", - "a8HBGs6NWGtp8dZGWskUqJ9HpPhcKPZxUmUsv7R8ceDL7g0KGX+1Re8GBY0xt7eZ7Iu8wv8aLbXeuGXM", - "2mY7A6Pr0cYwZ9PQ5ltuZlr+jE+Uz8JPf4fvls/BsW6GxeAh9XzGiQX8epkOpheyxHxQJTPt40DxvOWj", - "uZEWlW9ZNNX4HHLBl+r3yYqGqCOOlwiVVBnd42nb/3xn9zlmLuLCJwl1uawU4ynYspJYEYcpsmZKOQ/I", - "Jw/+cnMQarb2+f94GEr6mbnL0wePb276dyDPWQrkBNaFkFSyfEt+5FUJ0KtwO0z+XeWW86reaB0CNCU1", - "c56lYYKmyzPBhj/aB71h2cfdzDDIT7gnH2Q84INhzklaFEDl5RngbrvUSWvG4xehy28jJ3WVLSwCikHR", - 
"nl7v/zEZqXfCKHSxcJdfyS2gPrOZYxPOH1csppXni5ECxOKQnPL7RK3o04ePfnn09Cv/56OnX/Vozsw8", - "LiFRV3dWD2Q+22HGKNB+v7q+6xXJK+Qd3vRW7rdD0wnLNtEEtHXxk/BcOMcc5BN3FCnotjdvdbGjeEs4", - "bF3I5eazNCrN5qvo48m/bapaxsf8WfXEtakEXc2T26ItPeEOARMxhFZXb6mwPlzIZUBUbJFlVZngpl+e", - "dViAvcU88mTrQvmsUqz+XC/QBB+gwL3U0kTL5xMYMUnyNDBUV9Xh0eukLAohdXW61WyULAd9BreGKNdH", - "uHtJainV6aosDj7gfzA91sc6VMDWYw0sdO53W5HuwNrfh4S4d7bFFe/ElrRsrf6yyZx8pjbnEyAW5BVL", - "pTjC3NvuulFbpWHdrSNku/4yVJM/ejUJnjMOyVrwWJK3v+HXV/ixt8xaX2csq9bXt102qAF/C6zmPGM4", - "41Xx+zt5Z19JP9RarQRzjOuCSZb+9zxq/tBsedo9SVuedo9Zo2pTz88HHxp/Ou8b11KtSp2Ji6Avvu4s", - "LxpjeA8Sf49XilcPnlYCbUUyUIZovzwNVICH2ImpvkayfwXp3XsTgP1JdVILxrMWkaBEmYpzkKrSVkjv", - "KHOrmPrjKKZG7/tePNamstzF0Up1vRLJa5GBHbeZPTYW6MlFBi7jZlcQqWSw+Hvf30p1u9YLLKXlcqVJ", - "WRAtYm+9umNCU8tkbV03tasQlm3lC76cA6G5BJptyRyAEzE3i24WFCRUoZN7VTbRSprxek41XIUUKSgF", - "WeIDW3eBVuUxxeelHsATAo4AV7MQJciCyisDe3a+E84q77oid3/4Sd37DPBaUXAYsda1NoLeysPHSXtd", - "qMdNP0Rw7clDsqMSiBcNUL8l1kUOTsMVQeFeOOndvzZEnV28OlpQBcQ+McX7Sa5GQBWon5jerwptWWDB", - "7UjFOfv1hK1REuOUCwWp4Jnqrwu5iy1j7ZNgLcqsIOCEMU6MA/c8OF9Spd86S0ZYPiuosWKmGChk2Zdj", - "3oz8U5VhvjN2au5DrkpVpaF3CgzIYmvgsBmY6zVsqrnQlOTHrjQkWpBSwa6R+7AUjO+QpcLKlDqwAWEF", - "lO7iMBsJdQqKLiobQNSIGALknW8VYDe0T/QAwlSN6KrcXJNygjrFSouiMNxCJyWv+vWh6Z1tfaR/rNt2", - "icsVdcB7OxOgQu2Vg/zCYlZhuMWKKuLgIGt65hRcS5etqQuzOYwJWp2TIco3x/KdaRUegZ2HtCyWkmZY", - "sZBGVCk/2s/Efh4aAHfckyeWg03msIhWVMEi+xUly14VUTW0wPFUTHjE6rGKpOYILrDEjycQ13vHyBn0", - "lK49CcrpueY4V3SL/Hi4bLvVPWopM4bZcUsOCLFj6GPg7UFDNfLlMYGdk1p70J7iH6DcBJUYsf8kW1B9", - "S6jH32sBbW1eeH81LooWd28x4CjX7OViO9hI34mN6Q+/yHC8ttn2EzqcNfWnwftvdpm37cEFZTpZCOkK", - "ctOFBhlR5bXKCFCmfbSfNaBo4dwhCI7grk03jivsXKfMcEzEgkB8LVG2jmTgMVN9J+SokJ2m7xplmpRc", - "szwIW65eyr8/feGtDuBWB3CrA7jVAdzqAG51ALc6gFsdwK0O4FYHcKsDuNUB/Gl1AJ8rTC/xAof3b+aC", - "JxyWVLNzqOL3btMG/aHCWqqryuskUItxQZl2STgJ9WIAfrlaVJ8GmiMOWG7LJgvVm90Iq1grUcoUSGog", - "ZJwUOTVPA9joKiVcM9moT3/s6lhj/lKq4PEj8u6vR95Bf+UcyZtt7/ryxUpvc7jn8jJUxU59ggbgBuku", - "PwP1V4JPHecS6bEciDLo/RZbv4BzyEUB0vr+Ei3LiMbnBGj+3OFmh8KnUc7SjPbrtKFncmhb0yKo149r", - "pYpQDOZoVaNc0Fz1l6O0461pEcveVl18VhWE3OSZyLatE2J27QA3sHk2ajd9xqncRuJvOieiQxpaGH7l", - "CKury/p47cEkXaLtktkuCotJ6xJU9BwPUXk0iqLasM5QNpJn0aKTaC3mdujApAJwjAOsoWe/J+St7fd5", - "49ARInfEamb+u/EbbLasmAa2NY8Ix3q+1KBxj/jo6cWzPzWEnZUpEKYV8fEou6+X6WSTmJGWwBPHgJK5", - "yLZJg31NGrdQxhRVCtbz3TdRyD9dvmJ3+Zgvw/fU57lGXgSLG+LJIdFsEseAe7jzVsNo3lxhC0d07DnA", - "+Kdm0X1sNASBOP4UUyq1q8TsyfTqaba3jO+W8QWnsSURMO7i99pMZPYJGZ/cypL387xvN5CWBrjwJN9F", - "7Tya5GCjG3bNDOblcol5lzs2OrM0wPGY4J+JFdrljuWC+1GQHbzKxXnVDFHt4brcJYhVuyskWUpRFvds", - "gSm+RWPGuqB8602+kCi2LnOLQ5vV7noZrQ2x6zoCoDnW6f76tNpvvMov0N26q7b5u0ULuaCK2P2FjJQ8", - "c5FDnUDcDR+f89kOfbLhNZsezPps1xtZnZt3zBXhd9mFuFRm7gJkojfcHqhmYnYb8GtP7uw23+yf49p4", - "Ywu59TDYbvBqzRCu6faQAV/D6yPIP1KHwjWrZNkafn2BI2EyEtvyWp1HOsM3fUiCCnrWRgp5QagvBpAK", - "rrQsU33KKdpogoXNuv4lXhvdz9+e+yZxM2HEiueGOuUUc8VXlpson1tAxEzxHYBno6pcLkEZXhkSyQLg", - "lLtWjJOSm5eWWJA1S6VIbBiqOUNGPpnZlmu6JQuao5HxN5CCzM3NHuy6VRgrzfLcObSYaYhYnHKqSQ5U", - "afKKGS5rhvOJwipPLtAXQp5VWIinr1gCB8VUEle+fG+/YoYIt3yv5EOFpf1cR3bfbGoIDzvLeiE/fmHg", - "ppjpJmdK1z4QHdhvzP69ZjyJEtnJCohzCWvTFrlrGK8noHtN65BewSk3N5wWBLk61Zcjh7aZp3MW7elo", - "UU1jI1rWIL/WUU+8a+EyJMJkbk0rf6DAzIAOvPkSNx6ryLT3fk8zymBhythXly6sp5F7JID/bE8R3vFm", - "WZCWkukt2iFowX45A/P/9x/fm2/y3JsoSplPDicrrYvDgwOsOLkSSh9MPk7Db6r18X218g/e2lBIdo45", - "qt9//P8BAAD//0vKaWunRwEA", + "H4sIAAAAAAAC/+x9a3PbuJLoX0FptyqPFWXnuSeumtrrJPPwTpJJxZ7ZPWeSOwORLQnHFMADgLY0ufnv", + "t9AASJAEJcp27GTGnxKLeDQajUajnx9HqVgWggPXanTwcVRQSZegQeJfNE1FyXXCMvNXBiqVrNBM8NGB", 
+ "/0aUlozPR+MRM78WVC9G4xGnS6jbmP7jkYR/lUxCNjrQsoTxSKULWFIzsF4XpnU10iqZi8QNcWiHOHo5", + "+rThA80yCUp1ofyJ52vCeJqXGRAtKVc0NZ8UOWd6QfSCKeI6E8aJ4EDEjOhFozGZMcgzNfGL/FcJch2s", + "0k3ev6RPNYiJFDl04XwhllPGwUMFFVDVhhAtSAYzbLSgmpgZDKy+oRZEAZXpgsyE3AKqBSKEF3i5HB38", + "OlLAM5C4WymwM/zvTAL8AYmmcg569GEcW9xMg0w0W0aWduSwL0GVuVYE2+Ia5+wMODG9JuR1qTSZAqGc", + "vPvuBXn06NEzs5Al1RoyR2S9q6pnD9dku48ORhnV4D93aY3mcyEpz5Kq/bvvXuD8x26BQ1tRpSB+WA7N", + "F3L0sm8BvmOEhBjXMMd9aFC/6RE5FPXPU5gJCQP3xDa+0k0J57/RXUmpTheFYFxH9oXgV2I/R3lY0H0T", + "D6sAaLQvDKakGfTX/eTZh48Pxg/2P/3br4fJP9yfTx59Grj8F9W4WzAQbZiWUgJP18lcAsXTsqC8i493", + "jh7UQpR5Rhb0DDefLpHVu77E9LWs84zmpaETlkpxmM+FItSRUQYzWuaa+IlJyXPDpsxojtoJU6SQ4oxl", + "kI0N9z1fsHRBUqrsENiOnLM8NzRYKsj6aC2+ug2H6VOIEgPXhfCBC/pykVGvawsmYIXcIElzoSDRYsv1", + "5G8cyjMSXij1XaV2u6zIyQIITm4+2MsWcccNTef5mmjc14xQRSjxV9OYsBlZi5Kc4+bk7BT7u9UYrC2J", + "QRpuTuMeNYe3D30dZESQNxUiB8oRef7cdVHGZ2xeSlDkfAF64e48CaoQXAER039Cqs22//fxT2+IkOQ1", + "KEXn8JampwR4KrL+PXaTxm7wfyphNnyp5gVNT+PXdc6WLALya7piy3JJeLmcgjT75e8HLYgEXUreB5Ad", + "cQudLemqO+mJLHmKm1tP2xDUDCkxVeR0PSFHM7Kkq2/2xw4cRWiekwJ4xvic6BXvFdLM3NvBS6QoeTZA", + "htFmw4JbUxWQshmDjFSjbIDETbMNHsZ3g6eWrAJw/CC94FSzbAGHwypCM+bomi+koHMISGZCfnacC79q", + "cQq8YnBkusZPhYQzJkpVdeqBEafeLF5zoSEpJMxYhMaOHToM97BtHHtdOgEnFVxTxiEznBeBFhosJ+qF", + "KZhw82Ome0VPqYKnj/su8PrrwN2fifaub9zxQbuNjRJ7JCP3ovnqDmxcbGr0H/D4C+dWbJ7YnzsbyeYn", + "5iqZsRyvmX+a/fNoKBUygQYi/MWj2JxTXUo4eM/vm79IQo415RmVmfllaX96XeaaHbO5+Sm3P70Sc5Ye", + "s3kPMitYo68p7La0/5jx4uxYr6KPhldCnJZFuKC08SqdrsnRy75NtmPuSpiH1VM2fFWcrPxLY9ceelVt", + "ZA+QvbgrqGl4CmsJBlqazvCf1Qzpic7kH+afoshNb13MYqg1dOzuW9QNOJ3BYVHkLKUGie/cZ/PVMAGw", + "rwRat9jDC/XgYwBiIUUBUjM7KC2KJBcpzROlqcaR/l3CbHQw+re9WrmyZ7urvWDyV6bXMXYy8qiVcRJa", + "FDuM8dbINWoDszAMGj8hm7BsDyUixu0mGlJihgXncEa5ntTvkQY/qA7wr26mGt9WlLH4br2vehFObMMp", + "KCve2oZ3FAlQTxCtBNGK0uY8F9Pqh7uHRVFjEL8fFoXFB4qGwFDqghVTWt3D5dP6JIXzHL2ckO/DsVHO", + "Fjxfm8vBihrmbpi5W8vdYpXiyK2hHvGOIridQk7M1ng0GBn+KigO3wwLkRupZyutmMY/uLYhmZnfB3X+", + "OkgsxG0/ceErymHOPmDwl+DlcrdFOV3CcbqcCTls970Y2ZhR4gRzIVrZuJ923A14rFB4LmlhAXRf7F3K", + "OL7AbCML6yW56UBGF4U5OMMBrSFUFz5rW89DFBIkhRYMz3ORnv5A1eIKzvzUj9U9fjgNWQDNQJIFVYvJ", + "KCZlhMerHm3IETMN8fVOpsFUk2qJV7W8LUvLqKbB0hy8cbHEoh77IdMDGXm7/IT/oTkxn83ZNqzfDjsh", + "J8jAlD3OzoKQmae8fSDYmUwDVDEIsrSvd2Je3TtB+aKePL5Pg/boW6swcDvkFoE7JFZXfgyei1UMhudi", + "1TkCYgXqKujDjINipIalGgDfSweZwP136KNS0nUXyTj2ECSbBRrRVeFp4OGNb2apNa+HUyEvxn1abIWT", + "Wp9MqBk1YL7jFpKwaVkkjhQjOinboDVQbcLbzDTaw8cw1sDCsaafAQvKjHoVWGgOdNVYEMuC5XAFpL+I", + "Mv0pVfDoITn+4fDJg4e/PXzy1JBkIcVc0iWZrjUocte9zYjS6xzudVeGr6My1/HRnz72WsjmuLFxlChl", + "CktadIey2k0rAtlmxLTrYq2JZlx1BeCQw3kChpNbtBOruDegvWTKSFjL6ZVsRh/CsnqWjDhIMthKTLsu", + "r55mHS5RrmV5FU9ZkFLIiH4Nj5gWqciTM5CKiYip5K1rQVwLL94W7d8ttOScKmLmRtVvyVGgiFCWXvHh", + "fN8OfbLiNW42cn673sjq3LxD9qWJfK9JVKQAmegVJxlMy3njJTSTYkkoybAj3tHfgz5e8xS1aldBpP3P", + "tCXjqOJXa54GbzazUTlk88YmXP5t1saK18/Zqe6oCDgGHa/wMz7rX0Ku6ZXLL+0JYrC/8BtpgSWZaYiv", + "4FdsvtCBgPlWCjG7ehhjs8QAxQ9WPM9Nn66Q/kZkYBZbqiu4jOvBalo3expSOJ2KUhNKuMgANSqlil/T", + "PWZ5tAeiGVOHN79eWIl7CoaQUlqa1ZYFQSNdh3PUHROaWupNEDWqx4pRmZ9sKzudNfnmEmhmXvXAiZg6", + "U4EzYuAiKVoYtb/onJAQOUsNuAopUlAKssSpKLaC5ttZJqI34AkBR4CrWYgSZEblpYE9PdsK5ymsE7SH", + "K3L3x1/UvRuAVwtN8y2IxTYx9FYPPmcP6kI9bPpNBNeePCQ7KoF4nmtel4ZB5KChD4U74aR3/9oQdXbx", + "8mg5A4mWmc9K8X6SyxFQBepnpvfLQlsWPV5e7qFzwpaot+OUCwWp4JmKDpZTpZNtbNk0arzGzAoCThjj", + "xDhwj1DyiiptrYmMZ6gEsdcJzmMFFDNFP8C9AqkZ+Rcvi3bHTs09yFWpKsFUlUUhpIYstgYOqw1zvYFV", + "NZeYBWNX0q8WpFSwbeQ+LAXjO2TZlVgEUV0p3Z25vbs4VE2be34dRWUDiBoRmwA59q0C7IaeLj2AMFUj", + 
"2hIOUy3KqdxrxiOlRVEYbqGTklf9+tB0bFsf6p/rtl3iorq+tzMBZnbtYXKQn1vMWh+nBTVPaByZLOmp", + "kT3wQWzNnl2YzWFMFOMpJJso3xzLY9MqPAJbD2lZzCXNIMkgp+vuoD/bz8R+3jQA7nj98BEaEuvPEt/0", + "mpK9+8CGoQWOp2LCI8EvJDVH0Lw8agJxvbeMnAGOHWNOjo7uVEPhXNEt8uPhsu1WR0bE2/BMaLPjlhwQ", + "YsfQh8Dbg4Zq5ItjAjsn9bOsPcXfQbkJKjFi90nWoPqWUI+/0wJ6lGnODTg4Li3u3mLAUa7Zy8W2sJG+", + "E9uj2XtLpWYpK/Cp8yOsr/zl154gam8iGWjKcshI8MG+AouwP7GOGO0xL/YSHKSE6YLf0cJElpMzhRJP", + "E/hTWOOT+6318DsJ/AKv4CkbGdVcT5QTBNT7DRkJPGwCK5rqfG3kNL2ANTkHCUSV0yXT2rpsNl+6WhRJ", + "OEBUwb1hRmfNsd5xfgeGmJeOcahged2tGI/sk2AzfCetd0EDHe4pUAiRD1AedZARhWCQ4Z8Uwuw6cx7C", + "3o3UU1IDSMe00ZRX3f53VAPNuALyd1GSlHJ8cZUaKpFGSJQTUH40MxgJrJrTmfhrDEEOS7APSfxy/357", + "4ffvuz1niszg3LvVm4ZtdNy/j2qct0LpxuG6AlWhOW5HkesDNf947znnhRZP2W5idiMP2cm3rcErc4E5", + "U0o5wjXLvzQDaJ3M1ZC1hzQyzLyO4w5S6gdDx9aN+37MlmV+VRs+oywvJfRbx96//3W2fP/+A/nOtvSG", + "7bEn8hAd53VYxMzdRqVE1xqSM/O+lYJmRkCI6vZxkXyeVM6ZKgrOUhlw/sedQ8rXrUC+oTCQKaS0tF7J", + "jms7CGr3UDWJyIut3W2jMLqQgerxMtf20g6xOpeiLIiqtt1SgaYaPo+quR46BmV34sA3qP7Y5x5knon5", + "+gpuazsQkVBIUMhbQ/WKsl/FLIy/ccxXrZWGZVcDbbv+1vM+e9f7zhE8ZxySpeCwjoacMg6v8WOst+Xv", + "PZ3xpu3r2xaeG/C3wGrOM4QaL4tf3O2Aob2t/OKuYPPb47aMD2HkESrXIC8IJWnOUPUmuNKyTPV7TvFx", + "Hxy2iP+Af8b0q3te+CZx/VJE/eOGes8p+o5UT/4oX5xBhC9/B+C1Pqqcz0HplpQ4A3jPXSvGScmZxrmW", + "Zr8Su2EFSDTiT2zLJV2TGc1RO/UHSEGmpW4yVwyQUJrlubOEmGmImL3nVJMcDFd9zfjJCofzlkRPMxz0", + "uZCnFRYm0fMwBw6KqSTu5/C9/YouaG75C+eOhtGq9rPVnZvx6yiKNb796wjM/3v3vw5+PUz+QZM/9pNn", + "/7H34ePjT/fud358+Ombb/5f86dHn76591//HtspD3vMfd9BfvTSvSmOXqLgWCvPO7Bfm+J0yXgSJbLQ", + "RNyiLXLXiL+egO411Qp6Ae+5XnFDSGc0ZxnVFyOHNovrnEV7OlpU09iIlhrBr3VHcewSXIZEmEyLNV74", + "Gu+6BsUDZdCa42Jf8LzMSm63slTOooR+4N5FQ8zGVTCUTYJwQDBSZkG9f5H78+GTp6NxHeFSfR+NR+7r", + "hwgls2wVi2PKYBWTst0BwYNxR5GCrhXoOPdA2KPeKNYoHg67BPM8UwtWXD+nUJpN4xzOe9e61/qKH3Hr", + "9mrOD9qG1k7lLGbXD7eWABkUehELjm5ICtiq3k2Alr2+kOIM+JiwCUzar+VsDsr7xeRAZxiki/YNMSRa", + "oDoHltA8VQRYDxcy6Ekaox8Ubh23/jQeuctfXbk87gaOwdWeszIE+b+1IHe+//aE7DmGqe7YkDo7dBAE", + "FdFCOT//hieH4WY2JYSNKXzP3/OXMGOcme8H73lGNd2bUsVStVcqkM9pTnkKk7kgBz504CXV9D3vSFq9", + "WVuCoA1SlNOcpeQ0lIhr8rSR+NFnI83nwjwc20btrvzqporyFztBcs70QpQ6caHGiYRzKmNGA1WFmuLI", + "NlHAplnHxI1tWbELZXbjx3keLQrVDjnrLr8ocrP8gAyVC6gyW0aUFtLLIkZAsdDg/r4R7mKQ9NzHqZcK", + "FPl9SYtfGdcfSPK+3N9/BKQRg/W7u/INTa4LaOgrLxQS19ZV4sLtuwZWWtKkoPMepYEGWuDuo7y8xEd2", + "nhPs1oj98r6tOFS9AI+P/g2wcOwcx4KLO7a9fM6Y+BLwE24htjHiRm0xveh+BdFgF96uVkRZZ5dKvUjM", + "2Y6uShkS9ztTpZKYGyHLm7EVm6OroMu6MQWSLiA9hQwTAMCy0Otxo7v3lHCCpmcdTNlEGTaWA6O5UbU7", + "BVIWGXWieEuhZDCsQGvvq/gOTmF9Iupg8F3iaJthnarvoCKlBtKlIdbw2Lox2pvv3HFQ11UUPjoSw2Q8", + "WRxUdOH79B9kK/JewSGOEUUj7LAPEVRGEGGJvwcFF1ioGe9SpB9bnnllTO3NF8mr4Xk/cU3qx5PznAlX", + "g9GU9vsSMOuOOFdkSo3cLlzCGBu6GHCxUtE59EjIoXZ9YIBgQyOPg2y796I3nZi1L7TOfRMF2TZOzJqj", + "lALmiyEVfMy0/KX8TNaAYxWoBPPAOYRNcxSTKscyy3SobFg5bGKrPtDiBAyS1wKHB6OJkVCyWVDlc9lg", + "yh9/lgfJAJ8xFHdTAoajwNUnyOtTKb49z22f087r0qVh8LkXfMKF8Gk5IHmCkfDRuzi2HYKjAJRBDnO7", + "cNvYE0odFlxvkIHjp9ksZxxIEvMaokqJlNlkRPU14+YAIx/fJ8SqgMngEWJkHICNhkkcmLwR4dnk812A", + "5C6smfqx0aQZ/A3xCAzrR2tEHlEYFs54j8e25wDUuZpV91fL4RGHIYyPiWFzZzQ3bM69+OpBOnkAUGxt", + "Rf070/i9PnF2gwbeXiw7rcleRRdZTSgzeaDjAt0GiKdildgQrKjEO11NDb1HXYsxICx2MG3GhTuKTMUK", + "3S3warGurFtg6YfDgxG88FdMIb1iv77b3AKzadrN0lSMChWSjFPnVeTSJ04MmbpHgukjl7tBEoULAdBS", + "dtTpRt3jd+sjtSmedC/z+lYb18mBfNRG7Pj3HaHoLvXgr6uFqdIevG1LLFE9RdNroJnxIRAhY0Rv2ETX", + "SNM1BSnIAR8FSUOISk5jpjvztgG8cY59t0B5gXklKF/fC1xRJMyZ0lAr0c3F7K1C162epJjOSohZ/+p0", + "IWdmfe+EqK4pmy8FOzaWee0rQFfOGZNKJ2iBiC7BNPpO4aP6O9M0Lis1nV1sZkeWxXkDTnsK6yRjeRmn", + 
"Vzfvjy/NtG8qlqjKKfJbxgnQdEGmmIk06gK3YWrrJblxwa/sgl/RK1vvsNNgmpqJpSGX5hxfyblocd5N", + "7CBCgDHi6O5aL0o3MMggcrHLHQO5yR5OjFycbNK+dg5T5sfe6jbi4yf77ig7UnQtgcJg4yoYmomMWMJ0", + "kMizG1LYcwZoUbBs1dKF2lF7X8x0J4WHz5DUwgLurhtsCwYCvWcsqkGCaibDqgV8m5K1kYtiMggzJ82U", + "VSFDCKdiyicU7yKqinrahqsToPmPsP7FtMXljD6NR5dTncZw7Ubcguu31fZG8YymeatKa1hCdkQ5LQop", + "zmieOAVzH2lKceZIE5t7ffQ1s7q4GvPk28NXbx34n8ajNAcqk0pU6F0Vtiu+mlXZvFs9B8QnLDZvPi+z", + "W1Ey2PwqWVColD5fgEsOG0ijnSx2tcEhOIpOST2LewhtVTk724hd4gYbCRSViaRW31kLSdMqQs8oy73e", + "zEPb482DixuWCjHKFcIBLm1dCYxkyZWym87pjp+Omrq28KRwrg3pa5c2Q7MigrdN6EaERHUckuqSYg46", + "qxXpMideLlGTkKicpXEdK5+i2y23tjPTmGDjHmHUjFiyHlMsL1kwlmmmBjx0W0AGc0SR6fMZ9uFuKlxp", + "jZKzf5VAWAZcm08ST2XroGLSP6dt716nRnbozuUGthr6evjLyBhh/sX2jYdAbBYwQktdB9yX1ZPZL7TS", + "SJkfApPEDgb/cMbOlbjBWO/ow1GzdV5cNC1uYSWMLv8zhGGzJm8vw+Efry4RZM8c0bIaTCUzKf6A+DsP", + "n8eRiBGfcZKhl8sfwAe4mdfanbo6SD1773b3STehFqrppNBD9bjzgVkOU995DTXldqttlvuGr1ucYEKv", + "0j07fk0wDuaOJ25Oz6c0lhfQCBkGpsPaANzQpWtBfGePe6f2Zy4J6IQEtuSqLbPBwAXIOpirm1jkggKD", + "nXawqFBLBki1oUwwtva/XInIMCU/p9wWSzD97FFyvRVY5ZfpdS4khvKruNo/g5QtaR6XHLK0q+LN2JzZ", + "UgGlgiAXvRvI1lixVOTy+VsTe42aoxnZHwfVLtxuZOyMKTbNAVs8sC2mVCEnrxRRVRezPOB6obD5wwHN", + "FyXPJGR6oSxilSCVUIfPm8p4NQV9DsDJPrZ78IzcRbOdYmdwz2DR3c+jgwfPUOlq/9iPXQCuJsgmbpLN", + "wsCXOB2j3dKOYRi3G3USjXq2hZz6GdeG02S7DjlL2NLxuu1naUk5nUPcU2S5BSbbF3cTFWktvPDMViFR", + "Woo1YT0hSKCp4U893ueG/VkwSCqWS6aXzrijxNLQU51o3k7qh7MlTVyOUA+X/4g20sKbiFqPyOtVmtr7", + "LbZqtGS/oUtoonVMqM3fkLPae8FnLiZHPj0MJk2tcqVa3Ji5zNJRzEFnhhkpJOMaHxalniV/I+mCSpoa", + "9jfpAzeZPn0cSRTbTFjIdwP82vEuQYE8i6Ne9pC9lyFcX3KXC54sDUfJ7tXRHsGp7DXmxs12fbbDzUMP", + "FcrMKEkvuZUNcqMBp74U4fENA16SFKv17ESPO6/s2imzlHHyoKXZoZ/fvXJSxlLIWM63+rg7iUOClgzO", + "0HcvvklmzEvuhcwH7cJloL9Zy4MXOQOxzJ/l2EPguYi8Tn3y4kqT7nzVI9qBvmNqPhgymLqhxqSZKPb6", + "jX5e+dw1PpkvHlb8ow3sDW8pItmvoGcTgyTW0e3Mqu+B/ZuS52I1dFNbJ8Rv7BeAmihKSpZnv9RRma0c", + "4ZLydBG1Z01Nx9/qakbV4uz9FE2ttqCcQx4dzsqCv3mZMSLV/lMMnWfJ+MC27bTldrmtxdWAN8H0QPkJ", + "DXqZzs0EIVabAW+VQ3U+FxnBeeo8XjX37Ka7D5IS/6sEpWPBQ/jBOnWh3tK8d21OXAI8w9fihHxvq5Eu", + "gDSytOArzcbHQ+YztFqFelnkgmZjYsY5+fbwFbGz2j62JofNyTvHR0pzFS19VZCicJh7sC+vEQ9dGD7O", + "Zl9qs2qlMWmS0nRZxIJDTYsT3wAjUEMdPj5fQuxMyEv7clT+XWInMfQwY3JpXlzVaFZ2QZow/9Gapgt8", + "kjVYaj/JD08m7alSBQXcqkIsVd4+PHcGbpdP2qaTHhNh3s3nTNkilHAGzXjUKjjbqQR8fGpzebLk3FJK", + "VPbYlDzgImj3wFlHDa/mj0LWQvyOArnNxb5rbu1j7BXNI9RO1N2p3GajG6sCG764cEq54CzFLD6xq9kV", + "tBxiAxuQ8KitZPVH3J3QyOGKpgev3OQcFnsThntG6BDXVcIHX82mWuqwf2qsnLigmsxBK8fZIBv7LPdO", + "D8i4ApeHEWubBnxSyIZdETlk1FSdVCaNHckIw2J6HnbfmW9v3LMf/cVPGUcB36HNuaZbTR3W29PmVcA0", + "mQtQbj3N2GD1q+kzwTDZDFYfJr4+H45hzXJm2dYG3R3q0FuknQXYtH1h2tpUJvXPDQ9kO+lhUbhJ+2sg", + "ROUBveK9CI5YFhNv2gmQW40fjraB3Da6kuB9aggNztAQDQXewx3CqOoBtGrNGKHVUhS2INaFK5rBgPEI", + "GK8Yh7p6ZOSCSKNXAm4MnteefiqVVFsRcBBPOwGao/U5xtCUdqaHyw7V2mBECa7Rz9G/jXUpgx7GUTWo", + "BTfK11XRSkPdgTDxAqvlOkR2CxOgVOWEqAwjClqlCmKMwzBunwqpeQF0j0FXJrLdtaT25OxyE/UFiU7L", + "bA46oVkWy4v5HL8S/OoTRcEK0rLKn1gUJMWcKM0kMV1qcxOlgqtyuWEu3+CS0wW1PyLUENYf8TuMQSjT", + "Nf4bSx7YvzPOCWNnN0DvceGKJewoNzdH6ki9hqYTxebJcEzgnXJ5dNRTX4zQ6/5XSum5mDcBuebUEJu4", + "XLhHMf72rbk4wswJnYyY9mqpEhug053wFdvw2ViF5Da5El5lnRSZaOypct5tVkD013Ya4+XX43obJMSg", + "9n611sM+B9y011+cahe5pinZyIJ6o4Gs946N+0Eo4prTPo8d67BjPnd6D5MMO3I2jr0Rod4VrAvQj97P", + "lBSUOdN4zSy6mHUe6f3qwk2Hrt7g9iKcn3evxu7Hsz6fbKIYn+dA8Hu7Gs4puHD2qhy6Xav3SvJPQvur", + "q0Zqx6u84qPr73on4FQ3qwbtVdqeuMzrdpnuTf7jL9aHjQDXcv0FqHA7m96pJdSVdq16qm5Cqqy9g7L4", + "Nm7FeFmg/vxHdc4jpKdCKFZnio7VCxro63aCJX+C/E3dsbyjyRmkGtOD1wZ0CbBLNiczWVCL7jYPUs/b", + 
"sXIJdOmPNuU86uYE33KhdcKSgtA6m095MjzDz2HlJoVMCavBzYG7cnDNgIPBbs+zGaSanW0JA/ufBfAg", + "xGjslRC2rGsQFcYqN1rMIrK7iq0GaFOU1kZ4gmx+lwanLwjkFNZ3FGlQQzTB89jfKxdJIIEYQO6QGBIR", + "KuaGYLWmzjLMVEUZiAXv9mO7Q52Kq7c0TBDUeMG5PEmaG7cOdNwwZbw2xaC5TNedwn/RI7QvUqyb275f", + "2H6JpQRUVbbNJ6AIn6TkKJL92SWwwKC9ylDgU1mA8r/5CF07S85OISxeg2aZcyoz3yKqZ/AqjGTDfdQJ", + "74qmrKbKOlE6O3jlpNkN6IkkfkJX3DQXmO65z5+56RcZ1nhH7w+8DjD9NMI1A+mKfKGwlwsFiRbeqXMT", + "HJtQ4eqRXwQJqjfZogWuNwXKuzrHCyadpZjyhDrPlnCBRMKSGuhkkImlf85NyH5hv/sIFp90dKs6paLX", + "ZGsqFe+ey1QHiSHVz4i7LbdHxlxEs8I4tyVFVSwtCzeoDFX/hRRZmdoLOjwYlfZpcNKjDawkqpRIu6vs", + "vC9zTAH2KogzPIX1nhX90wXldS625rG2IpRdQxDX39rtK1U6xd/X+dwuYH4lcN6k4mY8KoTIkx5d/1E3", + "u0z7DJyy9BQyYu4O79jWU12D3EUVc2XMPV+sfTaVogAO2b0JIYfcuhJ7u24zvXFrcn5Hb5p/hbNmpU34", + "5HRKk/c87pOJqZjkJfmbH2YzV1NgmN8lp7KDbMldsurJbCPpeaTWzGToo7RraW3X/6iJykIRk1IuGMg+", + "6Hx39UoR0g9KH2x+/YR5LmoHOmnVkygt1eUgmsLL61rrOKwIg++wBbzwURyUYfDcyIFzw15uryukBEvp", + "pYTG8re9s90Ca74UbJHCsAizTJt1yHpINPclUKKoF5VuIo7nrgoDk1oIjol+uqoPhepqzBccEo45l/KM", + "5tevvsBsJ4eID1cSMb7Q8P0bItmiUl3M1eQVHTR38Na9uqn5W1S3/A+YPYraGdxQTu9Ylb/w2lnMa0dz", + "kou6GBIOSc5xTGuYePCUTJ2bfCEhZYq1IojOfSrT6rmHmb3rSpmb35fb1vmL0JcgY/dAEAV5U6dF1ALv", + "hxrC+ojeMFPpOblRKo9RX4csIviL8agwXn3LdXHasFjYNLMtVxwh4YotF4EPwo6Wi24k/tDlWe28uXRK", + "Bd11Dr6tG7iNXNT12oaa3brI3ZQ7b4i1LJ4S03RHc51FCOaTJQgq+f3B70TCDAtGCHL/Pk5w//7YNf39", + "YfOzOc7378crcl6Xoc7iyI3h5o1RzC99rpvWPbHHS7i1HyXLs22E0fD5rkuuoFfzby7q40aKvvxm9and", + "o+oS7+/iItDeBERMZK2NyYOpAm/uAY7crlvEbRtfJmkpmV5jMgqvfmO/RU2K31cae2fxqcKX3d2nxSlU", + "6Uxq/X6p/O36vaA53kdGpkYHDY1VGL9d0WWRgzso39yZ/ic8+tvjbP/Rg/+c/m3/yX4Kj58829+nzx7T", + "B88ePYCHf3vyeB8ezJ4+mz7MHj5+OH388PHTJ8/SR48fTB8/ffafdwwfMiBbQEc+9HH0v1gZKTl8e5Sc", + "GGBrnNCCVcVXDRn78g40xZMIS8ry0YH/6f/4EzZJxbIe3v86cpFVo4XWhTrY2zs/P5+EXfbmqNBLtCjT", + "xZ6fp1v08u1R5R1vTcG4o9bx2ZACbqojhUP89u7b4xNy+PZoUhPM6GC0P9mfPMBiZgVwWrDRwegR/oSn", + "Z4H7vueIbXTw8dN4tLcAmqP9y/yxBC1Z6j+pczqfg5y4Ohfmp7OHe16U2PvolJmfzKjzWJoO6+cfOHd3", + "yz84wwg6S1k//kY6ZeWy+46rJNtO18AzdL+2+kHD2ipkHWV1NsmjmlH5nBo2ydjBr5HqXzM2L2Wr3nnl", + "qeIy8DNF/vv4pzdESOKeNG9pehqarpEg/1WCXNcE41hZmB3LJ0R2jtBLNS+aXoO1mBQrLBuro4Ezm30O", + "KLWyK9ScSMsSQkhqvmp45X7y7MPHJ3/7NBoACBq5FGDs9O80z3+3BelhhZYCn33ERZePI8l/UTwe13pq", + "7FBv0xjdHquvYX2Hqk3T2f53Ljj83rcNDrDoPtA8Nw0Fh9gefMDoXqQEPEQP9/evrDBMFV9inSerUTxJ", + "XGCgLoexnyK1Hn19mJ5Cj4+vcKFN77BLL7c9XGfRz2mGOfdBabuUB1/tUo442pkNxyf2Rvs0Hj35ivfm", + "iBueQ3OCLYPUGd1b5Gd+ysU59y2NNFMul1SuUVYJCoO0YtfoXKHKElmkPduNUgCjD596r7S9MNP53seG", + "qTK71IXXKfJw9HLLHXhH9XHObuK5ViJ1873Kk43GLJctHjN3q3sT8n3YG7k3xnHbKOlS8roMdiHFGcsM", + "H3ZeHj7dTQ3bHRWGuEdv5ED/e3s5f9bL+bCpmmhkLosB0yDxjTB1fBkuezt24w9adbAuVGcqyFd+gayv", + "n7UYR+tlaGf6EHu4beXCt7jrwV2fDBTAW4lDzTzzn5/ven/f6ppo3AefkSt/5RLda5obOgmW24qFtOn8", + "biW9v4ykV7m32XKWPoPt5WQ/rGWx99GnaLwCec+lqBwg6YVv5qBvkELwboud3JvYfIthm4vxDOfPtlWG", + "w8SZt9Lb55beuhlnY2DUeURvTmJDGBZ1StpdCk02KsjslDr3KxXR/sLI6pXJDKTbpbEL8MaOpOU48Wfj", + "mX9KCcsh7Va2+kvLVpUL+aWkq0bOaBeUEFiXLqV3a+vVmK7ErGYYQcDZqgqd7giP6/oWhsVgyhEfba7G", + "/tmHlk37IrSbNe48Crvy0/cQvj6fr49ebhOdviIlzuDUV5FbIL43n5uXRg0G767HYDCMNz3ef3x9EIS7", + "8EZo8h3e4p+ZQ35WlhYnq11Z2CaOtDe1STk3cSXeYkvIKOpkmwGPwpzmYUJP6yhx11WSC4Ok702IT/2p", + "qkTmLjR3LmhepyChcm47GR5nkEDu+D8PcPw7E/KdkIRxrcbo76Vdlmtyh3F98ODho8euiaTn1p2q3W76", + "9PHB4TffuGZ1olf7vuk0V1oeLCDPhevg7obuuObDwf/+/R+TyeTOVnYqVs/Xb2xWpS+Fp3afdeHG9+3W", + "V75JsVe6y3a1FXXXYnB/LlZR7i9Wt7fPjd0+Bvt/iltn2iQj9wCt1JONUNQrvIXsMdnlHhr7xKmG71SX", + "yYS8ES4rQJlTSYTMQLrKD/OSSso1QDbxlEpmGP6LUdBpzoBr82DEXPYyUSwDG0w5LyVkJGdLLPYo4Qzd", + 
"1HF6fMs3INjO6NGZ9Ytl8q/pKqzSXV3TWrglY9z1kq58NQ3MFy8k/vTNN2R/XL9a8twMkFSIiTHXJV2N", + "rlHbVxHbIBfwZsLrrT6yOPYQzVEt/djCRbSZXfevzbm/Wondkrvb2CvinDtbc2prTag/cLH3GzUHVrCz", + "tTaw+MOaVLGxRsrzIlScxZkZhioFvmDbwFaVdPTx2Ubv7SG+ffxfipW0CWpHtoGBn2rvI9oyQp7RObcY", + "uPYnsoEGBiEplt4iJMgMdLpwAbEtvEZ4j8+l3c94NlVSu2qRBbeom8o1zGuGFb4GBsoHsYpolQMZodCf", + "fFpL85nNMN1BlSfdFwxEexPzNXSq8jmuyBhT3r3ex82aXdwJyhf15F1pC9FyFUbNWwTvhuAO5/vW12pB", + "jLlF/Bkc8P07MSFvRB2W7dKE/xntiZ/z2v7cC3ojOFjDuRFrLS3e2kgrmQL184gUn4/DPk6qhK0Xli/2", + "fNWhjULGD7bmz0ZBY8jtbSb7Kq/wH6L1PBu3jFnbZGuygXq0IczZNLS5VZtZVW/wiXIj/PQLfLfcBMe6", + "HhaDh9TzGScW8KtlOpjixhLzXpVQs48DxXMUD+ZGWlS+ZdG0wlPIBZ+rL5MVbaKOOF4iVFJlb46naP7r", + "nd0XmD2HC5+o0uVTUoynYKtq+cLKS6aU84B8vP+364NQs6XPQcfDUNIb5i5P9h9d3/THIM9YCuQEloWQ", + "VLJ8TX7mVQW0y3A7TEBd5Tfzqt5oznE0JTXzbqVhkqCLM8GGP9pHvWLZp+3MMMiRtyMfZDzgg2HeQ1oU", + "QOXFGeB2u9RJa8ajl6HLbyMvcpWxKgKKQdGOXu//MRqod8IodDFzl1/JLaA+u5ZjE84fV8zGleeLkQLE", + "7IC85/eJWtAnDx7+9vDJU//nwydPezRnZh6XFKerO6sHMp/tMEMUaF+uru9qRfIKeQfXvZW77dB4xLJV", + "NAlqXeggPBfOMQf5xB1FCrruzZ1cbCnUEA5bF224/kyBSrPpIvp48m+bqpTjEX9ePXFtOjtX3+C2QENP", + "uEPARAyh1ZUaKqxvLtqwQVRskWWVHf+6X551WIC9xTzyZOtCuVEpVt/UCzTBByhwL7U00XJzAiMm6h0H", + "huqqOC56nZRFIaSuTreaDJLloM/g1hDl+gh3J0ktpTpdlMXeR/wPpsf6VIcK2HJ0gYXO/W4LR+9Z+/sm", + "Ie7YtrjkndiSlq3VXzaZk8/U5nwCxIy8ZqkUh5j/2V03aq00LDvp9FzX3zaVJI5eTYLnjEOyFDyW5O0n", + "/PoaP0YzTAtN877OJ+ZjX98Wc2zC3wKrOc8QznhZ/H4h7+xL6Ydaq5VgjnFdtMfS/45HzR+aNU+7J2nN", + "0+4xa1QO6vl572PjT+d941qqRakzcR70xded5UVDDO9B8unhSvHqwdNK4qxIBsoQ7dengQrwEDsx1ddI", + "9q8gxXhvArC/qE5qxnjWIhKUKFNxBlJV2grpHWVuFVN/HsXU4H3ficfaVJbbOFqprlYieSMysOM2s8fG", + "Aj25yMBl3OwKIpUMFn/v+1upbtd6gaW0nC80KQuiReytV3dMaGqZrK0tprYVY7KtfNGRMyA0l0CzNZkC", + "cCKmZtHNonaEKnRy9w9GJ2nGawrVcBVSpKAUZIkPbN0GWpXHFJ+XegOeEHAEuJqFKEFmVF4a2NOzrXBW", + "ub8VufvjL+reDcBrRcHNiLWutRH0Vh4+TtrrQj1s+k0E1548JDsqgXjRAPVbYlnk4DRcERTuhJPe/WtD", + "1NnFy6MFVUDsM1O8n+RyBFSB+pnp/bLQlkVi7u9I1TP79YQtURLjlAsFqeCZ6q9NuI0tY/2NYC3KrCDg", + "hDFOjAP3PDhfUaXfOUtGWMIpqPNhpthQTLEvx7wZ+Zcqw3xn7NTch1yVqkpD7xQYkMXWwGG1Ya43sKrm", + "QlOSH7vSkGhBSgXbRu7DUjC+Q5YKqyPqwAaEVTi6i8NsJNQpKLqobABRI2ITIMe+VYDd0D7RAwhTNaKr", + "kmdNypkKkQPlVtEsisJwC52UvOrXh6Zj2/pQ/1y37RKXqwSE93YmQIXaKwf5ucWswnCLBVXEwUGW9NQp", + "uOYuW1MXZnMYE7Q6J5so3xzLY9MqPAJbD2lZzCXNsGoejahSfrafif28aQDccU+eWJI0mcIsWtXDbHpN", + "ybJXRVQNLXA8FRMesYKpIqk5gjMsM+MJxPXeMnIGPeVTT4KSbq45zhXdIj8eLttudY9ayoxhdtySA0Ls", + "GPoQeHvQUI18cUxg56TWHrSn+DsoN0ElRuw+yRpU3xLq8XdaQFubF95fjYuixd1bDDjKNXu52BY20ndi", + "Y/rDrzIcr222/YwOZ039afD+m1zkbbt3TplOZkK6otB0pkFGVHmtMgKUaR/tZw0oWjh3CIIjuGvTjeOK", + "C9cpMxwTsSAQX8+SLSMZeMxU3wk5KGSn6btGmSYl1ywPwparl/KXpy+81QHc6gBudQC3OoBbHcCtDuBW", + "B3CrA7jVAdzqAG51ALc6gL+sDuCmwvQSL3B4/2YueMJhTjU7gyp+7zZt0J8qrKW6qrxOArUY55Rpl4ST", + "UC8G4JfLRfVpoDnigOXIYwuherMbYSVlJUqZAkkNhIyTIqfmaQArXaWEayYb9emPXS1lzF9KFTx6SI5/", + "OPQO+gvnSN5se/fQpRFXep3DPZeXoSp26hM0ADdId/kZqL8SfOo4l0iP5UCUQe+32PolnEEuCpDW95do", + "WUY0PidA8xcON1sUPo1ylma038cNPZND25IWQc14XCtVhGIwR6sa5Yzmqr8cpR1vSYtY9rbq4rOqIOQm", + "z0W2bp0Qs2t7uIHNs1G76TNO5ToSf9M5ER3S0MLwK0dYXV3WpysPJukSbZfMtlFYTFqXoKLneBOVR6Mo", + "qg3rDGUjeWYtOonWYm6HDowqAIc4wBp69ntC3tl+NxuHjhC5I1Yz8y/Gb7DZsmIa2NY8Ihzr+VqDxj3i", + "o6cXz/7YEHZWpkCYVsTHo2y/XsajVWJGmgNPHANKpiJbJw32NWrcQhlTVClYTrffRCH/dPmK3eVjvmy+", + "p27mGnkZLG4TTw6JZpU4BtzDndcaBvPmCls4omPPAcY/N4vuY6MhCMTxp5hSqV0lZkemV0+zvmV8t4wv", + "OI0tiYBxF7/XZiKTz8j45FqWvJ/nfbuCtDTAhSf5Lmrn0SQHK92wa2YwLedzzLvcsdGZpQGOxwS/IVZo", + "lzuUC+5GQXbwKhfnZTNEtYfrcpcgVu2ukGQuRVncswWm+BqNGcuC8rU3+UKi2LLMLQ5tVrurZbQ2xK7r", + 
"CIDmWKf769Nqv/Uqv0B3667a5u8WLeScKmL3FzJS8sxFDnUCcVd8eM5nO/TJitdsemPWZ7veyOrcvEOu", + "CL/LLsSlMnMXIBO94vZANROz24Bfe3Int/lm/xrXxltbyK2HwXaDV2uGcEW3hwz4Gl4fQf6ROhSuWSXL", + "1vDrCxwJk5HYllfqPNIZvulDElTQszZSyAtCfTGAVHClZZnq95yijSZY2KTrX+K10f387YVvEjcTRqx4", + "bqj3nGKu+MpyE+VzM4iYKb4D8GxUlfM5KMMrQyKZAbznrhXjpOTmpSVmZMlSKRIbhmrOkJFPJrblkq7J", + "jOZoZPwDpCBTc7MHu24VxkqzPHcOLWYaImbvOdUkB6o0ec0MlzXD+URhlScX6HMhTyssxNNXzIGDYiqJ", + "K1++t18xQ4RbvlfyocLSfq4ju683NYSHnWW9kB+9NHBTzHSTM6VrH4gO7Ndm/14ynkSJ7GQBxLmEtWmL", + "3DWM1xPQvaZ1SC/gPTc3nBYEuTrVFyOHtpmncxbt6WhRTWMjWtYgv9ZBT7wr4TIkwmRuTSt/osDMgA68", + "+RI3HqvItPd+RzPKxsKUsa8uXVhPI/dIAP/ZniK8482yIC0l02u0Q9CC/XYK5v8fPn0w3+SZN1GUMh8d", + "jBZaFwd7e1hxciGU3ht9GoffVOvjh2rlH721oZDsDHNUf/j0/wMAAP//y2yOEiA2AQA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go index 9052808c03..a95cdb1458 100644 --- a/daemon/algod/api/server/v2/generated/participating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go @@ -158,179 +158,167 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9+3PbONLgv4LS91XlcaLtPCa7cdXUd048M+vLY1OxZ/a+jXMzENmSsKYALgDa0uT8", - "v1+hAZAgCVKU7XF2r/JTYhGPRqPRaPTzyyQVq0Jw4FpNDr9MCirpCjRI/IumqSi5Tlhm/spApZIVmgk+", - "OfTfiNKS8cVkOmHm14Lq5WQ64XQFdRvTfzqR8M+SScgmh1qWMJ2odAkragbWm8K0rkZaJwuRuCGO7BAn", - "x5PrgQ80yyQo1YXyrzzfEMbTvMyAaEm5oqn5pMgV00uil0wR15kwTgQHIuZELxuNyZxBnqk9v8h/liA3", - "wSrd5P1Luq5BTKTIoQvna7GaMQ4eKqiAqjaEaEEymGOjJdXEzGBg9Q21IAqoTJdkLuQWUC0QIbzAy9Xk", - "8NNEAc9A4m6lwC7xv3MJ8DskmsoF6MnnaWxxcw0y0WwVWdqJw74EVeZaEWyLa1ywS+DE9Noj70qlyQwI", - "5eTjj6/Js2fPXpqFrKjWkDki611VPXu4Jtt9cjjJqAb/uUtrNF8ISXmWVO0//vga5z91CxzbiioF8cNy", - "ZL6Qk+O+BfiOERJiXMMC96FB/aZH5FDUP89gLiSM3BPb+E43JZz/q+5KSnW6LATjOrIvBL8S+znKw4Lu", - "QzysAqDRvjCYkmbQTwfJy89fnkyfHFz/x6ej5O/uz++eXY9c/utq3C0YiDZMSymBp5tkIYHiaVlS3sXH", - "R0cPainKPCNLeombT1fI6l1fYvpa1nlJ89LQCUulOMoXQhHqyCiDOS1zTfzEpOS5YVNmNEfthClSSHHJ", - "MsimhvteLVm6JClVdghsR65YnhsaLBVkfbQWX93AYboOUWLguhE+cEH/usio17UFE7BGbpCkuVCQaLHl", - "evI3DuUZCS+U+q5Su11W5GwJBCc3H+xli7jjhqbzfEM07mtGqCKU+KtpSticbERJrnBzcnaB/d1qDNZW", - "xCANN6dxj5rD24e+DjIiyJsJkQPliDx/7roo43O2KCUocrUEvXR3ngRVCK6AiNk/INVm2//X6V/fEyHJ", - "O1CKLuADTS8I8FRk/XvsJo3d4P9Qwmz4Si0Kml7Er+ucrVgE5Hd0zVblivByNQNp9svfD1oQCbqUvA8g", - "O+IWOlvRdXfSM1nyFDe3nrYhqBlSYqrI6WaPnMzJiq6/P5g6cBSheU4K4BnjC6LXvFdIM3NvBy+RouTZ", - "CBlGmw0Lbk1VQMrmDDJSjTIAiZtmGzyM7wZPLVkF4PhBesGpZtkCDod1hGbM0TVfSEEXEJDMHvnZcS78", - "qsUF8IrBkdkGPxUSLpkoVdWpB0aceli85kJDUkiYswiNnTp0GO5h2zj2unICTiq4poxDZjgvAi00WE7U", - "C1Mw4fBjpntFz6iCF8/7LvD668jdn4v2rg/u+KjdxkaJPZKRe9F8dQc2LjY1+o94/IVzK7ZI7M+djWSL", - "M3OVzFmO18w/zP55NJQKmUADEf7iUWzBqS4lHJ7zx+YvkpBTTXlGZWZ+Wdmf3pW5ZqdsYX7K7U9vxYKl", - "p2zRg8wK1uhrCrut7D9mvDg71uvoo+GtEBdlES4obbxKZxtycty3yXbMXQnzqHrKhq+Ks7V/aezaQ6+r", - "jewBshd3BTUNL2AjwUBL0zn+s54jPdG5/N38UxS56a2LeQy1ho7dfYu6AaczOCqKnKXUIPGj+2y+GiYA", - "9pVA6xb7eKEefglALKQoQGpmB6VFkeQipXmiNNU40n9KmE8OJ/+xXytX9m13tR9M/tb0OsVORh61Mk5C", - "i2KHMT4YuUYNMAvDoPETsgnL9lAiYtxuoiElZlhwDpeU6736PdLgB9UB/uRmqvFtRRmL79b7qhfhxDac", - "gbLirW34QJEA9QTRShCtKG0ucjGrfnh4VBQ1BvH7UVFYfKBoCAylLlgzpdUjXD6tT1I4z8nxHvkpHBvl", - "bMHzjbkcrKhh7oa5u7XcLVYpjtwa6hEfKILbKeSe2RqPBiPD3wXF4ZthKXIj9WylFdP4L65tSGbm91Gd", - 
"/z1ILMRtP3HhK8phzj5g8Jfg5fKwRTldwnG6nD1y1O57M7Ixo8QJ5ka0MrifdtwBPFYovJK0sAC6L/Yu", - "ZRxfYLaRhfWW3HQko4vCHJzhgNYQqhufta3nIQoJkkILhle5SC/+QtXyDs78zI/VPX44DVkCzUCSJVXL", - "vUlMygiPVz3amCNmGuLrncyCqfaqJd7V8rYsLaOaBktz8MbFEot67IdMD2Tk7fJX/A/NiflszrZh/XbY", - "PXKGDEzZ4+wsCJl5ytsHgp3JNEAVgyAr+3on5tW9E5Sv68nj+zRqj36wCgO3Q24RuENifefH4JVYx2B4", - "JdadIyDWoO6CPsw4KEZqWKkR8B07yATuv0MflZJuukjGsccg2SzQiK4KTwMPb3wzS615PZoJeTPu02Ir", - "nNT6ZELNqAHznbaQhE3LInGkGNFJ2QatgWoT3jDTaA8fw1gDC6ea/gFYUGbUu8BCc6C7xoJYFSyHOyD9", - "ZZTpz6iCZ0/J6V+Ovnvy9Nen370wJFlIsZB0RWYbDYo8dG8zovQmh0fdleHrqMx1fPQXz70WsjlubBwl", - "SpnCihbdoax204pAthkx7bpYa6IZV10BOOZwnoHh5BbtxCruDWjHTBkJazW7k83oQ1hWz5IRB0kGW4lp", - "1+XV02zCJcqNLO/iKQtSChnRr+ER0yIVeXIJUjERMZV8cC2Ia+HF26L9u4WWXFFFzNyo+i05ChQRytJr", - "Pp7v26HP1rzGzSDnt+uNrM7NO2Zfmsj3mkRFCpCJXnOSwaxcNF5CcylWhJIMO+Id/RPo0w1PUat2F0Ta", - "/0xbMY4qfrXhafBmMxuVQ7ZobMLt32ZtrHj9nJ3qgYqAY9DxFj/js/4Yck3vXH5pTxCD/bXfSAssyUxD", - "fAW/ZYulDgTMD1KI+d3DGJslBih+sOJ5bvp0hfT3IgOz2FLdwWVcD1bTutnTkMLpTJSaUMJFBqhRKVX8", - "mu4xy6M9EM2YOrz59dJK3DMwhJTS0qy2LAga6Tqco+6Y0NRSb4KoUT1WjMr8ZFvZ6azJN5dAM/OqB07E", - "zJkKnBEDF0nRwqj9ReeEhMhZasBVSJGCUpAlTkWxFTTfzjIRPYAnBBwBrmYhSpA5lbcG9uJyK5wXsEnQ", - "Hq7Iwze/qEdfAV4tNM23IBbbxNBbPficPagL9bjphwiuPXlIdlQC8TzXvC4Ng8hBQx8Kd8JJ7/61Iers", - "4u3RcgkSLTN/KMX7SW5HQBWofzC93xbasujx8nIPnTO2Qr0dp1woSAXPVHSwnCqdbGPLplHjNWZWEHDC", - "GCfGgXuEkrdUaWtNZDxDJYi9TnAeK6CYKfoB7hVIzci/eFm0O3Zq7kGuSlUJpqosCiE1ZLE1cFgPzPUe", - "1tVcYh6MXUm/WpBSwbaR+7AUjO+QZVdiEUR1pXR35vbu4lA1be75TRSVDSBqRAwBcupbBdgNPV16AGGq", - "RrQlHKZalFO510wnSouiMNxCJyWv+vWh6dS2PtI/1227xEV1fW9nAszs2sPkIL+ymLU+TktqntA4MlnR", - "CyN74IPYmj27MJvDmCjGU0iGKN8cy1PTKjwCWw9pWSwkzSDJIKeb7qA/28/Efh4aAHe8fvgIDYn1Z4lv", - "ek3J3n1gYGiB46mY8EjwC0nNETQvj5pAXO8tI2eAY8eYk6OjB9VQOFd0i/x4uGy71ZER8Ta8FNrsuCUH", - "hNgx9DHw9qChGvnmmMDOSf0sa0/x36DcBJUYsfskG1B9S6jH32kBPco05wYcHJcWd28x4CjX7OViW9hI", - "34nt0ex9oFKzlBX41HkDmzt/+bUniNqbSAaashwyEnywr8Ai7E+sI0Z7zJu9BEcpYbrgd7QwkeXkTKHE", - "0wT+Ajb45P5gPfzOAr/AO3jKRkY11xPlBAH1fkNGAg+bwJqmOt8YOU0vYUOuQAJR5WzFtLYum82XrhZF", - "Eg4QVXAPzOisOdY7zu/AGPPSKQ4VLK+7FdOJfRIMw3fWehc00OGeAoUQ+QjlUQcZUQhGGf5JIcyuM+ch", - "7N1IPSU1gHRMG0151e3/QDXQjCsg/y1KklKOL65SQyXSCIlyAsqPZgYjgVVzOhN/jSHIYQX2IYlfHj9u", - "L/zxY7fnTJE5XHm3etOwjY7Hj1GN80Eo3Thcd6AqNMftJHJ9oOYf7z3nvNDiKdtNzG7kMTv5oTV4ZS4w", - "Z0opR7hm+bdmAK2TuR6z9pBGxpnXcdxRSv1g6Ni6cd9P2arM72rD55TlpYR+69j5+af56vz8M/nRtvSG", - "7akn8hAdV3VYxNzdRqVE1xqSM/O+lYJmRkCI6vZxkXyRVM6ZKgrOShlw/ubOIeWbViDfWBjIDFJaWq9k", - "x7UdBLV7qNqLyIut3W2jMLqQkerxMtf20g6xupCiLIiqtt1SgaYa/hhVcz10DMruxIFvUP2xzz3IPBPz", - "zR3c1nYgIqGQoJC3huoVZb+KeRh/45iv2igNq64G2nb9ted99rH3nSN4zjgkK8FhEw05ZRze4cdYb8vf", - "ezrjTdvXty08N+BvgdWcZww13ha/uNsBQ/tQ+cXdwea3x20ZH8LII1SuQV4QStKcoepNcKVlmepzTvFx", - "Hxy2iP+Af8b0q3te+yZx/VJE/eOGOucUfUeqJ3+UL84hwpd/BPBaH1UuFqB0S0qcA5xz14pxUnKmca6V", - "2a/EblgBEo34e7blim7InOaonfodpCCzUjeZKwZIKM3y3FlCzDREzM851SQHw1XfMX62xuG8JdHTDAd9", - "JeRFhYW96HlYAAfFVBL3c/jJfkUXNLf8pXNHw2hV+9nqzs34dRTFBt/+dQTm/3n4X4efjpK/0+T3g+Tl", - "/9j//OX59aPHnR+fXn///f9t/vTs+vtH//WfsZ3ysMfc9x3kJ8fuTXFyjIJjrTzvwH5vitMV40mUyEIT", - "cYu2yEMj/noCetRUK+glnHO95oaQLmnOMqpvRg5tFtc5i/Z0tKimsREtNYJf647i2C24DIkwmRZrvPE1", - "3nUNigfKoDXHxb7geZmX3G5lqZxFCf3AvYuGmE+rYCibBOGQYKTMknr/Ivfn0+9eTKZ1hEv1fTKduK+f", - "I5TMsnUsjimDdUzKdgcED8YDRQq6UaDj3ANhj3qjWKN4OOwKzPNMLVlx/5xCaTaLczjvXete62t+wq3b", - "qzk/aBvaOJWzmN8/3FoCZFDoZSw4uiEpYKt6NwFa9vpCikvgU8L2YK/9Ws4WoLxfTA50jkG6aN8QY6IF", - "qnNgCc1TRYD1cCGjnqQx+kHh1nHr6+nEXf7qzuVxN3AMrvaclSHI/60FefDTD2dk3zFM9cCG1NmhgyCo", - 
"iBbK+fk3PDkMN7MpIWxM4Tk/58cwZ5yZ74fnPKOa7s+oYqnaLxXIVzSnPIW9hSCHPnTgmGp6zjuSVm/W", - "liBogxTlLGcpuQgl4po8bSR+9NlI84UwD8e2Ubsrv7qpovzFTpBcMb0UpU5cqHEi4YrKmNFAVaGmOLJN", - "FDA065S4sS0rdqHMbvw4z6NFodohZ93lF0Vulh+QoXIBVWbLiNJCelnECCgWGtzf98JdDJJe+Tj1UoEi", - "v61o8Ylx/Zkk5+XBwTMgjRis39yVb2hyU0BDX3mjkLi2rhIXbt81sNaSJgVd9CgNNNACdx/l5RU+svOc", - "YLdG7Jf3bcWh6gV4fPRvgIVj5zgWXNyp7eVzxsSXgJ9wC7GNETdqi+lN9yuIBrvxdrUiyjq7VOplYs52", - "dFXKkLjfmSqVxMIIWd6MrdgCXQVd1o0ZkHQJ6QVkmAAAVoXeTBvdvaeEEzQ962DKJsqwsRwYzY2q3RmQ", - "ssioE8VbCiWDYQVae1/Fj3ABmzNRB4PvEkfbDOtUfQcVKTWQLg2xhsfWjdHefOeOg7quovDRkRgm48ni", - "sKIL36f/IFuR9w4OcYwoGmGHfYigMoIIS/w9KLjBQs14tyL92PLMK2Nmb75IXg3P+4lrUj+enOdMuBqM", - "prTfV4BZd8SVIjNq5HbhEsbY0MWAi5WKLqBHQg616yMDBBsaeRxk270XvenEvH2hde6bKMi2cWLWHKUU", - "MF8MqeBjpuUv5WeyBhyrQCWYB84hbJajmFQ5llmmQ2XDymETW/WBFidgkLwWODwYTYyEks2SKp/LBlP+", - "+LM8Sgb4A0NxhxIwnASuPkFen0rx7Xlu+5x2XpcuDYPPveATLoRPyxHJE4yEj97Fse0QHAWgDHJY2IXb", - "xp5Q6rDgeoMMHH+dz3PGgSQxryGqlEiZTUZUXzNuDjDy8WNCrAqYjB4hRsYB2GiYxIHJexGeTb7YBUju", - "wpqpHxtNmsHfEI/AsH60RuQRhWHhjPd4bHsOQJ2rWXV/tRwecRjC+JQYNndJc8Pm3IuvHqSTBwDF1lbU", - "vzONP+oTZwc08PZi2WlN9iq6yWpCmckDHRfoBiCeiXViQ7CiEu9sPTP0HnUtxoCw2MG0GRceKDITa3S3", - "wKvFurJugaUfDg9G8MJfM4X0iv36bnMLzNC0w9JUjAoVkoxT51Xk0idOjJm6R4LpI5eHQRKFGwHQUnbU", - "6Ubd43frI7UpnnQv8/pWm9bJgXzURuz49x2h6C714K+rhanSHjgVwkdIhcz69RSGUJmu8rd21Qsu+6zh", - "G6MTIwzkkj1qvjb8E6K7cz1eAQ146nkGEHFsY446kPywLoSRbm1Mkk1Q4ZBi5UQJNtRSWZ2VYnyRQ+W5", - "GUVTbMHeJ8lj3C65TjjlBxwnO8c2t+eRPwRLUcTh2OWl8tHhZwCKnlNew4Fy+C0hcUkqBmG57qePD23R", - "PnpQmu41zdQowVsrdjsY8ulaM7s2UwU54Os5abw2kouYjfv8/JMCFM1OfbdAy4cJWCjfPAp8tiQsmNJQ", - "W5uMBOsxfd96fIp534SY969OF3Ju1vdRiEqes4mFsGNjmfe+AvR5njOpdIKmuugSTKMfFWqffjRN44+K", - "pleYTYHKsvglitNewCbJWF7G6dXN++bYTPu+kh1UOUPBhHECNF2SGabsjfqKDkxt3YkHF/zWLvgtvbP1", - "jjsNpqmZWBpyac7xb3IuWjfdEDuIEGCMOLq71ovSgQs0CPHtcsfggWEPJ16ne0Nmis5hyvzYW/2rfKBx", - "nzBnRxpYC7oG9TrnRhxyrB+ZZep1tv5oMC4XOmkoPyLoqhQ8StMLG1DW3GC+qHQqcbcp+64eNbRru2VA", - "Pn48vn04JwQnOVxCvt0JmiLGvQIHPSPsCOh6QzCcwPt4bJfquztQI6xaaRvGKLV0pJshw239NHL58+q3", - "NRKswZ2LfB9tvTMSmqe3mr67pruiSDLIIRpn9rcgkIwWBWaL8I1jAT1mMMYzWMfBsZ+msZz6XeV9ybi2", - "+VfvKrVja5zxyw4TII5BQWFT9e2ePrL/jRnsUojm/kX1EGVlHBhkxDh49bILqpG0qa/nGqdFwbJ1y+5p", - "R+3Vjt8JxvCCcoNtwUBAG7EIRgmqmfiyVubZ9OuNvFN7ozBz1kxPGco04VRM+eIhXURVEc7bcHUGNH8D", - "m19MW1zO5Ho6uZ2ZNIZrN+IWXH+otjeKZ3TDs2azhtfDjiinRSHFJc0TZ0zuI00pLh1pYnNve75naS3O", - "9c5+OHr7wYF/PZ2kOVCZVK+d3lVhu+LfZlU2x2bPAfHFCZZUV/o5+xoONr9KDBgaoK+W4BLBBw/qTsba", - "2rkgOIrOID2PewNvNS87Pwi7xAF/CCgqd4jaVGe9IZoeEPSSstzbyDy0PZ67uLhxd2OUK4QD3NqTIryL", - "7pTddE53/HTU1LWFJ4VzDaSqX9lqDIoI3naXM69gNL0hqa4o5pu1FpAuc+LlCq0GicpZGren8hmG2HDr", - "J2MaE2zc8542I5asx+2KlywYyzRTI5TaLSCDOaLI9LmL+3A3E66MVsnZP0sgLAOuzSeJp7J1UFF/6izr", - "3es0LlW6ga01vh7+NjJGmGu5feM5mWtIwAi9cjrgHldaP7/QyvpkfgjcD3Zw7gtn7FyJA455jj4cNdtA", - "hWXTu2a0hL615JbXv7mkzz1zREtoMZXMpfgd4qoq1PBFokN9dmmGHq2/Ax8RUlZbcupKYPXsvdvdJ92E", - "FqemQ2IP1ePOBy44mObWW6Mpt1ttK9o0/NrjBBNGkOzb8WuCcTB3om5yejWjsRzARsgwMAXml4bdXAvi", - "O3vcOxsNcwm/90jgN1a1ZTbxRwGyDtzuJhG7ocBgpx0tKtSSAVJtKBNMra9PrkRkmJJfUW4LI6E1Ao+S", - "620e+F4hdCUkpu1RcRN/BilbRZVL5+efsrRrzs3YgtmyQKWCoO6MG8jWU7NU5Gr3WHe6GjUnc3IwDSpb", - "ud3I2CVTbJYDtnhiW8yoAqtU8Z4bvotZHnC9VNj86Yjmy5JnEjK9VBaxSpBKqMPnTeWoMgN9BcDJAbZ7", - "8pI8RBcdxS7hkcGiu58nh09eooHV/nEQuwBc/a8hbpLNwyDXOB2jj5IdwzBuN+peVBtgizb2M66B02S7", - "jjlL2NLxuu1naUU5XUDcK3S1BSbbF3cTbQEtvPDMVhxTWooNYT3hxqCp4U89kWaG/VkwSCpWK6ZXzpFD", - "iZWhp7qojJ3UD2fLl7l84B4u/xH9oQrvDtJ6RN6v3cfeb7FVo9fae7qCJlqnhNpcTTmrPRV9lQJy4lPB", - 
"YYL0Ki+6xY2ZyywdxRx0XJyTQjKu8WFR6nnyZ5IuqaSpYX97feAmsxfPI0nhm8mJ+W6A3zveJSiQl3HU", - "yx6y9zKE60secsGTleEo2aM6sjM4lb2OW3EXnT4/oeGhxwplZpSkl9zKBrnRgFPfivD4wIC3JMVqPTvR", - "484ru3fKLGWcPGhpdujnj2+dlLESMpbftT7uTuKQoCWDS/TTj2+SGfOWeyHzUbtwG+i/rvHUi5yBWObP", - "cu9DYBeLT/A2QJtP6Jl4E2tP09LTkLmiZh984YyzgNiap9vsHrephtTovAtUnkOPg65HidAIgG1hbLcX", - "8O1VDIHJp7FDfThqLi1Gma9EZMm+hEZl43ERkxG9Vd8FYj4YBjVzQ01Js1zB/XvUeLNI17PDfPGw4h9t", - "YL8ys0Ek+xX0bGJQSiW6nVn1PXAuo+SVWI/d1Bbv9hv7L4CaKEpKlme/1LlBWpVqJOXpMuosMjMdf61r", - "alaLs4c5muB3STm33ghd3QS+Un71r5nIe+sfYuw8K8ZHtm0Xz7HLbS2uBrwJpgfKT2jQy3RuJgix2ky7", - "UIX15QuREZynziZb3+vdoktBaYx/lqB07F7EDza0ADXqc0PFtkIF8Az1GHvkJ1sTfwmkkSsQ9Qc2SxNk", - "vk6ANfWURS5oNiVmnLMfjt4SO6vtYyvD2coQC3vtNlbR75+7i6PtkG/tXUT0mVUrjak7laarIpaixLQ4", - "8w0wD0poXcKHdYidPXJsdRrKv5jtJIYe5kyuICPVdE6qRpow/9GapktUFjRYaj/Jjy9p4qlSBWWEq3KA", - "VfZoPHcGblfVxBY1mRJhJIcrpmwpdLiEZlaUKkWQEwN8lpTm8mTJuaWUqFQ8lMLqJmj3wFkvSG+AikLW", - "QvyO0otzU9+xwssp9opms2yXi+nUD7Y5Nqoyb+98BWjKBWcp5pKMXc2urPoY6+yItJvxyADnb6MmkcMV", - "LVJTBWs4LPaWrfGM0CGuax4KvppNtdRh/9RYv3tJNVmAVo6zQTb1tZachppxBS4bOFbYD/ikkA2LN3LI", - "qBNFLSfvSEYYnN2jcvjRfHvvFFIYtXjBOD49fYyEDZC0OmSs+qzNe5VpshAYQeEORbimT6bPHiZryWD9", - "ec9XicYxrMHYLNt6R3SHOvK+Es43wbR9bdrahHr1z404ODvpUVG4SfsrcUXlAb3mvQiO2LwrR68AudX4", - "4WgD5Dbo5IT3qSE0uEQXCSiIC43pqUrVCoIxQqulKGxBrH90NI9W1E30LeNQ1zCPXBBp9ErAjcHz2tNP", - "pZJqKwKO4mlnQHP0i4gxNKWdUey2Q7U22PmTFunEz9G/jXVBrR7GUTWoBTfKN1XpdEPdgTDxmuaVk1Ck", - "PBZKVU6IcsE1zYJZMcZhGLdPyNm8ALrHoCsT2e5aUntydrmJ+lKVzMpsATqhWRbTJ7zCrwS/+nSlsIa0", - "rLJ4FwVJMTNfM1Vhl9rcRKngqlwNzOUb3HK6oAJdhBrCKnh+h9HxerbBf2MprPt3xrkH7exj732Bsip8", - "bhe5uTlSR+o1NJ0otkjGYwLvlNujo576ZoRe979TSs/FognIPScoG+Jy4R7F+NsP5uII83d18rLbq6VK", - "r4XuoMLXDcZnY5UYpsmVfNRpZ84g8/KwAqK/wugUL7+euJZA10vt/Wrt2n3RLWlvMBbVLn+CpmSQBfXG", - "pFu/Mht9jlDEdfp9vmTWlcx87vQeJxl25GwcexCh3kmxC9Ab7wFNCsqc00bNLLqYdeFe/erCoUNXb3B7", - "ES6Iqldj9+ayL+DJxwHbyI5WTcYLcEmVCgmXTJTeHcL7y/knof3V1cQP4op719/1m8Gpvq4atFdpe+bq", - "/9hlujf5m1+sdyUBruXmX0CF29n0TkXLWM7iRj1LJ1xF9U167F15XBXFvLhMViIbCph+8ws59ralUfeO", - "J+RYuiWRuSpy0WDxt64EhG9mpM/R075znY6KYnjqngjx7uS24a7T96WaMudzSOv2wZ9fWwc0VCFE3ipB", - "ODOHte4p/tSOhr0CAusCMNdtENjcnz1jLEG5IEd8rSY5UAUDGA6ztrm2I5F8tn5r2o8Lto9XYu1POVun", - "mUXmWQjF6uI8sRKtI12Oz7DKamAx7I7l/f0uIdVYkan2Y5IAuyTQNZMF5b+/pZ7tUZRUntme/gfSzE4n", - "IW+JBiq640XrFDloVUOTayRVvW0TYfauMzOHpISpH8L8MKe5ildF63V2bWU+CRxWIome4ws7yUZk+3bL", - "mQY+ECwbRmQ8EsA6f///iUzr13636OzU7Bp+VXQSLwTJQ2xppb0dHEgqL2qUDHG/FsBdZfh5DDXbo6Lm", - "c0g1u9yS6OJvS+BBEoWp1wQjLPMg7wWromwwoejudo4aoKE8FIPwBIn9bw1OX4zoBWweKNKghmitp6kX", - "7m+SSxIxgLeWETwKoWJeitZ05RzHmKooA7HgvYJtd6izcvdWiQ3knBvO5UmyKfEMTBkvUzlqLtN1p0xg", - "GDDSlwujW+auX+NxjFUFVVXB3eeiDPWC5CRSCMrlssS0JJW11me1BOV/8zmI7Cw5u4Cwji3axjGFgmsR", - "VfZ6PXIyICd1or+j1aswd5afmdUxHN1430gOaPR+SnOBlZ/6wp2aYROVm9cDZZ1DUUzBSlQI1xykq/eN", - "N0MuFCRaeNe6ITiGUGE9YG+EBNVbd8EC15sN9WOd7hXrz9hkGdQ5voYLJBJW1EAng6Ss/XMOIfu1/e4D", - "XH1Orq067Ypek61ZVX30DlMdJIZUPyfuttweOHsT9TbjHGTibd1tn0JuUBnaXwspsjJ1iWCCg1GZAEYn", - "LBtgJVHNcNpdZUfJl2M28LdBGoIL2Oxb/Uu6pHwRpFcLobeivV1DkLmstdt3qvmPKznzhV3A4k7g/Jra", - "8+mkECJPegyuJ91Es+0zcMHSCyNml7Xfe0+hTfIQ7XyVR83VcuMTqxYFcMge7RFyxG2kkXeuaVY6ak3O", - "H+ih+dc4a1ba3M9Osb93zuMhG5jUR96Sv/lhhrmaAsP8bjmVHWRLGtN1T5JbSa8iZWe7/nSj3V3apUBr", - "orJQxKSUG6bqGnW+u8r9COkHVRCHXz9hJr/ai1laGxFKS3VlyKbw8q42/Yyrx+g7bAEvVNYEFRk9N3Lg", - "fGVX43cVUoKl9FJCY/nb9D9ugTVfCrZIYdSkWaZNQGzd1Jr7Eij31OtKZxbHc1e1hmn7BMecv12VnEKb", - "oU3DGhCOOZfykub3r1bDfI5HiA/IPvYLPOH7N0SyRaW6mb/fWzpq7uCte3dT8w+oBvwbmD2KGnvdUM74", - 
"U1XC9CYyTHFPc5KLui4yDkmucExrHX7ygsxcFF0hIWWKtQKMr3xVk+q5h0W+nI/lWm95X25b5y9C34KM", - "3QNBFOR9XSFBC7wfagjrI/qVmUrPyY1SeYz6OmQRwV+MR4XpbLZcFxcNs7GtONPyhxQS7th8HDiC7Wg+", - "7ibqGbs8ayI1l06poLvO0bd1A7eRi7pe21jfhy5yh9Loj3FZiFfHMN3RZ8IiBEvLEASV/PbkNyJhjrUj", - "BXn8GCd4/Hjqmv72tPnZHOfHj6Ni3L15S1gcuTHcvFGKcca0TigMrAsme5L+fXTM3V3YaL4j2AHi2Tlz", - "iFaDwam93+g9p4JGmXurgt8uzTXexs8ClPklVxPFcP9LX+yC9c/vCZNpnYWS5dm2Q9kIeqor32JYz68u", - "IPer1N791eqyu2zS1T/cxUeufQAQMZG1NiYPpgrCmUZEMrlukbglJK60lExvME+YV32yX6M+NT9V1hJn", - "Ba4yyzi5Q4sLqDLN1baVUnnJ5idBc5QFzHsGPRS1EPke+WFNV0UOjkl9/2D2J3j25+fZwbMnf5r9+eC7", - "gxSef/fy4IC+fE6fvHz2BJ7++bvnB/Bk/uLl7Gn29PnT2fOnz1989zJ99vzJ7PmLl396YO4AA7IFdOKz", - "Ukz+NxaoTo4+nCRnBtgaJ7Rgb2Bja2EaMvZVNmmKXBBWlOWTQ//T//TcbS8Vq3p4/+vEBb1PlloX6nB/", - "/+rqai/ssr9AZWqiRZku9/08nTKcRx9OqvAw6wuFO2ojfwwp4KY6UjjCbx9/OD0jRx9O9mqCmRxODvYO", - "9p5gLuMCOC3Y5HDyDH/C07PEfd/3SYQPv1xPJ/tLoDnaxM0fK9CSpf6TuqKLBcg9V27U/HT5dN+Lcftf", - "nCL5eujbfli5Z/9LQ9+ebemJji77X3wSq+HWjSxRzs4QdBgJxVCz/RlGII9tCipo3L8UfNyp/S/4POn9", - "fd+FZcY/4jPRnoF9b5SKt2xg6YteG1hbPVKq02VZ7H/B/yBNBmBZJ+guuNYNbN/W9e/+vOFp9MfuQJ36", - "cguIRlpizCPFGujx5P0TPAX2AJ1kyNd02zBti9VYlTMejqcHBzvV3R2n5m6bw7s3RZclDK3sejp5viOg", - "g/qshttyBJhXNCM+6BXnfnJ/c59wtG4bXkcsL0cInt8fBM2CJ29gQ94LTX7EB9/1dPLdfe7ECTciEM0J", - "tgySgXWPyM/8gosr7lsaIaBcrajcjD4+mi4UKlwlu6ROBAtTyn9GDb8NhG4etaMs6xC9FYZA6Vci2wxg", - "bKUWhQtSqpFWy4KMmyV0Bd9u9f0lRDxLrP3T67m5yGASSmlalnB9S57QFIcNCCcRvRQqWLF829yn7wtA", - "jbpJtPXgduRR9cdbg1dVZMrZiikvhH/jKd94irTTP7u/6U9BXrIUyBmsCiGpZPmG/MyrEPMb87ijLIv6", - "ljWP/lYeN52sk1RksACeOAaWzES28QleGxNcgH32dQSZ/S/NQjNWBJxY17+Y34z5vaqV313EbENOjjsS", - "ju3W5ryvNtg0qH5w+OmLfTeZR0H9rGmD2OGMYeL9Nm/6HOeaQ2RvFrIQunKAtIv6xoi+MaJbCTejD88Y", - "+Sb6+rAJXGjnzp76XCyx/HBUd0EZ80b5qsf3Tja++/6JvXesjx5kJPhggxzaaP7GIr6xiNuxiJ8gchjx", - "1DqmESG63d5DYxkGuidl7XKOaLbxzcucSqJgrJrjCEd0yo374Br3/aiL4sq+6SivS0NHNvBu33nfWN43", - "lvfvw/KOtjOapmBy65fRBWxWtKjeQ2pZ6kxcBZYEhMV6VXX1wK60ZOvv/SvKdDIX0kV8YK2AbmcNNN93", - "CaVav9Y5HDpfMDFF8GOgK4//ul/lSY1+bBshYl+dEr6nkU8H6D/XRsjQqIesvTLnffps2DIm+nZcv7ZR", - "He7voxf1Uii9P7mefmnZr8KPnysS+FLdFY4Urj9f/78AAAD//zKgbnPq0wAA", + "H4sIAAAAAAAC/+y9e3PcNrIo/lVQc06VH7/hSH4ku1ZV6vxkO8nqxvG6LCV7z7F9EwzZM4MVCXABUJqJ", + "r777LTQAEiRBDvWIvKnKX7aGeDQajUa/8XmWiqIUHLhWs6PPs5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPB", + "Z0f+G1FaMr6ezWfM/FpSvZnNZ5wW0LQx/eczCf+qmIRsdqRlBfOZSjdQUDOw3pWmdT3SNlmLxA1xbIc4", + "eT27GvlAs0yCUn0o/87zHWE8zasMiJaUK5qaT4pcMr0hesMUcZ0J40RwIGJF9KbVmKwY5Jla+EX+qwK5", + "C1bpJh9e0lUDYiJFDn04X4liyTh4qKAGqt4QogXJYIWNNlQTM4OB1TfUgiigMt2QlZB7QLVAhPACr4rZ", + "0YeZAp6BxN1KgV3gf1cS4DdINJVr0LNP89jiVhpkolkRWdqJw74EVeVaEWyLa1yzC+DE9FqQHyulyRII", + "5eT9d6/Is2fPXpiFFFRryByRDa6qmT1ck+0+O5plVIP/3Kc1mq+FpDxL6vbvv3uF85+6BU5tRZWC+GE5", + "Nl/IyeuhBfiOERJiXMMa96FF/aZH5FA0Py9hJSRM3BPb+E43JZz/i+5KSnW6KQXjOrIvBL8S+znKw4Lu", + "YzysBqDVvjSYkmbQD4fJi0+fn8yfHF79x4fj5H/cn189u5q4/Ff1uHswEG2YVlICT3fJWgLF07KhvI+P", + "944e1EZUeUY29AI3nxbI6l1fYvpa1nlB88rQCUulOM7XQhHqyCiDFa1yTfzEpOK5YVNmNEfthClSSnHB", + "Msjmhvtebli6ISlVdghsRy5ZnhsarBRkQ7QWX93IYboKUWLguhE+cEH/vsho1rUHE7BFbpCkuVCQaLHn", + "evI3DuUZCS+U5q5S17usyNkGCE5uPtjLFnHHDU3n+Y5o3NeMUEUo8VfTnLAV2YmKXOLm5Owc+7vVGKwV", + "xCANN6d1j5rDO4S+HjIiyFsKkQPliDx/7voo4yu2riQocrkBvXF3ngRVCq6AiOU/IdVm2//X6d/fEiHJ", + "j6AUXcM7mp4T4KnIhvfYTRq7wf+phNnwQq1Lmp7Hr+ucFSwC8o90y4qqILwqliDNfvn7QQsiQVeSDwFk", + "R9xDZwXd9ic9kxVPcXObaVuCmiElpsqc7hbkZEUKuv3mcO7AUYTmOSmBZ4yvid7yQSHNzL0fvESKimcT", + "ZBhtNiy4NVUJKVsxyEg9yggkbpp98DB+PXgaySoAxw8yCE49yx5wOGwjNGOOrvlCSrqGgGQW5CfHufCr", + 
"FufAawZHljv8VEq4YKJSdacBGHHqcfGaCw1JKWHFIjR26tBhuIdt49hr4QScVHBNGYfMcF4EWmiwnGgQ", + "pmDCcWWmf0UvqYKvnw9d4M3Xibu/Et1dH93xSbuNjRJ7JCP3ovnqDmxcbGr1n6D8hXMrtk7sz72NZOsz", + "c5WsWI7XzD/N/nk0VAqZQAsR/uJRbM2priQcfeSPzV8kIaea8ozKzPxS2J9+rHLNTtna/JTbn96INUtP", + "2XoAmTWsUW0KuxX2HzNenB3rbVRpeCPEeVWGC0pbWulyR05eD22yHfO6hHlcq7KhVnG29ZrGdXvobb2R", + "A0AO4q6kpuE57CQYaGm6wn+2K6QnupK/mX/KMje9dbmKodbQsbtv0TbgbAbHZZmzlBokvnefzVfDBMBq", + "CbRpcYAX6tHnAMRSihKkZnZQWpZJLlKaJ0pTjSP9p4TV7Gj2HweNceXAdlcHweRvTK9T7GTkUSvjJLQs", + "rzHGOyPXqBFmYRg0fkI2YdkeSkSM2000pMQMC87hgnK9aPSRFj+oD/AHN1ODbyvKWHx39KtBhBPbcAnK", + "ire24QNFAtQTRCtBtKK0uc7Fsv7h4XFZNhjE78dlafGBoiEwlLpgy5RWj3D5tDlJ4Twnrxfk+3BslLMF", + "z3fmcrCihrkbVu7WcrdYbThya2hGfKAIbqeQC7M1Hg1Ghr8LikOdYSNyI/XspRXT+G+ubUhm5vdJnf8Y", + "JBbidpi4UItymLMKDP4SaC4PO5TTJxxny1mQ427fm5GNGSVOMDeildH9tOOO4LFG4aWkpQXQfbF3KeOo", + "gdlGFtZbctOJjC4Kc3CGA1pDqG581vaehygkSAodGF7mIj3/G1WbOzjzSz9W//jhNGQDNANJNlRtFrOY", + "lBEer2a0KUfMNETtnSyDqRb1Eu9qeXuWllFNg6U5eONiiUU99kOmBzKiu/wd/0NzYj6bs21Yvx12Qc6Q", + "gSl7nJ0HITOqvFUQ7EymAZoYBCms9k6M1n0tKF81k8f3adIefWsNBm6H3CJwh8T2zo/BS7GNwfBSbHtH", + "QGxB3QV9mHFQjNRQqAnwvXaQCdx/hz4qJd31kYxjT0GyWaARXRWeBh7e+GaWxvJ6vBTyZtynw1Y4aezJ", + "hJpRA+Y77yAJm1Zl4kgxYpOyDToDNS68cabRHT6GsRYWTjX9HbCgzKh3gYX2QHeNBVGULIc7IP1NlOkv", + "qYJnT8np346/evL0l6dffW1IspRiLWlBljsNijx0uhlRepfDo/7KUDuqch0f/evn3grZHjc2jhKVTKGg", + "ZX8oa920IpBtRky7PtbaaMZV1wBOOZxnYDi5RTuxhnsD2mumjIRVLO9kM4YQljWzZMRBksFeYrru8ppp", + "duES5U5Wd6HKgpRCRuxreMS0SEWeXIBUTERcJe9cC+JaePG27P5uoSWXVBEzN5p+K44CRYSy9JZP5/t2", + "6LMtb3AzyvnteiOrc/NO2Zc28r0lUZESZKK3nGSwrNYtTWglRUEoybAj3tHfgz7d8RStandBpMNqWsE4", + "mvjVjqeBzmY2Kods3dqE2+tmXax4+5yd6oGKgGPQ8QY/o1r/GnJN71x+6U4Qg/2V30gLLMlMQ9SC37D1", + "RgcC5jspxOruYYzNEgMUP1jxPDd9+kL6W5GBWWyl7uAybgZraN3saUjhdCkqTSjhIgO0qFQqfk0PuOXR", + "H4huTB3e/HpjJe4lGEJKaWVWW5UEnXQ9ztF0TGhqqTdB1KgBL0btfrKt7HTW5ZtLoJnR6oETsXSuAufE", + "wEVS9DBqf9E5ISFyllpwlVKkoBRkiTNR7AXNt7NMRI/gCQFHgOtZiBJkReWtgT2/2AvnOewS9Icr8vCH", + "n9WjLwCvFprmexCLbWLorRU+5w/qQz1t+jGC604ekh2VQDzPNdqlYRA5aBhC4bVwMrh/XYh6u3h7tFyA", + "RM/M70rxfpLbEVAN6u9M77eFtioHoryconPGCrTbccqFglTwTEUHy6nSyT62bBq1tDGzgoATxjgxDjwg", + "lLyhSltvIuMZGkHsdYLzWAHFTDEM8KBAakb+2cui/bFTcw9yValaMFVVWQqpIYutgcN2ZK63sK3nEqtg", + "7Fr61YJUCvaNPISlYHyHLLsSiyCqa6O7c7f3F4emaXPP76KobAHRIGIMkFPfKsBuGOkyAAhTDaIt4TDV", + "oZw6vGY+U1qUpeEWOql43W8ITae29bH+qWnbJy6qm3s7E2Bm1x4mB/mlxayNcdpQo0LjyKSg50b2QIXY", + "uj37MJvDmCjGU0jGKN8cy1PTKjwCew9pVa4lzSDJIKe7/qA/2c/Efh4bAHe8UXyEhsTGs8Q3vaFkHz4w", + "MrTA8VRMeCT4haTmCBrNoyEQ13vPyBng2DHm5OjoQT0UzhXdIj8eLttudWREvA0vhDY7bskBIXYMfQq8", + "A2ioR745JrBz0qhl3Sn+G5SboBYjrj/JDtTQEprxr7WAAWOaCwMOjkuHu3cYcJRrDnKxPWxk6MQOWPbe", + "UalZykpUdX6A3Z1rft0Jov4mkoGmLIeMBB+sFliG/YkNxOiOeTNNcJIRpg9+zwoTWU7OFEo8beDPYYcq", + "9zsb4XcWxAXegSobGdVcT5QTBNTHDRkJPGwCW5rqfGfkNL2BHbkECURVy4JpbUM225quFmUSDhA1cI/M", + "6Lw5NjrO78AU99IpDhUsr78V85lVCcbhO+voBS10OFWgFCKfYDzqISMKwSTHPymF2XXmIoR9GKmnpBaQ", + "jmmjK6++/R+oFppxBeS/RUVSylHjqjTUIo2QKCeg/GhmMBJYPadz8TcYghwKsIokfnn8uLvwx4/dnjNF", + "VnDpw+pNwy46Hj9GM847oXTrcN2BqdAct5PI9YGWf7z3XPBCh6fsdzG7kafs5LvO4LW7wJwppRzhmuXf", + "mgF0TuZ2ytpDGpnmXsdxJxn1g6Fj68Z9P2VFld/Vhq8oyysJw96xjx8/rIqPHz+R72xL79ieeyIP0XHZ", + "pEWs3G1USQytITkz+q0UNDMCQtS2j4vk66QOzlRRcAplwPmHO4eU7zqJfFNhIEtIaWWjkh3XdhA04aFq", + "EZEXO7vbRWF0IRPN41Wu7aUdYnUtRVUSVW+7pQJNNfw+puZm6BiU/YmD2KDm41B4kFET890d3NZ2ICKh", + "lKCQt4bmFWW/ilWYf+OYr9opDUXfAm27/jKgn70f1HMEzxmHpBAcdtGUU8bhR/wY6235+0BnvGmH+naF", + "5xb8HbDa80yhxtviF3c7YGjv6ri4O9j87rgd50OYeYTGNchLQkmaMzS9Ca60rFL9kVNU7oPDFokf8GrM", + 
"sLnnlW8Sty9FzD9uqI+cYuxIrfJH+eIKInz5OwBv9VHVeg1Kd6TEFcBH7loxTirONM5VmP1K7IaVINGJ", + "v7AtC7ojK5qjdeo3kIIsK91mrpggoTTLc+cJMdMQsfrIqSY5GK76I+NnWxzOexI9zXDQl0Ke11hYRM/D", + "GjgoppJ4nMP39iuGoLnlb1w4Gmar2s/Wdm7Gb7Iodqj7NxmY/+fhfx19OE7+hya/HSYv/r+DT5+fXz16", + "3Pvx6dU33/zf9k/Prr559F//GdspD3ssfN9BfvLa6RQnr1FwbIznPdjvzXBaMJ5EiSx0EXdoizw04q8n", + "oEdts4LewEeut9wQ0gXNWUb1zcihy+J6Z9Gejg7VtDaiY0bwa72mOHYLLkMiTKbDGm98jfdDg+KJMujN", + "cbkveF5WFbdbWSnnUcI4cB+iIVbzOhnKFkE4Ipgps6E+vsj9+fSrr2fzJsOl/j6bz9zXTxFKZtk2lseU", + "wTYmZbsDggfjgSIl3SnQce6BsEejUaxTPBy2AKOeqQ0r759TKM2WcQ7no2udtr7lJ9yGvZrzg76hnTM5", + "i9X9w60lQAal3sSSo1uSArZqdhOg468vpbgAPidsAYuutpytQfm4mBzoCpN00b8hpmQL1OfAEpqnigDr", + "4UImqaQx+kHh1nHrq/nMXf7qzuVxN3AMru6ctSPI/60FefD9t2fkwDFM9cCm1NmhgySoiBXKxfm3IjkM", + "N7MlIWxO4Uf+kb+GFePMfD/6yDOq6cGSKpaqg0qBfElzylNYrAU58qkDr6mmH3lP0hqs2hIkbZCyWuYs", + "JeehRNyQp83Ej6qNNF8Lozh2ndp9+dVNFeUvdoLkkumNqHTiUo0TCZdUxpwGqk41xZFtoYCxWefEjW1Z", + "sUtlduPHeR4tS9VNOesvvyxzs/yADJVLqDJbRpQW0ssiRkCx0OD+vhXuYpD00uepVwoU+bWg5QfG9SeS", + "fKwOD58BaeVg/equfEOTuxJa9sobpcR1bZW4cKvXwFZLmpR0PWA00EBL3H2UlwtUsvOcYLdW7pePbcWh", + "mgV4fAxvgIXj2nksuLhT28vXjIkvAT/hFmIbI240HtOb7leQDXbj7epklPV2qdKbxJzt6KqUIXG/M3Up", + "ibURsrwbW7E1hgq6qhtLIOkG0nPIsAAAFKXezVvdfaSEEzQ962DKFsqwuRyYzY2m3SWQqsyoE8U7BiWD", + "YQVa+1jF93AOuzPRJINfJ4+2ndaphg4qUmogXRpiDY+tG6O7+S4cB21dZemzIzFNxpPFUU0Xvs/wQbYi", + "7x0c4hhRtNIOhxBBZQQRlvgHUHCDhZrxbkX6seUZLWNpb75IXQ3P+4lr0ihPLnImXA1mU9rvBWDVHXGp", + "yJIauV24gjE2dTHgYpWiaxiQkEPr+sQEwZZFHgfZd+9Fbzqx6l5ovfsmCrJtnJg1RykFzBdDKqjMdOKl", + "/EzWgWMNqATrwDmELXMUk+rAMst0qGx5OWxhqyHQ4gQMkjcChwejjZFQstlQ5WvZYMkff5YnyQC/Yyru", + "WAGGkyDUJ6jrUxu+Pc/tntOedunKMPjaC77gQqhaTiieYCR8jC6ObYfgKABlkMPaLtw29oTSpAU3G2Tg", + "+PtqlTMOJIlFDVGlRMpsMaLmmnFzgJGPHxNiTcBk8ggxMg7ARsckDkzeivBs8vV1gOQurZn6sdGlGfwN", + "8QwMG0drRB5RGhbO+EDEtucA1IWa1fdXJ+ARhyGMz4lhcxc0N2zOaXzNIL06ACi2drL+nWv80ZA4O2KB", + "txfLtdZkr6KbrCaUmTzQcYFuBOKl2CY2BSsq8S63S0Pv0dBiTAiLHUxbceGBIkuxxXALvFpsKOseWIbh", + "8GAEGv6WKaRX7Dd0m1tgxqYdl6ZiVKiQZJw5ryaXIXFiytQDEswQuTwMiijcCICOsaMpN+qU371Kals8", + "6V/mza02b4oD+ayN2PEfOkLRXRrAX98KU5c9eNeVWKJ2inbUQLviQyBCxojesIm+k6bvClKQAyoFSUuI", + "Ss5jrjuj2wDeOKe+W2C8wLoSlO8eBaEoEtZMaWiM6OZi9l6h+zZPUixnJcRqeHW6lCuzvvdC1NeUrZeC", + "HVvLvPcVYCjnikmlE/RARJdgGn2nUKn+zjSNy0rtYBdb2ZFlcd6A057DLslYXsXp1c37w2sz7duaJapq", + "ifyWcQI03ZAlViKNhsCNTG2jJEcX/MYu+A29s/VOOw2mqZlYGnJpz/EHORcdzjvGDiIEGCOO/q4NonSE", + "QQaZi33uGMhN9nBi5uJizPraO0yZH3tv2IjPnxy6o+xI0bUEBoPRVTB0ExmxhOmgkGc/pXDgDNCyZNm2", + "Ywu1ow5qzPRaBg9fIamDBdxdN9geDAR2z1hWgwTVLobVCPi2JGurFsViEmbO2iWrQoYQTsWULyjeR1Sd", + "9bQPV2dA8x9g97Npi8uZXc1ntzOdxnDtRtyD63f19kbxjK55a0preUKuiXJallJc0DxxBuYh0pTiwpEm", + "Nvf26HtmdXEz5tm3x2/eOfCv5rM0ByqTWlQYXBW2K/8wq7J1twYOiC9YbHQ+L7NbUTLY/LpYUGiUvtyA", + "Kw4bSKO9KnaNwyE4is5IvYpHCO01OTvfiF3iiI8EytpF0pjvrIek7RWhF5Tl3m7moR2I5sHFTSuFGOUK", + "4QC39q4ETrLkTtlN73THT0dDXXt4UjjXSPnawlZoVkTwrgvdiJBojkNSLSjWoLNWkT5z4lWBloRE5SyN", + "21j5EsNuufWdmcYEGw8Io2bEig24YnnFgrFMMzVB0e0AGcwRRaavZziEu6VwT2tUnP2rAsIy4Np8kngq", + "OwcVi/45a3v/OjWyQ38uN7C10DfD30bGCOsvdm88BGJcwAg9dT1wX9cqs19obZEyPwQuiWs4/MMZe1fi", + "iLPe0YejZhu8uGl73MKXMPr8zxCGrZq8/xkOr7y6QpADc0Sf1WAqWUnxG8T1PFSPIxkjvuIkwyiX34BP", + "CDNvrDvN6yDN7IPbPSTdhFaodpDCANXjzgduOSx95y3UlNuttlXuW7FucYIJo0oP7PgNwTiYe5G4Ob1c", + "0lhdQCNkGJiOGwdwy5auBfGdPe6d2Z+5IqALEviS67bMJgOXIJtkrn5hkRsKDHbayaJCIxkg1YYywdz6", + "/3IlIsNU/JJy+1iC6WePkuutwBq/TK9LITGVX8XN/hmkrKB5XHLI0r6JN2NrZp8KqBQEtejdQPaNFUtF", + "rp6/dbE3qDlZkcN58NqF242MXTDFljlgiye2xZIq5OS1IaruYpYHXG8UNn86ofmm4pmETG+URawSpBbq", + 
"UL2pnVdL0JcAnBxiuycvyEN02yl2AY8MFt39PDt68gKNrvaPw9gF4N4EGeMm2SpMfInTMfot7RiGcbtR", + "F9GsZ/uQ0zDjGjlNtuuUs4QtHa/bf5YKyuka4pEixR6YbF/cTTSkdfDCM/sKidJS7AgbSEECTQ1/Gog+", + "N+zPgkFSURRMF865o0Rh6KkpNG8n9cPZJ01cjVAPl/+IPtLSu4g6SuT9Gk3t/RZbNXqy39IC2midE2rr", + "N+SsiV7wlYvJiS8Pg0VT61qpFjdmLrN0FHMwmGFFSsm4RsWi0qvkryTdUElTw/4WQ+Amy6+fRwrFtgsW", + "8usBfu94l6BAXsRRLwfI3ssQri95yAVPCsNRskdNtkdwKgeduXG33ZDvcHzoqUKZGSUZJLeqRW404NS3", + "Ijw+MuAtSbFez7Xo8doru3fKrGScPGhlduin92+clFEIGav51hx3J3FI0JLBBcbuxTfJjHnLvZD5pF24", + "DfRf1vPgRc5ALPNnOaYIvBQR7dQXL64t6S5WPWIdGDqm5oMhg6Ubak7ahWLv3+nnjc9955P54mHFP7rA", + "fuEtRST7FQxsYlDEOrqdWf098H9T8lJsp25q54T4jf03QE0UJRXLs5+brMxOjXBJebqJ+rOWpuMvzWtG", + "9eLs/RQtrbahnEMeHc7Kgr94mTEi1f5TTJ2nYHxi227ZcrvczuIawNtgeqD8hAa9TOdmghCr7YS3OqA6", + "X4uM4DxNHa+Ge/bL3QdFif9VgdKx5CH8YIO60G5p9F1bE5cAz1BbXJDv7WukGyCtKi2opdn8eMh8hVZr", + "UK/KXNBsTsw4Z98evyF2VtvHvslha/KuUUlpr6JjrwpKFE4LD/bPa8RTF6aPMx5LbVatNBZNUpoWZSw5", + "1LQ48w0wAzW04aP6EmJnQV5bzVF5vcROYuhhxWRhNK56NCu7IE2Y/2hN0w2qZC2WOkzy04tJe6pUwQNu", + "9UMsdd0+PHcGbldP2paTnhNh9OZLpuwjlHAB7XzUOjnbmQR8fmp7ebLi3FJKVPYYKx5wE7R74Gyghjfz", + "RyHrIP6aArmtxX7d2tqn2CtaR6hbqLv3cpvNbqwf2PCPC6eUC85SrOITu5rdg5ZTfGATCh51jaz+iLsT", + "Gjlc0fLgdZicw+JgwXDPCB3i+kb44KvZVEsd9k+NLyduqCZr0MpxNsjmvsq9swMyrsDVYcS3TQM+KWTL", + "r4gcMuqqTmqXxjXJCNNiBhS778y3t07tx3jxc8ZRwHdoc6Hp1lKH7+1poxUwTdYClFtPOzdYfTB9Fpgm", + "m8H208K/z4djWLecWbb1QfeHOvYeaecBNm1fmba2lEnzcysC2U56XJZu0uE3EKLygN7yQQRHPIuJd+0E", + "yK3HD0cbIbfRUBK8Tw2hwQU6oqHEe7hHGPV7AJ23ZozQaikKWxAbwhWtYMB4BIw3jEPzemTkgkijVwJu", + "DJ7XgX4qlVRbEXASTzsDmqP3OcbQlHauh9sO1dlgRAmu0c8xvI3NUwYDjKNu0AhulO/qRysNdQfCxCt8", + "Ldchsv8wAUpVTojKMKOg81RBjHEYxu1LIbUvgP4x6MtEtruW1J6c69xEQ0miyypbg05olsXqYr7ErwS/", + "+kJRsIW0qusnliVJsSZKu0hMn9rcRKngqipG5vINbjld8PZHhBrC90f8DmMSynKH/8aKBw7vjAvCuHYY", + "oI+4cI8lXFNubo/Uk3oNTSeKrZPpmMA75fboaKa+GaE3/e+U0nOxbgNyz6UhxrhcuEcx/vatuTjCygm9", + "ipj2aqkLG2DQnfAvtqHaWKfktrkSXmW9Epno7Klr3o0bIIbfdprj5TcQehsUxKD2frXew6EA3HQwXpxq", + "l7mmKRllQYPZQDZ6x+b9IBRxy+lQxI4N2DGfe72nSYY9ORvHHkWoDwXrA/SDjzMlJWXONd4wiz5mXUT6", + "sLlw7NA1G9xdhIvzHrTY/XAxFJNNFOPrHAh+776Gcw4unb1+Dt2u1UcleZXQ/upeI7Xj1VHx0fX3oxNw", + "qi9rBh002p65yut2mU4n/+FnG8NGgGu5+zcw4fY2vfeWUF/ateappgmpq/ZOquLbuhXjzwIN1z9qah4h", + "PZVCsaZSdOy9oImxbmf45E9Qv6k/lg80uYBUY3nwxoEuAa5TzclMFrxF92cdpAHdsQ4JdOWPxmoe9WuC", + "77nQemlJQWqdrae8mF7h57gOk0KmhK/BrYG75+DaCQeTw55XK0g1u9iTBvaPDfAgxWjujRD2WdcgK4zV", + "YbRYReT6JrYGoLEsrVF4gmp+twZnKAnkHHYPFGlRQ7TA89zfKzcpIIEYQO6QGBIRKhaGYK2mzjPMVE0Z", + "iAUf9mO7Q1OKa/BpmCCp8YZzeZI0N26T6DgyZfxtiklzma7XSv/FiNChTLF+bfthYfs1PiWg6mfbfAGK", + "UCUlJ5Hqz66ABSbt1Y4CX8oClP/NZ+jaWXJ2DuHjNeiWuaQy8y2idgZvwkhG7qNeele0ZDVVNojS+cHr", + "IM1+Qk+k8BOG4qa5wHLPQ/HM7bjI8I13jP7A6wDLTyNcK5DukS8U9nKhINHCB3WOwTGGCvce+U2QoAaL", + "LVrgBkugvG9qvGDRWYolT6iLbAkXSCQU1EAng0osw3OOIfuV/e4zWHzR0b3mlJpek72lVHx4LlM9JIZU", + "vyLuttyfGXMTywrj3D4pqmJlWbhBZWj6L6XIqtRe0OHBqK1Pk4sejbCSqFEi7a+yp1/mWALsTZBneA67", + "Ayv6pxvKm1ps7WNtRSi7hiCvv7Pbd2p0iuvX+douYH0ncH5Jw818VgqRJwO2/pN+dZnuGThn6TlkxNwd", + "PrBt4HUN8hBNzLUz93Kz89VUyhI4ZI8WhBxzG0rs/brt8sadyfkDPTb/FmfNKlvwydmUFh95PCYTSzHJ", + "W/I3P8w4V1NgmN8tp7KD7Kldsh2obCPpZeStmcVUpbTvae2+/9EQlYUiJqXcMJF90vnu25UipB88fTCu", + "/YR1LpoAOmnNkygtNc9BtIWXHxur47RHGHyHPeCFSnHwDIPnRg6cLxzl9mONlGApg5TQWv4+PdstsOFL", + "wRYpTIswy7RVh2yERHtfAiOKelXbJuJ47pswsKiF4Fjop2/6UGiuxnrBIeGYcykvaH7/5gusdnKM+HBP", + "IsYXGuq/IZItKtXNQk3e0ElzB7ru3U3N36G55R9g9ijqZ3BDObtj/fyFt85iXTuak1w0jyHhkOQSx7SO", + "iSdfk6ULky8lpEyxTgbRpS9lWqt7WNm7eSlzXL/ct86fhb4FGTsFQZTkbVMWUQu8HxoImyP6hZnKwMmN", + 
"UnmM+npkEcFfjEeF+ep7rovzlsfClpnthOIICXfsuQhiEK7puehn4k9dnrXOm0unUtBf5+TbuoXbyEXd", + "rG2q262P3LHaeVO8ZfGSmKY7uussQrCeLEFQya9PfiUSVvhghCCPH+MEjx/PXdNfn7Y/m+P8+HH8Rc77", + "ctRZHLkx3Lwxivl5KHTThicORAl39qNiebaPMFox382TKxjV/IvL+vgij778Yu2p/aPqCu9fJ0SguwmI", + "mMhaW5MHUwXR3BMCuV23SNg2aiZpJZneYTEKb35jv0Rdit/XFnvn8anTl93dp8U51OVMGvt+pfzt+r2g", + "Od5HRqbGAA2NrzB+u6VFmYM7KN88WP4Fnv31eXb47Mlfln89/OowhedfvTg8pC+e0ycvnj2Bp3/96vkh", + "PFl9/WL5NHv6/Ony+dPnX3/1In32/Mny+dcv/vLA8CEDsgV05lMfZ/8bX0ZKjt+dJGcG2AYntGT146uG", + "jP3zDjTFkwgFZfnsyP/0//sTtkhF0Qzvf525zKrZRutSHR0cXF5eLsIuB2s06CVaVOnmwM/Tf/Ty3Ukd", + "HW9dwbijNvDZkAJuqiOFY/z2/tvTM3L87mTREMzsaHa4OFw8wcfMSuC0ZLOj2TP8CU/PBvf9wBHb7Ojz", + "1Xx2sAGao//L/FGAliz1n9QlXa9BLtw7F+ani6cHXpQ4+OyMmVdj3w7CkrEHn1s232xPTywpefDZV0oY", + "b90qReBs3UGHiVCMNTtYYgLW1KaggsbDS0EFQx18RhF58PcDl5US/4iqij0DB94xEm/ZwtJnvTWwdnq4", + "15sPPuN/kCYDsGwMWB9cm61xYB+U6/+842n0x/5AvcLma4gmmmDKBx17YRhPgT1AJxnyNd17MBmrpFqz", + "Jx6Op4eHf4y3k59fE9BRm0oraisCzEuaEZ/zg3M/ub+5Tzh6WA2vI5aXIwTP7w+CdknaH2BH3gpNvkOl", + "42o+++o+d+KEGxGI5gRbBhUn+kfkJ37OxSX3LY0QUBUFlbvJx0fTtUKjn2QX1IlgQZXy2Se0Mts8sPZR", + "O86yHtFbYQiUfimy3QjGCrUuXYx2g7RGFmTcLKGvTPaffes9cHwOO2J9cN7WygW+Z99IaVpWcHVLnvCH", + "fYv5T57yJ0+Rdvpn9zf9KcgLlgI5g6IUkkqW78hPvM6wuzGPO86yaHxT++jv5XFGz05FBmvgiWNgyVJk", + "O19FrDXBOVi1ryfIHHxulwK2IuAsgxx0NHbD/F4/0tZfxHJHTl73JBzbrct5X+6waVBi9+jDZ6s3GaWg", + "UWu6IPY4Y1jdtcubPsW55hjZm4WshSYWC5lb1J+M6E9GdCvhZvLhmSLfRLUPm79Oe3f23Keix4qQUN0H", + "ZYqO8kWP751sfF//iek7Nk4MMhJ8sAHNXTT/ySL+ZBG3YxHfQ+Qw4ql1TCNCdNfTh6YyDAyRyboPbqDr", + "wDevciqJgqlmjmMc0Rk37oNr3LdSF8WV1ekob94kimzg3ep5f7K8P1neH4flHe9nNG3B5Naa0TnsClrW", + "+pDaVDoTl4EnAWGxkT19O3D9BGDr74NLynSyEtJlHWBB2n5nDTQ/cPU0Or82Kay9L5iXG/wY2Mrjvx7U", + "9b6jH7tOiNhXZ4QfaOSrIfnPjRMydOoha6/deR8+GbaM1SQd1298VEcHBxjJuxFKH8yu5p87/qvw46ea", + "BD7Xd4UjhatPV/8vAAD//xVsHQFjwgAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go index bf14f4ef1a..0d5e4ac890 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go @@ -177,188 +177,174 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9/XPcNpLov4I3d1W2dUNJ/srFqkrdk+0kq4vtuCwlu3uWX4Ihe2aw4gAMAEoz8dP/", - "/goNgARJkMORFHuzzz/ZGuKj0Wg0Gv35cZKKVSE4cK0mRx8nBZV0BRok/kXTVJRcJywzf2WgUskKzQSf", - "HPlvRGnJ+GIynTDza0H1cjKdcLqCuo3pP51I+K1kErLJkZYlTCcqXcKKmoH1pjCtq5HWyUIkbohjO8TJ", - "y8n1wAeaZRKU6kL5I883hPE0LzMgWlKuaGo+KXLF9JLoJVPEdSaME8GBiDnRy0ZjMmeQZ2rfL/K3EuQm", - "WKWbvH9J1zWIiRQ5dOF8IVYzxsFDBRVQ1YYQLUgGc2y0pJqYGQysvqEWRAGV6ZLMhdwCqgUihBd4uZoc", - "vZ8o4BlI3K0U2CX+dy4BfodEU7kAPfkwjS1urkEmmq0iSztx2Jegylwrgm1xjQt2CZyYXvvkdak0mQGh", - "nLz77gV5/PjxM7OQFdUaMkdkvauqZw/XZLtPjiYZ1eA/d2mN5gshKc+Sqv27717g/KdugWNbUaUgfliO", - "zRdy8rJvAb5jhIQY17DAfWhQv+kRORT1zzOYCwkj98Q2vtNNCef/rLuSUp0uC8G4juwLwa/Efo7ysKD7", - "EA+rAGi0LwympBn0/WHy7MPHh9OHh9f/9v44+R/359PH1yOX/6IadwsGog3TUkrg6SZZSKB4WpaUd/Hx", - "ztGDWooyz8iSXuLm0xWyeteXmL6WdV7SvDR0wlIpjvOFUIQ6MspgTstcEz8xKXlu2JQZzVE7YYoUUlyy", - "DLKp4b5XS5YuSUqVHQLbkSuW54YGSwVZH63FVzdwmK5DlBi4boQPXNA/LzLqdW3BBKyRGyRpLhQkWmy5", - "nvyNQ3lGwgulvqvUbpcVOVsCwcnNB3vZIu64oek83xCN+5oRqggl/mqaEjYnG1GSK9ycnF1gf7cag7UV", - 
"MUjDzWnco+bw9qGvg4wI8mZC5EA5Is+fuy7K+JwtSgmKXC1BL92dJ0EVgisgYvYPSLXZ9v8+/fENEZK8", - "BqXoAt7S9IIAT0XWv8du0tgN/g8lzIav1KKg6UX8us7ZikVAfk3XbFWuCC9XM5Bmv/z9oAWRoEvJ+wCy", - "I26hsxVddyc9kyVPcXPraRuCmiElpoqcbvbJyZys6Pqbw6kDRxGa56QAnjG+IHrNe4U0M/d28BIpSp6N", - "kGG02bDg1lQFpGzOICPVKAOQuGm2wcP4bvDUklUAjh+kF5xqli3gcFhHaMYcXfOFFHQBAcnsk58c58Kv", - "WlwArxgcmW3wUyHhkolSVZ16YMSph8VrLjQkhYQ5i9DYqUOH4R62jWOvKyfgpIJryjhkhvMi0EKD5US9", - "MAUTDj9mulf0jCr46knfBV5/Hbn7c9He9cEdH7Xb2CixRzJyL5qv7sDGxaZG/xGPv3BuxRaJ/bmzkWxx", - "Zq6SOcvxmvmH2T+PhlIhE2ggwl88ii041aWEo3O+Z/4iCTnVlGdUZuaXlf3pdZlrdsoW5qfc/vRKLFh6", - "yhY9yKxgjb6msNvK/mPGi7NjvY4+Gl4JcVEW4YLSxqt0tiEnL/s22Y65K2EeV0/Z8FVxtvYvjV176HW1", - "kT1A9uKuoKbhBWwkGGhpOsd/1nOkJzqXv5t/iiI3vXUxj6HW0LG7b1E34HQGx0WRs5QaJL5zn81XwwTA", - "vhJo3eIAL9SjjwGIhRQFSM3soLQoklykNE+UphpH+ncJ88nR5N8OauXKge2uDoLJX5lep9jJyKNWxklo", - "Uewwxlsj16gBZmEYNH5CNmHZHkpEjNtNNKTEDAvO4ZJyvV+/Rxr8oDrA791MNb6tKGPx3Xpf9SKc2IYz", - "UFa8tQ3vKRKgniBaCaIVpc1FLmbVD/ePi6LGIH4/LgqLDxQNgaHUBWumtHqAy6f1SQrnOXm5T74Px0Y5", - "W/B8Yy4HK2qYu2Hubi13i1WKI7eGesR7iuB2Crlvtsajwcjwd0Fx+GZYitxIPVtpxTT+i2sbkpn5fVTn", - "PweJhbjtJy58RTnM2QcM/hK8XO63KKdLOE6Xs0+O231vRjZmlDjB3IhWBvfTjjuAxwqFV5IWFkD3xd6l", - "jOMLzDaysN6Sm45kdFGYgzMc0BpCdeOztvU8RCFBUmjB8DwX6cVfqFrewZmf+bG6xw+nIUugGUiypGq5", - "P4lJGeHxqkcbc8RMQ3y9k1kw1X61xLta3palZVTTYGkO3rhYYlGP/ZDpgYy8XX7E/9CcmM/mbBvWb4fd", - "J2fIwJQ9zs6CkJmnvH0g2JlMA1QxCLKyr3diXt07Qfminjy+T6P26FurMHA75BaBOyTWd34Mnot1DIbn", - "Yt05AmIN6i7ow4yDYqSGlRoB30sHmcD9d+ijUtJNF8k49hgkmwUa0VXhaeDhjW9mqTWvxzMhb8Z9WmyF", - "k1qfTKgZNWC+0xaSsGlZJI4UIzop26A1UG3CG2Ya7eFjGGtg4VTTPwALyox6F1hoDnTXWBCrguVwB6S/", - "jDL9GVXw+BE5/cvx04ePfnn09CtDkoUUC0lXZLbRoMh99zYjSm9yeNBdGb6OylzHR//qiddCNseNjaNE", - "KVNY0aI7lNVuWhHINiOmXRdrTTTjqisAxxzOMzCc3KKdWMW9Ae0lU0bCWs3uZDP6EJbVs2TEQZLBVmLa", - "dXn1NJtwiXIjy7t4yoKUQkb0a3jEtEhFnlyCVExETCVvXQviWnjxtmj/bqElV1QRMzeqfkuOAkWEsvSa", - "j+f7duizNa9xM8j57Xojq3PzjtmXJvK9JlGRAmSi15xkMCsXjZfQXIoVoSTDjnhHfw/6dMNT1KrdBZH2", - "P9NWjKOKX214GrzZzEblkC0am3D7t1kbK14/Z6e6pyLgGHS8ws/4rH8JuaZ3Lr+0J4jB/sJvpAWWZKYh", - "voJfscVSBwLmWynE/O5hjM0SAxQ/WPE8N326QvobkYFZbKnu4DKuB6tp3expSOF0JkpNKOEiA9SolCp+", - "TfeY5dEeiGZMHd78emkl7hkYQkppaVZbFgSNdB3OUXdMaGqpN0HUqB4rRmV+sq3sdNbkm0ugmXnVAydi", - "5kwFzoiBi6RoYdT+onNCQuQsNeAqpEhBKcgSp6LYCppvZ5mIHsATAo4AV7MQJcicylsDe3G5Fc4L2CRo", - "D1fk/g8/qwefAV4tNM23IBbbxNBbPficPagL9bjphwiuPXlIdlQC8TzXvC4Ng8hBQx8Kd8JJ7/61Iers", - "4u3RcgkSLTN/KMX7SW5HQBWofzC93xbasujx8nIPnTO2Qr0dp1woSAXPVHSwnCqdbGPLplHjNWZWEHDC", - "GCfGgXuEkldUaWtNZDxDJYi9TnAeK6CYKfoB7hVIzcg/e1m0O3Zq7kGuSlUJpqosCiE1ZLE1cFgPzPUG", - "1tVcYh6MXUm/WpBSwbaR+7AUjO+QZVdiEUR1pXR35vbu4lA1be75TRSVDSBqRAwBcupbBdgNPV16AGGq", - "RrQlHKZalFO510wnSouiMNxCJyWv+vWh6dS2PtY/1W27xEV1fW9nAszs2sPkIL+ymLU+TktqntA4MlnR", - "CyN74IPYmj27MJvDmCjGU0iGKN8cy1PTKjwCWw9pWSwkzSDJIKeb7qA/2c/Efh4aAHe8fvgIDYn1Z4lv", - "ek3J3n1gYGiB46mY8EjwC0nNETQvj5pAXO8tI2eAY8eYk6Oje9VQOFd0i/x4uGy71ZER8Ta8FNrsuCUH", - "hNgx9DHw9qChGvnmmMDOSf0sa0/xd1BugkqM2H2SDai+JdTj77SAHmWacwMOjkuLu7cYcJRr9nKxLWyk", - "78T2aPbeUqlZygp86vwAmzt/+bUniNqbSAaashwyEnywr8Ai7E+sI0Z7zJu9BEcpYbrgd7QwkeXkTKHE", - "0wT+Ajb45H5rPfzOAr/AO3jKRkY11xPlBAH1fkNGAg+bwJqmOt8YOU0vYUOuQAJR5WzFtLYum82XrhZF", - "Eg4QVXAPzOisOdY7zu/AGPPSKQ4VLK+7FdOJfRIMw3fWehc00OGeAoUQ+QjlUQcZUQhGGf5JIcyuM+ch", - "7N1IPSU1gHRMG0151e1/TzXQjCsgfxclSSnHF1epoRJphEQ5AeVHM4ORwKo5nYm/xhDksAL7kMQve3vt", - "he/tuT1niszhyrvVm4ZtdOztoRrnrVC6cbjuQFVojttJ5PpAzT/ee855ocVTtpuY3chjdvJta/DKXGDO", - "lFKOcM3yb80AWidzPWbtIY2MM6/juKOU+sHQsXXjvp+yVZnf1YbPKctLCf3WsfPz9/PV+fkH8p1t6Q3b", - 
"U0/kITqu6rCIubuNSomuNSRn5n0rBc2MgBDV7eMi+SKpnDNVFJyVMuD81Z1DyjetQL6xMJAZpLS0XsmO", - "azsIavdQtR+RF1u720ZhdCEj1eNlru2lHWJ1IUVZEFVtu6UCTTX8MarmeugYlN2JA9+g+mOfe5B5Juab", - "O7it7UBEQiFBIW8N1SvKfhXzMP7GMV+1URpWXQ207fpLz/vsXe87R/CccUhWgsMmGnLKOLzGj7Helr/3", - "dMabtq9vW3huwN8CqznPGGq8LX5xtwOG9rbyi7uDzW+P2zI+hJFHqFyDvCCUpDlD1ZvgSssy1eec4uM+", - "OGwR/wH/jOlX97zwTeL6pYj6xw11zin6jlRP/ihfnEOEL38H4LU+qlwsQOmWlDgHOOeuFeOk5EzjXCuz", - "X4ndsAIkGvH3bcsV3ZA5zVE79TtIQWalbjJXDJBQmuW5s4SYaYiYn3OqSQ6Gq75m/GyNw3lLoqcZDvpK", - "yIsKC/vR87AADoqpJO7n8L39ii5obvlL546G0ar2s9Wdm/HrKIoNvv3rCMz/c/+/jt4fJ/9Dk98Pk2f/", - "cfDh45PrB3udHx9df/PN/23+9Pj6mwf/9e+xnfKwx9z3HeQnL92b4uQlCo618rwD+ydTnK4YT6JEFpqI", - "W7RF7hvx1xPQg6ZaQS/hnOs1N4R0SXOWUX0zcmizuM5ZtKejRTWNjWipEfxadxTHbsFlSITJtFjjja/x", - "rmtQPFAGrTku9gXPy7zkditL5SxK6AfuXTTEfFoFQ9kkCEcEI2WW1PsXuT8fPf1qMq0jXKrvk+nEff0Q", - "oWSWrWNxTBmsY1K2OyB4MO4pUtCNAh3nHgh71BvFGsXDYVdgnmdqyYpPzymUZrM4h/Pete61vuYn3Lq9", - "mvODtqGNUzmL+aeHW0uADAq9jAVHNyQFbFXvJkDLXl9IcQl8Stg+7Ldfy9kClPeLyYHOMUgX7RtiTLRA", - "dQ4soXmqCLAeLmTUkzRGPyjcOm59PZ24y1/duTzuBo7B1Z6zMgT5v7Ug977/9owcOIap7tmQOjt0EAQV", - "0UI5P/+GJ4fhZjYlhI0pPOfn/CXMGWfm+9E5z6imBzOqWKoOSgXyOc0pT2F/IciRDx14STU95x1Jqzdr", - "SxC0QYpylrOUXIQScU2eNhI/+myk+UKYh2PbqN2VX91UUf5iJ0iumF6KUicu1DiRcEVlzGigqlBTHNkm", - "ChiadUrc2JYVu1BmN36c59GiUO2Qs+7yiyI3yw/IULmAKrNlRGkhvSxiBBQLDe7vG+EuBkmvfJx6qUCR", - "X1e0eM+4/kCS8/Lw8DGQRgzWr+7KNzS5KaChr7xRSFxbV4kLt+8aWGtJk4IuepQGGmiBu4/y8gof2XlO", - "sFsj9sv7tuJQ9QI8Pvo3wMKxcxwLLu7U9vI5Y+JLwE+4hdjGiBu1xfSm+xVEg914u1oRZZ1dKvUyMWc7", - "uiplSNzvTJVKYmGELG/GVmyBroIu68YMSLqE9AIyTAAAq0Jvpo3u3lPCCZqedTBlE2XYWA6M5kbV7gxI", - "WWTUieIthZLBsAKtva/iO7iAzZmog8F3iaNthnWqvoOKlBpIl4ZYw2PrxmhvvnPHQV1XUfjoSAyT8WRx", - "VNGF79N/kK3IeweHOEYUjbDDPkRQGUGEJf4eFNxgoWa8W5F+bHnmlTGzN18kr4bn/cQ1qR9PznMmXA1G", - "U9rvK8CsO+JKkRk1crtwCWNs6GLAxUpFF9AjIYfa9ZEBgg2NPA6y7d6L3nRi3r7QOvdNFGTbODFrjlIK", - "mC+GVPAx0/KX8jNZA45VoBLMA+cQNstRTKocyyzTobJh5bCJrfpAixMwSF4LHB6MJkZCyWZJlc9lgyl/", - "/FkeJQP8gaG4QwkYTgJXnyCvT6X49jy3fU47r0uXhsHnXvAJF8Kn5YjkCUbCR+/i2HYIjgJQBjks7MJt", - "Y08odVhwvUEGjh/n85xxIEnMa4gqJVJmkxHV14ybA4x8vEeIVQGT0SPEyDgAGw2TODB5I8KzyRe7AMld", - "WDP1Y6NJM/gb4hEY1o/WiDyiMCyc8R6Pbc8BqHM1q+6vlsMjDkMYnxLD5i5pbtice/HVg3TyAKDY2or6", - "d6bxB33i7IAG3l4sO63JXkU3WU0oM3mg4wLdAMQzsU5sCFZU4p2tZ4beo67FGBAWO5g248I9RWZije4W", - "eLVYV9YtsPTD4cEIXvhrppBesV/fbW6BGZp2WJqKUaFCknHqvIpc+sSJMVP3SDB95HI/SKJwIwBayo46", - "3ah7/G59pDbFk+5lXt9q0zo5kI/aiB3/viMU3aUe/HW1MFXaA6dCeAepkFm/nsIQKtNV/tauesFlnzV8", - "Y3RihIFcssfN14Z/QnR3rscroAFPPc8AIl7amKMOJN+uC2GkWxuTZBNUOKRYOVGCDbVUVmelGF/kUHlu", - "RtEUW7D3SfIYt0uuE075AcfJzrHN7XnkD8FSFHE4dnmpvHP4GYCi55TXcKAcfktIXJKKQViu++njbVu0", - "jx6UpntNMzVK8NaK3Q6GfLrWzK7NVEEO+HpOGq+N5CJm4z4/f68ARbNT3y3Q8mECFso3DwKfLQkLpjTU", - "1iYjwXpMf2o9PsW8b0LM+1enCzk363snRCXP2cRC2LGxzE++AvR5njOpdIKmuugSTKPvFGqfvjNN44+K", - "pleYTYHKsvglitNewCbJWF7G6dXN+8NLM+2bSnZQ5QwFE8YJ0HRJZpiyN+orOjC1dSceXPAru+BX9M7W", - "O+40mKZmYmnIpTnHn+RctG66IXYQIcAYcXR3rRelAxdoEOLb5Y7BA8MeTrxO94fMFJ3DlPmxt/pX+UDj", - "PmHOjjSwFnQN6nXOjTjkWD8yy9TrbP3RYFwudNJQfkTQVSl4lKYXNqCsucF8UelU4m5T9l09amjXdsuA", - "fPx4fPtwTghOcriEfLsTNEWMewUOekbYEdD1hmA4gffx2C7Vd3egRli10jaMUWrpSDdDhtv6aeTy59Vv", - "ayRYgzsX+T7aemckNE9vNX13TXdFkWSQQzTO7K9BIBktCswW4RvHAnrMYIxnsI6DYz9NYzn1u8r7knFt", - "86/eVWrH1jjjlx0mQByDgsKm6ts9fWT/GzPYpRDN/YvqIcrKODDIiHHw6mUXVCNpU1/PNU6LgmXrlt3T", - "jtqrHb8TjOEF5QbbgoGANmIRjBJUM/Flrcyz6dcbeaf2R2HmrJmeMpRpwqmY8sVDuoiqIpy34eoMaP4D", - "bH42bXE5k+vp5HZm0hiu3YhbcP222t4ontENz5rNGl4PO6KcFoUUlzRPnDG5jzSluHSkic297fkTS2tx", - 
"rnf27fGrtw786+kkzYHKpHrt9K4K2xV/mlXZHJs9B8QXJ1hSXenn7Gs42PwqMWBogL5agksEHzyoOxlr", - "a+eC4Cg6g/Q87g281bzs/CDsEgf8IaCo3CFqU531hmh6QNBLynJvI/PQ9nju4uLG3Y1RrhAOcGtPivAu", - "ulN20znd8dNRU9cWnhTONZCqfmWrMSgieNtdzryC0fSGpLqimG/WWkC6zImXK7QaJCpnadyeymcYYsOt", - "n4xpTLBxz3vajFiyHrcrXrJgLNNMjVBqt4AM5ogi0+cu7sPdTLgyWiVnv5VAWAZcm08ST2XroKL+1FnW", - "u9dpXKp0A1trfD38bWSMMNdy+8ZzMteQgBF65XTAfVlp/fxCK+uT+SFwP9jBuS+csXMlDjjmOfpw1GwD", - "FZZN75rREvrWklte/+aSPvfMES2hxVQyl+J3iKuqUMMXiQ712aUZerT+DnxESFltyakrgdWz9253n3QT", - "WpyaDok9VI87H7jgYJpbb42m3G61rWjT8GuPE0wYQXJgx68JxsHcibrJ6dWMxnIAGyHDwBSYXxp2cy2I", - "7+xx72w0zCX83ieB31jVltnEHwXIOnC7m0TshgKDnXa0qFBLBki1oUwwtb4+uRKRYUp+RbktjITWCDxK", - "rrd54HuF0JWQmLZHxU38GaRsFVUunZ+/z9KuOTdjC2bLApUKgrozbiBbT81SkavdY93patSczMnhNKhs", - "5XYjY5dMsVkO2OKhbTGjCqxSxXtu+C5mecD1UmHzRyOaL0ueScj0UlnEKkEqoQ6fN5Wjygz0FQAnh9ju", - "4TNyH110FLuEBwaL7n6eHD18hgZW+8dh7AJw9b+GuEk2D4Nc43SMPkp2DMO43aj7UW2ALdrYz7gGTpPt", - "OuYsYUvH67afpRXldAFxr9DVFphsX9xNtAW08MIzW3FMaSk2hPWEG4Omhj/1RJoZ9mfBIKlYrZheOUcO", - "JVaGnuqiMnZSP5wtX+bygXu4/Ef0hyq8O0jrEflp7T72foutGr3W3tAVNNE6JdTmaspZ7anoqxSQE58K", - "DhOkV3nRLW7MXGbpKOag4+KcFJJxjQ+LUs+Tr0m6pJKmhv3t94GbzL56EkkK30xOzHcD/JPjXYICeRlH", - "vewhey9DuL7kPhc8WRmOkj2oIzuDU9nruBV30enzExoeeqxQZkZJesmtbJAbDTj1rQiPDwx4S1Ks1rMT", - "Pe68sk9OmaWMkwctzQ799O6VkzJWQsbyu9bH3UkcErRkcIl++vFNMmPeci9kPmoXbgP95zWeepEzEMv8", - "We59COxi8QneBmjzCT0Tb2LtaVp6GjJX1OyDL5xxFhBb83Sb3eM21ZAanXeBynPocdD1KBEaAbAtjO32", - "Ar69iiEw+TR2qA9HzaXFKPO5iCzZl9CobDwuYjKit+q7QMwHw6BmbqgpaZYr+PQeNd4s0vXsMF88rPhH", - "G9jPzGwQyX4FPZsYlFKJbmdWfQ+cyyh5LtZjN7XFu/3G/hOgJoqSkuXZz3VukFalGkl5uow6i8xMx1/q", - "mprV4uxhjib4XVLOrTdCVzeBr5Rf/Gsm8t76hxg7z4rxkW3bxXPscluLqwFvgumB8hMa9DKdmwlCrDbT", - "LlRhfflCZATnqbPJ1vd6t+hSUBrjtxKUjt2L+MGGFqBGfW6o2FaoAJ6hHmOffG9r4i+BNHIFov7AZmmC", - "zNcJsKaessgFzabEjHP27fErYme1fWxlOFsZYmGv3cYq+v1zd3G0HfKtvYuIPrNqpTF1p9J0VcRSlJgW", - "Z74B5kEJrUv4sA6xs09eWp2G8i9mO4mhhzmTK8hINZ2TqpEmzH+0pukSlQUNltpP8uNLmniqVEEZ4aoc", - "YJU9Gs+dgdtVNbFFTaZEGMnhiilbCh0uoZkVpUoR5MQAnyWluTxZcm4pJSoVD6WwugnaPXDWC9IboKKQ", - "tRC/o/Ti3NR3rPByir2i2Szb5WI69YNtjo2qzNtrXwGacsFZirkkY1ezK6s+xjo7Iu1mPDLA+duoSeRw", - "RYvUVMEaDou9ZWs8I3SI65qHgq9mUy112D811u9eUk0WoJXjbJBNfa0lp6FmXIHLBo4V9gM+KWTD4o0c", - "MupEUcvJO5IRBmf3qBy+M9/eOIUURi1eMI5PTx8jYQMkrQ4Zqz5r815lmiwERlC4QxGu6b3ps4/JWjJY", - "f9j3VaJxDGswNsu23hHdoY69r4TzTTBtX5i2NqFe/XMjDs5OelwUbtL+SlxReUCveS+CIzbvytErQG41", - "fjjaALkNOjnhfWoIDS7RRQIK4kJjeqpStYJgjNBqKQpbEOsfHc2jFXUTfcU41DXMIxdEGr0ScGPwvPb0", - "U6mk2oqAo3jaGdAc/SJiDE1pZxS77VCtDXb+pEU68XP0b2NdUKuHcVQNasGN8k1VOt1QdyBMvKB55SQU", - "KY+FUpUTolxwTbNgVoxxGMbtE3I2L4DuMejKRLa7ltSenF1uor5UJbMyW4BOaJbF9AnP8SvBrz5dKawh", - "Lass3kVBUszM10xV2KU2N1EquCpXA3P5BrecLqhAF6GGsAqe32F0vJ5t8N9YCuv+nXHuQTv72HtfoKwK", - "n9tFbm6O1JF6DU0nii2S8ZjAO+X26Kinvhmh1/3vlNJzsWgC8okTlA1xuXCPYvztW3NxhPm7OnnZ7dVS", - "pddCd1Dh6wbjs7FKDNPkSj7qtDNnkHl5WAHRX2F0ipdfT1xLoOul9n61du2+6Ja0NxiLapc/QVMyyIJ6", - "Y9KtX5mNPkco4jr9Pl8y60pmPnd6j5MMO3I2jj2IUO+k2AXoB+8BTQrKnNNGzSy6mHXhXv3qwqFDV29w", - "exEuiKpXY/fDZV/Ak48DtpEdrZqMF+CSKhUSLpkovTuE95fzT0L7q6uJH8QV966/6zeDU31eNWiv0vbM", - "1f+xy3Rv8h9+tt6VBLiWm38CFW5n0zsVLWM5ixv1LJ1wFdU36bF35cuqKObFZbIS2VDA9A8/k5fetjTq", - "3vGEHEu3JDJXRS4aLP7KlYDwzYz0OXra167TcVEMT90TId6d3Dbcdfq+VFPmfA5p3d7682vrgIYqhMhb", - "JQhn5rDWPcWf2tGwV0BgXQDmug0Cm/uzZ4wlKBfkiK/VJAeqYADDYdY213Ykks/Wr0z7ccH28Uqs/Sln", - "6zSzyDwLoVhdnCdWonWky/EZVlkNLIbdsby/3yWkGisy1X5MEmCXBLpmsqD895fUsz2Kksoz29P/QJrZ", - "6STkLdFARXe8aJ0iB61qaHKNpKq3bSLM3nVm5pCUMPVDmB/mNFfxqmi9zq6tzCeBw0ok0XN8YSfZiGzf", - 
"bjnTwAeCZcOIjEcCWOfvf01kWr/2u0Vnp2bX8Kuik3ghSB5iSyvt7+BAUnlRo2SI+7UA7irDz2Oo2R4V", - "NZ9DqtnllkQXf10CD5IoTL0mGGGZB3kvWBVlgwlFd7dz1AAN5aEYhCdI7H9rcPpiRC9gc0+RBjVEaz1N", - "vXB/k1ySiAG8tYzgUQgV81K0pivnOMZURRmIBe8VbLtDnZW7t0psIOfccC5Pkk2JZ2DKeJnKUXOZrjtl", - "AsOAkb5cGN0yd/0aj5dYVVBVFdx9LspQL0hOIoWgXC5LTEtSWWt9VktQ/jefg8jOkrMLCOvYom0cUyi4", - "FlFlr9cjJwNyUif6O1q9CnNn+ZlZHcPRjfeN5IBG76c0F1j5qS/cqRk2Ubl53VPWORTFFKxEhXDNQbp6", - "33gz5EJBooV3rRuCYwgV1gP2RkhQvXUXLHC92VDf1elesf6MTZZBneNruEAiYUUNdDJIyto/5xCyX9jv", - "PsDV5+TaqtOu6DXZmlXVR+8w1UFiSPVz4m7L7YGzN1FvM85BJt7W3fYp5AaVof21kCIrU5cIJjgYlQlg", - "dMKyAVYS1Qyn3VV2lHw5ZgN/FaQhuIDNgdW/pEvKF0F6tRB6K9rbNQSZy1q7faea/7iSM1/YBSzuBM7P", - "qT2fTgoh8qTH4HrSTTTbPgMXLL0wYnZZ+733FNok99HOV3nUXC03PrFqUQCH7ME+IcfcRhp555pmpaPW", - "5PyeHpp/jbNmpc397BT7++c8HrKBSX3kLfmbH2aYqykwzO+WU9lBtqQxXfckuZX0KlJ2tutPN9rdpV0K", - "tCYqC0VMSrlhqq5R57ur3I+QflAFcfj1E2byq72YpbURobRUV4ZsCi+va9PPuHqMvsMW8EJlTVCR0XMj", - "B85ndjV+XSElWEovJTSWv03/4xZY86VgixRGTZpl2gTE1k2tuS+Bck+9qHRmcTx3VWuYtk9wzPnbVckp", - "tBnaNKwB4ZhzKS9p/unVapjP8RjxAdm7foEnfP+GSLaoVDfz93tFR80dvHXvbmr+FtWAfwWzR1FjrxvK", - "GX+qSpjeRIYp7mlOclHXRcYhyRWOaa3DD78iMxdFV0hImWKtAOMrX9Wkeu5hkS/nY7nWW96X29b5s9C3", - "IGP3QBAFeVNXSNAC74cawvqIfmam0nNyo1Qeo74OWUTwF+NRYTqbLdfFRcNsbCvOtPwhhYQ7Nh8HjmA7", - "mo+7iXrGLs+aSM2lUyrornP0bd3AbeSirtc21vehi9yhNPpjXBbi1TFMd/SZsAjB0jIEQSW/PvyVSJhj", - "7UhB9vZwgr29qWv666PmZ3Oc9/aiYtwn85awOHJjuHmjFOOMaZ1QGFgXTPYk/XvnmLu7sNF8R7ADxLNz", - "5hCtBoNTe7/RT5wKGmXurQp+uzTXeBs/C1Dml1xNFMP9z32xC9Y/vydMpnUWSpZn2w5lI+iprnyLYT2/", - "uIDcz1J79xery+6ySVf/cBcfufYBQMRE1tqYPJgqCGcaEcnkukXilpC40lIyvcE8YV71yX6J+tR8X1lL", - "nBW4yizj5A4tLqDKNFfbVkrlJZvvBc1RFjDvGfRQ1ELk++TbNV0VOTgm9c292X/C46+fZIePH/7n7OvD", - "p4cpPHn67PCQPntCHz57/BAeff30ySE8nH/1bPYoe/Tk0ezJoydfPX2WPn7ycPbkq2f/ec/cAQZkC+jE", - "Z6WY/A0LVCfHb0+SMwNsjRNasB9gY2thGjL2VTZpilwQVpTlkyP/0//23G0/Fat6eP/rxAW9T5ZaF+ro", - "4ODq6mo/7HKwQGVqokWZLg/8PJ0ynMdvT6rwMOsLhTtqI38MKeCmOlI4xm/vvj09I8dvT/ZrgpkcTQ73", - "D/cfYi7jAjgt2ORo8hh/wtOzxH0/8EmEjz5eTycHS6A52sTNHyvQkqX+k7qiiwXIfVdu1Px0+ejAi3EH", - "H50i+Xro20FYuefgY0Pfnm3piY4uBx99Eqvh1o0sUc7OYJa7iBl0vwd3TzjXj4hdQqF6044+JUpIp20r", - "JBPmJE1tdHsqgSLdC4nhWVqWPLUKbzsFcPzv6+O/oaXj9fHfyDfkcOqi9hQ+82LTW11SRQInmQW7qzJV", - "zzfHdcmSOsXt0fvIkyRaBhWPkKGPgMKrEWsOhtbqsHh0xY8Njz1Mnn34+PTr69id1C2/75EUGDNC1Gvh", - "Ez0h0lZ0/U0fytb2dOAafitBbupFrOh6EgLctX9FvNrmbFFK1CDWMfqVv66rhskU+e/TH98QIYnTKbyl", - "6UXowBcDx91nIUS+OJkLB1upRdGMnahw+AEzvyAUeIofHR7uVCC45VzUpSJXVp56/7quBk8RWNNU5xtC", - "8f7ZWFOTKmd1lqamKKBFkYQDRF/JAzP6+kYxx/ZdlYiR4D6sIzQMXztLewMdzjsK66ltN692kBGF4EPs", - "9g631tPIl93919jdrjBACmHONMPg0fo+ybtuiioo3uHA7bGP7JO/ixJFNlvHEmKpJnEGtCX5OZ2BN/Bv", - "y7GKaIWdvb32wvf23J4zReZwhRyUcmzYRsfeHhY+f7IjKxtUzTciMEadnV2G62zWa7quMvxRrGDBsczi", - "JZDgsfnk8OGfdoUnHL2LjKxJrCx9PZ08/RNv2Qk3UgvNCba0q3n8p13NKchLlgI5g1UhJJUs35CfeBWg", - "H6SL7LK/n/gFF1fcI8I8E8vVisqNk5BpxXNKHqRMGOQ/HcNsLUUjF6ULhTY8lD8njXLCfDH5cO0F/JGv", - "hqFmBzPMGDS2Kaigcf/TA40x6uAjmhN6fz9waVTiH9GsY9+sB96JLN6y8ar5qNcG1laPlOp0WRYHH/E/", - "+IYMwLJBi11wbdjGASaP23R/3vA0+mN3oHY96NjPBx+bZZYaCFXLUmfiKuiLBgtrbevOV1Xobfx9cEWZ", - "NhKC8wTEHLLdzhpofuASDbR+rWP7Ol8wYDH4sSVTFMLmgmm+1d7Rq1BCsdICKP1cZJsBbrNOZozjEQxZ", - "RK0Ksx+774MOYzhbgk297i25EQFMCzKTgmYpVZia1KXk6Lz6rm/5+GjJjeuTiJ0OwcSHdNepzBym7QUx", - "cdwxElawL0FGb5R0lVWh/cFSSQei5zQjPnlQQl7T3Gw4ZFiGS2LIXADyHy1RfH4R4DPf2Z/skn3uD58i", - "FN1mGq+jhtudrb7m/XPcQR1zo5onlGEAC+CJY0HJTGQbn6Be0iu9tk42beZ2UKUBjH68Ax3bP7dibZs+", - "7Ysa64sa64ui44sa68vuflFjfVHyfFHy/H+r5NlFsxOTIZ1mo1+UxISptDGvfbjROkysYvFhsylhuhK4", - 
"ulnbmd4n5AyDcKi5JeASJM2xso0KoupW6I6pyjQFyI7OedKAxDo9monv1/+13qbn5eHhYyCHD9p9lGZ5", - "HvLmbl8UZvGTTRr0DTmfnE86I0lYiUvIbMx5GJRge20d9n9V4/7YiW/CsNAlvYQqjIKocj5nKbMozwVf", - "ELoQteOV4duEC/yCFYxd9gLC9NTlfmGKXJnFu7S1zdiJpljelQBO6i3cau1ukUvc0G0Ib0cr93+MMXH/", - "64rgNw3oui2XHBy7wzK/sIxPwTI+O9P4s9sPA8Xfv6QM+eTwyZ92QaGa+I3Q5Dt08b+drFXl+Y5Fwt9U", - "ivJJ472irnZVDV0/8YqsnD7ffzAXAZaDcrdn7cl4dHCAsbZLofTBxNxtTS/H8OOHCmZfjWFSSHaJyR0/", - "XP+/AAAA//+u3heREOIAAA==", + "H4sIAAAAAAAC/+y9fXPcNpIw/lXwm7sqv9xwJL9lY1Wl7ifbSVYX23FZSnbvLD8JhuyZwYoEGACUZuLH", + "3/0pNAASJMEZjqTYmz3/ZWuIl0aj0eh3fJikoigFB67V5OjDpKSSFqBB4l80TUXFdcIy81cGKpWs1Ezw", + "yZH/RpSWjC8n0wkzv5ZUrybTCacFNG1M/+lEwm8Vk5BNjrSsYDpR6QoKagbWm9K0rkdaJ0uRuCGO7RAn", + "LyYft3ygWSZBqT6UP/J8QxhP8yoDoiXliqbmkyJXTK+IXjFFXGfCOBEciFgQvWo1JgsGeaZmfpG/VSA3", + "wSrd5MNL+tiAmEiRQx/O56KYMw4eKqiBqjeEaEEyWGCjFdXEzGBg9Q21IAqoTFdkIeQOUC0QIbzAq2Jy", + "9G6igGcgcbdSYJf434UE+B0STeUS9OT9NLa4hQaZaFZElnbisC9BVblWBNviGpfsEjgxvWbkVaU0mQOh", + "nLz97jl59OjRU7OQgmoNmSOywVU1s4drst0nR5OMavCf+7RG86WQlGdJ3f7td89x/lO3wLGtqFIQPyzH", + "5gs5eTG0AN8xQkKMa1jiPrSo3/SIHIrm5zkshISRe2Ib3+qmhPN/1l1JqU5XpWBcR/aF4FdiP0d5WNB9", + "Gw+rAWi1Lw2mpBn03WHy9P2HB9MHhx//7d1x8j/uzyePPo5c/vN63B0YiDZMKymBp5tkKYHiaVlR3sfH", + "W0cPaiWqPCMreombTwtk9a4vMX0t67ykeWXohKVSHOdLoQh1ZJTBgla5Jn5iUvHcsCkzmqN2whQppbhk", + "GWRTw32vVixdkZQqOwS2I1cszw0NVgqyIVqLr27LYfoYosTAdS184IL+eZHRrGsHJmCN3CBJc6Eg0WLH", + "9eRvHMozEl4ozV2l9rusyNkKCE5uPtjLFnHHDU3n+YZo3NeMUEUo8VfTlLAF2YiKXOHm5OwC+7vVGKwV", + "xCANN6d1j5rDO4S+HjIiyJsLkQPliDx/7voo4wu2rCQocrUCvXJ3ngRVCq6AiPk/INVm2//r9MfXREjy", + "CpSiS3hD0wsCPBXZ8B67SWM3+D+UMBteqGVJ04v4dZ2zgkVAfkXXrKgKwqtiDtLsl78ftCASdCX5EEB2", + "xB10VtB1f9IzWfEUN7eZtiWoGVJiqszpZkZOFqSg628Opw4cRWiekxJ4xviS6DUfFNLM3LvBS6SoeDZC", + "htFmw4JbU5WQsgWDjNSjbIHETbMLHsb3g6eRrAJw/CCD4NSz7ACHwzpCM+bomi+kpEsISGZGfnKcC79q", + "cQG8ZnBkvsFPpYRLJipVdxqAEafeLl5zoSEpJSxYhMZOHToM97BtHHstnICTCq4p45AZzotACw2WEw3C", + "FEy4XZnpX9FzquCrx0MXePN15O4vRHfXt+74qN3GRok9kpF70Xx1BzYuNrX6j1D+wrkVWyb2595GsuWZ", + "uUoWLMdr5h9m/zwaKoVMoIUIf/EotuRUVxKOzvl98xdJyKmmPKMyM78U9qdXVa7ZKVuan3L700uxZOkp", + "Ww4gs4Y1qk1ht8L+Y8aLs2O9jioNL4W4qMpwQWlLK51vyMmLoU22Y+5LmMe1KhtqFWdrr2ns20Ov640c", + "AHIQdyU1DS9gI8FAS9MF/rNeID3Rhfzd/FOWuemty0UMtYaO3X2LtgFnMzguy5yl1CDxrftsvhomAFZL", + "oE2LA7xQjz4EIJZSlCA1s4PSskxykdI8UZpqHOnfJSwmR5N/O2iMKwe2uzoIJn9pep1iJyOPWhknoWW5", + "xxhvjFyjtjALw6DxE7IJy/ZQImLcbqIhJWZYcA6XlOtZo4+0+EF9gN+5mRp8W1HG4rujXw0inNiGc1BW", + "vLUN7ygSoJ4gWgmiFaXNZS7m9Q93j8uywSB+Py5Liw8UDYGh1AVrprS6h8unzUkK5zl5MSPfh2OjnC14", + "vjGXgxU1zN2wcLeWu8Vqw5FbQzPiHUVwO4Wcma3xaDAy/G1QHOoMK5EbqWcnrZjGf3VtQzIzv4/q/Ocg", + "sRC3w8SFWpTDnFVg8JdAc7nboZw+4Thbzowcd/tej2zMKHGCuRatbN1PO+4WPNYovJK0tAC6L/YuZRw1", + "MNvIwnpDbjqS0UVhDs5wQGsI1bXP2s7zEIUESaEDw7NcpBd/pWp1C2d+7sfqHz+chqyAZiDJiqrVbBKT", + "MsLj1Yw25oiZhqi9k3kw1axe4m0tb8fSMqppsDQHb1wssajHfsj0QEZ0lx/xPzQn5rM524b122Fn5AwZ", + "mLLH2XkQMqPKWwXBzmQaoIlBkMJq78Ro3XtB+byZPL5Po/boW2swcDvkFoE7JNa3fgyeiXUMhmdi3TsC", + "Yg3qNujDjINipIZCjYDvhYNM4P479FEp6aaPZBx7DJLNAo3oqvA08PDGN7M0ltfjuZDX4z4dtsJJY08m", + "1IwaMN9pB0nYtCoTR4oRm5Rt0BmoceFtZxrd4WMYa2HhVNM/AAvKjHobWGgPdNtYEEXJcrgF0l9Fmf6c", + "Knj0kJz+9fjJg4e/PHzylSHJUoqlpAWZbzQoctfpZkTpTQ73+itD7ajKdXz0rx57K2R73Ng4SlQyhYKW", + "/aGsddOKQLYZMe36WGujGVddAzjmcJ6B4eQW7cQa7g1oL5gyElYxv5XNGEJY1sySEQdJBjuJad/lNdNs", + "wiXKjaxuQ5UFKYWM2NfwiGmRijy5BKmYiLhK3rgWxLXw4m3Z/d1CS66oImZuNP1WHAWKCGXpNR/P9+3Q", + "Z2ve4GYr57frjazOzTtmX9rI95ZERUqQiV5zksG8WrY0oYUUBaEkw454R38P+nTDU7Sq3QaRDqtpBeNo", + "4lcbngY6m9moHLJlaxNurpt1seLtc3aqOyoCjkHHS/yMav0LyDW9dfmlO0EM9ud+Iy2wJDMNUQt+yZYr", 
+ "HQiYb6QQi9uHMTZLDFD8YMXz3PTpC+mvRQZmsZW6hcu4GayhdbOnIYXTuag0oYSLDNCiUqn4NT3glkd/", + "ILoxdXjz65WVuOdgCCmllVltVRJ00vU4R9Mxoaml3gRRowa8GLX7ybay01mXby6BZkarB07E3LkKnBMD", + "F0nRw6j9ReeEhMhZasFVSpGCUpAlzkSxEzTfzjIRvQVPCDgCXM9ClCALKm8M7MXlTjgvYJOgP1yRuz/8", + "rO59Bni10DTfgVhsE0NvrfA5f1Af6nHTbyO47uQh2VEJxPNco10aBpGDhiEU7oWTwf3rQtTbxZuj5RIk", + "emb+UIr3k9yMgGpQ/2B6vym0VTkQ5eUUnTNWoN2OUy4UpIJnKjpYTpVOdrFl06iljZkVBJwwxolx4AGh", + "5CVV2noTGc/QCGKvE5zHCihmimGABwVSM/LPXhbtj52ae5CrStWCqarKUkgNWWwNHNZb5noN63ousQjG", + "rqVfLUilYNfIQ1gKxnfIsiuxCKK6Nro7d3t/cWiaNvf8JorKFhANIrYBcupbBdgNI10GAGGqQbQlHKY6", + "lFOH10wnSouyNNxCJxWv+w2h6dS2PtY/NW37xEV1c29nAszs2sPkIL+ymLUxTitqVGgcmRT0wsgeqBBb", + "t2cfZnMYE8V4Csk2yjfH8tS0Co/AzkNalUtJM0gyyOmmP+hP9jOxn7cNgDveKD5CQ2LjWeKb3lCyDx/Y", + "MrTA8VRMeCT4haTmCBrNoyEQ13vHyBng2DHm5OjoTj0UzhXdIj8eLttudWREvA0vhTY7bskBIXYMfQy8", + "A2ioR74+JrBz0qhl3Sn+G5SboBYj9p9kA2poCc34ey1gwJjmwoCD49Lh7h0GHOWag1xsBxsZOrEDlr03", + "VGqWshJVnR9gc+uaX3eCqL+JZKApyyEjwQerBZZhf2IDMbpjXk8THGWE6YPfs8JElpMzhRJPG/gL2KDK", + "/cZG+J0FcYG3oMpGRjXXE+UEAfVxQ0YCD5vAmqY63xg5Ta9gQ65AAlHVvGBa25DNtqarRZmEA0QN3Ftm", + "dN4cGx3nd2CMe+kUhwqW19+K6cSqBNvhO+voBS10OFWgFCIfYTzqISMKwSjHPymF2XXmIoR9GKmnpBaQ", + "jmmjK6++/e+oFppxBeS/RUVSylHjqjTUIo2QKCeg/GhmMBJYPadz8TcYghwKsIokfrl/v7vw+/fdnjNF", + "FnDlw+pNwy467t9HM84boXTrcN2CqdAct5PI9YGWf7z3XPBCh6fsdjG7kcfs5JvO4LW7wJwppRzhmuXf", + "mAF0TuZ6zNpDGhnnXsdxRxn1g6Fj68Z9P2VFld/Whi8oyysJw96x8/N3i+L8/D35zrb0ju2pJ/IQHVdN", + "WsTC3UaVxNAakjOj30pBMyMgRG37uEi+TOrgTBUFp1AGnL+5c0j5ppPINxYGMoeUVjYq2XFtB0ETHqpm", + "EXmxs7tdFEYXMtI8XuXaXtohVpdSVCVR9bZbKtBUwx9jam6GjkHZnziIDWo+DoUHGTUx39zCbW0HIhJK", + "CQp5a2heUfarWIT5N475qo3SUPQt0LbrLwP62dtBPUfwnHFICsFhE005ZRxe4cdYb8vfBzrjTTvUtys8", + "t+DvgNWeZww13hS/uNsBQ3tTx8XdwuZ3x+04H8LMIzSuQV4SStKcoelNcKVllepzTlG5Dw5bJH7AqzHD", + "5p7nvkncvhQx/7ihzjnF2JFa5Y/yxQVE+PJ3AN7qo6rlEpTuSIkLgHPuWjFOKs40zlWY/UrshpUg0Yk/", + "sy0LuiELmqN16neQgswr3WaumCChNMtz5wkx0xCxOOdUkxwMV33F+Nkah/OeRE8zHPSVkBc1FmbR87AE", + "DoqpJB7n8L39iiFobvkrF46G2ar2s7Wdm/GbLIoN6v5NBub/ufufR++Ok/+hye+HydP/OHj/4fHHe/d7", + "Pz78+M03/7f906OP39z7z3+P7ZSHPRa+7yA/eeF0ipMXKDg2xvMe7J/McFownkSJLHQRd2iL3DXiryeg", + "e22zgl7BOddrbgjpkuYso/p65NBlcb2zaE9Hh2paG9ExI/i17imO3YDLkAiT6bDGa1/j/dCgeKIMenNc", + "7guel0XF7VZWynmUMA7ch2iIxbROhrJFEI4IZsqsqI8vcn8+fPLVZNpkuNTfJ9OJ+/o+QsksW8fymDJY", + "x6Rsd0DwYNxRpKQbBTrOPRD2aDSKdYqHwxZg1DO1YuWn5xRKs3mcw/noWqetr/kJt2Gv5vygb2jjTM5i", + "8enh1hIgg1KvYsnRLUkBWzW7CdDx15dSXAKfEjaDWVdbzpagfFxMDnSBSbro3xBjsgXqc2AJzVNFgPVw", + "IaNU0hj9oHDruPXH6cRd/urW5XE3cAyu7py1I8j/rQW58/23Z+TAMUx1x6bU2aGDJKiIFcrF+bciOQw3", + "syUhbE7hOT/nL2DBODPfj855RjU9mFPFUnVQKZDPaE55CrOlIEc+deAF1fSc9yStwaotQdIGKat5zlJy", + "EUrEDXnaTPyo2kjzpTCKY9ep3Zdf3VRR/mInSK6YXolKJy7VOJFwRWXMaaDqVFMc2RYK2DbrlLixLSt2", + "qcxu/DjPo2Wpuiln/eWXZW6WH5ChcglVZsuI0kJ6WcQIKBYa3N/Xwl0Mkl75PPVKgSK/FrR8x7h+T5Lz", + "6vDwEZBWDtav7so3NLkpoWWvvFZKXNdWiQu3eg2staRJSZcDRgMNtMTdR3m5QCU7zwl2a+V++dhWHKpZ", + "gMfH8AZYOPbOY8HFndpevmZMfAn4CbcQ2xhxo/GYXne/gmywa29XJ6Ost0uVXiXmbEdXpQyJ+52pS0ks", + "jZDl3diKLTFU0FXdmANJV5BeQIYFAKAo9Wba6u4jJZyg6VkHU7ZQhs3lwGxuNO3OgVRlRp0o3jEoGQwr", + "0NrHKr6FC9iciSYZfJ882nZapxo6qEipgXRpiDU8tm6M7ua7cBy0dZWlz47ENBlPFkc1Xfg+wwfZiry3", + "cIhjRNFKOxxCBJURRFjiH0DBNRZqxrsR6ceWZ7SMub35InU1PO8nrkmjPLnImXA1mE1pvxeAVXfElSJz", + "auR24QrG2NTFgItVii5hQEIOresjEwRbFnkcZNe9F73pxKJ7ofXumyjItnFi1hylFDBfDKmgMtOJl/Iz", + "WQeONaASrAPnEDbPUUyqA8ss06Gy5eWwha2GQIsTMEjeCBwejDZGQslmRZWvZYMlf/xZHiUD/IGpuNsK", + "MJwEoT5BXZ/a8O15bvec9rRLV4bB117wBRdC1XJE8QQj4WN0cWw7BEcBKIMclnbhtrEnlCYtuNkgA8eP", + 
"i0XOOJAkFjVElRIps8WImmvGzQFGPr5PiDUBk9EjxMg4ABsdkzgweS3Cs8mX+wDJXVoz9WOjSzP4G+IZ", + "GDaO1og8ojQsnPGBiG3PAagLNavvr07AIw5DGJ8Sw+YuaW7YnNP4mkF6dQBQbO1k/TvX+L0hcXaLBd5e", + "LHutyV5F11lNKDN5oOMC3RaI52Kd2BSsqMQ7X88NvUdDizEhLHYwbcWFO4rMxRrDLfBqsaGsO2AZhsOD", + "EWj4a6aQXrHf0G1ugdk27XZpKkaFCknGmfNqchkSJ8ZMPSDBDJHL3aCIwrUA6Bg7mnKjTvndqaS2xZP+", + "Zd7catOmOJDP2ogd/6EjFN2lAfz1rTB12YM3XYklaqdoRw20Kz4EImSM6A2b6Dtp+q4gBTmgUpC0hKjk", + "Iua6M7oN4I1z6rsFxgusK0H55l4QiiJhyZSGxohuLmbvFfrU5kmK5ayEWAyvTpdyYdb3Voj6mrL1UrBj", + "a5mffAUYyrlgUukEPRDRJZhG3ylUqr8zTeOyUjvYxVZ2ZFmcN+C0F7BJMpZXcXp18/7wwkz7umaJqpoj", + "v2WcAE1XZI6VSKMhcFumtlGSWxf80i74Jb219Y47DaapmVgacmnP8Sc5Fx3Ou40dRAgwRhz9XRtE6RYG", + "GWQu9rljIDfZw4mZi7Nt1tfeYcr82DvDRnz+5NAdZUeKriUwGGxdBUM3kRFLmA4KefZTCgfOAC1Llq07", + "tlA76qDGTPcyePgKSR0s4O66wXZgILB7xrIaJKh2MaxGwLclWVu1KGajMHPWLlkVMoRwKqZ8QfE+ouqs", + "p124OgOa/wCbn01bXM7k43RyM9NpDNduxB24flNvbxTP6Jq3prSWJ2RPlNOylOKS5okzMA+RphSXjjSx", + "ubdHf2JWFzdjnn17/PKNA//jdJLmQGVSiwqDq8J25Z9mVbbu1sAB8QWLjc7nZXYrSgabXxcLCo3SVytw", + "xWEDabRXxa5xOARH0RmpF/EIoZ0mZ+cbsUvc4iOBsnaRNOY76yFpe0XoJWW5t5t5aAeieXBx40ohRrlC", + "OMCNvSuBkyy5VXbTO93x09FQ1w6eFM61pXxtYSs0KyJ414VuREg0xyGpFhRr0FmrSJ858apAS0KicpbG", + "bax8jmG33PrOTGOCjQeEUTNixQZcsbxiwVimmRqh6HaADOaIItPXMxzC3Vy4pzUqzn6rgLAMuDafJJ7K", + "zkHFon/O2t6/To3s0J/LDWwt9M3wN5ExwvqL3RsPgdguYISeuh64L2qV2S+0tkiZHwKXxB4O/3DG3pW4", + "xVnv6MNRsw1eXLU9buFLGH3+ZwjDVk3e/QyHV15dIciBOaLPajCVLKT4HeJ6HqrHkYwRX3GSYZTL78BH", + "hJk31p3mdZBm9sHtHpJuQitUO0hhgOpx5wO3HJa+8xZqyu1W2yr3rVi3OMGEUaUHdvyGYBzMvUjcnF7N", + "aawuoBEyDEzHjQO4ZUvXgvjOHvfO7M9cEdAZCXzJdVtmk4FLkE0yV7+wyDUFBjvtaFGhkQyQakOZYGr9", + "f7kSkWEqfkW5fSzB9LNHyfVWYI1fpteVkJjKr+Jm/wxSVtA8Ljlkad/Em7Els08FVAqCWvRuIPvGiqUi", + "V8/futgb1JwsyOE0eO3C7UbGLpli8xywxQPbYk4VcvLaEFV3McsDrlcKmz8c0XxV8UxCplfKIlYJUgt1", + "qN7Uzqs56CsATg6x3YOn5C667RS7hHsGi+5+nhw9eIpGV/vHYewCcG+CbOMm2SJMfInTMfot7RiGcbtR", + "Z9GsZ/uQ0zDj2nKabNcxZwlbOl63+ywVlNMlxCNFih0w2b64m2hI6+CFZ/YVEqWl2BA2kIIEmhr+NBB9", + "btifBYOkoiiYLpxzR4nC0FNTaN5O6oezT5q4GqEeLv8RfaSldxF1lMhPazS191ts1ejJfk0LaKN1Sqit", + "35CzJnrBVy4mJ748DBZNrWulWtyYuczSUczBYIYFKSXjGhWLSi+Sr0m6opKmhv3NhsBN5l89jhSKbRcs", + "5PsB/snxLkGBvIyjXg6QvZchXF9ylwueFIajZPeabI/gVA46c+NuuyHf4fahxwplZpRkkNyqFrnRgFPf", + "iPD4lgFvSIr1evaix71X9skps5Jx8qCV2aGf3r50UkYhZKzmW3PcncQhQUsGlxi7F98kM+YN90Lmo3bh", + "JtB/Xs+DFzkDscyf5Zgi8ExEtFNfvLi2pLtY9Yh1YOiYmg+GDOZuqClpF4r99E4/b3zuO5/MFw8r/tEF", + "9jNvKSLZr2BgE4Mi1tHtzOrvgf+bkmdiPXZTOyfEb+w/AWqiKKlYnv3cZGV2aoRLytNV1J81Nx1/aV4z", + "qhdn76doabUV5Rzy6HBWFvzFy4wRqfYfYuw8BeMj23bLltvldhbXAN4G0wPlJzToZTo3E4RYbSe81QHV", + "+VJkBOdp6ng13LNf7j4oSvxbBUrHkofwgw3qQrul0XdtTVwCPENtcUa+t6+RroC0qrSglmbz4yHzFVqt", + "Qb0qc0GzKTHjnH17/JLYWW0f+yaHrcm7RCWlvYqOvSooUTguPNg/rxFPXRg/zvZYarNqpbFoktK0KGPJ", + "oabFmW+AGaihDR/VlxA7M/LCao7K6yV2EkMPCyYLo3HVo1nZBWnC/Edrmq5QJWux1GGSH19M2lOlCh5w", + "qx9iqev24bkzcLt60rac9JQIozdfMWUfoYRLaOej1snZziTg81Pby5MV55ZSorLHtuIB10G7B84Gangz", + "fxSyDuL3FMhtLfZ9a2ufYq9oHaFuoe7ey202u7F+YMM/LpxSLjhLsYpP7Gp2D1qO8YGNKHjUNbL6I+5O", + "aORwRcuD12FyDouDBcM9I3SI6xvhg69mUy112D81vpy4oposQSvH2SCb+ir3zg7IuAJXhxHfNg34pJAt", + "vyJyyKirOqldGnuSEabFDCh235lvr53aj/HiF4yjgO/Q5kLTraUO39vTRitgmiwFKLeedm6wemf6zDBN", + "NoP1+5l/nw/HsG45s2zrg+4Pdew90s4DbNo+N21tKZPm51YEsp30uCzdpMNvIETlAb3mgwiOeBYT79oJ", + "kFuPH462hdy2hpLgfWoIDS7REQ0l3sM9wqjfA+i8NWOEVktR2ILYEK5oBQPGI2C8ZBya1yMjF0QavRJw", + "Y/C8DvRTqaTaioCjeNoZ0By9zzGGprRzPdx0qM4GI0pwjX6O4W1snjIYYBx1g0Zwo3xTP1ppqDsQJp7j", + "a7kOkf2HCVCqckJUhhkFnacKYozDMG5fCql9AfSPQV8mst21pPbk7HMTDSWJzqtsCTqhWRari/kMvxL8", + 
"6gtFwRrSqq6fWJYkxZoo7SIxfWpzE6WCq6rYMpdvcMPpgrc/ItQQvj/idxiTUOYb/DdWPHB4Z1wQxt5h", + "gD7iwj2WsKfc3B6pJ/Uamk4UWybjMYF3ys3R0Ux9PUJv+t8qpedi2QbkE5eG2Mblwj2K8bdvzcURVk7o", + "VcS0V0td2ACD7oR/sQ3Vxjolt82V8CrrlchEZ09d8267AWL4bacpXn4DobdBQQxq71frPRwKwE0H48Wp", + "dplrmpKtLGgwG8hG79i8H4QibjkditixATvmc6/3OMmwJ2fj2FsR6kPB+gD94ONMSUmZc403zKKPWReR", + "Pmwu3Hbomg3uLsLFeQ9a7H64HIrJJorxZQ4Ev3dfw7kAl85eP4du1+qjkrxKaH91r5Ha8eqo+Oj6+9EJ", + "ONXnNYMOGm3PXOV1u0ynk//ws41hI8C13PwTmHB7m957S6gv7VrzVNOE1FV7R1Xxbd2K8WeBhusfNTWP", + "kJ5KoVhTKTr2XtDIWLczfPInqN/UH8sHmlxCqrE8eONAlwD7VHMykwVv0X2pgzSgO9Yhga780baaR/2a", + "4DsutF5aUpBaZ+spz8ZX+Dmuw6SQKeFrcEvg7jm4dsLB6LDnxQJSzS53pIH9bQU8SDGaeiOEfdY1yApj", + "dRgtVhHZ38TWALQtS2srPEE1vxuDM5QEcgGbO4q0qCFa4Hnq75XrFJBADCB3SAyJCBULQ7BWU+cZZqqm", + "DMSCD/ux3aEpxTX4NEyQ1HjNuTxJmhu3SXTcMmX8bYpRc5mue6X/YkToUKZYv7b9sLD9Ap8SUPWzbb4A", + "RaiSkpNI9WdXwAKT9mpHgS9lAcr/5jN07Sw5u4Dw8Rp0y1xRmfkWUTuDN2EkW+6jXnpXtGQ1VTaI0vnB", + "6yDNfkJPpPAThuKmucByz0PxzO24yPCNd4z+wOsAy08jXAuQ7pEvFPZyoSDRwgd1boNjGyrce+TXQYIa", + "LLZogRssgfK2qfGCRWcpljyhLrIlXCCRUFADnQwqsQzPuQ3Zz+13n8Hii47uNKfU9JrsLKXiw3OZ6iEx", + "pPoFcbfl7syY61hWGOf2SVEVK8vCDSpD038pRVal9oIOD0ZtfRpd9GgLK4kaJdL+Knv6ZY4lwF4GeYYX", + "sDmwon+6orypxdY+1laEsmsI8vo7u32rRqe4fp0v7QKWtwLn5zTcTCelEHkyYOs/6VeX6Z6BC5ZeQEbM", + "3eED2wZe1yB30cRcO3OvVhtfTaUsgUN2b0bIMbehxN6v2y5v3Jmc39Hb5l/jrFllCz45m9LsnMdjMrEU", + "k7whf/PDbOdqCgzzu+FUdpAdtUvWA5VtJL2KvDUzG6uU9j2t3fc/GqKyUMSklGsmso863327UoT0g6cP", + "tms/YZ2LJoBOWvMkSkvNcxBt4eVVY3Uc9wiD77ADvFApDp5h8NzIgfOZo9xe1UgJljJICa3l79Kz3QIb", + "vhRskcK0CLNMW3XIRki09yUwoqjntW0ijue+CQOLWgiOhX76pg+F5mqsFxwSjjmX8pLmn958gdVOjhEf", + "7knE+EJD/TdEskWlul6oyUs6au5A1729qfkbNLf8DcweRf0Mbihnd6yfv/DWWaxrR3OSi+YxJBySXOGY", + "1jHx4Csyd2HypYSUKdbJILrypUxrdQ8rezcvZW7XL3et82ehb0DGTkEQJXndlEXUAu+HBsLmiH5mpjJw", + "cqNUHqO+HllE8BfjUWG++o7r4qLlsbBlZjuhOELCLXsughiEPT0X/Uz8scuz1nlz6VQK+uscfVu3cBu5", + "qJu1jXW79ZG7rXbeGG9ZvCSm6Y7uOosQrCdLEFTy64NfiYQFPhghyP37OMH9+1PX9NeH7c/mON+/H3+R", + "81M56iyO3Bhu3hjF/DwUumnDEweihDv7UbE820UYrZjv5skVjGr+xWV9fJZHX36x9tT+UXWF9/cJEehu", + "AiImstbW5MFUQTT3iEBu1y0Sto2aSVpJpjdYjMKb39gvUZfi97XF3nl86vRld/dpcQF1OZPGvl8pf7t+", + "L2iO95GRqTFAQ+MrjN+uaVHm4A7KN3fmf4FHXz/ODh89+Mv868Mnhyk8fvL08JA+fUwfPH30AB5+/eTx", + "ITxYfPV0/jB7+Pjh/PHDx189eZo+evxg/virp3+5Y/iQAdkCOvGpj5O/48tIyfGbk+TMANvghJasfnzV", + "kLF/3oGmeBKhoCyfHPmf/n9/wmapKJrh/a8Tl1k1WWldqqODg6urq1nY5WCJBr1EiypdHfh5+o9evjmp", + "o+OtKxh31AY+G1LATXWkcIzf3n57ekaO35zMGoKZHE0OZ4ezB/iYWQmclmxyNHmEP+HpWeG+Hzhimxx9", + "+DidHKyA5uj/Mn8UoCVL/Sd1RZdLkDP3zoX56fLhgRclDj44Y+bHbd8OwpKxBx9aNt9sR08sKXnwwVdK", + "2N66VYrA2brNcpex+iHfQ/DmZ1DPumVrm2+8uXZKVP2yeSmZMCdpaq7FDFIJFOleSIxOb14PdfoL2Kfc", + "Xx3/Ha3tr47/Tr4hh1OXtKBQ1YhNb+0ZNQmcZBbsyOu2zzbHtfcgqKN29C72IG3s/Q08QoY+AgqvR2w4", + "mJYVhPW9Gn5seOxh8vT9hydff4zJef133zySBl6f1cJXE0CkFXT9zRDK1vZ04Bp+q0BumkUUdD0JAe77", + "YCJPwC3YspKdR+/rcCX3DANT5L9Of3xNhCROr31D04swfiEGjrvPQoh8VWwXDV+oZdkOHa1x+B7TixEK", + "PMUPDw+/vJH8v+ON5Glraz2NfNndLy9g/2u8gP14T1a21TzcCkAddXb2Ga63Wa/oui4jQwkXPOFY3/8S", + "SKDnPT588Kdd4QnHCBcjaxIrS3+cTp78ibfshBupheYEW9rVPPrTruYU5CVLgZxBUQpJJcs35Cde5ycG", + "NYn67O8nfsHFFfeIMGpiVRRUbpyETGueU/EgY3Qr/+k5BxspGrkoXSr0I6H8OWm9Y8OXk/cfvYA/UmvY", + "1uxgjgUTxjYFFTQeVj3QIaAOPqBJe/D3A5dFHv+IrgWrsx74QKZ4y5ZW80GvDaydHinV6aoqDz7gf1CH", + "DMCyORt9cG129YF9ALr/84an0R/7A3UfIor9fPChXQi7hVC1qnQmroK+aDS3Hp/+fPXTMK2/D64o00ZC", + "cNFoWKis31kDzQ9cnmXn1ya1ofcF8zWCHzsyRSlsKnxbV3tLr0IJxUoLoPQzkW22cJt1Mmccj2DIIhpT", + "mP3Y1w/6r96uwNb39N7EiACmRfCOvhY+I7mn9X28ofLRkRvXJxFfEYKJinQ/sMkcptlOBwKOu+eDwEHZ", + 
"SJR0lfIP+/6RUkkPomc0I752QkJe0dxsOGTk2Mm+LWz80RLF5xcBPvOd/cku2Wf+8ClCMXSjpR21Qr/I", + "UoqmvJw7qGNuVKNCGQawBJ44FpTMRbbxVVAlvdJrG+jRZW4HdTnb6MdbsLH9cxvWdtnTvpixvpixvhg6", + "vpixvuzuFzPWFyPPFyPP/1ojzz6WnZgM6Swbw6Ik1oujrXmt4kabVKWaxYfNpoTpWuDqlwZlekbIGSaC", + "UHNLwCVImmP5dBVkdhUYEqiqNAXIjs550oLEBt6Zie82/7URj+516sN73T5KszwPeXO/Lwqz+MnWTPiG", + "nE/OJ72RJBTiEjKbXxoGxtteO4f9/+pxf+zl2GBqIr6J6kP5iaoWC5Yyi/Jc8CWhS9FE6xq+TbjALyAN", + "cDZTmTA9danvTJErs3hXta8dv98Wy/sSwEmzhTu93R1yiTu6DeHt6eX+jzEu7n9dEfy6SUU35ZJbx+6x", + "zC8s41OwjM/ONP7s/sPA8PcvKUM+Pnz8p11QaCZ+LTT5DsPMbyZr1WVOY9nY15WifM1cb6hrQlXD0E+8", + "Iuugz3fvzUWAbw6427OJZDw6OMB8z5VQ+mBi7rZ2lGP48X0Nsy9GPSklu8TaVu8//r8AAAD//xYAv/aJ", + "0AAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 5f296c5425..15599daa24 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -1072,17 +1072,20 @@ func (v2 *Handlers) GetSyncRound(ctx echo.Context) error { // GetLedgerStateDelta returns the deltas for a given round. // This should be a representation of the ledgercore.StateDelta object. // (GET /v2/deltas/{round}) -func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round uint64) error { +func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round uint64, params model.GetLedgerStateDeltaParams) error { + handle, contentType, err := getCodecHandle((*string)(params.Format)) + if err != nil { + return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) + } sDelta, err := v2.Node.LedgerForAPI().GetStateDeltaForRound(basics.Round(round)) if err != nil { - return internalError(ctx, err, errFailedRetrievingStateDelta, v2.Log) + return notFound(ctx, err, errFailedRetrievingStateDelta, v2.Log) } - consensusParams := config.Consensus[sDelta.Hdr.CurrentProtocol] - response, err := StateDeltaToLedgerDelta(sDelta, consensusParams) + data, err := encode(handle, sDelta) if err != nil { - return internalError(ctx, err, errInternalFailure, v2.Log) + return internalError(ctx, err, errFailedToEncodeResponse, v2.Log) } - return ctx.JSON(http.StatusOK, response) + return ctx.Blob(http.StatusOK, contentType, data) } // TransactionParams returns the suggested parameters for constructing a new transaction. 
diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go
index 04e36a3e4f..bef0291006 100644
--- a/daemon/algod/api/server/v2/test/handlers_resources_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go
@@ -23,6 +23,11 @@ import (
 	"net/http/httptest"
 	"testing"
 
+	"github.com/labstack/echo/v4"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
 	"github.com/algorand/go-algorand/agreement"
 	"github.com/algorand/go-algorand/config"
 	v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
@@ -35,12 +40,10 @@ import (
 	"github.com/algorand/go-algorand/logging"
 	"github.com/algorand/go-algorand/protocol"
 	"github.com/algorand/go-algorand/test/partitiontest"
-	"github.com/labstack/echo/v4"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 type mockLedger struct {
+	mock.Mock
 	accounts map[basics.Address]basics.AccountData
 	kvstore  map[string][]byte
 	latest   basics.Round
@@ -48,7 +51,8 @@ type mockLedger struct {
 }
 
 func (l *mockLedger) GetStateDeltaForRound(rnd basics.Round) (ledgercore.StateDelta, error) {
-	panic("implement me")
+	args := l.Called(rnd)
+	return args.Get(0).(ledgercore.StateDelta), args.Error(1)
 }
 
 func (l *mockLedger) LookupAccount(round basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, basics.MicroAlgos, error) {
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index fd233e4c38..728f75ff45 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -130,30 +130,42 @@ func TestGetBlock(t *testing.T) {
 	getBlockTest(t, 0, "bad format", 400)
 }
 
-func TestGetLedgerStateDelta(t *testing.T) {
-	partitiontest.PartitionTest(t)
-	t.Parallel()
-	a := require.New(t)
-
+func testGetLedgerStateDelta(t *testing.T, round uint64, format string, expectedCode int) {
 	handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
 	defer releasefunc()
-	insertRounds(a, handler, 3)
-
-	err := handler.GetLedgerStateDelta(c, 2)
+	insertRounds(require.New(t), handler, 3)
+	err := handler.GetLedgerStateDelta(c, round, model.GetLedgerStateDeltaParams{Format: (*model.GetLedgerStateDeltaParamsFormat)(&format)})
 	require.NoError(t, err)
-	require.Equal(t, 200, rec.Code)
+	require.Equal(t, expectedCode, rec.Code)
+}
+
+func TestGetLedgerStateDelta(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Run("json-200", func(t *testing.T) {
+		t.Parallel()
+		testGetLedgerStateDelta(t, 1, "json", 200)
+	})
+	t.Run("msgpack-200", func(t *testing.T) {
+		t.Parallel()
+		testGetLedgerStateDelta(t, 2, "msgpack", 200)
+	})
+	t.Run("msgp-200", func(t *testing.T) {
+		t.Parallel()
+		testGetLedgerStateDelta(t, 3, "msgp", 200)
+	})
+	t.Run("json-404", func(t *testing.T) {
+		t.Parallel()
+		testGetLedgerStateDelta(t, 0, "json", 404)
+	})
+	t.Run("msgpack-404", func(t *testing.T) {
+		t.Parallel()
+		testGetLedgerStateDelta(t, 9999, "msgpack", 404)
+	})
+	t.Run("format-400", func(t *testing.T) {
+		t.Parallel()
+		testGetLedgerStateDelta(t, 1, "bad format", 400)
+	})
 
-	actualResponse := model.LedgerStateDelta{}
-	expectedResponse := poolDeltaResponseGolden
-	(*expectedResponse.Accts.Accounts)[0].AccountData.Round = 2
-	err = protocol.DecodeJSON(rec.Body.Bytes(), &actualResponse)
-	require.NoError(t, err)
-	require.Equal(t, poolDeltaResponseGolden.Accts, actualResponse.Accts)
-	require.Equal(t, poolDeltaResponseGolden.KvMods, actualResponse.KvMods)
-	require.Equal(t, poolDeltaResponseGolden.ModifiedAssets, actualResponse.ModifiedAssets)
-	require.Equal(t, poolDeltaResponseGolden.ModifiedApps, actualResponse.ModifiedApps)
-	require.Equal(t, poolDeltaResponseGolden.TxLeases, actualResponse.TxLeases)
-	require.Equal(t, poolDeltaResponseGolden.Totals, actualResponse.Totals)
 }
 
 func TestSyncRound(t *testing.T) {
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index f27940f33a..272e05154d 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -107,29 +107,6 @@ var poolAddrResponseGolden = model.AccountResponse{
 	MinBalance:                  100000,
 }
 var txnPoolGolden = make([]transactions.SignedTxn, 2)
-var poolDeltaResponseGolden = model.LedgerStateDelta{
-	Accts: &model.AccountDeltas{
-		Accounts: &[]model.AccountBalanceRecord{
-			{
-				AccountData: model.Account{
-					Address:                     poolAddr.String(),
-					Amount:                      50000000000,
-					AmountWithoutPendingRewards: 50000000000,
-					MinBalance:                  100000,
-					AppsTotalSchema:             &appsTotalSchema,
-					Status:                      "Not Participating",
-				},
-				Address: poolAddr.String(),
-			},
-		},
-	},
-	Totals: &model.AccountTotals{
-		NotParticipating: 100000000000,
-		Offline:          0,
-		Online:           658511,
-		RewardsLevel:     0,
-	},
-}
 
 // ordinarily mockNode would live in `components/mocks`
 // but doing this would create an import cycle, as mockNode needs
diff --git a/tools/debug/vbconvert/main.go b/tools/debug/vbconvert/main.go
index a6acc19cb1..e7cebb014d 100644
--- a/tools/debug/vbconvert/main.go
+++ b/tools/debug/vbconvert/main.go
@@ -24,9 +24,6 @@ import (
 
 	"github.com/algorand/go-codec/codec"
 
-	"github.com/algorand/go-algorand/config"
-	v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
-	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
 	"github.com/algorand/go-algorand/data/bookkeeping"
 	"github.com/algorand/go-algorand/ledger/ledgercore"
 	"github.com/algorand/go-algorand/protocol"
@@ -43,11 +40,6 @@ type algodVB struct {
 	Delta ledgercore.StateDelta
 }
 
-type conduitVB struct {
-	Blk   bookkeeping.Block
-	Delta model.LedgerStateDelta
-}
-
 func run(args arguments) {
 	var algodType algodVB
 
@@ -64,14 +56,6 @@ func run(args arguments) {
 		os.Exit(1)
 	}
 
-	// Convert
-	consensusParams := config.Consensus[algodType.Delta.Hdr.CurrentProtocol]
-	modelDelta, err := v2.StateDeltaToLedgerDelta(algodType.Delta, consensusParams)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to convert ledgercore.StateDelta from input file '%s': %s\n", args.inputFile, err)
-		os.Exit(1)
-	}
-
 	// Write
 	outputFile, err := os.Create(args.outputFile)
 	if err != nil {
@@ -90,11 +74,7 @@ func run(args arguments) {
 		os.Exit(1)
 	}
 
-	conduitType := conduitVB{
-		Blk:   algodType.Blk,
-		Delta: modelDelta,
-	}
-	err = enc.Encode(conduitType)
+	err = enc.Encode(algodType)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Unable to decode input file '%s': %s\n", args.outputFile, err)
 		os.Exit(1)
From 71cec0570dca35937d1d0b9c2456e0adb31149a1 Mon Sep 17 00:00:00 2001
From: Ikko Eltociear Ashimine
Date: Mon, 13 Feb 2023 23:28:50 +0900
Subject: [PATCH 39/81] cmd: fix typo in relayCmd.go (#5135)

---
 cmd/algorelay/relayCmd.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmd/algorelay/relayCmd.go b/cmd/algorelay/relayCmd.go
index 0d97485757..d658daa811 100644
--- a/cmd/algorelay/relayCmd.go
+++ b/cmd/algorelay/relayCmd.go
@@ -457,7 +457,7 @@ func ensureRelayStatus(checkOnly bool, relay eb.Relay, nameDomain string, srvDom
 			if err != nil {
 				return
 			}
-			fmt.Printf("[%d] Added boostrap SRV Record: %s:%d\n", relay.ID, targetDomainAlias, port)
+			fmt.Printf("[%d] Added bootstrap SRV Record: %s:%d\n", relay.ID, targetDomainAlias, port)
 		}
 	} else {
 		if matchCount > 0 {
@@ -465,7 +465,7 @@ func ensureRelayStatus(checkOnly bool, relay eb.Relay, nameDomain string, srvDom
 			if err != nil {
 				return
 			}
-			fmt.Printf("[%d] Removed boostrap SRV Record: %s\n", relay.ID, targetDomainAlias)
+			fmt.Printf("[%d] Removed bootstrap SRV Record: %s\n", relay.ID, targetDomainAlias)
 		}
 	}
 
From 02af4ca6da1e6351611982694431c3acd3b28374 Mon Sep 17 00:00:00 2001
From: abebeos <110243666+abebeos@users.noreply.github.com>
Date: Mon, 13 Feb 2023 17:40:03 +0200
Subject: [PATCH 40/81] add an Enhancement issue-template (#5091)

---
 .github/ISSUE_TEMPLATE/enhancement.md | 28 +++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/enhancement.md

diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
new file mode 100644
index 0000000000..82f5684802
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/enhancement.md
@@ -0,0 +1,28 @@
+---
+name: Enhancement
+about: How can we enhance existent functionality, constructs, processes etc.
+title: ''
+labels: Enhancement
+assignees: ''
+
+---
+
+## Status
+
+
+
+## Expected
+
+
+
+## Solution
+
+
+
+## Dependencies
+
+
+
+## Urgency
+
+
\ No newline at end of file
From a688c236dd0ae556dadef4d068c9140962ef422d Mon Sep 17 00:00:00 2001
From: Eric Warehime
Date: Mon, 13 Feb 2023 10:27:46 -0800
Subject: [PATCH 41/81] tools: remove vbconvert (#5133)

---
 tools/debug/vbconvert/README.md |   4 --
 tools/debug/vbconvert/main.go   | 105 --------------------------------
 2 files changed, 109 deletions(-)
 delete mode 100644 tools/debug/vbconvert/README.md
 delete mode 100644 tools/debug/vbconvert/main.go

diff --git a/tools/debug/vbconvert/README.md b/tools/debug/vbconvert/README.md
deleted file mode 100644
index 3ac7747494..0000000000
--- a/tools/debug/vbconvert/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# vbconvert
-
-Utility tool to assist converting ledgercore.ValidatedBlock objects into a
-format that can be parsed using types in the Algorand Go SDK.
diff --git a/tools/debug/vbconvert/main.go b/tools/debug/vbconvert/main.go
deleted file mode 100644
index e7cebb014d..0000000000
--- a/tools/debug/vbconvert/main.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package main
-
-import (
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	"github.com/algorand/go-codec/codec"
-
-	"github.com/algorand/go-algorand/data/bookkeeping"
-	"github.com/algorand/go-algorand/ledger/ledgercore"
-	"github.com/algorand/go-algorand/protocol"
-)
-
-type arguments struct {
-	inputFile  string
-	outputFile string
-	format     string
-}
-
-type algodVB struct {
-	Blk   bookkeeping.Block
-	Delta ledgercore.StateDelta
-}
-
-func run(args arguments) {
-	var algodType algodVB
-
-	// Read
-	data, err := os.ReadFile(args.inputFile)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to read input file '%s': %s\n", args.inputFile, err)
-		os.Exit(1)
-	}
-
-	err = protocol.DecodeReflect(data, &algodType)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to decode input file '%s': %s\n", args.inputFile, err)
-		os.Exit(1)
-	}
-
-	// Write
-	outputFile, err := os.Create(args.outputFile)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to open output file '%s': %s\n", args.outputFile, err)
-		os.Exit(1)
-	}
-
-	var enc *codec.Encoder
-	switch strings.ToLower(args.format) {
-	case "json":
-		enc = protocol.NewJSONEncoder(outputFile)
-	case "msgp":
-		enc = protocol.NewEncoder(outputFile)
-	default:
-		fmt.Fprintf(os.Stderr, "Unknown encoder type '%s', valid encoders: json, msgp.\n", args.format)
-		os.Exit(1)
-	}
-
-	err = enc.Encode(algodType)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to decode input file '%s': %s\n", args.outputFile, err)
-		os.Exit(1)
-	}
-}
-
-func main() {
-	var args arguments
-
-	command := &cobra.Command{
-		Use:  "vbconvert",
-		Long: "Convert a ledgercore.ValidatedBlock into the conduit version of a ValidatedBlock.",
-		Run: func(_ *cobra.Command, _ []string) {
-			run(args)
-		},
-	}
-
-	command.Flags().StringVarP(&args.inputFile, "input", "i", "", "Input filename.")
-	command.Flags().StringVarP(&args.outputFile, "output", "o", "", "Optional output filename. If not present a default .convert is created.")
-	command.Flags().StringVarP(&args.format, "format", "f", "json", "Optional output format. Valid formats are 'json' and 'msgp'.")
-	command.MarkFlagRequired("input")
-	command.MarkFlagRequired("output")
-
-	if err := command.Execute(); err != nil {
-		fmt.Fprintf(os.Stderr, "An error occurred while running vbconvert: %s.\n", err)
-		os.Exit(1)
-	}
-}
From 4d14c9ddfc862d9393cad2d2309c9abf0909a9c3 Mon Sep 17 00:00:00 2001
From: algobarb <78746954+algobarb@users.noreply.github.com>
Date: Wed, 15 Feb 2023 10:15:43 -0500
Subject: [PATCH 42/81] CircleCI: Fix cached Results issue in self-hosted runner (#5137)

---
 .circleci/config.yml | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 7842cb7032..5508099457 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -18,7 +18,7 @@ parameters:
     default: "/opt/cibuild"
   result_path:
     type: string
-    default: "/tmp/build_test_results"
+    default: "/tmp/build_test_results_<< pipeline.id >>"
  valid_nightly_branch:
    type: string
    default: /hotfix\/.*/
@@ -705,9 +705,6 @@ commands:
             TestAlgohWithExpect \
             TestGoalWithExpect \
             TestTealdbgWithExpect
-      - store_artifacts:
-          path: << parameters.result_path >>/<< parameters.result_subdir >>
-          destination: << parameters.result_subdir >>/combined-test-results
 
 upload_binaries_command:
   description: save build artifacts for potential deployments
From eba02a671cfcf3e38651a18ca42f683df2c3232f Mon Sep 17 00:00:00 2001
From: Jason Paulos
Date: Wed, 15 Feb 2023 13:44:54 -0800
Subject: [PATCH 43/81] Algod: Modify `EvalTracer` design and improve testing for failures (#5071)

---
 data/transactions/logic/debugger.go           |   2 +-
 data/transactions/logic/debugger_eval_test.go | 303 +++++++++++
 data/transactions/logic/debugger_test.go      | 159 +-----
 data/transactions/logic/eval.go               |  36 +-
 data/transactions/logic/eval_test.go          | 150 +++--
 data/transactions/logic/export_test.go        |   6 +
 data/transactions/logic/ledger_test.go        |   2 +
 .../logic/mocktracer/scenarios.go             | 515 ++++++++++++++++++
 data/transactions/logic/mocktracer/tracer.go  |  53 +-
 data/transactions/logic/tracer.go             |   9 +-
 data/transactions/logic/tracer_test.go        | 332 ++++++-----
 data/transactions/verify/txn_test.go          | 244 ++++++---
 ledger/internal/eval.go                       |  23 +-
 ledger/internal/eval_test.go                  | 377 +++++++------
 14 files changed, 1572 insertions(+), 639 deletions(-)
 create mode 100644 data/transactions/logic/debugger_eval_test.go
 create mode 100644 data/transactions/logic/mocktracer/scenarios.go

diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go
index 0a8e013da8..ac2bcab7d7 100644
--- a/data/transactions/logic/debugger.go
+++ b/data/transactions/logic/debugger.go
@@ -73,7 +73,7 @@ func (a *debuggerEvalTracerAdaptor) BeforeTxnGroup(ep *EvalParams) {
 }
 
 // AfterTxnGroup updates inner txn depth
-func (a *debuggerEvalTracerAdaptor) AfterTxnGroup(ep *EvalParams) {
+func (a *debuggerEvalTracerAdaptor) AfterTxnGroup(ep *EvalParams, evalError error) {
 	a.txnDepth--
 }
 
diff --git a/data/transactions/logic/debugger_eval_test.go b/data/transactions/logic/debugger_eval_test.go
new file mode 100644
index 0000000000..55d2c9f766
--- /dev/null
+++ b/data/transactions/logic/debugger_eval_test.go
@@ -0,0 +1,303 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic_test
+
+import (
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/algorand/go-algorand/data/basics"
+	. "github.com/algorand/go-algorand/data/transactions/logic"
+	"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
+	"github.com/algorand/go-algorand/test/partitiontest"
+	"github.com/stretchr/testify/require"
+)
+
+const debuggerTestProgramApprove string = `intcblock 0 1 1 1 1 5 100
+bytecblock 0x414c474f 0x1337 0x2001 0xdeadbeef 0x70077007
+bytec 0
+sha256
+keccak256
+sha512_256
+len
+intc_0
++
+intc_1
+-
+intc_2
+/
+intc_3
+*
+intc 4
+<
+intc_1
+>
+intc_1
+<=
+intc_1
+>=
+intc_1
+&&
+intc_1
+||
+bytec_1
+bytec_2
+!=
+bytec_3
+bytec 4
+!=
+&&
+&&
+`
+const debuggerTestProgramReject string = debuggerTestProgramApprove + "!"
+const debuggerTestProgramError string = debuggerTestProgramApprove + "err"
+const debuggerTestProgramPanic string = debuggerTestProgramApprove + "panic"
+
+func TestWebDebuggerManual(t *testing.T) { //nolint:paralleltest // Manual test
+	partitiontest.PartitionTest(t)
+
+	debugURL := os.Getenv("TEAL_DEBUGGER_URL")
+	if len(debugURL) == 0 {
+		t.Skip("this must be run manually")
+	}
+
+	ep, tx, _ := MakeSampleEnv()
+	ep.TxnGroup[0].Lsig.Args = [][]byte{
+		tx.Sender[:],
+		tx.Receiver[:],
+		tx.CloseRemainderTo[:],
+		tx.VotePK[:],
+		tx.SelectionPK[:],
+		tx.Note,
+	}
+	ep.Tracer = MakeEvalTracerDebuggerAdaptor(&WebDebugger{URL: debugURL})
+	TestLogic(t, debuggerTestProgramApprove, AssemblerMaxVersion, ep)
+}
+
+type testDebugger struct {
+	register int
+	update   int
+	complete int
+	state    *DebugState
+}
+
+func (d *testDebugger) Register(state *DebugState) {
+	d.register++
+	d.state = state
+}
+
+func (d *testDebugger) Update(state *DebugState) {
+	d.update++
+	d.state = state
+}
+
+func (d *testDebugger) Complete(state *DebugState) {
+	d.complete++
+	d.state = state
+}
+
+var debuggerTestCases = []struct {
+	name             string
+	program          string
+	evalProblems     []string
+	expectedRegister int
+	expectedUpdate   int
+	expectedComplete int
+	expectedStack    []basics.TealValue
+}{
+	{
+		name:             "approve",
+		program:          debuggerTestProgramApprove,
+		expectedRegister: 1,
+		expectedUpdate:   35,
+		expectedComplete: 1,
+		expectedStack: []basics.TealValue{
+			{
+				Type: basics.TealUintType,
+				Uint: 1,
+			},
+		},
+	},
+	{
+		name:             "reject",
+		program:          debuggerTestProgramReject,
+		evalProblems:     []string{"REJECT"},
+		expectedRegister: 1,
+		expectedUpdate:   36,
+		expectedComplete: 1,
+		expectedStack: []basics.TealValue{
+			{
+				Type: basics.TealUintType,
+				Uint: 0,
+			},
+		},
+	},
+	{
+		name:             "error",
+		program:          debuggerTestProgramError,
+		evalProblems:     []string{"err opcode executed"},
+		expectedRegister: 1,
+		expectedUpdate:   36,
+		expectedComplete: 1,
+		expectedStack: []basics.TealValue{
+			{
+				Type: basics.TealUintType,
+				Uint: 1,
+			},
+		},
+	},
+}
+
+func TestDebuggerLogicSigEval(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+	for _, testCase := range debuggerTestCases {
+		testCase := testCase
+		t.Run(testCase.name, func(t *testing.T) {
+			t.Parallel()
+			testDbg := testDebugger{}
+			ep := DefaultEvalParams()
+			ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg)
+			TestLogic(t, testCase.program, AssemblerMaxVersion, ep, testCase.evalProblems...)
+
+			require.Equal(t, testCase.expectedRegister, testDbg.register)
+			require.Equal(t, testCase.expectedComplete, testDbg.complete)
+			require.Equal(t, testCase.expectedUpdate, testDbg.update)
+			require.Equal(t, testCase.expectedStack, testDbg.state.Stack)
+		})
+	}
+}
+
+func TestDebuggerTopLeveLAppEval(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+	for _, testCase := range debuggerTestCases {
+		testCase := testCase
+		t.Run(testCase.name, func(t *testing.T) {
+			t.Parallel()
+			testDbg := testDebugger{}
+			ep := DefaultEvalParams()
+			ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg)
+			TestApp(t, testCase.program, ep, testCase.evalProblems...)
+
+			require.Equal(t, testCase.expectedRegister, testDbg.register)
+			require.Equal(t, testCase.expectedComplete, testDbg.complete)
+			require.Equal(t, testCase.expectedUpdate, testDbg.update)
+			require.Equal(t, testCase.expectedStack, testDbg.state.Stack)
+		})
+	}
+}
+
+func TestDebuggerInnerAppEval(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+	scenarios := mocktracer.GetTestScenarios()
+	for scenarioName, makeScenario := range scenarios {
+		scenarioName := scenarioName
+		makeScenario := makeScenario
+		t.Run(scenarioName, func(t *testing.T) {
+			t.Parallel()
+			testDbg := testDebugger{}
+			ep, tx, ledger := MakeSampleEnv()
+
+			// Establish 888 as the app id, and fund it.
+			ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+			ledger.NewAccount(basics.AppIndex(888).Address(), 200_000)
+
+			scenario := makeScenario(mocktracer.TestScenarioInfo{
+				CallingTxn:   *tx,
+				CreatedAppID: basics.AppIndex(888),
+			})
+
+			var evalProblems []string
+			switch scenario.Outcome {
+			case mocktracer.RejectionOutcome:
+				evalProblems = []string{"REJECT"}
+			case mocktracer.ErrorOutcome:
+				if scenario.ExpectedError == "overspend" {
+					// the logic test ledger uses this error instead
+					evalProblems = []string{"insufficient balance"}
+				} else {
+					evalProblems = []string{scenario.ExpectedError}
+				}
+			}
+
+			ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg)
+			ops := TestProg(t, scenario.Program, AssemblerNoVersion)
+			TestAppBytes(t, ops.Program, ep, evalProblems...)
+
+			require.Equal(t, 1, testDbg.register)
+			require.Equal(t, 1, testDbg.complete)
+
+			var expectedUpdateCount int
+			expectedStack := []basics.TealValue{}
+			switch {
+			case scenarioName == "none":
+				expectedUpdateCount = 26
+				expectedStack = []basics.TealValue{{Type: basics.TealUintType, Uint: 1}}
+			case strings.HasPrefix(scenarioName, "before inners"):
+				expectedUpdateCount = 2
+				expectedStack = []basics.TealValue{{Type: basics.TealUintType}}
+			case strings.HasPrefix(scenarioName, "first inner"):
+				expectedUpdateCount = 10
+			case strings.HasPrefix(scenarioName, "between inners"):
+				expectedUpdateCount = 12
+				expectedStack = []basics.TealValue{{Type: basics.TealUintType}}
+			case scenarioName == "second inner":
+				expectedUpdateCount = 25
+			case scenarioName == "third inner":
+				expectedUpdateCount = 25
+			case strings.HasPrefix(scenarioName, "after inners"):
+				expectedUpdateCount = 26
+				if scenario.Outcome == mocktracer.RejectionOutcome {
+					expectedStack = []basics.TealValue{{Type: basics.TealUintType}}
+				}
+			}
+
+			require.Equal(t, expectedUpdateCount, testDbg.update)
+			require.Equal(t, expectedStack, testDbg.state.Stack)
+		})
+	}
+}
+
+func TestCallStackUpdate(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	expectedCallFrames := []CallFrame{
+		{
+			FrameLine: 2,
+			LabelName: "label1",
+		},
+		{
+			FrameLine: 5,
+			LabelName: "label2",
+		},
+	}
+
+	testDbg := testDebugger{}
+	ep := DefaultEvalParams()
+	ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg)
+	TestLogic(t, TestCallStackProgram, AssemblerMaxVersion, ep)
+
+	require.Equal(t, 1, testDbg.register)
+	require.Equal(t, 1, testDbg.complete)
+	require.Greater(t, testDbg.update, 1)
+	require.Len(t, testDbg.state.Stack, 1)
+	require.Equal(t, testDbg.state.CallStack, expectedCallFrames)
+}
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index 26bd940b79..1303e57131 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -18,7 +18,6 @@ package logic
 
 import (
 	"encoding/base64"
-	"os"
 	"testing"
 
 	"github.com/algorand/go-algorand/data/basics"
@@ -26,135 +25,6 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-const debuggerTestProgram string = `intcblock 0 1 1 1 1 5 100
-bytecblock 0x414c474f 0x1337 0x2001 0xdeadbeef 0x70077007
-bytec 0
-sha256
-keccak256
-sha512_256
-len
-intc_0
-+
-intc_1
--
-intc_2
-/
-intc_3
-*
-intc 4
-<
-intc_1
->
-intc_1
-<=
-intc_1
->=
-intc_1
-&&
-intc_1
-||
-bytec_1
-bytec_2
-!=
-bytec_3
-bytec 4
-!=
-&&
-&&
-`
-
-func TestWebDebuggerManual(t *testing.T) { //nolint:paralleltest // Manual test
-	partitiontest.PartitionTest(t)
-
-	debugURL := os.Getenv("TEAL_DEBUGGER_URL")
-	if len(debugURL) == 0 {
-		t.Skip("this must be run manually")
-	}
-
-	ep, tx, _ := makeSampleEnv()
-	ep.TxnGroup[0].Lsig.Args = [][]byte{
-		tx.Sender[:],
-		tx.Receiver[:],
-		tx.CloseRemainderTo[:],
-		tx.VotePK[:],
-		tx.SelectionPK[:],
-		tx.Note,
-	}
-	ep.Tracer = MakeEvalTracerDebuggerAdaptor(&WebDebugger{URL: debugURL})
-	testLogic(t, debuggerTestProgram, AssemblerMaxVersion, ep)
-}
-
-type testDebugger struct {
-	register int
-	update   int
-	complete int
-	state    *DebugState
-}
-
-func (d *testDebugger) Register(state *DebugState) {
-	d.register++
-	d.state = state
-}
-
-func (d *testDebugger) Update(state *DebugState) {
-	d.update++
-	d.state = state
-}
-
-func (d *testDebugger) Complete(state *DebugState) {
-	d.complete++
-	d.state = state
-}
-
-func TestDebuggerProgramEval(t *testing.T) {
-	partitiontest.PartitionTest(t)
-	t.Parallel()
-
- t.Run("logicsig", func(t *testing.T) { - t.Parallel() - testDbg := testDebugger{} - ep := defaultEvalParams() - ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg) - testLogic(t, debuggerTestProgram, AssemblerMaxVersion, ep) - - require.Equal(t, 1, testDbg.register) - require.Equal(t, 1, testDbg.complete) - require.Equal(t, 35, testDbg.update) - require.Len(t, testDbg.state.Stack, 1) - }) - - t.Run("simple app", func(t *testing.T) { - t.Parallel() - testDbg := testDebugger{} - ep := defaultEvalParams() - ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg) - testApp(t, debuggerTestProgram, ep) - - require.Equal(t, 1, testDbg.register) - require.Equal(t, 1, testDbg.complete) - require.Equal(t, 35, testDbg.update) - require.Len(t, testDbg.state.Stack, 1) - }) - - t.Run("app with inner txns", func(t *testing.T) { - t.Parallel() - testDbg := testDebugger{} - ep, tx, ledger := MakeSampleEnv() - - // Establish 888 as the app id, and fund it. - ledger.NewApp(tx.Receiver, 888, basics.AppParams{}) - ledger.NewAccount(basics.AppIndex(888).Address(), 200000) - - ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg) - testApp(t, innerTxnTestProgram, ep) - - require.Equal(t, 1, testDbg.register) - require.Equal(t, 1, testDbg.complete) - require.Equal(t, 27, testDbg.update) - require.Len(t, testDbg.state.Stack, 1) - }) -} - func TestLineToPC(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -209,7 +79,7 @@ func TestValueDeltaToValueDelta(t *testing.T) { require.Equal(t, vDelta.Uint, ans.Uint) } -var testCallStackProgram string = `intcblock 1 +const testCallStackProgram string = `intcblock 1 callsub label1 intc_0 label1: @@ -242,30 +112,3 @@ func TestParseCallstack(t *testing.T) { cfs := dState.parseCallstack(callstack) require.Equal(t, expectedCallFrames, cfs) } - -func TestCallStackUpdate(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - expectedCallFrames := []CallFrame{ - { - FrameLine: 2, - LabelName: "label1", - }, - { - FrameLine: 5, - LabelName: "label2", - }, - } - - testDbg := testDebugger{} - ep := defaultEvalParams() - ep.Tracer = MakeEvalTracerDebuggerAdaptor(&testDbg) - testLogic(t, testCallStackProgram, AssemblerMaxVersion, ep) - - require.Equal(t, 1, testDbg.register) - require.Equal(t, 1, testDbg.complete) - require.Greater(t, testDbg.update, 1) - require.Len(t, testDbg.state.Stack, 1) - require.Equal(t, testDbg.state.CallStack, expectedCallFrames) -} diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 158ca6e08b..2440148735 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -841,9 +841,23 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) { if cx.Tracer != nil { cx.Tracer.BeforeProgram(cx) + defer func() { + x := recover() + tracerErr := err + if x != nil { + // A panic error occurred during the eval loop. Report it now. + tracerErr = fmt.Errorf("panic in TEAL Eval: %v", x) + cx.Tracer.AfterOpcode(cx, tracerErr) + } + // Ensure we update the tracer before exiting - cx.Tracer.AfterProgram(cx, err) + cx.Tracer.AfterProgram(cx, tracerErr) + + if x != nil { + // Panic again to trigger higher-level recovery and error reporting + panic(x) + } }() } @@ -5029,7 +5043,7 @@ func opItxnField(cx *EvalContext) error { return err } -func opItxnSubmit(cx *EvalContext) error { +func opItxnSubmit(cx *EvalContext) (err error) { // Should rarely trigger, since itxn_next checks these too. 
(but that check // must be imperfect, see its comment) In contrast to that check, subtxns is // already populated here. @@ -5177,6 +5191,10 @@ func opItxnSubmit(cx *EvalContext) error { if ep.Tracer != nil { ep.Tracer.BeforeTxnGroup(ep) + // Ensure we update the tracer before exiting + defer func() { + ep.Tracer.AfterTxnGroup(ep, err) + }() } for i := range ep.TxnGroup { @@ -5185,26 +5203,24 @@ func opItxnSubmit(cx *EvalContext) error { } err := cx.Ledger.Perform(i, ep) + + if ep.Tracer != nil { + ep.Tracer.AfterTxn(ep, i, ep.TxnGroup[i].ApplyData, err) + } + if err != nil { return err } + // This is mostly a no-op, because Perform does its work "in-place", but // RecordAD has some further responsibilities. ep.RecordAD(i, ep.TxnGroup[i].ApplyData) - - if ep.Tracer != nil { - ep.Tracer.AfterTxn(ep, i, ep.TxnGroup[i].ApplyData) - } } cx.txn.EvalDelta.InnerTxns = append(cx.txn.EvalDelta.InnerTxns, ep.TxnGroup...) cx.subtxns = nil // must clear the inner txid cache, otherwise prior inner txids will be returned for this group cx.innerTxidCache = nil - if ep.Tracer != nil { - ep.Tracer.AfterTxnGroup(ep) - } - return nil } diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 6731c19824..016940605e 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -22,6 +22,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math" "strconv" "strings" "testing" @@ -34,7 +35,6 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" - "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -3134,62 +3134,108 @@ func checkPanic(cx *EvalContext) error { panic(panicString) } -func TestPanic(t *testing.T) { +// withPanicOpcode temporarily modifies the opsByOpcode array to include an additional panic opcode. +// This opcode will be named "panic". +// +// WARNING: do not call this in a parallel test, since it's not safe for concurrent use. +func withPanicOpcode(t *testing.T, version uint64, panicDuringCheck bool, f func(opcode byte)) { + t.Helper() + const name = "panic" + + var foundEmptySpace bool + var hackedOpcode byte + var oldSpec OpSpec + // Find an unused opcode to temporarily convert to a panicing opcode, + // and append it to program. 
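+	// opsByOpcode[version] is the opcode dispatch table for this TEAL version; an
+	// entry whose op implementation is nil is an unassigned opcode that can be
+	// borrowed. The saved spec is restored (and the temporary OpsByName entry
+	// deleted) in the deferred cleanup below. A caller looks roughly like:
+	//
+	//	withPanicOpcode(t, v, true, func(opcode byte) {
+	//		ops := testProg(t, `int 1`, v)
+	//		ops.Program = append(ops.Program, opcode)
+	//		// ... evaluate ops.Program and assert that a PanicError surfaces ...
+	//	})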
+ for opcode, spec := range opsByOpcode[version] { + if spec.op == nil { + foundEmptySpace = true + require.LessOrEqual(t, opcode, math.MaxUint8) + hackedOpcode = byte(opcode) + oldSpec = spec + + details := detDefault() + if panicDuringCheck { + details.check = checkPanic + } + panicSpec := OpSpec{ + Opcode: hackedOpcode, + Name: name, + op: opPanic, + OpDetails: details, + } + + opsByOpcode[version][opcode] = panicSpec + OpsByName[version][name] = panicSpec + break + } + } + require.True(t, foundEmptySpace, "could not find an empty space for the panic opcode") + defer func() { + opsByOpcode[version][hackedOpcode] = oldSpec + delete(OpsByName[version], name) + }() + f(hackedOpcode) +} + +func TestPanic(t *testing.T) { //nolint:paralleltest // Uses withPanicOpcode partitiontest.PartitionTest(t) - t.Parallel() - log := logging.TestingLog(t) for v := uint64(1); v <= AssemblerMaxVersion; v++ { v := v - t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { - t.Parallel() - ops := testProg(t, `int 1`, v) - var hackedOpcode int - var oldSpec OpSpec - // Find an unused opcode to temporarily convert to a panicing opcde, - // and append it to program. - for opcode, spec := range opsByOpcode[v] { - if spec.op == nil { - hackedOpcode = opcode - oldSpec = spec - opsByOpcode[v][opcode].op = opPanic - opsByOpcode[v][opcode].Modes = modeAny - opsByOpcode[v][opcode].OpDetails.FullCost.baseCost = 1 - opsByOpcode[v][opcode].OpDetails.check = checkPanic - ops.Program = append(ops.Program, byte(opcode)) - break + t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { //nolint:paralleltest // Uses withPanicOpcode + withPanicOpcode(t, v, true, func(opcode byte) { + ops := testProg(t, `int 1`, v) + ops.Program = append(ops.Program, opcode) + + params := defaultEvalParams() + params.TxnGroup[0].Lsig.Logic = ops.Program + err := CheckSignature(0, params) + require.Error(t, err) + if pe, ok := err.(PanicError); ok { + require.Equal(t, panicString, pe.PanicValue) + pes := pe.Error() + require.True(t, strings.Contains(pes, "panic")) + } else { + t.Errorf("expected PanicError object but got %T %#v", err, err) } - } - params := defaultEvalParams() - params.logger = log - params.TxnGroup[0].Lsig.Logic = ops.Program - err := CheckSignature(0, params) - require.Error(t, err) - if pe, ok := err.(PanicError); ok { - require.Equal(t, panicString, pe.PanicValue) - pes := pe.Error() - require.True(t, strings.Contains(pes, "panic")) - } else { - t.Errorf("expected PanicError object but got %T %#v", err, err) - } - var txn transactions.SignedTxn - txn.Lsig.Logic = ops.Program - params = defaultEvalParams(txn) - params.logger = log - pass, err := EvalSignature(0, params) - if pass { - t.Log(hex.EncodeToString(ops.Program)) - t.Log(params.Trace.String()) - } - require.False(t, pass) - if pe, ok := err.(PanicError); ok { - require.Equal(t, panicString, pe.PanicValue) - pes := pe.Error() - require.True(t, strings.Contains(pes, "panic")) - } else { - t.Errorf("expected PanicError object but got %T %#v", err, err) - } - opsByOpcode[v][hackedOpcode] = oldSpec + + var txn transactions.SignedTxn + txn.Lsig.Logic = ops.Program + params = defaultEvalParams(txn) + pass, err := EvalSignature(0, params) + if pass { + t.Log(hex.EncodeToString(ops.Program)) + t.Log(params.Trace.String()) + } + require.False(t, pass) + if pe, ok := err.(PanicError); ok { + require.Equal(t, panicString, pe.PanicValue) + pes := pe.Error() + require.True(t, strings.Contains(pes, "panic")) + } else { + t.Errorf("expected PanicError object but got %T %#v", err, err) + } + + 
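+				// For versions that support applications, also confirm that
+				// EvalApp surfaces the same PanicError rather than swallowing
+				// the panic.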
if v >= appsEnabledVersion { + txn = transactions.SignedTxn{ + Txn: transactions.Transaction{ + Type: protocol.ApplicationCallTx, + }, + } + params = defaultEvalParams(txn) + params.Ledger = NewLedger(nil) + pass, err = EvalApp(ops.Program, 0, 1, params) + require.False(t, pass) + if pe, ok := err.(PanicError); ok { + require.Equal(t, panicString, pe.PanicValue) + pes := pe.Error() + require.True(t, strings.Contains(pes, "panic")) + } else { + t.Errorf("expected PanicError object but got %T %#v", err, err) + } + } + }) }) } } diff --git a/data/transactions/logic/export_test.go b/data/transactions/logic/export_test.go index de151bfd9f..6ee072aff5 100644 --- a/data/transactions/logic/export_test.go +++ b/data/transactions/logic/export_test.go @@ -44,6 +44,7 @@ func (l *Ledger) DelBoxes(app basics.AppIndex, names ...string) { } } +var DefaultEvalParams = defaultEvalParams var MakeSampleEnv = makeSampleEnv var MakeSampleEnvWithVersion = makeSampleEnvWithVersion var MakeSampleTxn = makeSampleTxn @@ -51,9 +52,14 @@ var MakeSampleTxnGroup = makeSampleTxnGroup var MakeTestProto = makeTestProto var MakeTestProtoV = makeTestProtoV var NoTrack = notrack +var TestLogic = testLogic var TestApp = testApp var TestAppBytes = testAppBytes var TestApps = testApps var TestProg = testProg +var WithPanicOpcode = withPanicOpcode const CreatedResourcesVersion = createdResourcesVersion +const AssemblerNoVersion = assemblerNoVersion +const FirstTestID = firstTestID +const TestCallStackProgram = testCallStackProgram diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go index f56f349f1f..ecda755627 100644 --- a/data/transactions/logic/ledger_test.go +++ b/data/transactions/logic/ledger_test.go @@ -826,9 +826,11 @@ func (l *Ledger) appl(from basics.Address, appl transactions.ApplicationCallTxnF } pass, cx, err := EvalContract(params.ApprovalProgram, gi, aid, ep) if err != nil { + ad.EvalDelta = transactions.EvalDelta{} return err } if !pass { + ad.EvalDelta = transactions.EvalDelta{} return errors.New("Approval program failed") } ad.EvalDelta = cx.txn.EvalDelta diff --git a/data/transactions/logic/mocktracer/scenarios.go b/data/transactions/logic/mocktracer/scenarios.go new file mode 100644 index 0000000000..df540efeb7 --- /dev/null +++ b/data/transactions/logic/mocktracer/scenarios.go @@ -0,0 +1,515 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package mocktracer + +import ( + "fmt" + "math" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/data/txntest" + "github.com/algorand/go-algorand/protocol" +) + +const programTemplate string = `#pragma version 6 +%s + +itxn_begin +pushint 6 // appl +itxn_field TypeEnum +pushint 0 // NoOp +itxn_field OnCompletion +pushbytes %s +itxn_field ApprovalProgram +pushbytes 0x068101 // #pragma version 6; int 1; +itxn_field ClearStateProgram +itxn_submit + +%s + +itxn_begin +pushint 1 // pay +itxn_field TypeEnum +pushint %d +itxn_field Amount +global CurrentApplicationAddress +itxn_field Receiver +itxn_next +pushint 1 // pay +itxn_field TypeEnum +pushint %d +itxn_field Amount +global CurrentApplicationAddress +itxn_field Receiver +itxn_submit + +%s` + +// TestScenarioInfo holds arguments used to call a TestScenarioGenerator +type TestScenarioInfo struct { + CallingTxn transactions.Transaction + MinFee basics.MicroAlgos + CreatedAppID basics.AppIndex +} + +func expectedApplyData(info TestScenarioInfo) transactions.ApplyData { + expectedInnerAppCall := txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: info.CreatedAppID.Address(), + ApprovalProgram: `#pragma version 6 +int 1`, + ClearStateProgram: `#pragma version 6 +int 1`, + + FirstValid: info.CallingTxn.FirstValid, + LastValid: info.CallingTxn.LastValid, + Fee: info.MinFee, + } + expectedInnerAppCallAD := transactions.ApplyData{ + ApplicationID: info.CreatedAppID + 1, + EvalDelta: transactions.EvalDelta{ + GlobalDelta: basics.StateDelta{}, + LocalDeltas: map[uint64]basics.StateDelta{}, + }, + } + expectedInnerPay1 := txntest.Txn{ + Type: protocol.PaymentTx, + Sender: info.CreatedAppID.Address(), + Receiver: info.CreatedAppID.Address(), + Amount: 1, + + FirstValid: info.CallingTxn.FirstValid, + LastValid: info.CallingTxn.LastValid, + Fee: info.MinFee, + } + expectedInnerPay1AD := transactions.ApplyData{} + expectedInnerPay2 := txntest.Txn{ + Type: protocol.PaymentTx, + Sender: info.CreatedAppID.Address(), + Receiver: info.CreatedAppID.Address(), + Amount: 2, + + FirstValid: info.CallingTxn.FirstValid, + LastValid: info.CallingTxn.LastValid, + Fee: info.MinFee, + } + expectedInnerPay2AD := transactions.ApplyData{} + return transactions.ApplyData{ + ApplicationID: info.CreatedAppID, + EvalDelta: transactions.EvalDelta{ + GlobalDelta: basics.StateDelta{}, + LocalDeltas: map[uint64]basics.StateDelta{}, + InnerTxns: []transactions.SignedTxnWithAD{ + { + SignedTxn: expectedInnerAppCall.SignedTxn(), + ApplyData: expectedInnerAppCallAD, + }, + { + SignedTxn: expectedInnerPay1.SignedTxn(), + ApplyData: expectedInnerPay1AD, + }, + { + SignedTxn: expectedInnerPay2.SignedTxn(), + ApplyData: expectedInnerPay2AD, + }, + }, + }, + } +} + +// TestScenarioOutcome represents an outcome of a TestScenario +type TestScenarioOutcome int + +const ( + // ApprovalOutcome indicates the scenario should approve the program + ApprovalOutcome TestScenarioOutcome = iota + // RejectionOutcome indicates the scenario should reject the program + RejectionOutcome + // ErrorOutcome indicates the scenario should error during the program + ErrorOutcome +) + +// TestScenario represents a testing scenario. See GetTestScenarios for more details. 
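+// A scenario carries the TEAL program to run, the outcome (approval, rejection, or
+// runtime error) that run should produce, the error message expected on failure, and
+// the exact sequence of tracer events the execution should emit.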
+type TestScenario struct { + Outcome TestScenarioOutcome + Program string + ExpectedError string + ExpectedEvents []Event +} + +// TestScenarioGenerator is a function which instantiates a TestScenario +type TestScenarioGenerator func(info TestScenarioInfo) TestScenario + +// GetTestScenarios returns scenarios for testing code that invokes a logic.EvalTracer. These +// scenarios are all app calls which invoke inner transactions under various failure conditions. +// The scenarios follow this format: +// +// 1. An app call transaction that spawns inners. They are: +// a. A basic app call transaction +// b. A payment transaction [grouped with c] +// c. A payment transaction [grouped with b] +// +// The scenarios differ by where they fail when attempting to execute that app call. Failures are +// possible during each inner transaction, as well as before all inners, between the two inner +// groups, and after all inners. For app call failures, there are scenarios for both rejection and +// runtime errors, which should invoke tracer hooks slightly differently. +func GetTestScenarios() map[string]TestScenarioGenerator { + noFailureName := "none" + noFailure := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + program := fmt.Sprintf(programTemplate, "", "0x068101", "", 1, 2, "pushint 1") + return TestScenario{ + Outcome: ApprovalOutcome, + Program: program, + ExpectedError: "", // no error + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(9, false), + { + BeforeOpcode(), + BeforeTxnGroup(1), // start first itxn group + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(1, false), + { + AfterProgram(logic.ModeApp, false), + AfterTxn(protocol.ApplicationCallTx, expectedAD.EvalDelta.InnerTxns[0].ApplyData, false), + AfterTxnGroup(1, false), // end first itxn group + AfterOpcode(false), + }, + OpcodeEvents(14, false), + { + BeforeOpcode(), + BeforeTxnGroup(2), // start second itxn group + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedAD.EvalDelta.InnerTxns[1].ApplyData, false), + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedAD.EvalDelta.InnerTxns[2].ApplyData, false), + AfterTxnGroup(2, false), // end second itxn group + AfterOpcode(false), + }, + OpcodeEvents(1, false), + { + AfterProgram(logic.ModeApp, false), + AfterTxn(protocol.ApplicationCallTx, expectedAD, false), + }, + }), + } + } + + scenarios := map[string]TestScenarioGenerator{ + noFailureName: noFailure, + } + + for _, shouldError := range []bool{true, false} { + shouldError := shouldError + failureOps := "pushint 0\nreturn" + singleFailureOp := "pushint 0" + failureInnerProgram := "0x068100" + failureMessage := "transaction rejected by ApprovalProgram" + outcome := RejectionOutcome + if shouldError { + // We could use just the err opcode here, but we want to use two opcodes to maintain + // trace event consistency with rejections. 
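+			// (Both variants execute exactly two opcodes, keeping the number of
+			// Before/AfterOpcode events identical across rejection and error runs.)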
+ failureOps = "pushint 0\nerr" + singleFailureOp = "err" + failureInnerProgram = "0x0600" + failureMessage = "err opcode executed" + outcome = ErrorOutcome + } + + beforeInnersName := fmt.Sprintf("before inners,error=%t", shouldError) + beforeInners := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + program := fmt.Sprintf(programTemplate, failureOps, "0x068101", "", 1, 2, "pushint 1") + // EvalDeltas are removed from failed app call transactions + expectedAD.EvalDelta = transactions.EvalDelta{} + return TestScenario{ + Outcome: outcome, + Program: program, + ExpectedError: failureMessage, + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(2, shouldError), + { + AfterProgram(logic.ModeApp, shouldError), + AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + }, + }), + } + } + scenarios[beforeInnersName] = beforeInners + + firstInnerName := fmt.Sprintf("first inner,error=%t", shouldError) + firstInner := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + // EvalDeltas are removed from failed app call transactions + expectedInnerAppCallADNoEvalDelta := expectedAD.EvalDelta.InnerTxns[0].ApplyData + expectedInnerAppCallADNoEvalDelta.EvalDelta = transactions.EvalDelta{} + expectedAD.EvalDelta = transactions.EvalDelta{} + program := fmt.Sprintf(programTemplate, "", failureInnerProgram, "", 1, 2, "pushint 1") + return TestScenario{ + Outcome: outcome, + Program: program, + ExpectedError: failureMessage, + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(9, false), + { + BeforeOpcode(), + BeforeTxnGroup(1), // start first itxn group + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(1, shouldError), + { + AfterProgram(logic.ModeApp, shouldError), + AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallADNoEvalDelta, true), + AfterTxnGroup(1, true), // end first itxn group + AfterOpcode(true), + AfterProgram(logic.ModeApp, true), + AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + }, + }), + } + } + scenarios[firstInnerName] = firstInner + + betweenInnersName := fmt.Sprintf("between inners,error=%t", shouldError) + betweenInners := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData + // EvalDeltas are removed from failed app call transactions + expectedAD.EvalDelta = transactions.EvalDelta{} + program := fmt.Sprintf(programTemplate, "", "0x068101", failureOps, 1, 2, "pushint 1") + return TestScenario{ + Outcome: outcome, + Program: program, + ExpectedError: failureMessage, + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(9, false), + { + BeforeOpcode(), + BeforeTxnGroup(1), // start first itxn group + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(1, false), + { + AfterProgram(logic.ModeApp, false), + AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), + AfterTxnGroup(1, false), // end first itxn group + AfterOpcode(false), + }, + OpcodeEvents(2, shouldError), + { + AfterProgram(logic.ModeApp, shouldError), + AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + }, + }), + } + } + scenarios[betweenInnersName] = betweenInners + + if 
shouldError { + secondInnerName := "second inner" + secondInner := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData + expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData + // EvalDeltas are removed from failed app call transactions + expectedAD.EvalDelta = transactions.EvalDelta{} + program := fmt.Sprintf(programTemplate, "", "0x068101", "", uint64(math.MaxUint64), 2, "pushint 1") + return TestScenario{ + Outcome: ErrorOutcome, + Program: program, + ExpectedError: "overspend", + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(9, false), + { + BeforeOpcode(), + BeforeTxnGroup(1), // start first itxn group + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(1, false), + { + AfterProgram(logic.ModeApp, false), + AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), + AfterTxnGroup(1, false), // end first itxn group + AfterOpcode(false), + }, + OpcodeEvents(14, false), + { + BeforeOpcode(), + BeforeTxnGroup(2), // start second itxn group + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedInnerPay1AD, true), + AfterTxnGroup(2, true), // end second itxn group + AfterOpcode(true), + AfterProgram(logic.ModeApp, true), + AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + }, + }), + } + } + scenarios[secondInnerName] = secondInner + + thirdInnerName := "third inner" + thirdInner := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData + expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData + expectedInnerPay2AD := expectedAD.EvalDelta.InnerTxns[2].ApplyData + // EvalDeltas are removed from failed app call transactions + expectedAD.EvalDelta = transactions.EvalDelta{} + program := fmt.Sprintf(programTemplate, "", "0x068101", "", 1, uint64(math.MaxUint64), "pushint 1") + return TestScenario{ + Outcome: ErrorOutcome, + Program: program, + ExpectedError: "overspend", + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(9, false), + { + BeforeOpcode(), + BeforeTxnGroup(1), // start first itxn group + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(1, false), + { + AfterProgram(logic.ModeApp, false), + AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), + AfterTxnGroup(1, false), // end first itxn group + AfterOpcode(false), + }, + OpcodeEvents(14, false), + { + BeforeOpcode(), + BeforeTxnGroup(2), // start second itxn group + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedInnerPay1AD, false), + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedInnerPay2AD, true), + AfterTxnGroup(2, true), // end second itxn group + AfterOpcode(true), + AfterProgram(logic.ModeApp, true), + AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + }, + }), + } + } + scenarios[thirdInnerName] = thirdInner + } + + afterInnersName := fmt.Sprintf("after inners,error=%t", shouldError) + afterInners := func(info TestScenarioInfo) TestScenario { + expectedAD := expectedApplyData(info) + expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData + expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData + 
expectedInnerPay2AD := expectedAD.EvalDelta.InnerTxns[2].ApplyData + // EvalDeltas are removed from failed app call transactions + expectedAD.EvalDelta = transactions.EvalDelta{} + program := fmt.Sprintf(programTemplate, "", "0x068101", "", 1, 2, singleFailureOp) + return TestScenario{ + Outcome: outcome, + Program: program, + ExpectedError: failureMessage, + ExpectedEvents: FlattenEvents([][]Event{ + { + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(9, false), + { + BeforeOpcode(), + BeforeTxnGroup(1), // start first itxn group + BeforeTxn(protocol.ApplicationCallTx), + BeforeProgram(logic.ModeApp), + }, + OpcodeEvents(1, false), + { + AfterProgram(logic.ModeApp, false), + AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), + AfterTxnGroup(1, false), // end first itxn group + AfterOpcode(false), + }, + OpcodeEvents(14, false), + { + BeforeOpcode(), + BeforeTxnGroup(2), // start second itxn group + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedInnerPay1AD, false), + BeforeTxn(protocol.PaymentTx), + AfterTxn(protocol.PaymentTx, expectedInnerPay2AD, false), + AfterTxnGroup(2, false), // end second itxn group + AfterOpcode(false), + }, + OpcodeEvents(1, shouldError), + { + AfterProgram(logic.ModeApp, shouldError), + AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + }, + }), + } + } + scenarios[afterInnersName] = afterInners + } + + return scenarios +} + +func stripInnerTxnGroupIDs(ad *transactions.ApplyData) { + for i := range ad.EvalDelta.InnerTxns { + ad.EvalDelta.InnerTxns[i].Txn.Group = crypto.Digest{} + stripInnerTxnGroupIDs(&ad.EvalDelta.InnerTxns[i].ApplyData) + } +} + +// StripInnerTxnGroupIDsFromEvents removes any inner transaction GroupIDs that are present in the +// TxnApplyData fields of the events. 
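+// The expected events built by the scenarios cannot predict the Group IDs that
+// itxn_submit assigns to inner transaction groups at execution time, so tests strip
+// them from the recorded events before comparing.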
+func StripInnerTxnGroupIDsFromEvents(events []Event) []Event { + for i := range events { + stripInnerTxnGroupIDs(&events[i].TxnApplyData) + } + return events +} diff --git a/data/transactions/logic/mocktracer/tracer.go b/data/transactions/logic/mocktracer/tracer.go index 967798ec91..2428d022e9 100644 --- a/data/transactions/logic/mocktracer/tracer.go +++ b/data/transactions/logic/mocktracer/tracer.go @@ -59,6 +59,9 @@ type Event struct { // only for BeforeTxnGroup and AfterTxnGroup GroupSize int + + // only for AfterOpcode, AfterProgram, AfterTxn, and AfterTxnGroup + HasError bool } // BeforeTxnGroup creates a new Event with the type BeforeTxnGroupEvent @@ -67,8 +70,8 @@ func BeforeTxnGroup(groupSize int) Event { } // AfterTxnGroup creates a new Event with the type AfterTxnGroupEvent -func AfterTxnGroup(groupSize int) Event { - return Event{Type: AfterTxnGroupEvent, GroupSize: groupSize} +func AfterTxnGroup(groupSize int, hasError bool) Event { + return Event{Type: AfterTxnGroupEvent, GroupSize: groupSize, HasError: hasError} } // BeforeProgram creates a new Event with the type BeforeProgramEvent @@ -82,13 +85,13 @@ func BeforeTxn(txnType protocol.TxType) Event { } // AfterTxn creates a new Event with the type AfterTxnEvent -func AfterTxn(txnType protocol.TxType, ad transactions.ApplyData) Event { - return Event{Type: AfterTxnEvent, TxnType: txnType, TxnApplyData: ad} +func AfterTxn(txnType protocol.TxType, ad transactions.ApplyData, hasError bool) Event { + return Event{Type: AfterTxnEvent, TxnType: txnType, TxnApplyData: ad, HasError: hasError} } // AfterProgram creates a new Event with the type AfterProgramEvent -func AfterProgram(mode logic.RunMode) Event { - return Event{Type: AfterProgramEvent, LogicEvalMode: mode} +func AfterProgram(mode logic.RunMode, hasError bool) Event { + return Event{Type: AfterProgramEvent, LogicEvalMode: mode, HasError: hasError} } // BeforeOpcode creates a new Event with the type BeforeOpcodeEvent @@ -97,8 +100,30 @@ func BeforeOpcode() Event { } // AfterOpcode creates a new Event with the type AfterOpcodeEvent -func AfterOpcode() Event { - return Event{Type: AfterOpcodeEvent} +func AfterOpcode(hasError bool) Event { + return Event{Type: AfterOpcodeEvent, HasError: hasError} +} + +// OpcodeEvents returns a slice of events that represent calling `count` opcodes +func OpcodeEvents(count int, endsWithError bool) []Event { + events := make([]Event, 0, count*2) + for i := 0; i < count; i++ { + hasError := false + if endsWithError && i+1 == count { + hasError = true + } + events = append(events, BeforeOpcode(), AfterOpcode(hasError)) + } + return events +} + +// FlattenEvents flattens a slice of slices into a single slice of Events +func FlattenEvents(rows [][]Event) []Event { + var out []Event + for _, row := range rows { + out = append(out, row...) 
+ } + return out } // Tracer is a mock tracer that implements logic.EvalTracer @@ -112,8 +137,8 @@ func (d *Tracer) BeforeTxnGroup(ep *logic.EvalParams) { } // AfterTxnGroup mocks the logic.EvalTracer.AfterTxnGroup method -func (d *Tracer) AfterTxnGroup(ep *logic.EvalParams) { - d.Events = append(d.Events, AfterTxnGroup(len(ep.TxnGroup))) +func (d *Tracer) AfterTxnGroup(ep *logic.EvalParams, evalError error) { + d.Events = append(d.Events, AfterTxnGroup(len(ep.TxnGroup), evalError != nil)) } // BeforeTxn mocks the logic.EvalTracer.BeforeTxn method @@ -122,8 +147,8 @@ func (d *Tracer) BeforeTxn(ep *logic.EvalParams, groupIndex int) { } // AfterTxn mocks the logic.EvalTracer.AfterTxn method -func (d *Tracer) AfterTxn(ep *logic.EvalParams, groupIndex int, ad transactions.ApplyData) { - d.Events = append(d.Events, AfterTxn(ep.TxnGroup[groupIndex].Txn.Type, ad)) +func (d *Tracer) AfterTxn(ep *logic.EvalParams, groupIndex int, ad transactions.ApplyData, evalError error) { + d.Events = append(d.Events, AfterTxn(ep.TxnGroup[groupIndex].Txn.Type, ad, evalError != nil)) } // BeforeProgram mocks the logic.EvalTracer.BeforeProgram method @@ -133,7 +158,7 @@ func (d *Tracer) BeforeProgram(cx *logic.EvalContext) { // AfterProgram mocks the logic.EvalTracer.AfterProgram method func (d *Tracer) AfterProgram(cx *logic.EvalContext, evalError error) { - d.Events = append(d.Events, AfterProgram(cx.RunMode())) + d.Events = append(d.Events, AfterProgram(cx.RunMode(), evalError != nil)) } // BeforeOpcode mocks the logic.EvalTracer.BeforeOpcode method @@ -143,5 +168,5 @@ func (d *Tracer) BeforeOpcode(cx *logic.EvalContext) { // AfterOpcode mocks the logic.EvalTracer.AfterOpcode method func (d *Tracer) AfterOpcode(cx *logic.EvalContext, evalError error) { - d.Events = append(d.Events, AfterOpcode()) + d.Events = append(d.Events, AfterOpcode(evalError != nil)) } diff --git a/data/transactions/logic/tracer.go b/data/transactions/logic/tracer.go index 929d5cdb89..89802b1c0e 100644 --- a/data/transactions/logic/tracer.go +++ b/data/transactions/logic/tracer.go @@ -105,7 +105,7 @@ type EvalTracer interface { // AfterTxnGroup is called after a transaction group has been executed. This includes both // top-level and inner transaction groups. The argument ep is the EvalParams object for the // group; if the group is an inner group, this is the EvalParams object for the inner group. - AfterTxnGroup(ep *EvalParams) + AfterTxnGroup(ep *EvalParams, evalError error) // BeforeTxn is called before a transaction is executed. // @@ -117,7 +117,7 @@ type EvalTracer interface { // groupIndex refers to the index of the transaction in the transaction group that was just executed. // ad is the ApplyData result of the transaction; prefer using this instead of // ep.TxnGroup[groupIndex].ApplyData, since it may not be populated at this point. - AfterTxn(ep *EvalParams, groupIndex int, ad transactions.ApplyData) + AfterTxn(ep *EvalParams, groupIndex int, ad transactions.ApplyData, evalError error) // BeforeProgram is called before an app or LogicSig program is evaluated. 
BeforeProgram(cx *EvalContext) @@ -139,13 +139,14 @@ type NullEvalTracer struct{} func (n NullEvalTracer) BeforeTxnGroup(ep *EvalParams) {} // AfterTxnGroup does nothing -func (n NullEvalTracer) AfterTxnGroup(ep *EvalParams) {} +func (n NullEvalTracer) AfterTxnGroup(ep *EvalParams, evalError error) {} // BeforeTxn does nothing func (n NullEvalTracer) BeforeTxn(ep *EvalParams, groupIndex int) {} // AfterTxn does nothing -func (n NullEvalTracer) AfterTxn(ep *EvalParams, groupIndex int, ad transactions.ApplyData) {} +func (n NullEvalTracer) AfterTxn(ep *EvalParams, groupIndex int, ad transactions.ApplyData, evalError error) { +} // BeforeProgram does nothing func (n NullEvalTracer) BeforeProgram(cx *EvalContext) {} diff --git a/data/transactions/logic/tracer_test.go b/data/transactions/logic/tracer_test.go index c47f3d1927..9e5f9564b3 100644 --- a/data/transactions/logic/tracer_test.go +++ b/data/transactions/logic/tracer_test.go @@ -14,182 +14,220 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package logic +package logic_test import ( "testing" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/transactions" + . "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/data/transactions/logic/mocktracer" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) -const innerTxnTestProgram string = `itxn_begin -int appl -itxn_field TypeEnum -int NoOp -itxn_field OnCompletion -byte 0x068101 // #pragma version 6; int 1; -dup -itxn_field ApprovalProgram -itxn_field ClearStateProgram -itxn_submit - -itxn_begin -int pay -itxn_field TypeEnum -int 1 -itxn_field Amount -global CurrentApplicationAddress -itxn_field Receiver -itxn_next -int pay -itxn_field TypeEnum -int 2 -itxn_field Amount -global CurrentApplicationAddress -itxn_field Receiver -itxn_submit - -int 1 -` - -// can't use mocktracer.Tracer because the import would be circular -type testEvalTracer struct { - beforeTxnGroupCalls int - afterTxnGroupCalls int - - beforeTxnCalls int - afterTxnCalls int - - beforeProgramCalls int - afterProgramCalls int - programModes []RunMode - - beforeOpcodeCalls int - afterOpcodeCalls int +type tracerTestCase struct { + name string + program string + evalProblems []string + expectedEvents []mocktracer.Event } -func (t *testEvalTracer) BeforeTxnGroup(ep *EvalParams) { - t.beforeTxnGroupCalls++ +func getSimpleTracerTestCases(mode RunMode) []tracerTestCase { + return []tracerTestCase{ + { + name: "approve", + program: debuggerTestProgramApprove, + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(mode), + }, + mocktracer.OpcodeEvents(35, false), + { + mocktracer.AfterProgram(mode, false), + }, + }), + }, + { + name: "reject", + program: debuggerTestProgramReject, + evalProblems: []string{"REJECT"}, + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(mode), + }, + mocktracer.OpcodeEvents(36, false), + { + mocktracer.AfterProgram(mode, false), + }, + }), + }, + { + name: "error", + program: debuggerTestProgramError, + evalProblems: []string{"err opcode executed"}, + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(mode), + }, + mocktracer.OpcodeEvents(36, true), + { + mocktracer.AfterProgram(mode, true), + }, + }), + }, + } } -func (t *testEvalTracer) AfterTxnGroup(ep *EvalParams) { - t.afterTxnGroupCalls++ 
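+// errorCounter is a minimal illustrative sketch (a hypothetical example, not part of
+// this change) of a tracer built on the error-aware hooks above: embedding
+// NullEvalTracer supplies no-op defaults, so only the hooks of interest need
+// overriding.
+type errorCounter struct {
+	NullEvalTracer
+	opcodeErrors int
+}
+
+// AfterOpcode records how many opcodes ended with an evaluation error.
+func (e *errorCounter) AfterOpcode(cx *EvalContext, evalError error) {
+	if evalError != nil {
+		e.opcodeErrors++
+	}
+}
+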
+func getPanicTracerTestCase(mode RunMode) tracerTestCase { + return tracerTestCase{ + name: "panic", + program: debuggerTestProgramPanic, + evalProblems: []string{"panic"}, + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(mode), + }, + mocktracer.OpcodeEvents(36, true), + { + mocktracer.AfterProgram(mode, true), + }, + }), + } } -func (t *testEvalTracer) BeforeTxn(ep *EvalParams, groupIndex int) { - t.beforeTxnCalls++ -} - -func (t *testEvalTracer) AfterTxn(ep *EvalParams, groupIndex int, ad transactions.ApplyData) { - t.afterTxnCalls++ -} - -func (t *testEvalTracer) BeforeProgram(cx *EvalContext) { - t.beforeProgramCalls++ - t.programModes = append(t.programModes, cx.RunMode()) -} - -func (t *testEvalTracer) AfterProgram(cx *EvalContext, evalError error) { - t.afterProgramCalls++ -} - -func (t *testEvalTracer) BeforeOpcode(cx *EvalContext) { - t.beforeOpcodeCalls++ +func TestLogicSigEvalWithTracer(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + testCases := getSimpleTracerTestCases(ModeSig) + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + mock := mocktracer.Tracer{} + ep := DefaultEvalParams() + ep.Tracer = &mock + TestLogic(t, testCase.program, AssemblerMaxVersion, ep, testCase.evalProblems...) + + require.Equal(t, testCase.expectedEvents, mock.Events) + }) + } } -func (t *testEvalTracer) AfterOpcode(cx *EvalContext, evalError error) { - t.afterOpcodeCalls++ +func TestTopLevelAppEvalWithTracer(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + testCases := getSimpleTracerTestCases(ModeApp) + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + mock := mocktracer.Tracer{} + ep := DefaultEvalParams() + ep.Tracer = &mock + TestApp(t, testCase.program, ep, testCase.evalProblems...) + + require.Equal(t, testCase.expectedEvents, mock.Events) + }) + } } -func TestEvalWithTracer(t *testing.T) { +func TestInnerAppEvalWithTracer(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + scenarios := mocktracer.GetTestScenarios() + for name, makeScenario := range scenarios { + makeScenario := makeScenario + t.Run(name, func(t *testing.T) { + t.Parallel() + mock := mocktracer.Tracer{} + ep, tx, ledger := MakeSampleEnv() + ep.Tracer = &mock + + // Establish FirstTestID as the app id, and fund it. We do this so that the created + // inner app will get a sequential ID, which is what the mocktracer scenarios expect + createdAppIndex := basics.AppIndex(FirstTestID) + ledger.NewApp(tx.Receiver, createdAppIndex, basics.AppParams{}) + ledger.NewAccount(createdAppIndex.Address(), 200_000) + tx.ApplicationID = createdAppIndex + + scenario := makeScenario(mocktracer.TestScenarioInfo{ + CallingTxn: *tx, + CreatedAppID: createdAppIndex, + }) + + var evalProblems []string + switch scenario.Outcome { + case mocktracer.RejectionOutcome: + evalProblems = []string{"REJECT"} + case mocktracer.ErrorOutcome: + if scenario.ExpectedError == "overspend" { + // the logic test ledger uses this error instead + evalProblems = []string{"insufficient balance"} + } else { + evalProblems = []string{scenario.ExpectedError} + } + } + + ops := TestProg(t, scenario.Program, AssemblerNoVersion) + TestAppBytes(t, ops.Program, ep, evalProblems...) 
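+			// TestAppBytes evaluates the assembled program directly rather than
+			// running a full transaction through the block evaluator, which is why
+			// the txn-level events are trimmed from the expectations below.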
+ + // trim BeforeTxn and AfterTxn events from scenario.ExpectedEvents, since they are + // not emitted from TestAppBytes + require.Equal(t, scenario.ExpectedEvents[0].Type, mocktracer.BeforeTxnEvent) + require.Equal(t, scenario.ExpectedEvents[len(scenario.ExpectedEvents)-1].Type, mocktracer.AfterTxnEvent) + trimmedExpectedEvents := scenario.ExpectedEvents[1 : len(scenario.ExpectedEvents)-1] + require.Equal(t, trimmedExpectedEvents, mock.Events) + }) + } +} - t.Run("logicsig", func(t *testing.T) { - t.Parallel() - testTracer := testEvalTracer{} - ep := defaultEvalParams() - ep.Tracer = &testTracer - testLogic(t, debuggerTestProgram, AssemblerMaxVersion, ep) - - // BeforeTxnGroup/AfterTxnGroup/BeforeTxn/AfterTxn are only called for the inner txns in - // this test, not the top-level ones - require.Zero(t, testTracer.beforeTxnGroupCalls) - require.Zero(t, testTracer.afterTxnGroupCalls) - require.Zero(t, testTracer.beforeTxnCalls) - require.Zero(t, testTracer.afterTxnCalls) - - require.Equal(t, 1, testTracer.beforeProgramCalls) - require.Equal(t, 1, testTracer.afterProgramCalls) - require.Equal(t, []RunMode{ModeSig}, testTracer.programModes) - - require.Equal(t, 35, testTracer.beforeOpcodeCalls) - require.Equal(t, testTracer.beforeOpcodeCalls, testTracer.afterOpcodeCalls) - }) +func TestEvalPanicWithTracer(t *testing.T) { //nolint:paralleltest // Uses WithPanicOpcode + partitiontest.PartitionTest(t) - t.Run("simple app", func(t *testing.T) { - t.Parallel() - testTracer := testEvalTracer{} - ep := defaultEvalParams() - ep.Tracer = &testTracer - testApp(t, debuggerTestProgram, ep) - - // BeforeTxnGroup/AfterTxnGroup/BeforeTxn/AfterTxn are only called for the inner txns in - // this test, not the top-level ones - require.Zero(t, testTracer.beforeTxnGroupCalls) - require.Zero(t, testTracer.afterTxnGroupCalls) - require.Zero(t, testTracer.beforeTxnCalls) - require.Zero(t, testTracer.afterTxnCalls) - - require.Equal(t, 1, testTracer.beforeProgramCalls) - require.Equal(t, 1, testTracer.afterProgramCalls) - require.Equal(t, []RunMode{ModeApp}, testTracer.programModes) - - require.Equal(t, 35, testTracer.beforeOpcodeCalls) - require.Equal(t, testTracer.beforeOpcodeCalls, testTracer.afterOpcodeCalls) + WithPanicOpcode(t, LogicVersion, false, func(opcode byte) { + for _, mode := range []RunMode{ModeSig, ModeApp} { + t.Run(mode.String(), func(t *testing.T) { //nolint:paralleltest // Uses WithPanicOpcode + testCase := getPanicTracerTestCase(mode) + mock := mocktracer.Tracer{} + ep := DefaultEvalParams() + ep.Tracer = &mock + switch mode { + case ModeSig: + TestLogic(t, testCase.program, AssemblerMaxVersion, ep, testCase.evalProblems...) + case ModeApp: + TestApp(t, testCase.program, ep, testCase.evalProblems...) + default: + require.Fail(t, "unknown mode") + } + + require.Equal(t, testCase.expectedEvents, mock.Events) + }) + } }) +} - t.Run("app with inner txns", func(t *testing.T) { - t.Parallel() - testTracer := testEvalTracer{} - ep, tx, ledger := MakeSampleEnv() - - // Establish 888 as the app id, and fund it. 
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{}) - ledger.NewAccount(basics.AppIndex(888).Address(), 200000) - - ep.Tracer = &testTracer - testApp(t, innerTxnTestProgram, ep) - - // BeforeTxnGroup/AfterTxnGroup/BeforeTxn/AfterTxn are only called for the inner txns in - // this test, not the top-level ones - - // two groups of inner txns were issued - require.Equal(t, 2, testTracer.beforeTxnGroupCalls) - require.Equal(t, 2, testTracer.afterTxnGroupCalls) - - // three total inner txns were issued - require.Equal(t, 3, testTracer.beforeTxnCalls) - require.Equal(t, 3, testTracer.afterTxnCalls) - - require.Equal(t, 2, testTracer.beforeProgramCalls) - require.Equal(t, 2, testTracer.afterProgramCalls) - require.Equal(t, []RunMode{ModeApp, ModeApp}, testTracer.programModes) +type panicTracer struct { + NullEvalTracer +} - appCallTealOps := 27 - innerAppCallTealOps := 1 - require.Equal(t, appCallTealOps+innerAppCallTealOps, testTracer.beforeOpcodeCalls) - require.Equal(t, testTracer.beforeOpcodeCalls, testTracer.afterOpcodeCalls) - }) +func (t *panicTracer) AfterOpcode(cx *EvalContext, evalError error) { + panic("panicTracer panics") } -func TestNullEvalTracerIsEvalTracer(t *testing.T) { +// TestEvalWithTracerTracerPanic ensures that tracer panics get recovered and turned into errors +func TestEvalWithTracerPanic(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - var tracer EvalTracer = NullEvalTracer{} - require.NotNil(t, tracer) + for _, mode := range []RunMode{ModeSig, ModeApp} { + mode := mode + t.Run(mode.String(), func(t *testing.T) { + t.Parallel() + tracer := panicTracer{} + ep := DefaultEvalParams() + ep.Tracer = &tracer + TestLogic(t, debuggerTestProgramApprove, AssemblerMaxVersion, ep, "panicTracer panics") + }) + } } diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go index b279cf4070..caaeaf239c 100644 --- a/data/transactions/verify/txn_test.go +++ b/data/transactions/verify/txn_test.go @@ -360,83 +360,197 @@ func TestTxnGroupWithTracer(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - proto := config.Consensus[protocol.ConsensusCurrentVersion] - - account := keypair() - accountAddr := basics.Address(account.SignatureVerifier) - - ops1, err := logic.AssembleString(`#pragma version 6 -pushint 1`) - require.NoError(t, err) - program1 := ops1.Program - program1Addr := basics.Address(logic.HashProgram(program1)) - - ops2, err := logic.AssembleString(`#pragma version 6 + // In all cases, a group of three transactions is tested. They are: + // 1. A payment transaction from a LogicSig (program1) + // 2. An app call from a normal account + // 3. 
An app call from a LogicSig (program2) + + testCases := []struct { + name string + program1 string + program2 string + expectedError string + expectedEvents []mocktracer.Event + }{ + { + name: "both approve", + program1: `#pragma version 6 +pushint 1`, + program2: `#pragma version 6 pushbytes "test" pop -pushint 1`) - require.NoError(t, err) - program2 := ops2.Program - program2Addr := basics.Address(logic.HashProgram(program2)) - - // this shouldn't be invoked during this test - appProgram := "err" - - lsigPay := txntest.Txn{ - Type: protocol.PaymentTx, - Sender: program1Addr, - Receiver: accountAddr, - Fee: proto.MinTxnFee, - } - - normalSigAppCall := txntest.Txn{ - Type: protocol.ApplicationCallTx, - Sender: accountAddr, - ApprovalProgram: appProgram, - ClearStateProgram: appProgram, - Fee: proto.MinTxnFee, - } - - lsigAppCall := txntest.Txn{ - Type: protocol.ApplicationCallTx, - Sender: program2Addr, - ApprovalProgram: appProgram, - ClearStateProgram: appProgram, - Fee: proto.MinTxnFee, - } - - txntest.Group(&lsigPay, &normalSigAppCall, &lsigAppCall) - - txgroup := []transactions.SignedTxn{ +pushint 1`, + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(logic.ModeSig), // first txn start + mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(false), // first txn LogicSig: 1 op + mocktracer.AfterProgram(logic.ModeSig, false), // first txn end + // nothing for second txn (not signed with a LogicSig) + mocktracer.BeforeProgram(logic.ModeSig), // third txn start + }, + mocktracer.OpcodeEvents(3, false), // third txn LogicSig: 3 ops + { + mocktracer.AfterProgram(logic.ModeSig, false), // third txn end + }, + }), + }, + { + name: "approve then reject", + program1: `#pragma version 6 +pushint 1`, + program2: `#pragma version 6 +pushbytes "test" +pop +pushint 0`, + expectedError: "rejected by logic", + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(logic.ModeSig), // first txn start + mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(false), // first txn LogicSig: 1 op + mocktracer.AfterProgram(logic.ModeSig, false), // first txn end + // nothing for second txn (not signed with a LogicSig) + mocktracer.BeforeProgram(logic.ModeSig), // third txn start + }, + mocktracer.OpcodeEvents(3, false), // third txn LogicSig: 3 ops + { + mocktracer.AfterProgram(logic.ModeSig, false), // third txn end + }, + }), + }, + { + name: "approve then error", + program1: `#pragma version 6 +pushint 1`, + program2: `#pragma version 6 +pushbytes "test" +pop +err +pushbytes "test2" +pop`, + expectedError: "rejected by logic err=err opcode executed", + expectedEvents: mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeProgram(logic.ModeSig), // first txn start + mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(false), // first txn LogicSig: 1 op + mocktracer.AfterProgram(logic.ModeSig, false), // first txn end + // nothing for second txn (not signed with a LogicSig) + mocktracer.BeforeProgram(logic.ModeSig), // third txn start + }, + mocktracer.OpcodeEvents(3, true), // third txn LogicSig: 3 ops + { + mocktracer.AfterProgram(logic.ModeSig, true), // third txn end + }, + }), + }, { - Lsig: transactions.LogicSig{ - Logic: program1, + name: "reject then approve", + program1: `#pragma version 6 +pushint 0`, + program2: `#pragma version 6 +pushbytes "test" +pop +pushint 1`, + expectedError: "rejected by logic", + expectedEvents: []mocktracer.Event{ + mocktracer.BeforeProgram(logic.ModeSig), // first txn start + 
mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(false), // first txn LogicSig: 1 op + mocktracer.AfterProgram(logic.ModeSig, false), // first txn end + // execution stops at rejection }, - Txn: lsigPay.Txn(), }, - normalSigAppCall.Txn().Sign(account), { - Lsig: transactions.LogicSig{ - Logic: program2, + name: "error then approve", + program1: `#pragma version 6 +err`, + program2: `#pragma version 6 +pushbytes "test" +pop +pushint 1`, + expectedError: "rejected by logic err=err opcode executed", + expectedEvents: []mocktracer.Event{ + mocktracer.BeforeProgram(logic.ModeSig), // first txn start + mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(true), // first txn LogicSig: 1 op + mocktracer.AfterProgram(logic.ModeSig, true), // first txn end + // execution stops at error }, - Txn: lsigAppCall.Txn(), }, } - mockTracer := &mocktracer.Tracer{} - _, err = TxnGroupWithTracer(txgroup, blockHeader, nil, logic.NoHeaderLedger{}, mockTracer) - require.NoError(t, err) + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + proto := config.Consensus[protocol.ConsensusCurrentVersion] + + account := keypair() + accountAddr := basics.Address(account.SignatureVerifier) + + ops1, err := logic.AssembleString(testCase.program1) + require.NoError(t, err) + program1Bytes := ops1.Program + program1Addr := basics.Address(logic.HashProgram(program1Bytes)) + + ops2, err := logic.AssembleString(testCase.program2) + require.NoError(t, err) + program2Bytes := ops2.Program + program2Addr := basics.Address(logic.HashProgram(program2Bytes)) + + // This app program shouldn't be invoked during this test + appProgram := "err" + + lsigPay := txntest.Txn{ + Type: protocol.PaymentTx, + Sender: program1Addr, + Receiver: accountAddr, + Fee: proto.MinTxnFee, + } + + normalSigAppCall := txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: accountAddr, + ApprovalProgram: appProgram, + ClearStateProgram: appProgram, + Fee: proto.MinTxnFee, + } + + lsigAppCall := txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: program2Addr, + ApprovalProgram: appProgram, + ClearStateProgram: appProgram, + Fee: proto.MinTxnFee, + } + + txntest.Group(&lsigPay, &normalSigAppCall, &lsigAppCall) + + txgroup := []transactions.SignedTxn{ + { + Lsig: transactions.LogicSig{ + Logic: program1Bytes, + }, + Txn: lsigPay.Txn(), + }, + normalSigAppCall.Txn().Sign(account), + { + Lsig: transactions.LogicSig{ + Logic: program2Bytes, + }, + Txn: lsigAppCall.Txn(), + }, + } + + mockTracer := &mocktracer.Tracer{} + _, err = TxnGroupWithTracer(txgroup, blockHeader, nil, logic.NoHeaderLedger{}, mockTracer) + + if len(testCase.expectedError) != 0 { + require.ErrorContains(t, err, testCase.expectedError) + } else { + require.NoError(t, err) + } - expectedEvents := []mocktracer.Event{ - mocktracer.BeforeProgram(logic.ModeSig), // first txn start - mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(), // first txn LogicSig: 1 op - mocktracer.AfterProgram(logic.ModeSig), // first txn end - // nothing for second txn (not signed with a LogicSig) - mocktracer.BeforeProgram(logic.ModeSig), // third txn start - mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(), mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(), mocktracer.BeforeOpcode(), mocktracer.AfterOpcode(), // third txn LogicSig: 3 ops - mocktracer.AfterProgram(logic.ModeSig), // third txn end + require.Equal(t, testCase.expectedEvents, mockTracer.Events) + }) } - require.Equal(t, expectedEvents, mockTracer.Events) } func 
TestPaysetGroups(t *testing.T) { diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go index 1a1a1eea50..2d184e4b4c 100644 --- a/ledger/internal/eval.go +++ b/ledger/internal/eval.go @@ -927,7 +927,7 @@ func (eval *BlockEvaluator) Transaction(txn transactions.SignedTxn, ad transacti // TransactionGroup tentatively adds a new transaction group as part of this block evaluation. // If the transaction group cannot be added to the block without violating some constraints, // an error is returned and the block evaluator state is unchanged. -func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWithAD) error { +func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWithAD) (err error) { // Nothing to do if there are no transactions. if len(txgroup) == 0 { return nil @@ -952,6 +952,10 @@ func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWit if eval.Tracer != nil { eval.Tracer.BeforeTxnGroup(evalParams) + // Ensure we update the tracer before exiting + defer func() { + eval.Tracer.AfterTxnGroup(evalParams, err) + }() } // Evaluate each transaction in the group @@ -964,12 +968,13 @@ func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWit } err := eval.transaction(txad.SignedTxn, evalParams, gi, txad.ApplyData, cow, &txib) - if err != nil { - return err - } if eval.Tracer != nil { - eval.Tracer.AfterTxn(evalParams, gi, txib.ApplyData) + eval.Tracer.AfterTxn(evalParams, gi, txib.ApplyData, err) + } + + if err != nil { + return err } txibs = append(txibs, txib) @@ -1018,10 +1023,6 @@ func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWit eval.blockTxBytes += groupTxBytes cow.commitToParent() - if eval.Tracer != nil { - eval.Tracer.AfterTxnGroup(evalParams) - } - return nil } @@ -1107,6 +1108,10 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * // Apply the transaction, updating the cow balances applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, gi, cow.Counter()) if err != nil { + if eval.Tracer != nil { + // If there is a tracer, save the ApplyData so that it's viewable by the tracer + txib.ApplyData = applyData + } return fmt.Errorf("transaction %v: %w", txid, err) } diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go index 5803a4d613..0d4e6aaa39 100644 --- a/ledger/internal/eval_test.go +++ b/ledger/internal/eval_test.go @@ -310,199 +310,218 @@ func TestPrivateTransactionGroup(t *testing.T) { require.Error(t, err) // too many } -func tealOpLogs(count int) []mocktracer.Event { - var log []mocktracer.Event - - for i := 0; i < count; i++ { - log = append(log, mocktracer.BeforeOpcode(), mocktracer.AfterOpcode()) - } - - return log -} - -func flatten(rows [][]mocktracer.Event) []mocktracer.Event { - var out []mocktracer.Event - for _, row := range rows { - out = append(out, row...) 
- } - return out -} - -const innerTxnTestProgram string = `#pragma version 6 -itxn_begin -int appl -itxn_field TypeEnum -int NoOp -itxn_field OnCompletion -byte 0x068101 // #pragma version 6; int 1; -dup -itxn_field ApprovalProgram -itxn_field ClearStateProgram -itxn_submit - -itxn_begin -int pay -itxn_field TypeEnum -int 1 -itxn_field Amount -global CurrentApplicationAddress -itxn_field Receiver -itxn_next -int pay -itxn_field TypeEnum -int 2 -itxn_field Amount -global CurrentApplicationAddress -itxn_field Receiver -itxn_submit - -int 1 -` - func TestTransactionGroupWithTracer(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.Genesis(10) + // In all cases, a group of three transactions is tested. They are: + // 1. A basic app call transaction + // 2. A payment transaction + // 3. An app call transaction that spawns inners. This is from the mocktracer scenarios. - innerAppID := 3 - innerAppAddress := basics.AppIndex(innerAppID).Address() - balances := genesisInitState.Accounts - balances[innerAppAddress] = basics_testing.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1000000}) + scenarios := mocktracer.GetTestScenarios() - genesisBalances := bookkeeping.GenesisBalances{ - Balances: genesisInitState.Accounts, - FeeSink: testSinkAddr, - RewardsPool: testPoolAddr, - Timestamp: 0, - } - l := newTestLedger(t, genesisBalances) - - blkHeader, err := l.BlockHdr(basics.Round(0)) - require.NoError(t, err) - newBlock := bookkeeping.MakeBlock(blkHeader) - eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0) - require.NoError(t, err) - eval.validate = true - eval.generate = true - - basicProgram := `#pragma version 6 -byte "hello" -log -int 1` - - genHash := l.GenesisHash() - - // a basic app call - basicAppCallTxn := txntest.Txn{ - Type: protocol.ApplicationCallTx, - Sender: addrs[0], - ApprovalProgram: basicProgram, - ClearStateProgram: basicProgram, - - FirstValid: newBlock.Round(), - LastValid: newBlock.Round() + 1000, - Fee: minFee, - GenesisHash: genHash, - } - - // a non-app call txn - payTxn := txntest.Txn{ - Type: protocol.PaymentTx, - Sender: addrs[1], - Receiver: addrs[2], - CloseRemainderTo: addrs[3], - Amount: 1_000_000, - - FirstValid: newBlock.Round(), - LastValid: newBlock.Round() + 1000, - Fee: minFee, - GenesisHash: genHash, + type tracerTestCase struct { + name string + firstTxnBehavior string + innerAppCallScenario mocktracer.TestScenarioGenerator } + var testCases []tracerTestCase + + firstIteration := true + for scenarioName, scenario := range scenarios { + firstTxnBehaviors := []string{"approve"} + if firstIteration { + // When the first transaction rejects or errors, the behavior of the later transactions + // don't matter, so we only want to test these cases with any one mocktracer scenario. 
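+			// (Go map iteration order is unspecified, so which scenario receives
+			// the extra reject/error cases is arbitrary; any single one suffices.)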
+ firstTxnBehaviors = append(firstTxnBehaviors, "reject", "error") + firstIteration = false + } - // an app call that spawns inner txns - innerAppCallTxn := txntest.Txn{ - Type: protocol.ApplicationCallTx, - Sender: addrs[0], - ApprovalProgram: innerTxnTestProgram, - ClearStateProgram: basicProgram, - - FirstValid: newBlock.Round(), - LastValid: newBlock.Round() + 1000, - Fee: minFee, - GenesisHash: genHash, + for _, firstTxnTxnBehavior := range firstTxnBehaviors { + testCases = append(testCases, tracerTestCase{ + name: fmt.Sprintf("firstTxnBehavior=%s,scenario=%s", firstTxnTxnBehavior, scenarioName), + firstTxnBehavior: firstTxnTxnBehavior, + innerAppCallScenario: scenario, + }) + } } - txntest.Group(&basicAppCallTxn, &payTxn, &innerAppCallTxn) - - txgroup := transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{ - basicAppCallTxn.Txn().Sign(keys[0]), - payTxn.Txn().Sign(keys[1]), - innerAppCallTxn.Txn().Sign(keys[0]), - }) - - require.Len(t, eval.block.Payset, 0) + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + genesisInitState, addrs, keys := ledgertesting.Genesis(10) + + innerAppID := basics.AppIndex(3) + innerAppAddress := innerAppID.Address() + balances := genesisInitState.Accounts + balances[innerAppAddress] = basics_testing.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1_000_000}) + + genesisBalances := bookkeeping.GenesisBalances{ + Balances: genesisInitState.Accounts, + FeeSink: testSinkAddr, + RewardsPool: testPoolAddr, + Timestamp: 0, + } + l := newTestLedger(t, genesisBalances) - tracer := &mocktracer.Tracer{} - eval.Tracer = tracer - err = eval.TransactionGroup(txgroup) - require.NoError(t, err) + blkHeader, err := l.BlockHdr(basics.Round(0)) + require.NoError(t, err) + newBlock := bookkeeping.MakeBlock(blkHeader) + eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0) + require.NoError(t, err) + eval.validate = true + eval.generate = true + + genHash := l.GenesisHash() + + var basicAppCallReturn string + switch testCase.firstTxnBehavior { + case "approve": + basicAppCallReturn = "int 1" + case "reject": + basicAppCallReturn = "int 0" + case "error": + basicAppCallReturn = "err" + default: + require.Fail(t, "Unexpected firstTxnBehavior") + } + // a basic app call + basicAppCallTxn := txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: addrs[0], + ApprovalProgram: fmt.Sprintf(`#pragma version 6 +byte "hello" +log +%s`, basicAppCallReturn), + ClearStateProgram: `#pragma version 6 +int 1`, + + FirstValid: newBlock.Round(), + LastValid: newBlock.Round() + 1000, + Fee: minFee, + GenesisHash: genHash, + } - require.Len(t, eval.block.Payset, len(txgroup)) + // a non-app call txn + payTxn := txntest.Txn{ + Type: protocol.PaymentTx, + Sender: addrs[1], + Receiver: addrs[2], + CloseRemainderTo: addrs[3], + Amount: 1_000_000, + + FirstValid: newBlock.Round(), + LastValid: newBlock.Round() + 1000, + Fee: minFee, + GenesisHash: genHash, + } + // an app call with inner txn + innerAppCallTxn := txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: addrs[0], + ClearStateProgram: `#pragma version 6 +int 1`, + + FirstValid: newBlock.Round(), + LastValid: newBlock.Round() + 1000, + Fee: minFee, + GenesisHash: genHash, + } + scenario := testCase.innerAppCallScenario(mocktracer.TestScenarioInfo{ + CallingTxn: innerAppCallTxn.Txn(), + MinFee: minFee, + CreatedAppID: innerAppID, + }) + innerAppCallTxn.ApprovalProgram = scenario.Program + + txntest.Group(&basicAppCallTxn, &payTxn, 
&innerAppCallTxn) + + txgroup := transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{ + basicAppCallTxn.Txn().Sign(keys[0]), + payTxn.Txn().Sign(keys[1]), + innerAppCallTxn.Txn().Sign(keys[0]), + }) + + require.Len(t, eval.block.Payset, 0) + + tracer := &mocktracer.Tracer{} + eval.Tracer = tracer + err = eval.TransactionGroup(txgroup) + switch testCase.firstTxnBehavior { + case "approve": + if len(scenario.ExpectedError) != 0 { + require.ErrorContains(t, err, scenario.ExpectedError) + require.Len(t, eval.block.Payset, 0) + } else { + require.NoError(t, err) + require.Len(t, eval.block.Payset, 3) + } + case "reject": + require.ErrorContains(t, err, "transaction rejected by ApprovalProgram") + require.Len(t, eval.block.Payset, 0) + case "error": + require.ErrorContains(t, err, "logic eval error: err opcode executed") + require.Len(t, eval.block.Payset, 0) + } - expectedADs := make([]transactions.ApplyData, len(txgroup)) - for i, txn := range eval.block.Payset { - expectedADs[i] = txn.ApplyData + expectedBasicAppCallAD := transactions.ApplyData{ + ApplicationID: 1, + EvalDelta: transactions.EvalDelta{ + GlobalDelta: basics.StateDelta{}, + LocalDeltas: map[uint64]basics.StateDelta{}, + Logs: []string{"hello"}, + }, + } + expectedPayTxnAD := + transactions.ApplyData{ + ClosingAmount: basics.MicroAlgos{ + Raw: balances[payTxn.Sender].MicroAlgos.Raw - payTxn.Amount - txgroup[1].Txn.Fee.Raw, + }, + } + + var expectedEvents []mocktracer.Event + if testCase.firstTxnBehavior == "approve" { + expectedEvents = mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeTxnGroup(3), + mocktracer.BeforeTxn(protocol.ApplicationCallTx), // start basicAppCallTxn + mocktracer.BeforeProgram(logic.ModeApp), + }, + mocktracer.OpcodeEvents(3, false), + { + mocktracer.AfterProgram(logic.ModeApp, false), + mocktracer.AfterTxn(protocol.ApplicationCallTx, expectedBasicAppCallAD, false), // end basicAppCallTxn + mocktracer.BeforeTxn(protocol.PaymentTx), // start payTxn + mocktracer.AfterTxn(protocol.PaymentTx, expectedPayTxnAD, false), // end payTxn + }, + scenario.ExpectedEvents, + { + mocktracer.AfterTxnGroup(3, scenario.Outcome != mocktracer.ApprovalOutcome), + }, + }) + } else { + hasError := testCase.firstTxnBehavior == "error" + // EvalDeltas are removed from failed app call transactions + expectedBasicAppCallAD.EvalDelta = transactions.EvalDelta{} + expectedEvents = mocktracer.FlattenEvents([][]mocktracer.Event{ + { + mocktracer.BeforeTxnGroup(3), + mocktracer.BeforeTxn(protocol.ApplicationCallTx), // start basicAppCallTxn + mocktracer.BeforeProgram(logic.ModeApp), + }, + mocktracer.OpcodeEvents(3, hasError), + { + mocktracer.AfterProgram(logic.ModeApp, hasError), + mocktracer.AfterTxn(protocol.ApplicationCallTx, expectedBasicAppCallAD, true), // end basicAppCallTxn + mocktracer.AfterTxnGroup(3, true), + }, + }) + } + require.Equal(t, expectedEvents, mocktracer.StripInnerTxnGroupIDsFromEvents(tracer.Events)) + }) } - - expectedEvents := flatten([][]mocktracer.Event{ - { - mocktracer.BeforeTxnGroup(3), - mocktracer.BeforeTxn(protocol.ApplicationCallTx), // start basicAppCallTxn - mocktracer.BeforeProgram(logic.ModeApp), - }, - tealOpLogs(3), - { - mocktracer.AfterProgram(logic.ModeApp), - mocktracer.AfterTxn(protocol.ApplicationCallTx, expectedADs[0]), // end basicAppCallTxn - mocktracer.BeforeTxn(protocol.PaymentTx), // start payTxn - mocktracer.AfterTxn(protocol.PaymentTx, expectedADs[1]), // end payTxn - mocktracer.BeforeTxn(protocol.ApplicationCallTx), // start innerAppCallTxn - 
mocktracer.BeforeProgram(logic.ModeApp), - }, - tealOpLogs(10), - { - mocktracer.BeforeOpcode(), - mocktracer.BeforeTxnGroup(1), // start first itxn group - mocktracer.BeforeTxn(protocol.ApplicationCallTx), - mocktracer.BeforeProgram(logic.ModeApp), - }, - tealOpLogs(1), - { - mocktracer.AfterProgram(logic.ModeApp), - mocktracer.AfterTxn(protocol.ApplicationCallTx, expectedADs[2].EvalDelta.InnerTxns[0].ApplyData), - mocktracer.AfterTxnGroup(1), // end first itxn group - mocktracer.AfterOpcode(), - }, - tealOpLogs(14), - { - mocktracer.BeforeOpcode(), - mocktracer.BeforeTxnGroup(2), // start second itxn group - mocktracer.BeforeTxn(protocol.PaymentTx), - mocktracer.AfterTxn(protocol.PaymentTx, expectedADs[2].EvalDelta.InnerTxns[1].ApplyData), - mocktracer.BeforeTxn(protocol.PaymentTx), - mocktracer.AfterTxn(protocol.PaymentTx, expectedADs[2].EvalDelta.InnerTxns[2].ApplyData), - mocktracer.AfterTxnGroup(2), // end second itxn group - mocktracer.AfterOpcode(), - }, - tealOpLogs(1), - { - mocktracer.AfterProgram(logic.ModeApp), - mocktracer.AfterTxn(protocol.ApplicationCallTx, expectedADs[2]), // end innerAppCallTxn - mocktracer.AfterTxnGroup(3), - }, - }) - require.Equal(t, expectedEvents, tracer.Events) } // BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet. From e14054252a63e660634a11ebfbcba1ce9a0303e6 Mon Sep 17 00:00:00 2001 From: Hang Su <87964331+ahangsu@users.noreply.github.com> Date: Fri, 17 Feb 2023 11:05:51 -0500 Subject: [PATCH 44/81] Tests: No Cache Testing in ledger (#5058) --- config/localTemplate.go | 5 ++ config/local_defaults.go | 1 + installer/config.json.example | 1 + ledger/acctonline.go | 10 ++- ledger/acctupdates.go | 17 ++++- ledger/acctupdates_test.go | 30 ++++----- ledger/apptxn_test.go | 93 ++++++++++++++------------- ledger/archival_test.go | 8 --- ledger/boxtxn_test.go | 30 ++++----- ledger/catchpointtracker_test.go | 6 +- ledger/catchpointwriter_test.go | 9 ++- ledger/double_test.go | 7 +- ledger/eval_simple_test.go | 9 ++- ledger/ledger.go | 2 +- ledger/ledger_test.go | 80 ++++++++++++++++------- ledger/lruaccts.go | 25 +++++-- ledger/lruaccts_test.go | 36 +++++++++++ ledger/lrukv.go | 20 ++++-- ledger/lrukv_test.go | 35 ++++++++++ ledger/lruonlineaccts.go | 16 ++++- ledger/lruonlineaccts_test.go | 36 +++++++++++ ledger/lruresources.go | 27 +++++--- ledger/lruresources_test.go | 42 ++++++++++++ ledger/simple_test.go | 11 +--- ledger/testing/consensusRange.go | 44 ++++++++----- ledger/testing/consensusRange_test.go | 3 + ledger/testing/withAndWithoutCache.go | 35 ++++++++++ ledger/txnbench_test.go | 3 +- test/testdata/configs/config-v27.json | 1 + 29 files changed, 472 insertions(+), 170 deletions(-) create mode 100644 ledger/testing/withAndWithoutCache.go diff --git a/config/localTemplate.go b/config/localTemplate.go index 020361918d..8172545575 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -493,6 +493,11 @@ type Local struct { // guarantees in terms of functionality or future support. EnableExperimentalAPI bool `version[26]:"false"` + // DisableLedgerLRUCache disables LRU caches in ledger. + // Setting it to TRUE might result in significant performance degradation + // and SHOULD NOT be used for other reasons than testing. + DisableLedgerLRUCache bool `version[27]:"false"` + // EnableFollowMode launches the node in "follower" mode. 
This turns off the agreement service, // and APIs related to broadcasting transactions, and enables APIs which can retrieve detailed information // from ledger caches and can control the ledger round. diff --git a/config/local_defaults.go b/config/local_defaults.go index f0e739a2db..48ddae4be9 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -49,6 +49,7 @@ var defaultLocal = Local{ DNSSecurityFlags: 1, DeadlockDetection: 0, DeadlockDetectionThreshold: 30, + DisableLedgerLRUCache: false, DisableLocalhostConnectionRateLimit: true, DisableNetworking: false, DisableOutgoingConnectionThrottling: false, diff --git a/installer/config.json.example b/installer/config.json.example index 52b86764ed..3eefcf69ef 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -28,6 +28,7 @@ "DNSSecurityFlags": 1, "DeadlockDetection": 0, "DeadlockDetectionThreshold": 30, + "DisableLedgerLRUCache": false, "DisableLocalhostConnectionRateLimit": true, "DisableNetworking": false, "DisableOutgoingConnectionThrottling": false, diff --git a/ledger/acctonline.go b/ledger/acctonline.go index ea9c411735..ebc6174187 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -117,12 +117,16 @@ type onlineAccounts struct { // maxAcctLookback sets the minimim deltas size to keep in memory acctLookback uint64 + + // disableCache (de)activates the LRU cache use in onlineAccounts + disableCache bool } // initialize initializes the accountUpdates structure func (ao *onlineAccounts) initialize(cfg config.Local) { ao.accountsReadCond = sync.NewCond(ao.accountsMu.RLocker()) ao.acctLookback = cfg.MaxAcctLookback + ao.disableCache = cfg.DisableLedgerLRUCache } // loadFromDisk is the 2nd level initialization, and is required before the onlineAccounts becomes functional @@ -184,7 +188,11 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou ao.accounts = make(map[basics.Address]modifiedOnlineAccount) ao.deltasAccum = []int{0} - ao.baseOnlineAccounts.init(ao.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold) + if !ao.disableCache { + ao.baseOnlineAccounts.init(ao.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold) + } else { + ao.baseOnlineAccounts.init(ao.log, 0, 0) + } return } diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 8e5ec2f752..d3caf1ff60 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -227,6 +227,9 @@ type accountUpdates struct { // maxAcctLookback sets the minimim deltas size to keep in memory acctLookback uint64 + + // disableCache (de)activates the LRU cache use in accountUpdates + disableCache bool } // RoundOffsetError is an error for when requested round is behind earliest stored db entry @@ -296,6 +299,8 @@ func (au *accountUpdates) initialize(cfg config.Local) { // log metrics au.logAccountUpdatesMetrics = cfg.EnableAccountUpdatesStats au.logAccountUpdatesInterval = cfg.AccountUpdatesStatsInterval + + au.disableCache = cfg.DisableLedgerLRUCache } // loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional @@ -962,9 +967,15 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) au.deltasAccum = []int{0} - au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold) - au.baseResources.init(au.log, 
baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold) - au.baseKVs.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold) + if !au.disableCache { + au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold) + au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold) + au.baseKVs.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold) + } else { + au.baseAccounts.init(au.log, 0, 0) + au.baseResources.init(au.log, 0, 0) + au.baseKVs.init(au.log, 0, 0) + } return } diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 7d6c3485c6..66d14f5943 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -475,20 +475,13 @@ func checkOnlineAcctUpdatesConsistency(t *testing.T, ao *onlineAccounts, rnd bas } } -func TestAcctUpdates(t *testing.T) { - partitiontest.PartitionTest(t) - - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes travis builds to time out") - } - +func testAcctUpdates(t *testing.T, conf config.Local) { // The next operations are heavy on the memory. // Garbage collection helps prevent trashing runtime.GC() proto := config.Consensus[protocol.ConsensusCurrentVersion] - conf := config.GetDefaultLocal() for _, lookback := range []uint64{conf.MaxAcctLookback, proto.MaxBalLookback} { t.Run(fmt.Sprintf("lookback=%d", lookback), func(t *testing.T) { @@ -597,6 +590,13 @@ func TestAcctUpdates(t *testing.T) { } } +func TestAcctUpdates(t *testing.T) { + partitiontest.PartitionTest(t) + + conf := config.GetDefaultLocal() + ledgertesting.WithAndWithoutLRUCache(t, conf, testAcctUpdates) +} + func BenchmarkBalancesChanges(b *testing.B) { if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { b.Skip("This test is too slow on ARM and causes travis builds to time out") @@ -712,20 +712,21 @@ func BenchmarkCalibrateCacheNodeSize(b *testing.B) { // The TestAcctUpdatesUpdatesCorrectness conduct a correctless test for the accounts update in the following way - // Each account is initialized with 100 algos. // On every round, each account move variable amount of funds to an accumulating account. -// The deltas for each accounts are picked by using the lookup method. +// The deltas for each account are picked by using the lookup method. // At the end of the test, we verify that each account has the expected amount of algos. // In addition, throughout the test, we check ( using lookup ) that the historical balances, *beyond* the // lookback are generating either an error, or returning the correct amount. func TestAcctUpdatesUpdatesCorrectness(t *testing.T) { partitiontest.PartitionTest(t) - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes travis builds to time out") - } + cfgLocal := config.GetDefaultLocal() + ledgertesting.WithAndWithoutLRUCache(t, cfgLocal, testAcctUpdatesUpdatesCorrectness) +} +func testAcctUpdatesUpdatesCorrectness(t *testing.T, cfg config.Local) { // create new protocol version, which has lower look back. 
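
The conversion of TestAcctUpdates above is the template for the rest of this patch: the test body moves into a testX(t, cfg) helper and the exported test hands it to ledgertesting.WithAndWithoutLRUCache. That helper is added at the end of this patch; judging only from its call sites, it presumably runs the supplied function twice, once per cache mode, along these lines:

    // Presumed shape of the helper, sketched from its call sites; see
    // ledger/testing/withAndWithoutCache.go below for the real implementation.
    func WithAndWithoutLRUCache(t *testing.T, cfg config.Local, test func(t *testing.T, cfg config.Local)) {
        t.Run("with LRU cache", func(t *testing.T) {
            cfg.DisableLedgerLRUCache = false
            test(t, cfg)
        })
        t.Run("without LRU cache", func(t *testing.T) {
            cfg.DisableLedgerLRUCache = true
            test(t, cfg)
        })
    }

Since config.Local is passed by value, flipping DisableLedgerLRUCache in one subtest cannot leak into the other.
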
testProtocolVersion := protocol.ConsensusCurrentVersion - maxAcctLookback := config.GetDefaultLocal().MaxAcctLookback + maxAcctLookback := cfg.MaxAcctLookback inMemory := true testFunction := func(t *testing.T) { @@ -750,8 +751,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) { accts[0][addr] = accountData } - conf := config.GetDefaultLocal() - au, _ := newAcctUpdates(t, ml, conf) + au, _ := newAcctUpdates(t, ml, cfg) defer au.close() // cover 10 genesis blocks diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go index a08a3b74b2..fa840b40a4 100644 --- a/ledger/apptxn_test.go +++ b/ledger/apptxn_test.go @@ -42,8 +42,8 @@ func TestPayAction(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // Inner txns start in v30 - ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() ai := dl.fundedApp(addrs[0], 200000, // account min balance, plus fees @@ -159,7 +159,8 @@ func TestAxferAction(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusFuture) + cfg := config.GetDefaultLocal() + l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusFuture, cfg) defer l.Close() asa := txntest.Txn{ @@ -1354,8 +1355,8 @@ func TestCreateAndUse(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // At 30 the asset reference is illegal, then from v31 it works. - ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() createapp := txntest.Txn{ @@ -1424,8 +1425,8 @@ func TestGtxnEffects(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // At 30 `gtxn CreatedAssetId is illegal, then from v31 it works. 
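
The call-site changes in this file are mechanical: ledgertesting.TestConsensusRange now owns the config.Local and hands it to each callback, so cache selection is decided in one place. When chasing a single failure it can still be useful to pin the cache mode by hand; a sketch using only helpers that appear in this patch:

    cfg := config.GetDefaultLocal()
    cfg.DisableLedgerLRUCache = true // force the no-cache path for this one run
    dl := NewDoubleLedger(t, genBalances, cv, cfg)
    defer dl.Close()
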
- ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() createapp := txntest.Txn{ @@ -1486,8 +1487,8 @@ func TestBasicReentry(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() app0 := txntest.Txn{ @@ -1680,8 +1681,8 @@ func TestMaxInnerTxForSingleAppCall(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // v31 = inner appl - ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() program := ` @@ -1840,8 +1841,8 @@ func TestInnerAppVersionCalling(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // 31 allowed inner appls. v34 lowered proto.MinInnerApplVersion - ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() three, err := logic.AssembleStringWithVersion("int 1", 3) @@ -2034,8 +2035,8 @@ func TestAppDowngrade(t *testing.T) { // Confirm that in old protocol version, downgrade is legal // Start at 28 because we want to v4 app to downgrade to v3 - ledgertesting.TestConsensusRange(t, 28, 30, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 28, 30, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() create := txntest.Txn{ @@ -2065,8 +2066,8 @@ func TestAppDowngrade(t *testing.T) { dl.fullBlock(&update) }) - ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() create := txntest.Txn{ @@ -2310,7 +2311,8 @@ func executeMegaContract(b *testing.B) { var cv protocol.ConsensusVersion = "temp test" config.Consensus[cv] = vTest - l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv) + cfg := config.GetDefaultLocal() + l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv, cfg) defer l.Close() defer delete(config.Consensus, cv) @@ -2736,7 +2738,8 @@ func TestClearStateInnerPay(t *testing.T) { t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() - l := newSimpleLedgerWithConsensusVersion(t, genBalances, test.consensus) + cfg := 
config.GetDefaultLocal() + l := newSimpleLedgerWithConsensusVersion(t, genBalances, test.consensus, cfg) defer l.Close() app0 := txntest.Txn{ @@ -3056,8 +3059,8 @@ func TestForeignAppAccountsAccessible(t *testing.T) { partitiontest.PartitionTest(t) genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() appA := txntest.Txn{ @@ -3122,8 +3125,8 @@ func TestForeignAppAccountsImmutable(t *testing.T) { partitiontest.PartitionTest(t) genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() appA := txntest.Txn{ @@ -3176,8 +3179,8 @@ func TestForeignAppAccountsMutable(t *testing.T) { partitiontest.PartitionTest(t) genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() appA := txntest.Txn{ @@ -3257,8 +3260,8 @@ func TestReloadWithTxns(t *testing.T) { partitiontest.PartitionTest(t) genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() dl.fullBlock() // So that the `block` opcode has a block to inspect @@ -3286,8 +3289,8 @@ func TestEvalAppState(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // v24 = apps - ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() appcall1 := txntest.Txn{ @@ -3338,8 +3341,8 @@ func TestGarbageClearState(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // v24 = apps - ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() createTxn := txntest.Txn{ @@ -3362,8 +3365,8 @@ func TestRewardsInAD(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // v15 put rewards into ApplyData - ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + 
ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]} @@ -3411,8 +3414,8 @@ func TestDeleteNonExistantKeys(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // AVM v2 (apps) - ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() const appID basics.AppIndex = 1 @@ -3452,8 +3455,8 @@ func TestDuplicates(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() pay := txntest.Txn{ @@ -3488,8 +3491,8 @@ func TestHeaderAccess(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // Added in v34 - ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() fvt := txntest.Txn{ @@ -3538,8 +3541,8 @@ func TestLogsInBlock(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() // Run tests from v30 onward - ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() createTxn := txntest.Txn{ @@ -3598,8 +3601,8 @@ func TestUnfundedSenders(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() asaIndex := basics.AssetIndex(1) @@ -3711,8 +3714,8 @@ func TestAppCallAppDuringInit(t *testing.T) { partitiontest.PartitionTest(t) genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() approve := txntest.Txn{ diff --git a/ledger/archival_test.go b/ledger/archival_test.go index cf96cb05c3..d8783d9882 100644 --- a/ledger/archival_test.go +++ b/ledger/archival_test.go @@ -130,14 +130,6 @@ func TestArchival(t *testing.T) { // // We generate mostly empty blocks, with the exception of 
timestamps, // which affect participationTracker.committedUpTo()'s return value. - - // This test was causing random crashes on travis when executed with the race detector - // due to memory exhustion. For the time being, I'm taking it offline from the ubuntu - // configuration where it used to cause failuires. - if runtime.GOOS == "linux" && runtime.GOARCH == "amd64" { - t.Skip("Skipping the TestArchival as it tend to randomally fail on travis linux-amd64") - } - dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) genesisInitState := getInitState() const inMem = true diff --git a/ledger/boxtxn_test.go b/ledger/boxtxn_test.go index f9099a4f91..47291575bf 100644 --- a/ledger/boxtxn_test.go +++ b/ledger/boxtxn_test.go @@ -138,8 +138,8 @@ func TestBoxCreate(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() // increment for a size 24 box with 4 letter name @@ -210,8 +210,8 @@ func TestBoxRecreate(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() // increment for a size 4 box with 4 letter name @@ -261,14 +261,14 @@ func TestBoxCreateAvailability(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() accessInCreate := txntest.Txn{ Type: "appl", Sender: addrs[0], - ApplicationID: 0, // This is a create + ApplicationID: 0, // This is an app-creation Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("hello")}}, ApprovalProgram: ` byte "hello" @@ -366,9 +366,9 @@ func TestBoxRW(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { t.Parallel() - dl := NewDoubleLedger(t, genBalances, cv) + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() var bufNewLogger bytes.Buffer @@ -441,8 +441,8 @@ func TestBoxAccountData(t *testing.T) { } genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() proto := config.Consensus[cv] @@ 
-529,8 +529,8 @@ func TestBoxIOBudgets(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() appIndex := dl.fundedApp(addrs[0], 0, boxAppSource) @@ -593,8 +593,8 @@ func TestBoxInners(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) { - dl := NewDoubleLedger(t, genBalances, cv) + ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() // Advance the creatable counter, so we don't have very low app ids that diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index b85a380f1b..e442c4030c 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -378,8 +378,9 @@ func TestReproducibleCatchpointLabels(t *testing.T) { partitiontest.PartitionTest(t) if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes travis builds to time out") + t.Skip("This test is too slow on ARM and causes CI builds to time out") } + // create new protocol version, which has lower lookback testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels") protoParams := config.Consensus[protocol.ConsensusCurrentVersion] @@ -1474,8 +1475,9 @@ func TestCatchpointFastUpdates(t *testing.T) { partitiontest.PartitionTest(t) if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes travis builds to time out") + t.Skip("This test is too slow on ARM and causes CI builds to time out") } + proto := config.Consensus[protocol.ConsensusCurrentVersion] accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)} diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go index 68d5854380..37f76a15c4 100644 --- a/ledger/catchpointwriter_test.go +++ b/ledger/catchpointwriter_test.go @@ -660,7 +660,8 @@ func TestExactAccountChunk(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture) + cfg := config.GetDefaultLocal() + dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg) defer dl.Close() pay := txntest.Txn{ @@ -704,7 +705,8 @@ func TestCatchpointAfterTxns(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture) + cfg := config.GetDefaultLocal() + dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg) defer dl.Close() boxApp := dl.fundedApp(addrs[1], 1_000_000, boxAppSource) @@ -807,7 +809,8 @@ func TestCatchpointAfterBoxTxns(t *testing.T) { t.Parallel() genBalances, addrs, _ := ledgertesting.NewTestGenesis() - dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture) + cfg := config.GetDefaultLocal() + dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg) defer dl.Close() boxApp := dl.fundedApp(addrs[1], 
1_000_000, boxAppSource)
diff --git a/ledger/double_test.go b/ledger/double_test.go
index 9b4ca20f11..bbc5e95206 100644
--- a/ledger/double_test.go
+++ b/ledger/double_test.go
@@ -19,6 +19,7 @@ package ledger
 import (
 	"testing"
 
+	"github.com/algorand/go-algorand/config"
 	"github.com/algorand/go-algorand/data/basics"
 	"github.com/algorand/go-algorand/data/bookkeeping"
 	"github.com/algorand/go-algorand/data/transactions"
@@ -55,9 +56,9 @@ func (dl DoubleLedger) Close() {
 }
 
 // NewDoubleLedger creates a new DoubleLedger with the supplied balances and consensus version.
-func NewDoubleLedger(t *testing.T, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) DoubleLedger {
-	g := newSimpleLedgerWithConsensusVersion(t, balances, cv)
-	v := newSimpleLedgerFull(t, balances, cv, g.GenesisHash())
+func NewDoubleLedger(t *testing.T, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local) DoubleLedger {
+	g := newSimpleLedgerWithConsensusVersion(t, balances, cv, cfg)
+	v := newSimpleLedgerFull(t, balances, cv, g.GenesisHash(), cfg)
 	return DoubleLedger{t, g, v, nil}
 }
 
diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go
index 4a2317dc1a..c5c9354520 100644
--- a/ledger/eval_simple_test.go
+++ b/ledger/eval_simple_test.go
@@ -318,7 +318,8 @@ func TestRekeying(t *testing.T) {
 
 func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
 	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
-	l := newSimpleLedgerWithConsensusVersion(t, genBalances, consensusVersion)
+	cfg := config.GetDefaultLocal()
+	l := newSimpleLedgerWithConsensusVersion(t, genBalances, consensusVersion, cfg)
 	defer l.Close()
 
 	eval := nextBlock(t, l)
@@ -403,7 +404,8 @@ func TestMinBalanceChanges(t *testing.T) {
 	t.Parallel()
 
 	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
-	l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusCurrentVersion)
+	cfg := config.GetDefaultLocal()
+	l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusCurrentVersion, cfg)
 	defer l.Close()
 
 	createTxn := txntest.Txn{
@@ -481,7 +483,8 @@ func TestAppInsMinBalance(t *testing.T) {
 	t.Parallel()
 
 	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
-	l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusV30)
+	cfg := config.GetDefaultLocal()
+	l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusV30, cfg)
 	defer l.Close()
 
 	const appid basics.AppIndex = 1
diff --git a/ledger/ledger.go b/ledger/ledger.go
index bf3e6f0a74..1064ad54f3 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -376,7 +376,7 @@ func initBlocksDB(tx *sql.Tx, l *Ledger, initBlocks []bookkeeping.Block, isArchi
 // Close reclaims resources used by the ledger (namely, the database connection
 // and goroutines used by trackers).
 func (l *Ledger) Close() {
-	// we shut the the blockqueue first, since it's sync goroutine dispatches calls
+	// we shut the blockqueue first, since its sync goroutine dispatches calls
 	// back to the trackers.
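
Note how NewDoubleLedger above threads the same config.Local into both of its internal ledgers, so a cache-disabled run exercises the generating ledger and the validating ledger alike. The typical call site elsewhere in this patch is simply:

    cfg := config.GetDefaultLocal()
    dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg)
    defer dl.Close()
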
if l.blockQ != nil { l.blockQ.stop() diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 6a2ca25e65..9a001e7ecd 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -197,19 +197,23 @@ func (l *Ledger) addBlockTxns(t *testing.T, accounts map[basics.Address]basics.A return l.AddBlock(blk, agreement.Certificate{}) } -func TestLedgerBasic(t *testing.T) { - partitiontest.PartitionTest(t) - +func testLedgerBasic(t *testing.T, cfg config.Local) { genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100) const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = true log := logging.TestingLog(t) l, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) require.NoError(t, err, "could not open ledger") defer l.Close() } +func TestLedgerBasic(t *testing.T) { + partitiontest.PartitionTest(t) + cfg := config.GetDefaultLocal() + cfg.Archival = true + + ledgertesting.WithAndWithoutLRUCache(t, cfg, testLedgerBasic) +} + func TestLedgerBlockHeaders(t *testing.T) { partitiontest.PartitionTest(t) @@ -1503,14 +1507,10 @@ func triggerTrackerFlush(t *testing.T, l *Ledger, genesisInitState ledgercore.In l.trackers.waitAccountsWriting() } -func TestLedgerReload(t *testing.T) { - partitiontest.PartitionTest(t) - +func testLedgerReload(t *testing.T, cfg config.Local) { dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) genesisInitState := getInitState() const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = true log := logging.TestingLog(t) log.SetLevel(logging.Info) l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) @@ -1539,6 +1539,13 @@ func TestLedgerReload(t *testing.T) { } } +func TestLedgerReload(t *testing.T) { + partitiontest.PartitionTest(t) + cfg := config.GetDefaultLocal() + cfg.Archival = true + ledgertesting.WithAndWithoutLRUCache(t, cfg, testLedgerReload) +} + func TestWaitLedgerReload(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) @@ -2883,17 +2890,13 @@ func verifyVotersContent(t *testing.T, expected map[basics.Round]*ledgercore.Vot } } -func TestVotersReloadFromDisk(t *testing.T) { - partitiontest.PartitionTest(t) - +func testVotersReloadFromDisk(t *testing.T, cfg config.Local) { proto := config.Consensus[protocol.ConsensusCurrentVersion] dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) genesisInitState := getInitState() genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = false - cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + log := logging.TestingLog(t) log.SetLevel(logging.Info) l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) @@ -2931,17 +2934,26 @@ func TestVotersReloadFromDisk(t *testing.T) { verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache) } -func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) { +func TestVotersReloadFromDisk(t *testing.T) { partitiontest.PartitionTest(t) + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + + cfg := config.GetDefaultLocal() + cfg.Archival = false + cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + + ledgertesting.WithAndWithoutLRUCache(t, cfg, testVotersReloadFromDisk) +} + +func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg config.Local) { proto := config.Consensus[protocol.ConsensusCurrentVersion] dbName := fmt.Sprintf("%s.%d", t.Name(), 
crypto.RandUint64()) genesisInitState := getInitState() genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = false - cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + log := logging.TestingLog(t) log.SetLevel(logging.Info) l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) @@ -2991,17 +3003,25 @@ func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) { verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache) } -func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) { +func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) { partitiontest.PartitionTest(t) proto := config.Consensus[protocol.ConsensusCurrentVersion] + cfg := config.GetDefaultLocal() + cfg.Archival = false + cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + + ledgertesting.WithAndWithoutLRUCache(t, cfg, testVotersReloadFromDiskAfterOneStateProofCommitted) +} + +func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local) { + proto := config.Consensus[protocol.ConsensusCurrentVersion] + dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) genesisInitState := getInitState() genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = false - cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + log := logging.TestingLog(t) log.SetLevel(logging.Info) l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) @@ -3052,3 +3072,15 @@ func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) { require.False(t, found) require.Equal(t, beforeRemoveVotersLen, len(l.acctsOnline.voters.votersForRoundCache)) } + +func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) { + partitiontest.PartitionTest(t) + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + + cfg := config.GetDefaultLocal() + cfg.Archival = false + cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + + ledgertesting.WithAndWithoutLRUCache(t, cfg, testVotersReloadFromDiskPassRecoveryPeriod) +} diff --git a/ledger/lruaccts.go b/ledger/lruaccts.go index 22928012af..a6962cab81 100644 --- a/ledger/lruaccts.go +++ b/ledger/lruaccts.go @@ -23,22 +23,25 @@ import ( ) // lruAccounts provides a storage class for the most recently used accounts data. -// It doesn't have any synchronization primitive on it's own and require to be -// syncronized by the caller. +// It doesn't have any synchronization primitive on its own and require to be +// synchronized by the caller. type lruAccounts struct { // accountsList contain the list of persistedAccountData, where the front ones are the most "fresh" // and the ones on the back are the oldest. accountsList *persistedAccountDataList // accounts provides fast access to the various elements in the list by using the account address + // if lruAccounts is set with pendingWrites 0, then accounts is nil accounts map[basics.Address]*persistedAccountDataListNode // pendingAccounts are used as a way to avoid taking a write-lock. 
When the caller needs to "materialize" these,
	// it would call flushPendingWrites and these would be merged into the accounts/accountsList
+	// if lruAccounts is set with pendingWrites 0, then pendingAccounts is nil
 	pendingAccounts chan store.PersistedAccountData
 	// log interface; used for logging the threshold event.
 	log logging.Logger
 	// pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingAccounts entries
 	pendingWritesWarnThreshold int
+	// if lruAccounts is set with pendingWrites 0, then pendingNotFound and notFound are nil
 	pendingNotFound chan basics.Address
 	notFound map[basics.Address]struct{}
 }
@@ -46,11 +49,13 @@ type lruAccounts struct {
 // init initializes the lruAccounts for use.
 // thread locking semantics : write lock
 func (m *lruAccounts) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
-	m.accountsList = newPersistedAccountList().allocateFreeNodes(pendingWrites)
-	m.accounts = make(map[basics.Address]*persistedAccountDataListNode, pendingWrites)
-	m.pendingAccounts = make(chan store.PersistedAccountData, pendingWrites)
-	m.notFound = make(map[basics.Address]struct{}, pendingWrites)
-	m.pendingNotFound = make(chan basics.Address, pendingWrites)
+	if pendingWrites > 0 {
+		m.accountsList = newPersistedAccountList().allocateFreeNodes(pendingWrites)
+		m.accounts = make(map[basics.Address]*persistedAccountDataListNode, pendingWrites)
+		m.pendingAccounts = make(chan store.PersistedAccountData, pendingWrites)
+		m.notFound = make(map[basics.Address]struct{}, pendingWrites)
+		m.pendingNotFound = make(chan basics.Address, pendingWrites)
+	}
 	m.log = log
 	m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
 }
@@ -127,6 +132,9 @@ func (m *lruAccounts) writeNotFoundPending(addr basics.Address) {
 // to be promoted to the front of the list.
 // thread locking semantics : write lock
 func (m *lruAccounts) write(acctData store.PersistedAccountData) {
+	if m.accounts == nil {
+		return
+	}
 	if el := m.accounts[acctData.Addr]; el != nil {
 		// already exists; is it a newer ?
 		if el.Value.Before(&acctData) {
@@ -144,6 +152,9 @@ func (m *lruAccounts) write(acctData store.PersistedAccountData) {
 // recently used entries.
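
Only the mutating paths need the nil guards introduced here; the lookup side can stay untouched because reading from a nil Go map is well defined and simply reports a miss. As a reminder of the language semantics:

    var m map[string]int // nil, as when the cache is disabled
    v, ok := m["key"]    // safe: v == 0, ok == false
    // m["key"] = 1      // would panic: assignment to entry in nil map
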
// thread locking semantics : write lock func (m *lruAccounts) prune(newSize int) (removed int) { + if m.accounts == nil { + return + } for { if len(m.accounts) <= newSize { break diff --git a/ledger/lruaccts_test.go b/ledger/lruaccts_test.go index 625d40f3ea..d5f8fdcab9 100644 --- a/ledger/lruaccts_test.go +++ b/ledger/lruaccts_test.go @@ -88,6 +88,42 @@ func TestLRUBasicAccounts(t *testing.T) { } } +func TestLRUAccountsDisable(t *testing.T) { + partitiontest.PartitionTest(t) + + var baseAcct lruAccounts + baseAcct.init(logging.TestingLog(t), 0, 1) + + accountsNum := 5 + + for i := 0; i < accountsNum; i++ { + go func(i int) { + time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) + acct := store.PersistedAccountData{ + Addr: basics.Address(crypto.Hash([]byte{byte(i)})), + Round: basics.Round(i), + Rowid: int64(i), + AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + } + baseAcct.writePending(acct) + }(i) + } + require.Empty(t, baseAcct.pendingAccounts) + baseAcct.flushPendingWrites() + require.Empty(t, baseAcct.accounts) + + for i := 0; i < accountsNum; i++ { + acct := store.PersistedAccountData{ + Addr: basics.Address(crypto.Hash([]byte{byte(i)})), + Round: basics.Round(i), + Rowid: int64(i), + AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + } + baseAcct.write(acct) + } + require.Empty(t, baseAcct.accounts) +} + func TestLRUAccountsPendingWrites(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/ledger/lrukv.go b/ledger/lrukv.go index 77986eb1b6..420f87f5dc 100644 --- a/ledger/lrukv.go +++ b/ledger/lrukv.go @@ -30,18 +30,20 @@ type cachedKVData struct { } // lruKV provides a storage class for the most recently used kv data. -// It doesn't have any synchronization primitive on it's own and require to be -// syncronized by the caller. +// It doesn't have any synchronization primitive on its own and require to be +// synchronized by the caller. type lruKV struct { // kvList contain the list of persistedKVData, where the front ones are the most "fresh" // and the ones on the back are the oldest. kvList *persistedKVDataList // kvs provides fast access to the various elements in the list by using the key + // if lruKV is set with pendingWrites 0, then kvs is nil kvs map[string]*persistedKVDataListNode // pendingKVs are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these, // it would call flushPendingWrites and these would be merged into the kvs/kvList + // if lruKV is set with pendingWrites 0, then pendingKVs is nil pendingKVs chan cachedKVData // log interface; used for logging the threshold event. @@ -54,9 +56,11 @@ type lruKV struct { // init initializes the lruKV for use. // thread locking semantics : write lock func (m *lruKV) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) { - m.kvList = newPersistedKVList().allocateFreeNodes(pendingWrites) - m.kvs = make(map[string]*persistedKVDataListNode, pendingWrites) - m.pendingKVs = make(chan cachedKVData, pendingWrites) + if pendingWrites > 0 { + m.kvList = newPersistedKVList().allocateFreeNodes(pendingWrites) + m.kvs = make(map[string]*persistedKVDataListNode, pendingWrites) + m.pendingKVs = make(chan cachedKVData, pendingWrites) + } m.log = log m.pendingWritesWarnThreshold = pendingWritesWarnThreshold } @@ -103,6 +107,9 @@ func (m *lruKV) writePending(kv store.PersistedKVData, key string) { // to be promoted to the front of the list. 
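
writePending needs no explicit guard either, assuming it keeps the usual non-blocking send that TestLRUAccountsDisable above relies on: a nil channel is never ready, so the select falls through to its default case and the pending write is dropped. A minimal demonstration of that semantics:

    var ch chan int // nil, as when the cache is disabled
    select {
    case ch <- 1:
        // never taken: a send on a nil channel is never ready
    default:
        // taken instead; the pending write is silently dropped
    }
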
// thread locking semantics : write lock func (m *lruKV) write(kvData store.PersistedKVData, key string) { + if m.kvs == nil { + return + } if el := m.kvs[key]; el != nil { // already exists; is it a newer ? if el.Value.Before(&kvData) { @@ -120,6 +127,9 @@ func (m *lruKV) write(kvData store.PersistedKVData, key string) { // recently used entries. // thread locking semantics : write lock func (m *lruKV) prune(newSize int) (removed int) { + if m.kvs == nil { + return + } for { if len(m.kvs) <= newSize { break diff --git a/ledger/lrukv_test.go b/ledger/lrukv_test.go index 18d0a47072..ce0eb02c03 100644 --- a/ledger/lrukv_test.go +++ b/ledger/lrukv_test.go @@ -80,6 +80,41 @@ func TestLRUBasicKV(t *testing.T) { } } +func TestLRUKVDisable(t *testing.T) { + partitiontest.PartitionTest(t) + + var baseKV lruKV + baseKV.init(logging.TestingLog(t), 0, 1) + + kvNum := 5 + + for i := 1; i <= kvNum; i++ { + go func(i int) { + time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) + kvValue := fmt.Sprintf("kv %d value", i) + kv := store.PersistedKVData{ + Value: []byte(kvValue), + Round: basics.Round(i), + } + baseKV.writePending(kv, fmt.Sprintf("key%d", i)) + }(i) + } + require.Empty(t, baseKV.pendingKVs) + baseKV.flushPendingWrites() + require.Empty(t, baseKV.kvs) + + for i := 0; i < kvNum; i++ { + kvValue := fmt.Sprintf("kv %d value", i) + kv := store.PersistedKVData{ + Value: []byte(kvValue), + Round: basics.Round(i), + } + baseKV.write(kv, fmt.Sprintf("key%d", i)) + } + + require.Empty(t, baseKV.kvs) +} + func TestLRUKVPendingWrites(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/ledger/lruonlineaccts.go b/ledger/lruonlineaccts.go index 35c8224d41..cc8917bf57 100644 --- a/ledger/lruonlineaccts.go +++ b/ledger/lruonlineaccts.go @@ -30,9 +30,11 @@ type lruOnlineAccounts struct { // and the ones on the back are the oldest. accountsList *persistedOnlineAccountDataList // accounts provides fast access to the various elements in the list by using the account address + // if lruOnlineAccounts is set with pendingWrites 0, then accounts is nil accounts map[basics.Address]*persistedOnlineAccountDataListNode // pendingAccounts are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these, // it would call flushPendingWrites and these would be merged into the accounts/accountsList + // if lruOnlineAccounts is set with pendingWrites 0, then pendingAccounts is nil pendingAccounts chan store.PersistedOnlineAccountData // log interface; used for logging the threshold event. log logging.Logger @@ -43,9 +45,11 @@ type lruOnlineAccounts struct { // init initializes the lruAccounts for use. 
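
The flush path is symmetric, assuming flushPendingWrites keeps the usual non-blocking receive loop: a receive from a nil channel is never ready either, so the drain exits through its default case immediately when the cache is disabled. A sketch of that presumed loop:

    for {
        select {
        case acct := <-m.pendingAccounts: // never ready while pendingAccounts is nil
            m.write(acct)
        default:
            return
        }
    }
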
// thread locking semantics : write lock func (m *lruOnlineAccounts) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) { - m.accountsList = newPersistedOnlineAccountList().allocateFreeNodes(pendingWrites) - m.accounts = make(map[basics.Address]*persistedOnlineAccountDataListNode, pendingWrites) - m.pendingAccounts = make(chan store.PersistedOnlineAccountData, pendingWrites) + if pendingWrites > 0 { + m.accountsList = newPersistedOnlineAccountList().allocateFreeNodes(pendingWrites) + m.accounts = make(map[basics.Address]*persistedOnlineAccountDataListNode, pendingWrites) + m.pendingAccounts = make(chan store.PersistedOnlineAccountData, pendingWrites) + } m.log = log m.pendingWritesWarnThreshold = pendingWritesWarnThreshold } @@ -92,6 +96,9 @@ func (m *lruOnlineAccounts) writePending(acct store.PersistedOnlineAccountData) // to be promoted to the front of the list. // thread locking semantics : write lock func (m *lruOnlineAccounts) write(acctData store.PersistedOnlineAccountData) { + if m.accounts == nil { + return + } if el := m.accounts[acctData.Addr]; el != nil { // already exists; is it a newer ? if el.Value.Before(&acctData) { @@ -109,6 +116,9 @@ func (m *lruOnlineAccounts) write(acctData store.PersistedOnlineAccountData) { // recently used entries. // thread locking semantics : write lock func (m *lruOnlineAccounts) prune(newSize int) (removed int) { + if m.accounts == nil { + return + } for { if len(m.accounts) <= newSize { break diff --git a/ledger/lruonlineaccts_test.go b/ledger/lruonlineaccts_test.go index 0e0b314385..84acdb684e 100644 --- a/ledger/lruonlineaccts_test.go +++ b/ledger/lruonlineaccts_test.go @@ -87,6 +87,42 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { } } +func TestLRUOnlineAccountsDisable(t *testing.T) { + partitiontest.PartitionTest(t) + + var baseOnlineAcct lruOnlineAccounts + baseOnlineAcct.init(logging.TestingLog(t), 0, 1) + + accountsNum := 5 + + for i := 0; i < accountsNum; i++ { + go func(i int) { + time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) + acct := store.PersistedOnlineAccountData{ + Addr: basics.Address(crypto.Hash([]byte{byte(i)})), + Round: basics.Round(i), + Rowid: int64(i), + AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + } + baseOnlineAcct.writePending(acct) + }(i) + } + require.Empty(t, baseOnlineAcct.pendingAccounts) + baseOnlineAcct.flushPendingWrites() + require.Empty(t, baseOnlineAcct.accounts) + + for i := 0; i < accountsNum; i++ { + acct := store.PersistedOnlineAccountData{ + Addr: basics.Address(crypto.Hash([]byte{byte(i)})), + Round: basics.Round(i), + Rowid: int64(i), + AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + } + baseOnlineAcct.write(acct) + } + require.Empty(t, baseOnlineAcct.accounts) +} + func TestLRUOnlineAccountsPendingWrites(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/ledger/lruresources.go b/ledger/lruresources.go index 222c1d6c07..779ca3cb95 100644 --- a/ledger/lruresources.go +++ b/ledger/lruresources.go @@ -29,19 +29,21 @@ type cachedResourceData struct { address basics.Address } -// lruResources provides a storage class for the most recently used resources data. -// It doesn't have any synchronization primitive on it's own and require to be -// syncronized by the caller. +// lruResources provides a storage class for the most recently used resources' data. 
+// It doesn't have any synchronization primitive on its own and requires to be
+// synchronized by the caller.
 type lruResources struct {
 	// resourcesList contain the list of persistedResourceData, where the front ones are the most "fresh"
 	// and the ones on the back are the oldest.
 	resourcesList *persistedResourcesDataList
 
 	// resources provides fast access to the various elements in the list by using the account address
+	// if lruResources is set with pendingWrites 0, then resources is nil
 	resources map[accountCreatable]*persistedResourcesDataListNode
 
 	// pendingResources are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
 	// it would call flushPendingWrites and these would be merged into the resources/resourcesList
+	// if lruResources is set with pendingWrites 0, then pendingResources is nil
 	pendingResources chan cachedResourceData
 
 	// log interface; used for logging the threshold event.
@@ -50,6 +52,7 @@ type lruResources struct {
 	// pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingResources entries
 	pendingWritesWarnThreshold int
 
+	// if lruResources is set with pendingWrites 0, then pendingNotFound and notFound are nil
 	pendingNotFound chan accountCreatable
 	notFound map[accountCreatable]struct{}
 }
@@ -57,11 +60,13 @@ type lruResources struct {
 // init initializes the lruResources for use.
 // thread locking semantics : write lock
 func (m *lruResources) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
-	m.resourcesList = newPersistedResourcesList().allocateFreeNodes(pendingWrites)
-	m.resources = make(map[accountCreatable]*persistedResourcesDataListNode, pendingWrites)
-	m.pendingResources = make(chan cachedResourceData, pendingWrites)
-	m.notFound = make(map[accountCreatable]struct{}, pendingWrites)
-	m.pendingNotFound = make(chan accountCreatable, pendingWrites)
+	if pendingWrites > 0 {
+		m.resourcesList = newPersistedResourcesList().allocateFreeNodes(pendingWrites)
+		m.resources = make(map[accountCreatable]*persistedResourcesDataListNode, pendingWrites)
+		m.pendingResources = make(chan cachedResourceData, pendingWrites)
+		m.notFound = make(map[accountCreatable]struct{}, pendingWrites)
+		m.pendingNotFound = make(chan accountCreatable, pendingWrites)
+	}
 	m.log = log
 	m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
 }
@@ -149,6 +154,9 @@ func (m *lruResources) writeNotFoundPending(addr basics.Address, idx basics.Crea
 // to be promoted to the front of the list.
 // thread locking semantics : write lock
 func (m *lruResources) write(resData store.PersistedResourcesData, addr basics.Address) {
+	if m.resources == nil {
+		return
+	}
 	if el := m.resources[accountCreatable{address: addr, index: resData.Aidx}]; el != nil {
 		// already exists; is it a newer ?
 		if el.Value.Before(&resData) {
@@ -166,6 +174,9 @@ func (m *lruResources) write(resData store.PersistedResourcesData, addr basics.A
 // recently used entries.
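
One small Go note on the prune guards added throughout this patch: each is a bare return inside a function with a named result, so a disabled cache reports zero removals. In isolation:

    // Equivalent shape of the guarded prune (sketch).
    func prune(entries map[string]int, newSize int) (removed int) {
        if entries == nil {
            return // bare return yields the named result, removed == 0
        }
        for key := range entries {
            if len(entries) <= newSize {
                break
            }
            delete(entries, key)
            removed++
        }
        return removed
    }
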
// thread locking semantics : write lock func (m *lruResources) prune(newSize int) (removed int) { + if m.resources == nil { + return + } for { if len(m.resources) <= newSize { break diff --git a/ledger/lruresources_test.go b/ledger/lruresources_test.go index 6c97309031..8424fe2fe3 100644 --- a/ledger/lruresources_test.go +++ b/ledger/lruresources_test.go @@ -89,6 +89,48 @@ func TestLRUBasicResources(t *testing.T) { } } +func TestLRUResourcesDisable(t *testing.T) { + partitiontest.PartitionTest(t) + + var baseRes lruResources + baseRes.init(logging.TestingLog(t), 0, 1) + + resourceNum := 5 + + for i := 1; i <= resourceNum; i++ { + go func(i int) { + time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) + addr := basics.Address(crypto.Hash([]byte{byte(i)})) + res := store.PersistedResourcesData{ + Addrid: int64(i), + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: store.ResourcesData{Total: uint64(i)}, + } + baseRes.writePending(res, addr) + baseRes.writeNotFoundPending(addr, basics.CreatableIndex(i)) + }(i) + } + require.Empty(t, baseRes.pendingResources) + require.Empty(t, baseRes.pendingNotFound) + baseRes.flushPendingWrites() + require.Empty(t, baseRes.resources) + require.Empty(t, baseRes.notFound) + + for i := 0; i < resourceNum; i++ { + addr := basics.Address(crypto.Hash([]byte{byte(i)})) + res := store.PersistedResourcesData{ + Addrid: int64(i), + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: store.ResourcesData{Total: uint64(i)}, + } + baseRes.write(res, addr) + } + + require.Empty(t, baseRes.resources) +} + func TestLRUResourcesPendingWrites(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/ledger/simple_test.go b/ledger/simple_test.go index e934e3f013..d4e44f0c3f 100644 --- a/ledger/simple_test.go +++ b/ledger/simple_test.go @@ -35,23 +35,18 @@ import ( "github.com/stretchr/testify/require" ) -func newSimpleLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger { - return newSimpleLedgerWithConsensusVersion(t, balances, protocol.ConsensusFuture) -} - -func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) *Ledger { +func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local) *Ledger { var genHash crypto.Digest crypto.RandBytes(genHash[:]) - return newSimpleLedgerFull(t, balances, cv, genHash) + return newSimpleLedgerFull(t, balances, cv, genHash, cfg) } -func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest) *Ledger { +func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest, cfg config.Local) *Ledger { genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash) require.NoError(t, err) require.False(t, genBlock.FeeSink.IsZero()) require.False(t, genBlock.RewardsPool.IsZero()) dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) - cfg := config.GetDefaultLocal() cfg.Archival = true l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{ Block: genBlock, diff --git a/ledger/testing/consensusRange.go b/ledger/testing/consensusRange.go index e96bcc7280..02ae83fce9 100644 --- a/ledger/testing/consensusRange.go +++ b/ledger/testing/consensusRange.go @@ -17,9 +17,11 @@ package testing import ( + "crypto/rand" "fmt" "testing" + "github.com/algorand/go-algorand/config" 
"github.com/algorand/go-algorand/protocol" "github.com/stretchr/testify/require" ) @@ -59,6 +61,25 @@ var consensusByNumber = []protocol.ConsensusVersion{ protocol.ConsensusFuture, } +func versionStringFromIndex(index int) string { + var version string + if index == len(consensusByNumber)-1 { + version = "vFuture" + } else { + version = fmt.Sprintf("v%d", index) + } + return version +} + +// randBool samples randomness for TestConsensusRange, +// which tests with or without LRU Cache in ledger +func randBool(t *testing.T) bool { + var byteBuffer [1]byte + _, err := rand.Read(byteBuffer[:]) + require.NoError(t, err) + return byteBuffer[0]%2 == 0 +} + // TestConsensusRange allows for running tests against a range of consensus // versions. Generally `start` will be the version that introduced the feature, // and `stop` will be 0 to indicate it should work right on up through vFuture. @@ -69,20 +90,18 @@ var consensusByNumber = []protocol.ConsensusVersion{ // created and inserted in consensusByNumber. At that point, your feature is // probably active in that version. (If it's being held in vFuture, just // increment your `start`.) -func TestConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int, cv protocol.ConsensusVersion)) { +func TestConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local)) { if stop == 0 { // Treat 0 as "future" stop = len(consensusByNumber) - 1 } require.LessOrEqual(t, start, stop) + cfg := config.GetDefaultLocal() for i := start; i <= stop; i++ { - var version string - if i == len(consensusByNumber)-1 { - version = "vFuture" - } else { - version = fmt.Sprintf("v%d", i) - } - t.Run(fmt.Sprintf("cv=%s", version), func(t *testing.T) { - test(t, i, consensusByNumber[i]) + version := versionStringFromIndex(i) + disable := randBool(t) + t.Run(fmt.Sprintf("cv=%s,LRU-cache-disable=%t", version, disable), func(t *testing.T) { + cfg.DisableLedgerLRUCache = disable + test(t, i, consensusByNumber[i], cfg) }) } } @@ -93,12 +112,7 @@ func BenchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B, stop = len(consensusByNumber) - 1 } for i := start; i <= stop; i++ { - var version string - if i == len(consensusByNumber)-1 { - version = "vFuture" - } else { - version = fmt.Sprintf("v%d", i) - } + version := versionStringFromIndex(i) b.Run(fmt.Sprintf("cv=%s", version), func(b *testing.B) { bench(b, i, consensusByNumber[i]) }) diff --git a/ledger/testing/consensusRange_test.go b/ledger/testing/consensusRange_test.go index 325373a396..cd5baaa817 100644 --- a/ledger/testing/consensusRange_test.go +++ b/ledger/testing/consensusRange_test.go @@ -55,4 +55,7 @@ func TestReleasedVersion(t *testing.T) { require.NotZero(t, params) // just making sure an empty one didn't get put in } + require.Equal(t, versionStringFromIndex(len(consensusByNumber)-1), "vFuture") + require.Equal(t, versionStringFromIndex(36), "v36") + } diff --git a/ledger/testing/withAndWithoutCache.go b/ledger/testing/withAndWithoutCache.go new file mode 100644 index 0000000000..0bb1e75098 --- /dev/null +++ b/ledger/testing/withAndWithoutCache.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package testing + +import ( + "testing" + + "github.com/algorand/go-algorand/config" +) + +// WithAndWithoutLRUCache allows for running a test with ledger LRU cache activated and deactivated. +func WithAndWithoutLRUCache(t *testing.T, cfg config.Local, test func(t *testing.T, cfg config.Local)) { + cfg.DisableLedgerLRUCache = false + t.Run("test with lru cache", func(t *testing.T) { + test(t, cfg) + }) + cfg.DisableLedgerLRUCache = true + t.Run("test without lru cache", func(t *testing.T) { + test(t, cfg) + }) +} diff --git a/ledger/txnbench_test.go b/ledger/txnbench_test.go index ddc7aeba99..f2b4b8aa4a 100644 --- a/ledger/txnbench_test.go +++ b/ledger/txnbench_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/txntest" @@ -35,7 +36,7 @@ import ( func BenchmarkTxnTypes(b *testing.B) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() ledgertesting.BenchConsensusRange(b, 30, 0, func(b *testing.B, ver int, cv protocol.ConsensusVersion) { - l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv) + l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv, config.GetDefaultLocal()) defer l.Close() createasa := txntest.Txn{ diff --git a/test/testdata/configs/config-v27.json b/test/testdata/configs/config-v27.json index 137578e0fa..76d25158c4 100644 --- a/test/testdata/configs/config-v27.json +++ b/test/testdata/configs/config-v27.json @@ -28,6 +28,7 @@ "DNSSecurityFlags": 1, "DeadlockDetection": 0, "DeadlockDetectionThreshold": 30, + "DisableLedgerLRUCache": false, "DisableLocalhostConnectionRateLimit": true, "DisableNetworking": false, "DisableOutgoingConnectionThrottling": false, From 3b197f48b185741c9e88cfa5f3df39fd4c123c93 Mon Sep 17 00:00:00 2001 From: AlgoAxel <113933518+AlgoAxel@users.noreply.github.com> Date: Fri, 17 Feb 2023 13:20:24 -0500 Subject: [PATCH 45/81] network: connection deduplication (#4695) Introduces "Identity Challenge" exchange during peering: two peers exchange signed challenges to register one another with public keys that validate their identities to be used as the mechanism to prevent duplicate and bidirectional connections between peers on the network. 
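In outline, the exchange works as below. This is a minimal illustrative sketch using
only the Go standard library: ed25519 stands in for go-algorand's crypto package, and
none of these names are part of the wsNetwork API.

    package main

    import (
    	"crypto/ed25519"
    	"crypto/rand"
    	"fmt"
    )

    // newChallenge returns 32 random bytes, mirroring identityChallengeValue.
    func newChallenge() []byte {
    	c := make([]byte, 32)
    	if _, err := rand.Read(c); err != nil {
    		panic(err)
    	}
    	return c
    }

    func main() {
    	// Each peer holds a long-lived identity key pair.
    	reqPub, reqPriv, _ := ed25519.GenerateKey(rand.Reader)
    	respPub, respPriv, _ := ed25519.GenerateKey(rand.Reader)

    	// Message 1: the requester sends a signed random challenge.
    	c1 := newChallenge()
    	sig1 := ed25519.Sign(reqPriv, c1)

    	// Message 2: the responder verifies Message 1, then signs the original
    	// challenge together with a fresh response challenge of its own.
    	if !ed25519.Verify(reqPub, c1, sig1) {
    		panic("message 1 signature invalid")
    	}
    	c2 := newChallenge()
    	sig2 := ed25519.Sign(respPriv, append(c1, c2...))

    	// The requester verifies Message 2 (responder identity now verified)...
    	if !ed25519.Verify(respPub, append(c1, c2...), sig2) {
    		panic("message 2 signature invalid")
    	}
    	// ...and sends Message 3, proving it owns its key by signing c2.
    	sig3 := ed25519.Sign(reqPriv, c2)
    	fmt.Println("requester verified:", ed25519.Verify(reqPub, c2, sig3))
    }

Once both signatures check out, each side keys the connection on the peer's public
key, so a second connection claiming the same identity can be recognized and closed.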
Co-authored-by: chris erway --- .gitignore | 1 + Makefile | 2 +- config/localTemplate.go | 6 +- network/connPerfMon.go | 1 + network/msgp_gen.go | 1088 +++++++++++++++++++++++++++++++++++ network/msgp_gen_test.go | 435 ++++++++++++++ network/netidentity.go | 412 +++++++++++++ network/netidentity_test.go | 366 ++++++++++++ network/phonebook.go | 2 + network/requestTracker.go | 1 + network/topics.go | 4 + network/wsNetwork.go | 121 +++- network/wsNetwork_test.go | 920 ++++++++++++++++++++++++++++- network/wsPeer.go | 37 +- protocol/hash.go | 3 + protocol/tags.go | 22 +- 16 files changed, 3389 insertions(+), 32 deletions(-) create mode 100644 network/msgp_gen.go create mode 100644 network/msgp_gen_test.go create mode 100644 network/netidentity.go create mode 100644 network/netidentity_test.go diff --git a/.gitignore b/.gitignore index ebd0ef239b..9781154449 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ crypto/libs *~ *.swp *.swo +*.swn # Mac .DS_Store diff --git a/Makefile b/Makefile index a418c7e35e..b9ad2e319b 100644 --- a/Makefile +++ b/Makefile @@ -85,7 +85,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \ UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ )) ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... )) -MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./ledger/store ./ledger/encoded ./stateproof ./data/account ./daemon/algod/api/spec/v2 +MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./network ./node ./ledger ./ledger/ledgercore ./ledger/store ./ledger/encoded ./stateproof ./data/account ./daemon/algod/api/spec/v2 default: build diff --git a/config/localTemplate.go b/config/localTemplate.go index 8172545575..95a8a493b1 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -56,7 +56,11 @@ type Local struct { // 1 * time.Minute = 60000000000 ns ReconnectTime time.Duration `version[0]:"60" version[1]:"60000000000"` - // what we should tell people to connect to + // The public address to connect to that is advertised to other nodes. + // For MainNet relays, make sure this entry includes the full SRV host name + // plus the publicly-accessible port number. + // A valid entry will avoid "self-gossip" and is used for identity exchange + // to deduplicate redundant connections PublicAddress string `version[0]:""` MaxConnectionsPerIP int `version[3]:"30" version[27]:"15"` diff --git a/network/connPerfMon.go b/network/connPerfMon.go index e74614ae10..d254aa1279 100644 --- a/network/connPerfMon.go +++ b/network/connPerfMon.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/crypto" ) +//msgp:ignore pmStage type pmStage int const ( diff --git a/network/msgp_gen.go b/network/msgp_gen.go new file mode 100644 index 0000000000..70b03ccfc7 --- /dev/null +++ b/network/msgp_gen.go @@ -0,0 +1,1088 @@ +package network + +// Code generated by github.com/algorand/msgp DO NOT EDIT. 
+ +import ( + "github.com/algorand/msgp/msgp" +) + +// The following msgp objects are implemented in this file: +// disconnectReason +// |-----> MarshalMsg +// |-----> CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> Msgsize +// |-----> MsgIsZero +// +// identityChallenge +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// identityChallengeResponse +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// identityChallengeResponseSigned +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// identityChallengeSigned +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// identityChallengeValue +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// identityVerificationMessage +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// identityVerificationMessageSigned +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// + +// MarshalMsg implements msgp.Marshaler +func (z disconnectReason) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendString(o, string(z)) + return +} + +func (_ disconnectReason) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(disconnectReason) + if !ok { + _, ok = (z).(*disconnectReason) + } + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *disconnectReason) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 string + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = disconnectReason(zb0001) + } + o = bts + return +} + +func (_ *disconnectReason) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*disconnectReason) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z disconnectReason) Msgsize() (s int) { + s = msgp.StringPrefixSize + len(string(z)) + return +} + +// MsgIsZero returns whether this is a zero value +func (z disconnectReason) MsgIsZero() bool { + return z == "" +} + +// MarshalMsg implements msgp.Marshaler +func (z *identityChallenge) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0002Len := uint32(3) + var zb0002Mask uint8 /* 4 bits */ + if len((*z).PublicAddress) == 0 { + zb0002Len-- + zb0002Mask |= 0x2 + } + if (*z).Challenge == (identityChallengeValue{}) { + zb0002Len-- + zb0002Mask |= 0x4 + } + if (*z).Key.MsgIsZero() { + zb0002Len-- + zb0002Mask |= 0x8 + } + // variable map header, size zb0002Len + o = append(o, 0x80|uint8(zb0002Len)) + if zb0002Len != 0 { + if (zb0002Mask & 0x2) == 0 { // if not empty + // string "a" + o = append(o, 0xa1, 0x61) + o = msgp.AppendBytes(o, (*z).PublicAddress) + } + if (zb0002Mask & 0x4) 
== 0 { // if not empty + // string "c" + o = append(o, 0xa1, 0x63) + o = msgp.AppendBytes(o, ((*z).Challenge)[:]) + } + if (zb0002Mask & 0x8) == 0 { // if not empty + // string "pk" + o = append(o, 0xa2, 0x70, 0x6b) + o = (*z).Key.MarshalMsg(o) + } + } + return +} + +func (_ *identityChallenge) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallenge) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityChallenge) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0002 int + var zb0003 bool + zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 > 0 { + zb0002-- + bts, err = (*z).Key.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Key") + return + } + } + if zb0002 > 0 { + zb0002-- + bts, err = msgp.ReadExactBytes(bts, ((*z).Challenge)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Challenge") + return + } + } + if zb0002 > 0 { + zb0002-- + var zb0004 int + zb0004, err = msgp.ReadBytesBytesHeader(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PublicAddress") + return + } + if zb0004 > maxAddressLen { + err = msgp.ErrOverflow(uint64(zb0004), uint64(maxAddressLen)) + return + } + (*z).PublicAddress, bts, err = msgp.ReadBytesBytes(bts, (*z).PublicAddress) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PublicAddress") + return + } + } + if zb0002 > 0 { + err = msgp.ErrTooManyArrayFields(zb0002) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0003 { + (*z) = identityChallenge{} + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "pk": + bts, err = (*z).Key.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Key") + return + } + case "c": + bts, err = msgp.ReadExactBytes(bts, ((*z).Challenge)[:]) + if err != nil { + err = msgp.WrapError(err, "Challenge") + return + } + case "a": + var zb0005 int + zb0005, err = msgp.ReadBytesBytesHeader(bts) + if err != nil { + err = msgp.WrapError(err, "PublicAddress") + return + } + if zb0005 > maxAddressLen { + err = msgp.ErrOverflow(uint64(zb0005), uint64(maxAddressLen)) + return + } + (*z).PublicAddress, bts, err = msgp.ReadBytesBytes(bts, (*z).PublicAddress) + if err != nil { + err = msgp.WrapError(err, "PublicAddress") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *identityChallenge) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallenge) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *identityChallenge) Msgsize() (s int) { + s = 1 + 3 + (*z).Key.Msgsize() + 2 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 2 + msgp.BytesPrefixSize + len((*z).PublicAddress) + return +} + +// MsgIsZero returns whether this is a zero value +func (z *identityChallenge) MsgIsZero() bool { + return ((*z).Key.MsgIsZero()) && ((*z).Challenge == (identityChallengeValue{})) && (len((*z).PublicAddress) == 0) +} + +// MarshalMsg implements msgp.Marshaler +func (z 
*identityChallengeResponse) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0003Len := uint32(3) + var zb0003Mask uint8 /* 4 bits */ + if (*z).Challenge == (identityChallengeValue{}) { + zb0003Len-- + zb0003Mask |= 0x2 + } + if (*z).Key.MsgIsZero() { + zb0003Len-- + zb0003Mask |= 0x4 + } + if (*z).ResponseChallenge == (identityChallengeValue{}) { + zb0003Len-- + zb0003Mask |= 0x8 + } + // variable map header, size zb0003Len + o = append(o, 0x80|uint8(zb0003Len)) + if zb0003Len != 0 { + if (zb0003Mask & 0x2) == 0 { // if not empty + // string "c" + o = append(o, 0xa1, 0x63) + o = msgp.AppendBytes(o, ((*z).Challenge)[:]) + } + if (zb0003Mask & 0x4) == 0 { // if not empty + // string "pk" + o = append(o, 0xa2, 0x70, 0x6b) + o = (*z).Key.MarshalMsg(o) + } + if (zb0003Mask & 0x8) == 0 { // if not empty + // string "rc" + o = append(o, 0xa2, 0x72, 0x63) + o = msgp.AppendBytes(o, ((*z).ResponseChallenge)[:]) + } + } + return +} + +func (_ *identityChallengeResponse) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeResponse) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityChallengeResponse) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0003 int + var zb0004 bool + zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0003 > 0 { + zb0003-- + bts, err = (*z).Key.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Key") + return + } + } + if zb0003 > 0 { + zb0003-- + bts, err = msgp.ReadExactBytes(bts, ((*z).Challenge)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Challenge") + return + } + } + if zb0003 > 0 { + zb0003-- + bts, err = msgp.ReadExactBytes(bts, ((*z).ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "ResponseChallenge") + return + } + } + if zb0003 > 0 { + err = msgp.ErrTooManyArrayFields(zb0003) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0004 { + (*z) = identityChallengeResponse{} + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "pk": + bts, err = (*z).Key.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Key") + return + } + case "c": + bts, err = msgp.ReadExactBytes(bts, ((*z).Challenge)[:]) + if err != nil { + err = msgp.WrapError(err, "Challenge") + return + } + case "rc": + bts, err = msgp.ReadExactBytes(bts, ((*z).ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "ResponseChallenge") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *identityChallengeResponse) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeResponse) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *identityChallengeResponse) Msgsize() (s int) { + s = 1 + 3 + (*z).Key.Msgsize() + 2 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + return +} + +// MsgIsZero 
returns whether this is a zero value +func (z *identityChallengeResponse) MsgIsZero() bool { + return ((*z).Key.MsgIsZero()) && ((*z).Challenge == (identityChallengeValue{})) && ((*z).ResponseChallenge == (identityChallengeValue{})) +} + +// MarshalMsg implements msgp.Marshaler +func (z *identityChallengeResponseSigned) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var zb0001Mask uint8 /* 3 bits */ + if (*z).Msg.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).Signature.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "icr" + o = append(o, 0xa3, 0x69, 0x63, 0x72) + o = (*z).Msg.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "sig" + o = append(o, 0xa3, 0x73, 0x69, 0x67) + o = (*z).Signature.MarshalMsg(o) + } + } + return +} + +func (_ *identityChallengeResponseSigned) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeResponseSigned) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityChallengeResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Msg.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Signature.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Signature") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = identityChallengeResponseSigned{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "icr": + bts, err = (*z).Msg.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Msg") + return + } + case "sig": + bts, err = (*z).Signature.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Signature") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *identityChallengeResponseSigned) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeResponseSigned) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *identityChallengeResponseSigned) Msgsize() (s int) { + s = 1 + 4 + (*z).Msg.Msgsize() + 4 + (*z).Signature.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *identityChallengeResponseSigned) MsgIsZero() bool { + return ((*z).Msg.MsgIsZero()) && ((*z).Signature.MsgIsZero()) +} + +// MarshalMsg implements msgp.Marshaler +func (z *identityChallengeSigned) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var 
zb0001Mask uint8 /* 3 bits */ + if (*z).Msg.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).Signature.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "ic" + o = append(o, 0xa2, 0x69, 0x63) + o = (*z).Msg.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "sig" + o = append(o, 0xa3, 0x73, 0x69, 0x67) + o = (*z).Signature.MarshalMsg(o) + } + } + return +} + +func (_ *identityChallengeSigned) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeSigned) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityChallengeSigned) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Msg.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Signature.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Signature") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = identityChallengeSigned{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "ic": + bts, err = (*z).Msg.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Msg") + return + } + case "sig": + bts, err = (*z).Signature.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Signature") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *identityChallengeSigned) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeSigned) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *identityChallengeSigned) Msgsize() (s int) { + s = 1 + 3 + (*z).Msg.Msgsize() + 4 + (*z).Signature.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *identityChallengeSigned) MsgIsZero() bool { + return ((*z).Msg.MsgIsZero()) && ((*z).Signature.MsgIsZero()) +} + +// MarshalMsg implements msgp.Marshaler +func (z *identityChallengeValue) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendBytes(o, (*z)[:]) + return +} + +func (_ *identityChallengeValue) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeValue) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityChallengeValue) UnmarshalMsg(bts []byte) (o []byte, err error) { + bts, err = msgp.ReadExactBytes(bts, (*z)[:]) + if err != nil { + err = msgp.WrapError(err) + return + } + o = bts + return +} + +func (_ *identityChallengeValue) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityChallengeValue) + return ok +} + +// Msgsize returns an upper bound estimate 
of the number of bytes occupied by the serialized message +func (z *identityChallengeValue) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + return +} + +// MsgIsZero returns whether this is a zero value +func (z *identityChallengeValue) MsgIsZero() bool { + return (*z) == (identityChallengeValue{}) +} + +// MarshalMsg implements msgp.Marshaler +func (z *identityVerificationMessage) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0002Len := uint32(1) + var zb0002Mask uint8 /* 2 bits */ + if (*z).ResponseChallenge == (identityChallengeValue{}) { + zb0002Len-- + zb0002Mask |= 0x2 + } + // variable map header, size zb0002Len + o = append(o, 0x80|uint8(zb0002Len)) + if zb0002Len != 0 { + if (zb0002Mask & 0x2) == 0 { // if not empty + // string "rc" + o = append(o, 0xa2, 0x72, 0x63) + o = msgp.AppendBytes(o, ((*z).ResponseChallenge)[:]) + } + } + return +} + +func (_ *identityVerificationMessage) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityVerificationMessage) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityVerificationMessage) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0002 int + var zb0003 bool + zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 > 0 { + zb0002-- + bts, err = msgp.ReadExactBytes(bts, ((*z).ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "ResponseChallenge") + return + } + } + if zb0002 > 0 { + err = msgp.ErrTooManyArrayFields(zb0002) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0003 { + (*z) = identityVerificationMessage{} + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "rc": + bts, err = msgp.ReadExactBytes(bts, ((*z).ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "ResponseChallenge") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *identityVerificationMessage) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityVerificationMessage) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *identityVerificationMessage) Msgsize() (s int) { + s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + return +} + +// MsgIsZero returns whether this is a zero value +func (z *identityVerificationMessage) MsgIsZero() bool { + return ((*z).ResponseChallenge == (identityChallengeValue{})) +} + +// MarshalMsg implements msgp.Marshaler +func (z *identityVerificationMessageSigned) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0002Len := uint32(2) + var zb0002Mask uint8 /* 3 bits */ + if (*z).Msg.ResponseChallenge == (identityChallengeValue{}) { + zb0002Len-- + zb0002Mask |= 0x2 + } + if (*z).Signature.MsgIsZero() { + zb0002Len-- + zb0002Mask |= 0x4 + } + // variable map header, size zb0002Len + o = append(o, 0x80|uint8(zb0002Len)) + if zb0002Len != 0 { + if (zb0002Mask & 0x2) 
== 0 { // if not empty + // string "ivm" + o = append(o, 0xa3, 0x69, 0x76, 0x6d) + // omitempty: check for empty values + zb0003Len := uint32(1) + var zb0003Mask uint8 /* 2 bits */ + if (*z).Msg.ResponseChallenge == (identityChallengeValue{}) { + zb0003Len-- + zb0003Mask |= 0x2 + } + // variable map header, size zb0003Len + o = append(o, 0x80|uint8(zb0003Len)) + if (zb0003Mask & 0x2) == 0 { // if not empty + // string "rc" + o = append(o, 0xa2, 0x72, 0x63) + o = msgp.AppendBytes(o, ((*z).Msg.ResponseChallenge)[:]) + } + } + if (zb0002Mask & 0x4) == 0 { // if not empty + // string "sig" + o = append(o, 0xa3, 0x73, 0x69, 0x67) + o = (*z).Signature.MarshalMsg(o) + } + } + return +} + +func (_ *identityVerificationMessageSigned) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*identityVerificationMessageSigned) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *identityVerificationMessageSigned) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0002 int + var zb0003 bool + zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 > 0 { + zb0002-- + var zb0004 int + var zb0005 bool + zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg") + return + } + if zb0004 > 0 { + zb0004-- + bts, err = msgp.ReadExactBytes(bts, ((*z).Msg.ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg", "struct-from-array", "ResponseChallenge") + return + } + } + if zb0004 > 0 { + err = msgp.ErrTooManyArrayFields(zb0004) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg", "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg") + return + } + if zb0005 { + (*z).Msg = identityVerificationMessage{} + } + for zb0004 > 0 { + zb0004-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg") + return + } + switch string(field) { + case "rc": + bts, err = msgp.ReadExactBytes(bts, ((*z).Msg.ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg", "ResponseChallenge") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Msg") + return + } + } + } + } + } + if zb0002 > 0 { + zb0002-- + bts, err = (*z).Signature.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Signature") + return + } + } + if zb0002 > 0 { + err = msgp.ErrTooManyArrayFields(zb0002) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0003 { + (*z) = identityVerificationMessageSigned{} + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "ivm": + var zb0006 int + var zb0007 bool + zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Msg") + return + } + if 
zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).Msg.ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "Msg", "struct-from-array", "ResponseChallenge") + return + } + } + if zb0006 > 0 { + err = msgp.ErrTooManyArrayFields(zb0006) + if err != nil { + err = msgp.WrapError(err, "Msg", "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err, "Msg") + return + } + if zb0007 { + (*z).Msg = identityVerificationMessage{} + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Msg") + return + } + switch string(field) { + case "rc": + bts, err = msgp.ReadExactBytes(bts, ((*z).Msg.ResponseChallenge)[:]) + if err != nil { + err = msgp.WrapError(err, "Msg", "ResponseChallenge") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err, "Msg") + return + } + } + } + } + case "sig": + bts, err = (*z).Signature.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Signature") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *identityVerificationMessageSigned) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*identityVerificationMessageSigned) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *identityVerificationMessageSigned) Msgsize() (s int) { + s = 1 + 4 + 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 4 + (*z).Signature.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *identityVerificationMessageSigned) MsgIsZero() bool { + return ((*z).Msg.ResponseChallenge == (identityChallengeValue{})) && ((*z).Signature.MsgIsZero()) +} diff --git a/network/msgp_gen_test.go b/network/msgp_gen_test.go new file mode 100644 index 0000000000..1046371a1a --- /dev/null +++ b/network/msgp_gen_test.go @@ -0,0 +1,435 @@ +//go:build !skip_msgp_testing +// +build !skip_msgp_testing + +package network + +// Code generated by github.com/algorand/msgp DO NOT EDIT. 
+ +import ( + "testing" + + "github.com/algorand/msgp/msgp" + + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestMarshalUnmarshalidentityChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityChallenge{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityChallenge(t *testing.T) { + protocol.RunEncodingTest(t, &identityChallenge{}) +} + +func BenchmarkMarshalMsgidentityChallenge(b *testing.B) { + v := identityChallenge{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityChallenge(b *testing.B) { + v := identityChallenge{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityChallenge(b *testing.B) { + v := identityChallenge{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalidentityChallengeResponse(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityChallengeResponse{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityChallengeResponse(t *testing.T) { + protocol.RunEncodingTest(t, &identityChallengeResponse{}) +} + +func BenchmarkMarshalMsgidentityChallengeResponse(b *testing.B) { + v := identityChallengeResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityChallengeResponse(b *testing.B) { + v := identityChallengeResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityChallengeResponse(b *testing.B) { + v := identityChallengeResponse{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalidentityChallengeResponseSigned(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityChallengeResponseSigned{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityChallengeResponseSigned(t *testing.T) { + 
protocol.RunEncodingTest(t, &identityChallengeResponseSigned{}) +} + +func BenchmarkMarshalMsgidentityChallengeResponseSigned(b *testing.B) { + v := identityChallengeResponseSigned{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityChallengeResponseSigned(b *testing.B) { + v := identityChallengeResponseSigned{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityChallengeResponseSigned(b *testing.B) { + v := identityChallengeResponseSigned{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalidentityChallengeSigned(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityChallengeSigned{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityChallengeSigned(t *testing.T) { + protocol.RunEncodingTest(t, &identityChallengeSigned{}) +} + +func BenchmarkMarshalMsgidentityChallengeSigned(b *testing.B) { + v := identityChallengeSigned{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityChallengeSigned(b *testing.B) { + v := identityChallengeSigned{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityChallengeSigned(b *testing.B) { + v := identityChallengeSigned{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalidentityChallengeValue(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityChallengeValue{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityChallengeValue(t *testing.T) { + protocol.RunEncodingTest(t, &identityChallengeValue{}) +} + +func BenchmarkMarshalMsgidentityChallengeValue(b *testing.B) { + v := identityChallengeValue{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityChallengeValue(b *testing.B) { + v := identityChallengeValue{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityChallengeValue(b *testing.B) { + v := identityChallengeValue{} + bts := 
v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalidentityVerificationMessage(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityVerificationMessage{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityVerificationMessage(t *testing.T) { + protocol.RunEncodingTest(t, &identityVerificationMessage{}) +} + +func BenchmarkMarshalMsgidentityVerificationMessage(b *testing.B) { + v := identityVerificationMessage{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityVerificationMessage(b *testing.B) { + v := identityVerificationMessage{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityVerificationMessage(b *testing.B) { + v := identityVerificationMessage{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalidentityVerificationMessageSigned(t *testing.T) { + partitiontest.PartitionTest(t) + v := identityVerificationMessageSigned{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingidentityVerificationMessageSigned(t *testing.T) { + protocol.RunEncodingTest(t, &identityVerificationMessageSigned{}) +} + +func BenchmarkMarshalMsgidentityVerificationMessageSigned(b *testing.B) { + v := identityVerificationMessageSigned{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgidentityVerificationMessageSigned(b *testing.B) { + v := identityVerificationMessageSigned{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalidentityVerificationMessageSigned(b *testing.B) { + v := identityVerificationMessageSigned{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/network/netidentity.go b/network/netidentity.go new file mode 100644 index 0000000000..dd8abceff0 --- /dev/null +++ b/network/netidentity.go @@ -0,0 +1,412 @@ +// Copyright (C) 2019-2023 Algorand, Inc. 
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"sync/atomic"
+
+	"github.com/algorand/go-algorand/crypto"
+	"github.com/algorand/go-algorand/protocol"
+)
+
+// netidentity.go implements functionality to participate in an "Identity Challenge Exchange"
+// with the purpose of identifying redundant connections between peers, and preventing them.
+// The identity challenge exchange protocol is a 3-way handshake that exchanges signed messages.
+//
+// Message 1 (Identity Challenge): when a request is made to start a gossip connection, an
+// identityChallengeSigned message is added to HTTP request headers, containing:
+// - a 32 byte random challenge
+// - the requester's "identity" PublicKey
+// - the PublicAddress of the intended recipient
+// - Signature on the above by the requester's PublicKey
+//
+// Message 2 (Identity Challenge Response): when responding to the gossip connection request,
+// if the identity challenge is valid, an identityChallengeResponseSigned message is added
+// to the HTTP response headers, containing:
+// - the original 32 byte random challenge from Message 1
+// - a new "response" 32 byte random challenge
+// - the responder's "identity" PublicKey
+// - Signature on the above by the responder's PublicKey
+//
+// Message 3 (Identity Verification): if the identityChallengeResponse is valid, the requester
+// sends a NetIDVerificationTag message over websockets to verify it owns its PublicKey, with:
+// - Signature on the response challenge from Message 2, using the requester's PublicKey
+//
+// Upon receipt of Message 2, the requester has enough data to consider the responder's identity "verified".
+// Upon receipt of Message 3, the responder has enough data to consider the requester's identity "verified".
+// At each of these steps, if the peer's identity was verified, wsNetwork will attempt to add it to the
+// identityTracker, which maintains a single peer per identity PublicKey. If the identity is already in use
+// by another connected peer, we know this connection is a duplicate, and can be closed.
+//
+// Protocol Enablement:
+// This exchange is optional, and is enabled by setting the configuration value "PublicAddress" to match the
+// node's public endpoint address stored in other peers' phonebooks (like "r-aa.algorand-mainnet.network:4160").
+//
+// Protocol Error Handling:
+// Message 1
+// - If the Message is not included, assume the peer does not use identity exchange, and continue peering without attaching an identityChallengeResponse
+// - If the Address included in the challenge is not this node's PublicAddress, peering continues without identity exchange.
+// this is so that if an operator misconfigures PublicAddress, it does not decline well-meaning peering attempts
+// - If the Message is malformed or cannot be decoded, the peering attempt is stopped
+// - If the Signature in the challenge does not verify to the included key, the peering attempt is stopped
+//
+// Message 2
+// - If the Message is not included, assume the peer does not use identity exchange, and do not send Message 3
+// - If the Message is malformed or cannot be decoded, the peering attempt is stopped
+// - If the original 32 byte challenge does not match the one sent in Message 1, the peering attempt is stopped
+// - If the Signature in the challenge does not verify to the included key, the peering attempt is stopped
+//
+// Message 3
+// - If the Message is malformed or cannot be decoded, the peer is disconnected
+// - If the Signature in the challenge does not verify against the peer's assumed PublicKey and assigned challenge bytes, the peer is disconnected
+// - If the Message is not received, no action is taken to disconnect the peer.
+
+const maxAddressLen = 256 + 32 // Max DNS (255) + margin for port specification
+
+// identityChallengeValue is 32 random bytes used for identity challenge exchange
+type identityChallengeValue [32]byte
+
+func newIdentityChallengeValue() identityChallengeValue {
+	var ret identityChallengeValue
+	crypto.RandBytes(ret[:])
+	return ret
+}
+
+type identityChallengeScheme interface {
+	AttachChallenge(attachTo http.Header, addr string) identityChallengeValue
+	VerifyRequestAndAttachResponse(attachTo http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error)
+	VerifyResponse(h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error)
+}
+
+// identityChallengePublicKeyScheme implements identityChallengeScheme by
+// exchanging and verifying public key challenges and attaching them to headers,
+// or returning the message payload to be sent
+type identityChallengePublicKeyScheme struct {
+	dedupName    string
+	identityKeys *crypto.SignatureSecrets
+}
+
+// NewIdentityChallengeScheme will create a default Identification Scheme
+func NewIdentityChallengeScheme(dn string) *identityChallengePublicKeyScheme {
+	// without a deduplication name, there is no identity to manage, so just return an empty scheme
+	if dn == "" {
+		return &identityChallengePublicKeyScheme{}
+	}
+	var seed crypto.Seed
+	crypto.RandBytes(seed[:])
+
+	return &identityChallengePublicKeyScheme{
+		dedupName:    dn,
+		identityKeys: crypto.GenerateSignatureSecrets(seed),
+	}
+}
+
+// AttachChallenge will generate a new identity challenge and will encode and attach the challenge
+// as a header. It returns the identityChallengeValue used for this challenge, so the network can
+// confirm it later (by passing it to VerifyResponse), or returns an empty challenge if dedupName is
+// not set.
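+//
+// A hypothetical requester-side sketch (the addresses are placeholders, not
+// part of this file):
+//
+//	scheme := NewIdentityChallengeScheme("r-aa.algorand-mainnet.network:4160")
+//	hdr := make(http.Header)
+//	chal := scheme.AttachChallenge(hdr, "r-ab.algorand-mainnet.network:4160")
+//	// keep chal: it is passed to VerifyResponse once the HTTP response
+//	// to this connection request arrives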
+// AttachChallenge will generate a new identity challenge and will encode and attach the challenge
+// as a header. It returns the identityChallengeValue used for this challenge, so the network can
+// confirm it later (by passing it to VerifyResponse), or returns an empty challenge if dedupName is
+// not set.
+func (i identityChallengePublicKeyScheme) AttachChallenge(attachTo http.Header, addr string) identityChallengeValue {
+	if i.dedupName == "" || addr == "" {
+		return identityChallengeValue{}
+	}
+	c := identityChallenge{
+		Key:           i.identityKeys.SignatureVerifier,
+		Challenge:     newIdentityChallengeValue(),
+		PublicAddress: []byte(addr),
+	}
+
+	attachTo.Add(IdentityChallengeHeader, c.signAndEncodeB64(i.identityKeys))
+	return c.Challenge
+}
+
+// VerifyRequestAndAttachResponse checks headers for an Identity Challenge, and verifies:
+// * the header contains a well-formed, decodable identityChallengeSigned
+// * the identity challenge verifies against the included key
+// * the "Address" field matches this scheme's dedupName
+// Once verified, it attaches an identity challenge response to the attachTo header, and
+// returns the response challenge and the identity of the peer for recording, or empty
+// values if no response header ended up being set
+func (i identityChallengePublicKeyScheme) VerifyRequestAndAttachResponse(attachTo http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) {
+	// if dedupName is not set, this scheme is not configured to exchange identity
+	if i.dedupName == "" {
+		return identityChallengeValue{}, crypto.PublicKey{}, nil
+	}
+	// if the headerString is not populated, the peer isn't participating in identity exchange
+	headerString := h.Get(IdentityChallengeHeader)
+	if headerString == "" {
+		return identityChallengeValue{}, crypto.PublicKey{}, nil
+	}
+	// decode the header to an identityChallenge
+	msg, err := base64.StdEncoding.DecodeString(headerString)
+	if err != nil {
+		return identityChallengeValue{}, crypto.PublicKey{}, err
+	}
+	idChal := identityChallengeSigned{}
+	err = protocol.Decode(msg, &idChal)
+	if err != nil {
+		return identityChallengeValue{}, crypto.PublicKey{}, err
+	}
+	if !idChal.Verify() {
+		return identityChallengeValue{}, crypto.PublicKey{}, fmt.Errorf("identity challenge incorrectly signed")
+	}
+	// if the address is not meant for this host, return without attaching headers,
+	// but also do not emit an error. This is because if an operator were to incorrectly
+	// specify their dedupName, it could result in inappropriate disconnections from valid peers
+	if string(idChal.Msg.PublicAddress) != i.dedupName {
+		return identityChallengeValue{}, crypto.PublicKey{}, nil
+	}
+	// make the response object, encode it and attach it to the header
+	r := identityChallengeResponse{
+		Key:               i.identityKeys.SignatureVerifier,
+		Challenge:         idChal.Msg.Challenge,
+		ResponseChallenge: newIdentityChallengeValue(),
+	}
+	attachTo.Add(IdentityChallengeHeader, r.signAndEncodeB64(i.identityKeys))
+	return r.ResponseChallenge, idChal.Msg.Key, nil
+}
+
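(Editorial aside: on the wire, the value of IdentityChallengeHeader is StdEncoding base64 wrapping the canonical msgpack encoding of the signed message, as produced by signAndEncodeB64 below. A hypothetical in-package debugging helper could therefore unpack a request header like this.)

func debugDecodeIdentityChallenge(h http.Header) (identityChallengeSigned, error) {
	var signed identityChallengeSigned
	raw, err := base64.StdEncoding.DecodeString(h.Get(IdentityChallengeHeader))
	if err != nil {
		return signed, err
	}
	// protocol.Decode reverses protocol.Encode; signed.Verify() would then
	// confirm the embedded signature before any field is trusted
	err = protocol.Decode(raw, &signed)
	return signed, err
}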
+// VerifyResponse will decode the identity challenge header from an HTTP response (containing an
+// encoding of identityChallengeResponseSigned) and confirm it has a valid signature, and that the
+// provided challenge (generated and added to the HTTP request by AttachChallenge) matches the one
+// found in the header. If the response can be verified, it returns the identity of the peer and an
+// encoded identityVerificationMessage to send to the peer. Otherwise, it returns empty values.
+func (i identityChallengePublicKeyScheme) VerifyResponse(h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) {
+	// if we are not participating in identity challenge exchange, do nothing (no error and no value)
+	if i.dedupName == "" {
+		return crypto.PublicKey{}, []byte{}, nil
+	}
+	headerString := h.Get(IdentityChallengeHeader)
+	// if the header is not populated, assume the peer is not participating in identity exchange
+	if headerString == "" {
+		return crypto.PublicKey{}, []byte{}, nil
+	}
+	msg, err := base64.StdEncoding.DecodeString(headerString)
+	if err != nil {
+		return crypto.PublicKey{}, []byte{}, err
+	}
+	resp := identityChallengeResponseSigned{}
+	err = protocol.Decode(msg, &resp)
+	if err != nil {
+		return crypto.PublicKey{}, []byte{}, err
+	}
+	if resp.Msg.Challenge != c {
+		return crypto.PublicKey{}, []byte{}, fmt.Errorf("challenge response did not contain originally issued challenge value")
+	}
+	if !resp.Verify() {
+		return crypto.PublicKey{}, []byte{}, fmt.Errorf("challenge response incorrectly signed")
+	}
+	return resp.Msg.Key, i.identityVerificationMessage(resp.Msg.ResponseChallenge), nil
+}
+
+// identityVerificationMessage generates the 3rd message of the challenge exchange,
+// which a wsNetwork can then send to a peer in order to verify their own identity.
+// It is prefixed with the ID Verification tag and returned ready-to-send
+func (i *identityChallengePublicKeyScheme) identityVerificationMessage(c identityChallengeValue) []byte {
+	signedMsg := identityVerificationMessage{ResponseChallenge: c}.Sign(i.identityKeys)
+	return append([]byte(protocol.NetIDVerificationTag), protocol.Encode(&signedMsg)...)
+}
+
+// The initial challenge object, giving the peer a challenge to return (Challenge),
+// the presumed identity of this node (Key), and the intended recipient (Address).
+type identityChallenge struct {
+	_struct struct{} `codec:",omitempty,omitemptyarray"`
+
+	Key           crypto.PublicKey       `codec:"pk"`
+	Challenge     identityChallengeValue `codec:"c"`
+	PublicAddress []byte                 `codec:"a,allocbound=maxAddressLen"`
+}
+
+// identityChallengeSigned wraps an identityChallenge with a signature, similar to SignedTxn and
+// netPrioResponseSigned.
+type identityChallengeSigned struct {
+	_struct struct{} `codec:",omitempty,omitemptyarray"`
+
+	Msg       identityChallenge `codec:"ic"`
+	Signature crypto.Signature  `codec:"sig"`
+}
+
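(Editorial aside: a sketch of how Message 3 is framed, using the message types and Sign/Verify helpers defined below. The sender prefixes the encoded payload with NetIDVerificationTag, and the websocket message router strips that tag again before the payload reaches identityVerificationHandler; this round trip imitates both sides.)

func verificationMessageRoundTrip(secrets *crypto.SignatureSecrets, c identityChallengeValue) bool {
	// sender side: equivalent to identityVerificationMessage above
	signed := identityVerificationMessage{ResponseChallenge: c}.Sign(secrets)
	wire := append([]byte(protocol.NetIDVerificationTag), protocol.Encode(&signed)...)

	// receiver side: drop the tag, decode, and verify against the sender's key
	payload := wire[len(protocol.NetIDVerificationTag):]
	var decoded identityVerificationMessageSigned
	if err := protocol.Decode(payload, &decoded); err != nil {
		return false
	}
	return decoded.Verify(secrets.SignatureVerifier)
}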
+// The response to an identityChallenge, containing the responder's public key, the original
+// requester's challenge, and a new challenge for the requester.
+type identityChallengeResponse struct {
+	_struct struct{} `codec:",omitempty,omitemptyarray"`
+
+	Key               crypto.PublicKey       `codec:"pk"`
+	Challenge         identityChallengeValue `codec:"c"`
+	ResponseChallenge identityChallengeValue `codec:"rc"`
+}
+
+type identityChallengeResponseSigned struct {
+	_struct struct{} `codec:",omitempty,omitemptyarray"`
+
+	Msg       identityChallengeResponse `codec:"icr"`
+	Signature crypto.Signature          `codec:"sig"`
+}
+
+type identityVerificationMessage struct {
+	_struct struct{} `codec:",omitempty,omitemptyarray"`
+
+	ResponseChallenge identityChallengeValue `codec:"rc"`
+}
+
+type identityVerificationMessageSigned struct {
+	_struct struct{} `codec:",omitempty,omitemptyarray"`
+
+	Msg       identityVerificationMessage `codec:"ivm"`
+	Signature crypto.Signature            `codec:"sig"`
+}
+
+func (i identityChallenge) signAndEncodeB64(s *crypto.SignatureSecrets) string {
+	signedChal := i.Sign(s)
+	return base64.StdEncoding.EncodeToString(protocol.Encode(&signedChal))
+}
+
+func (i identityChallenge) Sign(secrets *crypto.SignatureSecrets) identityChallengeSigned {
+	return identityChallengeSigned{Msg: i, Signature: secrets.Sign(i)}
+}
+
+func (i identityChallenge) ToBeHashed() (protocol.HashID, []byte) {
+	return protocol.NetIdentityChallenge, protocol.Encode(&i)
+}
+
+// Verify checks that the signature included in the identityChallenge was indeed created by the included Key
+func (i identityChallengeSigned) Verify() bool {
+	return i.Msg.Key.Verify(i.Msg, i.Signature)
+}
+
+func (i identityChallengeResponse) signAndEncodeB64(s *crypto.SignatureSecrets) string {
+	signedChalResp := i.Sign(s)
+	return base64.StdEncoding.EncodeToString(protocol.Encode(&signedChalResp))
+}
+
+func (i identityChallengeResponse) Sign(secrets *crypto.SignatureSecrets) identityChallengeResponseSigned {
+	return identityChallengeResponseSigned{Msg: i, Signature: secrets.Sign(i)}
+}
+
+func (i identityChallengeResponse) ToBeHashed() (protocol.HashID, []byte) {
+	return protocol.NetIdentityChallengeResponse, protocol.Encode(&i)
+}
+
+// Verify checks that the signature included in the identityChallengeResponse was indeed created by the included Key
+func (i identityChallengeResponseSigned) Verify() bool {
+	return i.Msg.Key.Verify(i.Msg, i.Signature)
+}
+
+func (i identityVerificationMessage) Sign(secrets *crypto.SignatureSecrets) identityVerificationMessageSigned {
+	return identityVerificationMessageSigned{Msg: i, Signature: secrets.Sign(i)}
+}
+
+func (i identityVerificationMessage) ToBeHashed() (protocol.HashID, []byte) {
+	return protocol.NetIdentityVerificationMessage, protocol.Encode(&i)
+}
+
+// Verify checks that the signature included in the identityVerificationMessage was indeed created by the given key
+func (i identityVerificationMessageSigned) Verify(key crypto.PublicKey) bool {
+	return key.Verify(i.Msg, i.Signature)
+}
+
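(Editorial aside: all three message types share the Sign/ToBeHashed/Verify pattern above. Each ToBeHashed pairs a distinct protocol.HashID with the message's canonical encoding, so a signature over one message type cannot be replayed as another. A minimal round trip, with a hypothetical address:)

func identityChallengeSignRoundTrip(secrets *crypto.SignatureSecrets) bool {
	chal := identityChallenge{
		Key:           secrets.SignatureVerifier,
		Challenge:     newIdentityChallengeValue(),
		PublicAddress: []byte("node.example.com:4160"), // hypothetical
	}
	// Sign covers the HashID-domain-separated encoding from ToBeHashed;
	// Verify recomputes it against the Key embedded in the message
	return chal.Sign(secrets).Verify()
}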
+// identityVerificationHandler receives a signature over websocket, and confirms it matches the
+// sender's claimed identity and the challenge that was assigned to it. If the identity is available,
+// the peer is loaded into the identity tracker. Otherwise, we ask the network to disconnect the peer.
+func identityVerificationHandler(message IncomingMessage) OutgoingMessage {
+	peer := message.Sender.(*wsPeer)
+	// avoid doing work (crypto and potentially taking a lock) if the peer is already verified
+	if atomic.LoadUint32(&peer.identityVerified) == 1 {
+		return OutgoingMessage{}
+	}
+	localAddr, _ := peer.net.Address()
+	msg := identityVerificationMessageSigned{}
+	err := protocol.Decode(message.Data, &msg)
+	if err != nil {
+		networkPeerIdentityError.Inc(nil)
+		peer.net.log.With("err", err).With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification could not be decoded, disconnecting")
+		peer.net.disconnect(peer, disconnectBadIdentityData)
+		return OutgoingMessage{}
+	}
+	if peer.identityChallenge != msg.Msg.ResponseChallenge {
+		networkPeerIdentityError.Inc(nil)
+		peer.net.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification challenge does not match, disconnecting")
+		peer.net.disconnect(peer, disconnectBadIdentityData)
+		return OutgoingMessage{}
+	}
+	if !msg.Verify(peer.identity) {
+		networkPeerIdentityError.Inc(nil)
+		peer.net.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification is incorrectly signed, disconnecting")
+		peer.net.disconnect(peer, disconnectBadIdentityData)
+		return OutgoingMessage{}
+	}
+	atomic.StoreUint32(&peer.identityVerified, 1)
+	// if the identity could not be claimed by this peer, it means the identity is in use
+	peer.net.peersLock.Lock()
+	ok := peer.net.identityTracker.setIdentity(peer)
+	peer.net.peersLock.Unlock()
+	if !ok {
+		networkPeerIdentityDisconnect.Inc(nil)
+		peer.net.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity already in use, disconnecting")
+		peer.net.disconnect(peer, disconnectDuplicateConnection)
+	}
+	return OutgoingMessage{}
+}
+
+var identityHandlers = []TaggedMessageHandler{
+	{protocol.NetIDVerificationTag, HandlerFunc(identityVerificationHandler)},
+}
+
+// identityTracker is used by wsNetwork to manage peer identities for connection deduplication
+type identityTracker interface {
+	removeIdentity(p *wsPeer)
+	setIdentity(p *wsPeer) bool
+}
+
+// publicKeyIdentTracker implements identityTracker by
+// mapping from PublicKeys exchanged in identity challenges to a peer.
+// This structure is not thread-safe; it is protected by wn.peersLock.
+type publicKeyIdentTracker struct {
+	peersByID map[crypto.PublicKey]*wsPeer
+}
+
+// NewIdentityTracker returns a new publicKeyIdentTracker
+func NewIdentityTracker() *publicKeyIdentTracker {
+	return &publicKeyIdentTracker{
+		peersByID: make(map[crypto.PublicKey]*wsPeer),
+	}
+}
+
+// setIdentity attempts to store a peer at its identity. 
+// returns false if it was unable to load the peer into the given identity
+// or true otherwise (if the peer was already there, or if it was added)
+func (t *publicKeyIdentTracker) setIdentity(p *wsPeer) bool {
+	existingPeer, exists := t.peersByID[p.identity]
+	if !exists {
+		// the identity is not occupied, so set it and return true
+		t.peersByID[p.identity] = p
+		return true
+	}
+	// the identity is occupied, so return false if it is occupied by some *other* peer
+	// or true if it is occupied by this peer
+	return existingPeer == p
+}
+
+// removeIdentity removes the entry in the peersByID map if it exists
+// and is occupied by the given peer
+func (t *publicKeyIdentTracker) removeIdentity(p *wsPeer) {
+	if t.peersByID[p.identity] == p {
+		delete(t.peersByID, p.identity)
+	}
+}
diff --git a/network/netidentity_test.go b/network/netidentity_test.go
new file mode 100644
index 0000000000..f3c72e3e8c
--- /dev/null
+++ b/network/netidentity_test.go
@@ -0,0 +1,366 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+	"encoding/base64"
+	"net/http"
+	"testing"
+
+	"github.com/algorand/go-algorand/crypto"
+	"github.com/algorand/go-algorand/protocol"
+	"github.com/algorand/go-algorand/test/partitiontest"
+	"github.com/stretchr/testify/require"
+)
+
+// if the scheme has a dedup name, attach to headers. 
otherwise, don't +func TestIdentityChallengeSchemeAttachIfEnabled(t *testing.T) { + partitiontest.PartitionTest(t) + + h := http.Header{} + i := NewIdentityChallengeScheme("") + chal := i.AttachChallenge(h, "other") + require.Empty(t, h.Get(IdentityChallengeHeader)) + require.Empty(t, chal) + + j := NewIdentityChallengeScheme("yes") + chal = j.AttachChallenge(h, "other") + require.NotEmpty(t, h.Get(IdentityChallengeHeader)) + require.NotEmpty(t, chal) +} + +// TestIdentityChallengeSchemeVerifyRequestAndAttachResponse will confirm that the scheme +// attaches responses only if dedup name is set and the provided challenge verifies +func TestIdentityChallengeSchemeVerifyRequestAndAttachResponse(t *testing.T) { + partitiontest.PartitionTest(t) + + i := NewIdentityChallengeScheme("i1") + // author a challenge to the other scheme + h := http.Header{} + i.AttachChallenge(h, "i2") + require.NotEmpty(t, h.Get(IdentityChallengeHeader)) + + // without a dedup name, no response and no error + h = http.Header{} + i.AttachChallenge(h, "i2") + r := http.Header{} + i2 := NewIdentityChallengeScheme("") + chal, key, err := i2.VerifyRequestAndAttachResponse(r, h) + require.Empty(t, r.Get(IdentityChallengeHeader)) + require.Empty(t, chal) + require.Empty(t, key) + require.NoError(t, err) + + // if dedup name doesn't match, no response and no error + h = http.Header{} + i.AttachChallenge(h, "i2") + r = http.Header{} + i2 = NewIdentityChallengeScheme("not i2") + chal, key, err = i2.VerifyRequestAndAttachResponse(r, h) + require.Empty(t, r.Get(IdentityChallengeHeader)) + require.Empty(t, chal) + require.Empty(t, key) + require.NoError(t, err) + + // if the challenge can't be decoded or verified, error + h = http.Header{} + h.Add(IdentityChallengeHeader, "garbage") + r = http.Header{} + i2 = NewIdentityChallengeScheme("i2") + chal, key, err = i2.VerifyRequestAndAttachResponse(r, h) + require.Empty(t, r.Get(IdentityChallengeHeader)) + require.Empty(t, chal) + require.Empty(t, key) + require.Error(t, err) + + // happy path: response should be attached here + h = http.Header{} + i.AttachChallenge(h, "i2") + r = http.Header{} + i2 = NewIdentityChallengeScheme("i2") + chal, key, err = i2.VerifyRequestAndAttachResponse(r, h) + require.NotEmpty(t, r.Get(IdentityChallengeHeader)) + require.NotEmpty(t, chal) + require.NotEmpty(t, key) + require.NoError(t, err) +} + +func TestIdentityChallengeNoErrorWhenNotParticipating(t *testing.T) { + partitiontest.PartitionTest(t) + + // blank deduplication name will make the scheme a no-op + iNotParticipate := NewIdentityChallengeScheme("") + + // create a request header first + h := http.Header{} + i := NewIdentityChallengeScheme("i1") + origChal := i.AttachChallenge(h, "i1") + require.NotEmpty(t, h.Get(IdentityChallengeHeader)) + require.NotEmpty(t, origChal) + + // confirm a nil scheme will not return values or error + c, k, err := iNotParticipate.VerifyRequestAndAttachResponse(http.Header{}, h) + require.Empty(t, c) + require.Empty(t, k) + require.NoError(t, err) + + // create a response + h2 := http.Header{} + i2 := NewIdentityChallengeScheme("i2") + i2.VerifyRequestAndAttachResponse(h2, h) + + // confirm a nil scheme will not return values or error + k2, bytes, err := iNotParticipate.VerifyResponse(h2, identityChallengeValue{}) + require.Empty(t, k2) + require.Empty(t, bytes) + require.NoError(t, err) + + // add broken payload to a new header and try inspecting it with the empty scheme + h3 := http.Header{} + h3.Add(IdentityChallengeHeader, "broken text!") + c, k, err = 
iNotParticipate.VerifyRequestAndAttachResponse(http.Header{}, h3)
+	require.Empty(t, c)
+	require.Empty(t, k)
+	require.NoError(t, err)
+	k2, bytes, err = iNotParticipate.VerifyResponse(h2, identityChallengeValue{})
+	require.Empty(t, k2)
+	require.Empty(t, bytes)
+	require.NoError(t, err)
+}
+
+// TestIdentityChallengeSchemeVerifyResponse confirms the scheme will
+// attach responses only if dedup name is set and the provided challenge verifies
+func TestIdentityChallengeSchemeVerifyResponse(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	h := http.Header{}
+	i := NewIdentityChallengeScheme("i1")
+	// author a challenge to ourselves
+	origChal := i.AttachChallenge(h, "i1")
+	require.NotEmpty(t, h.Get(IdentityChallengeHeader))
+	require.NotEmpty(t, origChal)
+	r := http.Header{}
+
+	respChal, key, err := i.VerifyRequestAndAttachResponse(r, h)
+	require.NotEmpty(t, r.Get(IdentityChallengeHeader))
+	require.NotEmpty(t, respChal)
+	require.NotEmpty(t, key)
+	require.NoError(t, err)
+
+	// the response header is verified against origChal, the challenge issued in Message 1,
+	// while respChal would be confirmed later by the peer's verification message
+	key2, verificationMsg, err := i.VerifyResponse(r, origChal)
+	require.NotEmpty(t, verificationMsg)
+	require.NoError(t, err)
+	// because we sent this to ourselves, we can confirm the keys match
+	require.Equal(t, key, key2)
+}
+
+// TestIdentityChallengeSchemeBadSignature tests that the scheme will
+// fail to verify and attach if the challenge is incorrectly signed
+func TestIdentityChallengeSchemeBadSignature(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	h := http.Header{}
+	i := NewIdentityChallengeScheme("i1")
+	// Copy the logic of attaching the header and signing so we can sign it wrong
+	c := identityChallengeSigned{
+		Msg: identityChallenge{
+			Key:           i.identityKeys.SignatureVerifier,
+			Challenge:     newIdentityChallengeValue(),
+			PublicAddress: []byte("i1"),
+		}}
+	c.Signature = i.identityKeys.SignBytes([]byte("WRONG BYTES SIGNED"))
+	enc := protocol.Encode(&c)
+	b64enc := base64.StdEncoding.EncodeToString(enc)
+	h.Add(IdentityChallengeHeader, b64enc)
+
+	// observe that VerifyRequestAndAttachResponse returns error on bad signature
+	r := http.Header{}
+	respChal, key, err := i.VerifyRequestAndAttachResponse(r, h)
+	require.Empty(t, r.Get(IdentityChallengeHeader))
+	require.Empty(t, respChal)
+	require.Empty(t, key)
+	require.Error(t, err)
+}
+
+// TestIdentityChallengeSchemeBadPayload tests that the scheme will
+// fail to verify if the challenge can't be B64 decoded
+func TestIdentityChallengeSchemeBadPayload(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	h := http.Header{}
+	i := NewIdentityChallengeScheme("i1")
+	h.Add(IdentityChallengeHeader, "NOT VALID BASE 64! 
:)")
+
+	// observe that VerifyRequestAndAttachResponse won't do anything on a bad payload
+	r := http.Header{}
+	respChal, key, err := i.VerifyRequestAndAttachResponse(r, h)
+	require.Empty(t, r.Get(IdentityChallengeHeader))
+	require.Empty(t, respChal)
+	require.Empty(t, key)
+	require.Error(t, err)
+}
+
+// TestIdentityChallengeSchemeBadResponseSignature tests that the scheme will
+// fail to verify if the challenge response is incorrectly signed
+func TestIdentityChallengeSchemeBadResponseSignature(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	h := http.Header{}
+	i := NewIdentityChallengeScheme("i1")
+	// author a challenge to ourselves
+	origChal := i.AttachChallenge(h, "i1")
+	require.NotEmpty(t, h.Get(IdentityChallengeHeader))
+	require.NotEmpty(t, origChal)
+
+	// use the code to sign and encode responses so we can sign incorrectly
+	r := http.Header{}
+	resp := identityChallengeResponseSigned{
+		Msg: identityChallengeResponse{
+			Key:               i.identityKeys.SignatureVerifier,
+			Challenge:         origChal,
+			ResponseChallenge: newIdentityChallengeValue(),
+		}}
+	resp.Signature = i.identityKeys.SignBytes([]byte("BAD BYTES FOR SIGNING"))
+	enc := protocol.Encode(&resp)
+	b64enc := base64.StdEncoding.EncodeToString(enc)
+	r.Add(IdentityChallengeHeader, b64enc)
+
+	key2, verificationMsg, err := i.VerifyResponse(r, origChal)
+	require.Empty(t, key2)
+	require.Empty(t, verificationMsg)
+	require.Error(t, err)
+}
+
+// TestIdentityChallengeSchemeBadResponsePayload tests that the scheme will
+// fail to verify if the challenge response can't be B64 decoded
+func TestIdentityChallengeSchemeBadResponsePayload(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	h := http.Header{}
+	i := NewIdentityChallengeScheme("i1")
+	// author a challenge to ourselves
+	origChal := i.AttachChallenge(h, "i1")
+	require.NotEmpty(t, h.Get(IdentityChallengeHeader))
+	require.NotEmpty(t, origChal)
+
+	// generate a bad payload that should not decode
+	r := http.Header{}
+	r.Add(IdentityChallengeHeader, "BAD B64 ENCODING :)")
+
+	key2, verificationMsg, err := i.VerifyResponse(r, origChal)
+	require.Empty(t, key2)
+	require.Empty(t, verificationMsg)
+	require.Error(t, err)
+}
+
+// TestIdentityChallengeSchemeWrongChallenge confirms the scheme will
+// fail to verify the response if it does not contain the originally issued challenge
+func TestIdentityChallengeSchemeWrongChallenge(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	h := http.Header{}
+	i := NewIdentityChallengeScheme("i1")
+	// author a challenge to ourselves
+	origChal := i.AttachChallenge(h, "i1")
+	require.NotEmpty(t, h.Get(IdentityChallengeHeader))
+	require.NotEmpty(t, origChal)
+
+	r := http.Header{}
+	respChal, key, err := i.VerifyRequestAndAttachResponse(r, h)
+	require.NotEmpty(t, r.Get(IdentityChallengeHeader))
+	require.NotEmpty(t, respChal)
+	require.NotEmpty(t, key)
+	require.NoError(t, err)
+
+	// Attempt to verify against the wrong challenge
+	key2, verificationMsg, err := i.VerifyResponse(r, newIdentityChallengeValue())
+	require.Empty(t, key2)
+	require.Empty(t, verificationMsg)
+	require.Error(t, err)
+}
+
+func TestNewIdentityTracker(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	tracker := NewIdentityTracker()
+	require.Empty(t, tracker.peersByID)
+}
+
+func TestIdentityTrackerRemoveIdentity(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	tracker := NewIdentityTracker()
+	id := crypto.PublicKey{}
+	p := wsPeer{identity: id}
+
+	id2 := crypto.PublicKey{}
+	p2 := wsPeer{identity: id2}
+
+	// Ensure the first attempt to insert populates the map
+	_, exists := 
tracker.peersByID[p.identity]
+	require.False(t, exists)
+	require.True(t, tracker.setIdentity(&p))
+	_, exists = tracker.peersByID[p.identity]
+	require.True(t, exists)
+
+	// check that removing a peer who does not exist in the map (but whose identity does)
+	// does not result in the wrong peer being removed
+	tracker.removeIdentity(&p2)
+	_, exists = tracker.peersByID[p.identity]
+	require.True(t, exists)
+
+	tracker.removeIdentity(&p)
+	_, exists = tracker.peersByID[p.identity]
+	require.False(t, exists)
+}
+
+func TestIdentityTrackerSetIdentity(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	tracker := NewIdentityTracker()
+	id := crypto.PublicKey{}
+	p := wsPeer{identity: id}
+
+	// Ensure the first attempt to insert populates the map
+	_, exists := tracker.peersByID[p.identity]
+	require.False(t, exists)
+	require.True(t, tracker.setIdentity(&p))
+	_, exists = tracker.peersByID[p.identity]
+	require.True(t, exists)
+
+	// Ensure the next attempt to insert also returns true
+	require.True(t, tracker.setIdentity(&p))
+
+	// Ensure a different peer cannot take the map entry
+	otherP := wsPeer{identity: id}
+	require.False(t, tracker.setIdentity(&otherP))
+
+	// Ensure the entry in the map wasn't changed
+	require.Equal(t, tracker.peersByID[p.identity], &p)
+}
+
+// Tests that if a peer is already verified, the handler immediately returns OutgoingMessage{}
+func TestHandlerGuard(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	p := wsPeer{identityVerified: uint32(1)}
+	msg := IncomingMessage{
+		Sender: &p,
+	}
+	require.Equal(t, OutgoingMessage{}, identityVerificationHandler(msg))
+}
diff --git a/network/phonebook.go b/network/phonebook.go
index 1ff3ed542d..ac5914a299 100644
--- a/network/phonebook.go
+++ b/network/phonebook.go
@@ -30,6 +30,8 @@ const getAllAddresses = math.MaxInt32
 
 // PhoneBookEntryRoles defines the roles that a single entry on the phonebook can take.
 // currently, we have two roles : relay role and archiver role, which are mutually exclusive.
+//
+//msgp:ignore PhoneBookEntryRoles
 type PhoneBookEntryRoles int
 
 // PhoneBookEntryRelayRole used for all the relays that are provided either via the algobootstrap SRV record
diff --git a/network/requestTracker.go b/network/requestTracker.go
index d2f05d8bf6..025d75a5a6 100644
--- a/network/requestTracker.go
+++ b/network/requestTracker.go
@@ -148,6 +148,7 @@ func (ard *hostIncomingRequests) countConnections(rateLimitingWindowStartTime ti
 	return uint(len(ard.requests) - i + len(ard.additionalHostRequests))
 }
 
+//msgp:ignore hostsIncomingMap
 type hostsIncomingMap map[string]*hostIncomingRequests
 
 // pruneRequests cleans stale items from the hostRequests maps
diff --git a/network/topics.go b/network/topics.go
index 762312585d..ded264a0cb 100644
--- a/network/topics.go
+++ b/network/topics.go
@@ -30,6 +30,8 @@ const (
 )
 
 // Topic is a key-value pair
+//
+//msgp:ignore Topic
 type Topic struct {
 	key  string
 	data []byte
@@ -43,6 +45,8 @@ func MakeTopic(key string, data []byte) Topic {
 // Topics is an array of type Topic
 // The maximum number of topics allowed is 32
 // Each topic key can be 64 characters long and cannot be size 0
+//
+//msgp:ignore Topics
 type Topics []Topic
 
 // MarshallTopics serializes the topics into a byte array
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index e8c9e97421..b537818c65 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -101,6 +101,9 @@ const slowWritingPeerMonitorInterval = 5 * time.Second
 // to the log file. 
Note that the log file itself would also json-encode these before placing them in the log file.
 const unprintableCharacterGlyph = "▯"
 
+// match config.PublicAddress to this string to automatically set PublicAddress from Address()
+const autoconfigPublicAddress = "auto"
+
 var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections)
 var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections)
 
@@ -113,6 +116,9 @@ var networkBroadcastSendMicros = metrics.MakeCounter(metrics.MetricName{Name: "a
 var networkBroadcastsDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_broadcasts_dropped_total", Description: "number of broadcast messages not sent to any peer"})
 var networkPeerBroadcastDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_peer_broadcast_dropped_total", Description: "number of broadcast messages not sent to some peer"})
 
+var networkPeerIdentityDisconnect = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_duplicate", Description: "number of times an identity challenge causes us to disconnect a peer"})
+var networkPeerIdentityError = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_error", Description: "number of times an unexpected error occurs while processing identity challenges"})
+
 var networkSlowPeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_slow_drops_total", Description: "number of peers dropped for being slow to send to"})
 var networkIdlePeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_idle_drops_total", Description: "number of peers dropped due to idle connection"})
 var networkBroadcastQueueFull = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_full_total", Description: "number of messages that were drops due to full broadcast queue"})
@@ -141,6 +147,8 @@ const peerShutdownDisconnectionAckDuration = 50 * time.Millisecond
 type Peer interface{}
 
 // PeerOption allows users to specify a subset of peers to query
+//
+//msgp:ignore PeerOption
 type PeerOption int
 
 const (
@@ -258,6 +266,8 @@ type OutgoingMessage struct {
 }
 
 // ForwardingPolicy is an enum indicating to whom we should send a message
+//
+//msgp:ignore ForwardingPolicy
 type ForwardingPolicy int
 
 const (
@@ -376,6 +386,10 @@ type WebsocketNetwork struct {
 	prioTracker      *prioTracker
 	prioResponseChan chan *wsPeer
 
+	// identity challenge scheme for creating challenges and responding
+	identityScheme  identityChallengeScheme
+	identityTracker identityTracker
+
 	// outgoingMessagesBufferSize is the size used for outgoing messages. 
outgoingMessagesBufferSize int @@ -741,6 +755,8 @@ func (wn *WebsocketNetwork) setup() { config.Consensus[protocol.ConsensusCurrentVersion].DownCommitteeSize), ) + wn.identityTracker = NewIdentityTracker() + wn.broadcastQueueHighPrio = make(chan broadcastRequest, wn.outgoingMessagesBufferSize) wn.broadcastQueueBulk = make(chan broadcastRequest, 100) wn.meshUpdateRequests = make(chan meshRequest, 5) @@ -823,6 +839,25 @@ func (wn *WebsocketNetwork) Start() { } else { wn.scheme = "http" } + + // if PublicAddress set to automatic, pull the name from Address() + if wn.config.PublicAddress == autoconfigPublicAddress { + addr, ok := wn.Address() + if ok { + url, err := url.Parse(addr) + if err == nil { + wn.config.PublicAddress = fmt.Sprintf("%s:%s", url.Hostname(), url.Port()) + } + } + } + // if the network has a public address, use that as the name for connection deduplication + if wn.config.PublicAddress != "" { + wn.RegisterHandlers(identityHandlers) + } + if wn.identityScheme == nil && wn.config.PublicAddress != "" { + wn.identityScheme = NewIdentityChallengeScheme(wn.config.PublicAddress) + } + wn.meshUpdateRequests <- meshRequest{false, nil} if wn.prioScheme != nil { wn.RegisterHandlers(prioHandlers) @@ -1144,6 +1179,20 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt challenge = wn.prioScheme.NewPrioChallenge() responseHeader.Set(PriorityChallengeHeader, challenge) } + + localAddr, _ := wn.Address() + var peerIDChallenge identityChallengeValue + var peerID crypto.PublicKey + if wn.identityScheme != nil { + var err error + peerIDChallenge, peerID, err = wn.identityScheme.VerifyRequestAndAttachResponse(responseHeader, request.Header) + if err != nil { + networkPeerIdentityError.Inc(nil) + wn.log.With("err", err).With("remote", trackedRequest.otherPublicAddr).With("local", localAddr).Warnf("peer (%s) supplied an invalid identity challenge, abandoning peering", trackedRequest.otherPublicAddr) + return + } + } + conn, err := wn.upgrader.Upgrade(response, request, responseHeader) if err != nil { wn.log.Info("ws upgrade fail ", err) @@ -1165,12 +1214,14 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt prioChallenge: challenge, createTime: trackedRequest.created, version: matchingVersion, + identity: peerID, + identityChallenge: peerIDChallenge, + identityVerified: 0, features: decodePeerFeatures(matchingVersion, request.Header.Get(PeerFeaturesHeader)), } peer.TelemetryGUID = trackedRequest.otherTelemetryGUID peer.init(wn.config, wn.outgoingMessagesBufferSize) wn.addPeer(peer) - localAddr, _ := wn.Address() wn.log.With("event", "ConnectedIn").With("remote", trackedRequest.otherPublicAddr).With("local", localAddr).Infof("Accepted incoming connection from peer %s", trackedRequest.otherPublicAddr) wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent, telemetryspec.PeerEventDetails{ @@ -1701,7 +1752,7 @@ func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool { newAddrs := wn.phonebook.GetAddresses(desired+numOutgoingTotal, PhoneBookEntryRelayRole) for _, na := range newAddrs { if na == wn.config.PublicAddress { - // filter out self-public address, so we won't try to connect to outselves. + // filter out self-public address, so we won't try to connect to ourselves. 
continue } gossipAddr, ok := wn.tryConnectReserveAddr(na) @@ -1957,6 +2008,9 @@ const InstanceNameHeader = "X-Algorand-InstanceName" // PriorityChallengeHeader HTTP header informs a client about the challenge it should sign to increase network priority. const PriorityChallengeHeader = "X-Algorand-PriorityChallenge" +// IdentityChallengeHeader is used to exchange IdentityChallenges +const IdentityChallengeHeader = "X-Algorand-IdentityChallenge" + // TooManyRequestsRetryAfterHeader HTTP header let the client know when to make the next connection attempt const TooManyRequestsRetryAfterHeader = "Retry-After" @@ -2113,6 +2167,12 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { for _, supportedProtocolVersion := range wn.supportedProtocolVersions { requestHeader.Add(ProtocolAcceptVersionHeader, supportedProtocolVersion) } + + var idChallenge identityChallengeValue + if wn.identityScheme != nil { + idChallenge = wn.identityScheme.AttachChallenge(requestHeader, addr) + } + // for backward compatibility, include the ProtocolVersion header as well. requestHeader.Set(ProtocolVersionHeader, wn.protocolVersion) // set the features header (comma-separated list) @@ -2164,13 +2224,41 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { return } + // if we abort before making a wsPeer this cleanup logic will close the connection + closeEarly := func(msg string) { + deadline := time.Now().Add(peerDisconnectionAckDuration) + err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseProtocolError, msg), deadline) + if err != nil { + wn.log.Infof("tryConnect: failed to write CloseMessage to connection for %s", conn.RemoteAddr().String()) + } + err = conn.CloseWithoutFlush() + if err != nil { + wn.log.Infof("tryConnect: failed to CloseWithoutFlush to connection for %s", conn.RemoteAddr().String()) + } + } + // no need to test the response.StatusCode since we know it's going to be http.StatusSwitchingProtocols, as it's already being tested inside websocketDialer.DialContext. // we need to examine the headers here to extract which protocol version we should be using. responseHeaderOk, matchingVersion := wn.checkServerResponseVariables(response.Header, gossipAddr) if !responseHeaderOk { // The error was already logged, so no need to log again. 
+ closeEarly("Unsupported headers") return } + localAddr, _ := wn.Address() + + var peerID crypto.PublicKey + var idVerificationMessage []byte + if wn.identityScheme != nil { + // if the peer responded with an identity challenge response, but it can't be verified, don't proceed with peering + peerID, idVerificationMessage, err = wn.identityScheme.VerifyResponse(response.Header, idChallenge) + if err != nil { + networkPeerIdentityError.Inc(nil) + wn.log.With("err", err).With("remote", addr).With("local", localAddr).Warn("peer supplied an invalid identity response, abandoning peering") + closeEarly("Invalid identity response") + return + } + } throttledConnection := false if atomic.AddInt32(&wn.throttledOutgoingConnections, int32(-1)) >= 0 { @@ -2188,12 +2276,28 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { connMonitor: wn.connPerfMonitor, throttledOutgoingConnection: throttledConnection, version: matchingVersion, + identity: peerID, features: decodePeerFeatures(matchingVersion, response.Header.Get(PeerFeaturesHeader)), } peer.TelemetryGUID, peer.InstanceName, _ = getCommonHeaders(response.Header) + + // if there is a final verification message to send, it means this peer has a verified identity, + // attempt to set the peer and identityTracker + if len(idVerificationMessage) > 0 { + atomic.StoreUint32(&peer.identityVerified, uint32(1)) + wn.peersLock.Lock() + ok := wn.identityTracker.setIdentity(peer) + wn.peersLock.Unlock() + if !ok { + networkPeerIdentityDisconnect.Inc(nil) + wn.log.With("remote", addr).With("local", localAddr).Warn("peer deduplicated before adding because the identity is already known") + closeEarly("Duplicate connection") + return + } + } peer.init(wn.config, wn.outgoingMessagesBufferSize) wn.addPeer(peer) - localAddr, _ := wn.Address() + wn.log.With("event", "ConnectedOut").With("remote", addr).With("local", localAddr).Infof("Made outgoing connection to peer %v", addr) wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent, telemetryspec.PeerEventDetails{ @@ -2206,6 +2310,14 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { wn.maybeSendMessagesOfInterest(peer, nil) + // if there is a final identification verification message to send, send it to the peer + if len(idVerificationMessage) > 0 { + sent := peer.writeNonBlock(context.Background(), idVerificationMessage, true, crypto.Digest{}, time.Now()) + if !sent { + wn.log.With("remote", addr).With("local", localAddr).Warn("could not send identity challenge verification") + } + } + peers.Set(uint64(wn.NumPeers())) outgoingPeers.Set(uint64(wn.numOutgoingPeers())) @@ -2333,6 +2445,7 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) { if peer.peerIndex < len(wn.peers) && wn.peers[peer.peerIndex] == peer { heap.Remove(peersHeap{wn}, peer.peerIndex) wn.prioTracker.removePeer(peer) + wn.identityTracker.removeIdentity(peer) if peer.throttledOutgoingConnection { atomic.AddInt32(&wn.throttledOutgoingConnections, int32(1)) } @@ -2344,6 +2457,8 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) { func (wn *WebsocketNetwork) addPeer(peer *wsPeer) { wn.peersLock.Lock() defer wn.peersLock.Unlock() + // simple duplicate *pointer* check. 
should never trigger given the callers to addPeer + // TODO: remove this after making sure it is safe to do so for _, p := range wn.peers { if p == peer { wn.log.Errorf("dup peer added %#v", peer) diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index ac9a757214..c86eed11a5 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -19,6 +19,7 @@ package network import ( "bytes" "context" + "encoding/base64" "encoding/binary" "encoding/json" "fmt" @@ -119,7 +120,7 @@ func init() { defaultConfig.MaxConnectionsPerIP = 30 } -func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local) *WebsocketNetwork { +func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local, opts ...testWebsocketOption) *WebsocketNetwork { log := logging.TestingLog(t) log.SetLevel(logging.Level(conf.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ @@ -129,13 +130,32 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local) *Websocket GenesisID: "go-test-network-genesis", NetworkID: config.Devtestnet, } + // apply options to newly-created WebsocketNetwork, if provided + for _, opt := range opts { + opt.applyOpt(wn) + } + wn.setup() wn.eventualReadyDelay = time.Second return wn } -func makeTestWebsocketNode(t testing.TB) *WebsocketNetwork { - return makeTestWebsocketNodeWithConfig(t, defaultConfig) +// interface for providing extra options to makeTestWebsocketNode +type testWebsocketOption interface { + applyOpt(wn *WebsocketNetwork) +} + +// option to add KV to wn base logger +type testWebsocketLogNameOption struct{ logName string } + +func (o testWebsocketLogNameOption) applyOpt(wn *WebsocketNetwork) { + if o.logName != "" { + wn.log = wn.log.With("name", o.logName) + } +} + +func makeTestWebsocketNode(t testing.TB, opts ...testWebsocketOption) *WebsocketNetwork { + return makeTestWebsocketNodeWithConfig(t, defaultConfig, opts...) 
} type messageCounterHandler struct { @@ -1111,6 +1131,900 @@ func TestGetPeers(t *testing.T) { assert.Equal(t, expectAddrs, peerAddrs) } +// confirms that if the config PublicAddress is set to "auto", +// PublicAddress is loaded when possible with the value of Address() +func TestAutoPublicAddress(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + netA := makeTestWebsocketNode(t) + netA.config.PublicAddress = "auto" + netA.config.GossipFanout = 1 + + netA.Start() + + time.Sleep(100 * time.Millisecond) + + // check that "auto" has been overloaded + addr, ok := netA.Address() + addr = hostAndPort(addr) + require.True(t, ok) + require.NotEqual(t, "auto", netA.PublicAddress()) + require.Equal(t, addr, netA.PublicAddress()) +} + +// mock an identityTracker +type mockIdentityTracker struct { + isOccupied bool + setCount int + insertCount int + removeCount int + lock deadlock.Mutex + realTracker identityTracker +} + +func newMockIdentityTracker(realTracker identityTracker) *mockIdentityTracker { + return &mockIdentityTracker{ + isOccupied: false, + setCount: 0, + insertCount: 0, + removeCount: 0, + realTracker: realTracker, + } +} + +func (d *mockIdentityTracker) setIsOccupied(b bool) { + d.lock.Lock() + defer d.lock.Unlock() + d.isOccupied = b +} +func (d *mockIdentityTracker) removeIdentity(p *wsPeer) { + d.lock.Lock() + defer d.lock.Unlock() + d.removeCount++ + d.realTracker.removeIdentity(p) +} +func (d *mockIdentityTracker) getInsertCount() int { + d.lock.Lock() + defer d.lock.Unlock() + return d.insertCount +} +func (d *mockIdentityTracker) getRemoveCount() int { + d.lock.Lock() + defer d.lock.Unlock() + return d.removeCount +} +func (d *mockIdentityTracker) getSetCount() int { + d.lock.Lock() + defer d.lock.Unlock() + return d.setCount +} +func (d *mockIdentityTracker) setIdentity(p *wsPeer) bool { + d.lock.Lock() + defer d.lock.Unlock() + d.setCount++ + // isOccupied is true, meaning we're overloading the "ok" return to false + if d.isOccupied { + return false + } + ret := d.realTracker.setIdentity(p) + if ret { + d.insertCount++ + } + return ret +} + +func hostAndPort(u string) string { + url, err := url.Parse(u) + if err == nil { + return fmt.Sprintf("%s:%s", url.Hostname(), url.Port()) + } + return "" +} + +// TestPeeringWithIdentityChallenge tests the happy path of connecting with identity challenge: +// - both peers have correctly set PublicAddress +// - both should exchange identities and verify +// - both peers should be able to deduplicate connections +func TestPeeringWithIdentityChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + + netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"}) + netA.identityTracker = newMockIdentityTracker(netA.identityTracker) + netA.config.PublicAddress = "auto" + netA.config.GossipFanout = 1 + + netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"}) + netB.identityTracker = newMockIdentityTracker(netB.identityTracker) + netB.config.PublicAddress = "auto" + netB.config.GossipFanout = 1 + + netA.Start() + defer netA.Stop() + netB.Start() + defer netB.Stop() + + addrA, ok := netA.Address() + require.True(t, ok) + gossipA, err := netA.addrToGossipAddr(addrA) + require.NoError(t, err) + + addrB, ok := netB.Address() + require.True(t, ok) + gossipB, err := netB.addrToGossipAddr(addrB) + require.NoError(t, err) + + // set addresses to just host:port to match phonebook/dns format + addrA = hostAndPort(addrA) + addrB = hostAndPort(addrB) + + // first connection should work just fine + if _, ok := 
netA.tryConnectReserveAddr(addrB); ok { + netA.wg.Add(1) + netA.tryConnect(addrB, gossipB) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + // just one A->B connection + assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn))) + assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn))) + assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) + + // confirm identity map was added to for both hosts + assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getInsertCount()) + + // netB has to wait for a final verification message over WS Handler, so pause a moment + time.Sleep(250 * time.Millisecond) + assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getInsertCount()) + + // bi-directional connection from B should not proceed + if _, ok := netB.tryConnectReserveAddr(addrA); ok { + netB.wg.Add(1) + netB.tryConnect(addrA, gossipA) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + + // still just one A->B connection + assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn))) + assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn))) + assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) + // netA never attempts to set identity as it never sees a verified identity + assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getSetCount()) + // netB would attempt to add the identity to the tracker + // but it would not end up being added + assert.Equal(t, 2, netB.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getInsertCount()) + + // Check deduplication again, this time from A + // the "ok" from tryConnectReserveAddr is overloaded here because isConnectedTo + // will prevent this connection from attempting in the first place + // in the real world, that isConnectedTo doesn't always trigger, if the hosts are behind + // a load balancer or other NAT + if _, ok := netA.tryConnectReserveAddr(addrB); ok || true { + netA.wg.Add(1) + netA.tryConnect(addrB, gossipB) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + + // netB never tries to add a new identity, since the connection gets abandoned before it is verified + assert.Equal(t, 2, netB.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getInsertCount()) + // still just one A->B connection + assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn))) + assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn))) + assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) + assert.Equal(t, 2, netA.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getInsertCount()) + + // Now have A connect to node C, which has the same PublicAddress as B (e.g., because it shares the + // same public load balancer endpoint). C will have a different identity keypair and so will not be + // considered a duplicate. 
+ netC := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netC"}) + netC.identityTracker = newMockIdentityTracker(netC.identityTracker) + netC.config.PublicAddress = addrB + netC.config.GossipFanout = 1 + + netC.Start() + defer netC.Stop() + + addrC, ok := netC.Address() + require.True(t, ok) + gossipC, err := netC.addrToGossipAddr(addrC) + require.NoError(t, err) + addrC = hostAndPort(addrC) + + // A connects to C (but uses addrB here to simulate case where B & C have the same PublicAddress) + netA.wg.Add(1) + netA.tryConnect(addrB, gossipC) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + + // A->B and A->C both open + assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn))) + assert.Equal(t, 2, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn))) + assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) + assert.Equal(t, 1, len(netC.GetPeers(PeersConnectedIn))) + assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) + + // confirm identity map was added to for both hosts + assert.Equal(t, 3, netA.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 2, netA.identityTracker.(*mockIdentityTracker).getInsertCount()) + + // netC has to wait for a final verification message over WS Handler, so pause a moment + time.Sleep(250 * time.Millisecond) + assert.Equal(t, 1, netC.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 1, netC.identityTracker.(*mockIdentityTracker).getInsertCount()) + +} + +// TestPeeringSenderIdentityChallengeOnly will confirm that if only the Sender +// Uses Identity, no identity exchange happens in the connection +func TestPeeringSenderIdentityChallengeOnly(t *testing.T) { + partitiontest.PartitionTest(t) + + netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"}) + netA.identityTracker = newMockIdentityTracker(netA.identityTracker) + netA.config.PublicAddress = "auto" + netA.config.GossipFanout = 1 + + netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"}) + netB.identityTracker = newMockIdentityTracker(netB.identityTracker) + //netB.config.PublicAddress = "auto" + netB.config.GossipFanout = 1 + + netA.Start() + defer netA.Stop() + netB.Start() + defer netB.Stop() + + addrA, ok := netA.Address() + require.True(t, ok) + gossipA, err := netA.addrToGossipAddr(addrA) + require.NoError(t, err) + + addrB, ok := netB.Address() + require.True(t, ok) + gossipB, err := netB.addrToGossipAddr(addrB) + require.NoError(t, err) + + // set addresses to just host:port to match phonebook/dns format + addrA = hostAndPort(addrA) + addrB = hostAndPort(addrB) + + // first connection should work just fine + if _, ok := netA.tryConnectReserveAddr(addrB); ok { + netA.wg.Add(1) + netA.tryConnect(addrB, gossipB) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn))) + + // confirm identity map was not added to for either host + assert.Equal(t, 0, netA.identityTracker.(*mockIdentityTracker).getSetCount()) + assert.Equal(t, 0, netB.identityTracker.(*mockIdentityTracker).getSetCount()) + + // bi-directional connection should also work + if _, ok := netB.tryConnectReserveAddr(addrA); ok { + netB.wg.Add(1) + netB.tryConnect(addrA, gossipA) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + // the nodes are connected redundantly + assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn))) + 
assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedOut)))
+	// confirm identity map was not added to for either host
+	assert.Equal(t, 0, netA.identityTracker.(*mockIdentityTracker).getSetCount())
+	assert.Equal(t, 0, netB.identityTracker.(*mockIdentityTracker).getSetCount())
+}
+
+// TestPeeringReceiverIdentityChallengeOnly will confirm that if only the Receiver
+// Uses Identity, no identity exchange happens in the connection
+func TestPeeringReceiverIdentityChallengeOnly(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
+	netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
+	//netA.config.PublicAddress = "auto"
+	netA.config.GossipFanout = 1
+
+	netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
+	netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
+	netB.config.PublicAddress = "auto"
+	netB.config.GossipFanout = 1
+
+	netA.Start()
+	defer netA.Stop()
+	netB.Start()
+	defer netB.Stop()
+
+	addrA, ok := netA.Address()
+	require.True(t, ok)
+	gossipA, err := netA.addrToGossipAddr(addrA)
+	require.NoError(t, err)
+
+	addrB, ok := netB.Address()
+	require.True(t, ok)
+	gossipB, err := netB.addrToGossipAddr(addrB)
+	require.NoError(t, err)
+
+	// set addresses to just host:port to match phonebook/dns format
+	addrA = hostAndPort(addrA)
+	addrB = hostAndPort(addrB)
+
+	// first connection should work just fine
+	if _, ok := netA.tryConnectReserveAddr(addrB); ok {
+		netA.wg.Add(1)
+		netA.tryConnect(addrB, gossipB)
+		// let the tryConnect go forward
+		time.Sleep(250 * time.Millisecond)
+	}
+	// single A->B connection
+	assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut)))
+
+	// confirm identity map was not added to for either host
+	assert.Equal(t, 0, netA.identityTracker.(*mockIdentityTracker).getSetCount())
+	assert.Equal(t, 0, netB.identityTracker.(*mockIdentityTracker).getSetCount())
+
+	// bi-directional connection should also work
+	if _, ok := netB.tryConnectReserveAddr(addrA); ok {
+		netB.wg.Add(1)
+		netB.tryConnect(addrA, gossipA)
+		// let the tryConnect go forward
+		time.Sleep(250 * time.Millisecond)
+	}
+	assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedOut)))
+	// confirm identity map was not added to for either host
+	assert.Equal(t, 0, netA.identityTracker.(*mockIdentityTracker).getSetCount())
+	assert.Equal(t, 0, netB.identityTracker.(*mockIdentityTracker).getSetCount())
+}
+
+// TestPeeringIncorrectDeduplicationName confirms that if the receiver can't match
+// the Address in the challenge to its PublicAddress, identities aren't exchanged, but peering continues
+func TestPeeringIncorrectDeduplicationName(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
+	netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
+	netA.config.PublicAddress = "auto"
+	netA.config.GossipFanout = 1
+
+	netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
+	netB.identityTracker = 
newMockIdentityTracker(netB.identityTracker)
+	netB.config.PublicAddress = "no:3333"
+	netB.config.GossipFanout = 1
+
+	netA.Start()
+	defer netA.Stop()
+	netB.Start()
+	defer netB.Stop()
+
+	addrA, ok := netA.Address()
+	require.True(t, ok)
+	gossipA, err := netA.addrToGossipAddr(addrA)
+	require.NoError(t, err)
+
+	addrB, ok := netB.Address()
+	require.True(t, ok)
+	gossipB, err := netB.addrToGossipAddr(addrB)
+	require.NoError(t, err)
+
+	// set addresses to just host:port to match phonebook/dns format
+	addrA = hostAndPort(addrA)
+	addrB = hostAndPort(addrB)
+
+	// first connection should work just fine
+	if _, ok := netA.tryConnectReserveAddr(addrB); ok {
+		netA.wg.Add(1)
+		netA.tryConnect(addrB, gossipB)
+		// let the tryConnect go forward
+		time.Sleep(250 * time.Millisecond)
+	}
+	// single A->B connection
+	assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut)))
+
+	// confirm identity map was not added to for either host
+	// nor was "set" called at all
+	assert.Equal(t, 0, netA.identityTracker.(*mockIdentityTracker).getSetCount())
+	assert.Equal(t, 0, netB.identityTracker.(*mockIdentityTracker).getSetCount())
+
+	// bi-directional connection should also work
+	// this second connection should set identities, because the receiver address matches now
+	if _, ok := netB.tryConnectReserveAddr(addrA); ok {
+		netB.wg.Add(1)
+		netB.tryConnect(addrA, gossipA)
+		// let the tryConnect go forward
+		time.Sleep(250 * time.Millisecond)
+	}
+	// confirm that at this point the identityTracker was called once per network
+	// and inserted once per network
+	assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getSetCount())
+	assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getSetCount())
+	assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getInsertCount())
+	assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getInsertCount())
+	assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn)))
+	assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedOut)))
+}
+
+// make a mockIdentityScheme which can accept overloaded behavior
+// use this over the next few tests to check that when one peer misbehaves, peering continues/halts as expected
+type mockIdentityScheme struct {
+	t                       *testing.T
+	realScheme              *identityChallengePublicKeyScheme
+	attachChallenge         func(attach http.Header, addr string) identityChallengeValue
+	verifyAndAttachResponse func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error)
+	verifyResponse          func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error)
+}
+
+func newMockIdentityScheme(t *testing.T) *mockIdentityScheme {
+	return &mockIdentityScheme{t: t, realScheme: NewIdentityChallengeScheme("any")}
+}
+func (i mockIdentityScheme) AttachChallenge(attach http.Header, addr string) identityChallengeValue {
+	if i.attachChallenge != nil {
+		return i.attachChallenge(attach, addr)
+	}
+	return i.realScheme.AttachChallenge(attach, addr)
+}
+func (i mockIdentityScheme) VerifyRequestAndAttachResponse(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) {
+	if i.verifyAndAttachResponse != nil {
+		return i.verifyAndAttachResponse(attach, h)
+	}
+	return 
i.realScheme.VerifyRequestAndAttachResponse(attach, h) +} +func (i mockIdentityScheme) VerifyResponse(h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) { + if i.verifyResponse != nil { + return i.verifyResponse(i.t, h, c) + } + return i.realScheme.VerifyResponse(h, c) +} + +// when the identity challenge is misconstructed in various ways, peering should behave as expected +func TestPeeringWithBadIdentityChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + + type testCase struct { + name string + attachChallenge func(attach http.Header, addr string) identityChallengeValue + totalInA int + totalOutA int + totalInB int + totalOutB int + } + + testCases := []testCase{ + // when identityChallenge is not included, peering continues as normal + { + name: "not included", + attachChallenge: func(attach http.Header, addr string) identityChallengeValue { return identityChallengeValue{} }, + totalInA: 0, + totalOutA: 1, + totalInB: 1, + totalOutB: 0, + }, + // when the identityChallenge is malformed B64, peering halts + { + name: "malformed b64", + attachChallenge: func(attach http.Header, addr string) identityChallengeValue { + attach.Add(IdentityChallengeHeader, "this does not decode!") + return newIdentityChallengeValue() + }, + totalInA: 0, + totalOutA: 0, + totalInB: 0, + totalOutB: 0, + }, + // when the identityChallenge can't be unmarshalled, peering halts + { + name: "not msgp decodable", + attachChallenge: func(attach http.Header, addr string) identityChallengeValue { + attach.Add(IdentityChallengeHeader, base64.StdEncoding.EncodeToString([]byte("Bad!Data!"))) + return newIdentityChallengeValue() + }, + totalInA: 0, + totalOutA: 0, + totalInB: 0, + totalOutB: 0, + }, + // when the incorrect address is used, peering continues + { + name: "incorrect address", + attachChallenge: func(attach http.Header, addr string) identityChallengeValue { + s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + c := identityChallenge{ + Key: s.identityKeys.SignatureVerifier, + Challenge: newIdentityChallengeValue(), + PublicAddress: []byte("incorrect address!"), + } + attach.Add(IdentityChallengeHeader, c.signAndEncodeB64(s.identityKeys)) + return c.Challenge + }, + totalInA: 0, + totalOutA: 1, + totalInB: 1, + totalOutB: 0, + }, + // when the challenge is incorrectly signed, peering halts + { + name: "bad signature", + attachChallenge: func(attach http.Header, addr string) identityChallengeValue { + s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + c := identityChallenge{ + Key: s.identityKeys.SignatureVerifier, + Challenge: newIdentityChallengeValue(), + PublicAddress: []byte("incorrect address!"), + }.Sign(s.identityKeys) + c.Msg.Challenge = newIdentityChallengeValue() // change the challenge after signing the message, so the signature check fails + enc := protocol.Encode(&c) + b64enc := base64.StdEncoding.EncodeToString(enc) + attach.Add(IdentityChallengeHeader, b64enc) + return c.Msg.Challenge + }, + totalInA: 0, + totalOutA: 0, + totalInB: 0, + totalOutB: 0, + }, + } + + for _, tc := range testCases { + t.Logf("Running Peering with Identity Challenge Test: %s", tc.name) + netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"}) + netA.identityTracker = newMockIdentityTracker(netA.identityTracker) + netA.config.PublicAddress = "auto" + netA.config.GossipFanout = 1 + + scheme := newMockIdentityScheme(t) + scheme.attachChallenge = tc.attachChallenge + netA.identityScheme = scheme + + netB := 
makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
+ netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
+ netB.config.PublicAddress = "auto"
+ netB.config.GossipFanout = 1
+
+ netA.Start()
+ defer netA.Stop()
+ netB.Start()
+ defer netB.Stop()
+
+ addrB, ok := netB.Address()
+ require.True(t, ok)
+ gossipB, err := netB.addrToGossipAddr(addrB)
+ require.NoError(t, err)
+
+ // set addresses to just host:port to match phonebook/dns format
+ addrB = hostAndPort(addrB)
+
+ if _, ok := netA.tryConnectReserveAddr(addrB); ok {
+ netA.wg.Add(1)
+ netA.tryConnect(addrB, gossipB)
+ // let the tryConnect go forward
+ time.Sleep(250 * time.Millisecond)
+ }
+ assert.Equal(t, tc.totalInA, len(netA.GetPeers(PeersConnectedIn)))
+ assert.Equal(t, tc.totalOutA, len(netA.GetPeers(PeersConnectedOut)))
+ assert.Equal(t, tc.totalInB, len(netB.GetPeers(PeersConnectedIn)))
+ assert.Equal(t, tc.totalOutB, len(netB.GetPeers(PeersConnectedOut)))
+ }
+
+}
+
+// when the identity challenge response is misconstructed in various ways, confirm that peering behaves as expected
+func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type testCase struct {
+ name string
+ verifyAndAttachResponse func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error)
+ totalInA int
+ totalOutA int
+ totalInB int
+ totalOutB int
+ }
+
+ testCases := []testCase{
+ // when there is no response to the identity challenge, peering should continue without ID
+ {
+ name: "not included",
+ verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) {
+ return identityChallengeValue{}, crypto.PublicKey{}, nil
+ },
+ totalInA: 0,
+ totalOutA: 1,
+ totalInB: 1,
+ totalOutB: 0,
+ },
+ // when the response is malformed, do not peer
+ {
+ name: "malformed b64",
+ verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) {
+ attach.Add(IdentityChallengeHeader, "this does not decode!")
+ return identityChallengeValue{}, crypto.PublicKey{}, nil
+ },
+ totalInA: 0,
+ totalOutA: 0,
+ totalInB: 0,
+ totalOutB: 0,
+ },
+ // when the response can't be unmarshalled, do not peer
+ {
+ name: "not msgp decodable",
+ verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) {
+ attach.Add(IdentityChallengeHeader, base64.StdEncoding.EncodeToString([]byte("Bad!Data!")))
+ return identityChallengeValue{}, crypto.PublicKey{}, nil
+ },
+ totalInA: 0,
+ totalOutA: 0,
+ totalInB: 0,
+ totalOutB: 0,
+ },
+ // when the original challenge isn't included, do not peer
+ {
+ name: "incorrect original challenge",
+ verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) {
+ s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys
+ // decode the header to an identityChallenge
+ msg, _ := base64.StdEncoding.DecodeString(h.Get(IdentityChallengeHeader))
+ idChal := identityChallenge{}
+ protocol.Decode(msg, &idChal)
+ // make the response object with an incorrect challenge, encode it and attach it to the header
+ r := identityChallengeResponse{
+ Key: s.identityKeys.SignatureVerifier,
+ Challenge: newIdentityChallengeValue(),
+ ResponseChallenge: newIdentityChallengeValue(),
+ }
+ attach.Add(IdentityChallengeHeader, r.signAndEncodeB64(s.identityKeys))
+ return r.ResponseChallenge, idChal.Key, nil
+ },
+ totalInA: 0,
+ totalOutA: 0,
+ totalInB:
0, + totalOutB: 0, + }, + // when the message is incorrectly signed, do not peer + { + name: "bad signature", + verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) { + s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + // decode the header to an identityChallenge + msg, _ := base64.StdEncoding.DecodeString(h.Get(IdentityChallengeHeader)) + idChal := identityChallenge{} + protocol.Decode(msg, &idChal) + // make the response object, then change the signature and encode and attach + r := identityChallengeResponse{ + Key: s.identityKeys.SignatureVerifier, + Challenge: newIdentityChallengeValue(), + ResponseChallenge: newIdentityChallengeValue(), + }.Sign(s.identityKeys) + r.Msg.ResponseChallenge = newIdentityChallengeValue() // change the challenge after signing the message + enc := protocol.Encode(&r) + b64enc := base64.StdEncoding.EncodeToString(enc) + attach.Add(IdentityChallengeHeader, b64enc) + return r.Msg.ResponseChallenge, idChal.Key, nil + }, + totalInA: 0, + totalOutA: 0, + totalInB: 0, + totalOutB: 0, + }, + } + + for _, tc := range testCases { + t.Logf("Running Peering with Identity Challenge Response Test: %s", tc.name) + netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"}) + netA.identityTracker = newMockIdentityTracker(netA.identityTracker) + netA.config.PublicAddress = "auto" + netA.config.GossipFanout = 1 + + netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"}) + netB.identityTracker = newMockIdentityTracker(netB.identityTracker) + netB.config.PublicAddress = "auto" + netB.config.GossipFanout = 1 + + scheme := newMockIdentityScheme(t) + scheme.verifyAndAttachResponse = tc.verifyAndAttachResponse + netB.identityScheme = scheme + + netA.Start() + defer netA.Stop() + netB.Start() + defer netB.Stop() + + addrB, ok := netB.Address() + require.True(t, ok) + gossipB, err := netB.addrToGossipAddr(addrB) + require.NoError(t, err) + + // set addresses to just host:port to match phonebook/dns format + addrB = hostAndPort(addrB) + + if _, ok := netA.tryConnectReserveAddr(addrB); ok { + netA.wg.Add(1) + netA.tryConnect(addrB, gossipB) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + assert.Equal(t, tc.totalInA, len(netA.GetPeers(PeersConnectedIn))) + assert.Equal(t, tc.totalOutA, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, tc.totalInB, len(netB.GetPeers(PeersConnectedIn))) + assert.Equal(t, tc.totalOutB, len(netB.GetPeers(PeersConnectedOut))) + } + +} + +// when the identity challenge verification is misconstructed in various ways, peering should behave as expected +func TestPeeringWithBadIdentityVerification(t *testing.T) { + partitiontest.PartitionTest(t) + + type testCase struct { + name string + verifyResponse func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) + totalInA int + totalOutA int + totalInB int + totalOutB int + additionalSleep time.Duration + occupied bool + } + + testCases := []testCase{ + // in a totally unmodified scenario, the two peers stay connected even after the verification timeout + { + name: "happy path", + totalInA: 0, + totalOutA: 1, + totalInB: 1, + totalOutB: 0, + }, + // if the peer does not send a final message, the peers stay connected + { + name: "not included", + verifyResponse: func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) { + return crypto.PublicKey{}, []byte{}, nil + }, + totalInA: 0, + 
totalOutA: 1,
+ totalInB: 1,
+ totalOutB: 0,
+ },
+ // when the identityVerification can't be unmarshalled, peer is disconnected
+ {
+ name: "not msgp decodable",
+ verifyResponse: func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) {
+ message := append([]byte(protocol.NetIDVerificationTag), []byte("Bad!Data!")[:]...)
+ return crypto.PublicKey{}, message, nil
+ },
+ totalInA: 0,
+ totalOutA: 0,
+ totalInB: 0,
+ totalOutB: 0,
+ },
+ {
+ // when the verification signature doesn't match the peer's expectation (the previously exchanged identity), peer is disconnected
+ name: "bad signature",
+ verifyResponse: func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) {
+ headerString := h.Get(IdentityChallengeHeader)
+ require.NotEmpty(t, headerString)
+ msg, err := base64.StdEncoding.DecodeString(headerString)
+ require.NoError(t, err)
+ resp := identityChallengeResponseSigned{}
+ err = protocol.Decode(msg, &resp)
+ require.NoError(t, err)
+ s := NewIdentityChallengeScheme("does not matter") // make a throwaway key
+ ver := identityVerificationMessageSigned{
+ // fill in correct ResponseChallenge field
+ Msg: identityVerificationMessage{ResponseChallenge: resp.Msg.ResponseChallenge},
+ Signature: s.identityKeys.SignBytes([]byte("bad bytes for signing")),
+ }
+ message := append([]byte(protocol.NetIDVerificationTag), protocol.Encode(&ver)[:]...)
+ return crypto.PublicKey{}, message, nil
+ },
+ totalInA: 0,
+ totalOutA: 0,
+ totalInB: 0,
+ totalOutB: 0,
+ },
+ {
+ // when the response challenge inside the verification message is wrong (in addition to a bad signature), peer is disconnected
+ name: "bad signature, wrong challenge",
+ verifyResponse: func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) {
+ s := NewIdentityChallengeScheme("does not matter") // make a throwaway key
+ ver := identityVerificationMessageSigned{
+ // fill in wrong ResponseChallenge field
+ Msg: identityVerificationMessage{ResponseChallenge: newIdentityChallengeValue()},
+ Signature: s.identityKeys.SignBytes([]byte("bad bytes for signing")),
+ }
+ message := append([]byte(protocol.NetIDVerificationTag), protocol.Encode(&ver)[:]...)
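+ // the payload mirrors real peer traffic: the 2-byte NetIDVerificationTag
+ // followed by the msgp encoding of the signed verification message;
+ // here both the response challenge and the signature are wrong, so the
+ // zero peer totals below assert that the receiver rejects the verification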
+ return crypto.PublicKey{}, message, nil + }, + totalInA: 0, + totalOutA: 0, + totalInB: 0, + totalOutB: 0, + }, + { + // when the identity is already in use, peer is disconnected + name: "identity occupied", + verifyResponse: nil, + totalInA: 0, + totalOutA: 0, + totalInB: 0, + totalOutB: 0, + occupied: true, + }, + } + + for _, tc := range testCases { + t.Logf("Running Peering with Identity Verification Test: %s", tc.name) + netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"}) + netA.identityTracker = newMockIdentityTracker(netA.identityTracker) + netA.config.PublicAddress = "auto" + netA.config.GossipFanout = 1 + + scheme := newMockIdentityScheme(t) + scheme.verifyResponse = tc.verifyResponse + netA.identityScheme = scheme + + netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"}) + netB.identityTracker = newMockIdentityTracker(netB.identityTracker) + netB.config.PublicAddress = "auto" + netB.config.GossipFanout = 1 + // if the key is occupied, make the tracker fail to insert the peer + if tc.occupied { + netB.identityTracker = newMockIdentityTracker(netB.identityTracker) + netB.identityTracker.(*mockIdentityTracker).setIsOccupied(true) + } + + netA.Start() + defer netA.Stop() + netB.Start() + defer netB.Stop() + + addrB, ok := netB.Address() + require.True(t, ok) + gossipB, err := netB.addrToGossipAddr(addrB) + require.NoError(t, err) + + // set addresses to just host:port to match phonebook/dns format + addrB = hostAndPort(addrB) + + if _, ok := netA.tryConnectReserveAddr(addrB); ok { + netA.wg.Add(1) + netA.tryConnect(addrB, gossipB) + // let the tryConnect go forward + time.Sleep(250 * time.Millisecond) + } + + assert.Equal(t, tc.totalInA, len(netA.GetPeers(PeersConnectedIn))) + assert.Equal(t, tc.totalOutA, len(netA.GetPeers(PeersConnectedOut))) + assert.Equal(t, tc.totalInB, len(netB.GetPeers(PeersConnectedIn))) + assert.Equal(t, tc.totalOutB, len(netB.GetPeers(PeersConnectedOut))) + } +} + type benchmarkHandler struct { returns chan uint64 } diff --git a/network/wsPeer.go b/network/wsPeer.go index d769d122b7..accd06cf9e 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -105,17 +105,18 @@ var unknownProtocolTagMessagesTotal = metrics.MakeCounter(metrics.UnknownProtoco // defaultSendMessageTags is the default list of messages which a peer would // allow to be sent without receiving any explicit request. 
var defaultSendMessageTags = map[protocol.Tag]bool{ - protocol.AgreementVoteTag: true, - protocol.MsgDigestSkipTag: true, - protocol.NetPrioResponseTag: true, - protocol.PingTag: true, - protocol.PingReplyTag: true, - protocol.ProposalPayloadTag: true, - protocol.TopicMsgRespTag: true, - protocol.MsgOfInterestTag: true, - protocol.TxnTag: true, - protocol.UniEnsBlockReqTag: true, - protocol.VoteBundleTag: true, + protocol.AgreementVoteTag: true, + protocol.MsgDigestSkipTag: true, + protocol.NetPrioResponseTag: true, + protocol.NetIDVerificationTag: true, + protocol.PingTag: true, + protocol.PingReplyTag: true, + protocol.ProposalPayloadTag: true, + protocol.TopicMsgRespTag: true, + protocol.MsgOfInterestTag: true, + protocol.TxnTag: true, + protocol.UniEnsBlockReqTag: true, + protocol.VoteBundleTag: true, } // interface allows substituting debug implementation for *websocket.Conn @@ -165,6 +166,8 @@ const disconnectLeastPerformingPeer disconnectReason = "LeastPerformingPeer" const disconnectCliqueResolve disconnectReason = "CliqueResolving" const disconnectRequestReceived disconnectReason = "DisconnectRequest" const disconnectStaleWrite disconnectReason = "DisconnectStaleWrite" +const disconnectDuplicateConnection disconnectReason = "DuplicateConnection" +const disconnectBadIdentityData disconnectReason = "BadIdentityData" // Response is the structure holding the response from the server type Response struct { @@ -233,6 +236,12 @@ type wsPeer struct { // is present in wn.peers. peerIndex int + // the peer's identity key which it uses for identityChallenge exchanges + identity crypto.PublicKey + identityVerified uint32 + // the identityChallenge is recorded to the peer so it may verify its identity at a later time + identityChallenge identityChallengeValue + // Challenge sent to the peer on an incoming connection prioChallenge string @@ -336,8 +345,7 @@ func (wp *wsPeer) Version() string { return wp.version } -// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent. -// +// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent. 
// (Implements UnicastPeer) func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error { var err error @@ -571,7 +579,7 @@ func (wp *wsPeer) readLoop() { atomic.AddUint64(&wp.ppMessageCount, 1) // the remaining valid tags: no special handling here case protocol.NetPrioResponseTag, protocol.PingTag, protocol.PingReplyTag, - protocol.StateProofSigTag, protocol.UniEnsBlockReqTag, protocol.VoteBundleTag: + protocol.StateProofSigTag, protocol.UniEnsBlockReqTag, protocol.VoteBundleTag, protocol.NetIDVerificationTag: default: // unrecognized tag unknownProtocolTagMessagesTotal.Inc(nil) atomic.AddUint64(&wp.unkMessageCount, 1) @@ -1013,6 +1021,7 @@ func (wp *wsPeer) OnClose(f func()) { wp.closers = append(wp.closers, f) } +//msgp:ignore peerFeatureFlag type peerFeatureFlag int const pfCompressedProposal peerFeatureFlag = 1 diff --git a/protocol/hash.go b/protocol/hash.go index 079333a438..26975a402f 100644 --- a/protocol/hash.go +++ b/protocol/hash.go @@ -48,6 +48,9 @@ const ( MerkleArrayNode HashID = "MA" MerkleVectorCommitmentBottomLeaf HashID = "MB" Message HashID = "MX" + NetIdentityChallenge HashID = "NIC" + NetIdentityChallengeResponse HashID = "NIR" + NetIdentityVerificationMessage HashID = "NIV" NetPrioResponse HashID = "NPR" OneTimeSigKey1 HashID = "OT1" OneTimeSigKey2 HashID = "OT2" diff --git a/protocol/tags.go b/protocol/tags.go index 876a8c868c..ef44e74acf 100644 --- a/protocol/tags.go +++ b/protocol/tags.go @@ -25,16 +25,17 @@ type Tag string // are encoded using a comma separator (see network/msgOfInterest.go). // The tags must be 2 bytes long. const ( - AgreementVoteTag Tag = "AV" - MsgOfInterestTag Tag = "MI" - MsgDigestSkipTag Tag = "MS" - NetPrioResponseTag Tag = "NP" - PingTag Tag = "pi" - PingReplyTag Tag = "pj" - ProposalPayloadTag Tag = "PP" - StateProofSigTag Tag = "SP" - TopicMsgRespTag Tag = "TS" - TxnTag Tag = "TX" + AgreementVoteTag Tag = "AV" + MsgOfInterestTag Tag = "MI" + MsgDigestSkipTag Tag = "MS" + NetPrioResponseTag Tag = "NP" + NetIDVerificationTag Tag = "NI" + PingTag Tag = "pi" + PingReplyTag Tag = "pj" + ProposalPayloadTag Tag = "PP" + StateProofSigTag Tag = "SP" + TopicMsgRespTag Tag = "TS" + TxnTag Tag = "TX" //UniCatchupReqTag Tag = "UC" was replaced by UniEnsBlockReqTag UniEnsBlockReqTag Tag = "UE" //UniEnsBlockResTag Tag = "US" was used for wsfetcherservice @@ -47,6 +48,7 @@ var TagList = []Tag{ AgreementVoteTag, MsgOfInterestTag, MsgDigestSkipTag, + NetIDVerificationTag, NetPrioResponseTag, PingTag, PingReplyTag, From b84f98094f5cb7da823d444ef4a2464362af67c8 Mon Sep 17 00:00:00 2001 From: Ignacio Corderi Date: Fri, 17 Feb 2023 17:06:59 -0300 Subject: [PATCH 46/81] refactor: remove sql.Tx from Batch (#5080) --- ledger/acctdeltas.go | 31 +- ledger/acctdeltas_test.go | 1248 ++++++++++++--------------- ledger/acctonline.go | 37 +- ledger/acctonline_test.go | 122 +-- ledger/acctupdates.go | 24 +- ledger/acctupdates_test.go | 203 ++--- ledger/bulletin.go | 4 +- ledger/catchpointtracker.go | 86 +- ledger/catchpointtracker_test.go | 16 +- ledger/catchpointwriter.go | 22 +- ledger/catchpointwriter_test.go | 72 +- ledger/catchupaccessor.go | 114 ++- ledger/ledger_test.go | 46 +- ledger/metrics.go | 4 +- ledger/notifier.go | 4 +- ledger/store/accountsV2.go | 90 +- ledger/store/encodedAccountsIter.go | 11 +- ledger/store/interface.go | 40 +- ledger/store/store.go | 194 ++++- ledger/store/testing.go | 7 + ledger/tracker.go | 21 +- ledger/tracker_test.go | 4 +- ledger/trackerdb.go | 12 +- ledger/txtail.go | 18 +- ledger/txtail_test.go | 12 +- 
util/db/dbutil.go | 2 +- 26 files changed, 1303 insertions(+), 1141 deletions(-) diff --git a/ledger/acctdeltas.go b/ledger/acctdeltas.go index 2bbb5e5e12..480c165d9a 100644 --- a/ledger/acctdeltas.go +++ b/ledger/acctdeltas.go @@ -294,11 +294,14 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba // resourcesLoadOld updates the entries on the deltas.oldResource map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. -func (a *compactResourcesDeltas) resourcesLoadOld(tx *sql.Tx, knownAddresses map[basics.Address]int64) (err error) { +func (a *compactResourcesDeltas) resourcesLoadOld(tx store.TransactionScope, knownAddresses map[basics.Address]int64) (err error) { if len(a.misses) == 0 { return nil } - arw := store.NewAccountsSQLReaderWriter(tx) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } defer func() { a.misses = nil @@ -458,11 +461,16 @@ func makeCompactAccountDeltas(stateDeltas []ledgercore.StateDelta, baseRound bas // accountsLoadOld updates the entries on the deltas.old map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. -func (a *compactAccountDeltas) accountsLoadOld(tx *sql.Tx) (err error) { +func (a *compactAccountDeltas) accountsLoadOld(tx store.TransactionScope) (err error) { + // TODO: this function only needs a reader's scope to the datastore if len(a.misses) == 0 { return nil } - arw := store.NewAccountsSQLReaderWriter(tx) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + defer func() { a.misses = nil }() @@ -595,11 +603,14 @@ func makeCompactOnlineAccountDeltas(accountDeltas []ledgercore.AccountDeltas, ba // accountsLoadOld updates the entries on the deltas.old map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. 
-func (a *compactOnlineAccountDeltas) accountsLoadOld(tx *sql.Tx) (err error) { +func (a *compactOnlineAccountDeltas) accountsLoadOld(tx store.TransactionScope) (err error) { if len(a.misses) == 0 { return nil } - arw := store.NewAccountsSQLReaderWriter(tx) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } defer func() { a.misses = nil }() @@ -693,7 +704,7 @@ func accountDataToOnline(address basics.Address, ad *ledgercore.AccountData, pro // accountsNewRound is a convenience wrapper for accountsNewRoundImpl func accountsNewRound( - tx *sql.Tx, + tx store.TransactionScope, updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable, proto config.ConsensusParams, lastUpdateRound basics.Round, ) (updatedAccounts []store.PersistedAccountData, updatedResources map[basics.Address][]store.PersistedResourcesData, updatedKVs map[string]store.PersistedKVData, err error) { @@ -702,7 +713,7 @@ func accountsNewRound( hasKvPairs := len(kvPairs) > 0 hasCreatables := len(creatables) > 0 - writer, err := store.MakeAccountsSQLWriter(tx, hasAccounts, hasResources, hasKvPairs, hasCreatables) + writer, err := tx.MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables) if err != nil { return } @@ -712,13 +723,13 @@ func accountsNewRound( } func onlineAccountsNewRound( - tx *sql.Tx, + tx store.TransactionScope, updates compactOnlineAccountDeltas, proto config.ConsensusParams, lastUpdateRound basics.Round, ) (updatedAccounts []store.PersistedOnlineAccountData, err error) { hasAccounts := updates.len() > 0 - writer, err := store.MakeOnlineAccountsSQLWriter(tx, hasAccounts) + writer, err := tx.MakeOnlineAccountsOptimizedWriter(hasAccounts) if err != nil { return } diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 017677a96c..3032b741dc 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -50,22 +50,22 @@ import ( "github.com/algorand/go-algorand/util/db" ) -func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.Address]basics.AccountData) { - arw := store.NewAccountsSQLReaderWriter(tx) +func checkAccounts(t *testing.T, tx store.TransactionScope, rnd basics.Round, accts map[basics.Address]basics.AccountData) { + arw, err := tx.MakeAccountsReaderWriter() + require.NoError(t, err) r, err := arw.AccountsRound() require.NoError(t, err) require.Equal(t, r, rnd) - aq, err := store.AccountsInitDbQueries(tx) + aor, err := tx.MakeAccountsOptimizedReader() require.NoError(t, err) - defer aq.Close() var totalOnline, totalOffline, totalNotPart uint64 for addr, data := range accts { expected := ledgercore.ToAccountData(data) - pad, err := aq.LookupAccount(addr) + pad, err := aor.LookupAccount(addr) require.NoError(t, err) d := pad.AccountData.GetLedgerCoreAccountData() require.Equal(t, expected, d) @@ -82,7 +82,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics. } } - all, err := accountsAll(tx) + all, err := arw.AccountsAllTest() require.NoError(t, err) require.Equal(t, all, accts) @@ -94,7 +94,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics. 
require.Equal(t, totalOnline+totalOffline, totals.Participating().Raw) require.Equal(t, totalOnline+totalOffline+totalNotPart, totals.All().Raw) - d, err := aq.LookupAccount(ledgertesting.RandomAddress()) + d, err := aor.LookupAccount(ledgertesting.RandomAddress()) require.NoError(t, err) require.Equal(t, rnd, d.Round) require.Equal(t, d.AccountData, store.BaseAccountData{}) @@ -148,24 +148,24 @@ func TestAccountDBInit(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - dbs, _ := storetesting.DbOpenTest(t, true) - storetesting.SetDbLogging(t, dbs) + dbs, _ := store.DbOpenTrackerTest(t, true) + dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(t, err) - defer tx.Rollback() - - accts := ledgertesting.RandomAccounts(20, true) - newDB := store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) - require.True(t, newDB) + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + accts := ledgertesting.RandomAccounts(20, true) + newDB := tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + require.True(t, newDB) - checkAccounts(t, tx, 0, accts) + checkAccounts(t, tx, 0, accts) - newDB, err = store.AccountsInitLightTest(t, tx, accts, proto) + newDB, err = tx.AccountsInitLightTest(t, accts, proto) + require.NoError(t, err) + require.False(t, newDB) + checkAccounts(t, tx, 0, accts) + return + }) require.NoError(t, err) - require.False(t, newDB) - checkAccounts(t, tx, 0, accts) } // creatablesFromUpdates calculates creatables from updates @@ -209,124 +209,124 @@ func TestAccountDBRound(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - dbs, _ := storetesting.DbOpenTest(t, true) - storetesting.SetDbLogging(t, dbs) + dbs, _ := store.DbOpenTrackerTest(t, true) + dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(t, err) - defer tx.Rollback() - - arw := store.NewAccountsSQLReaderWriter(tx) + dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + arw, err := tx.MakeAccountsReaderWriter() + require.NoError(t, err) - accts := ledgertesting.RandomAccounts(20, true) - store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) - checkAccounts(t, tx, 0, accts) - totals, err := arw.AccountsTotals(context.Background(), false) - require.NoError(t, err) - expectedOnlineRoundParams, endRound, err := arw.AccountsOnlineRoundParams() - require.NoError(t, err) - require.Equal(t, 1, len(expectedOnlineRoundParams)) - require.Equal(t, 0, int(endRound)) + accts := ledgertesting.RandomAccounts(20, true) + tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + checkAccounts(t, tx, 0, accts) + totals, err := arw.AccountsTotals(context.Background(), false) + require.NoError(t, err) + expectedOnlineRoundParams, endRound, err := arw.AccountsOnlineRoundParams() + require.NoError(t, err) + require.Equal(t, 1, len(expectedOnlineRoundParams)) + require.Equal(t, 0, int(endRound)) - // used to determine how many creatables element will be in the test per iteration - numElementsPerSegment := 10 + // used to determine how many creatables element will be in the test per iteration + numElementsPerSegment := 10 - // lastCreatableID stores asset or app max used index to get rid of conflicts - lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) - ctbsList, randomCtbs := randomCreatables(numElementsPerSegment) - expectedDbImage := 
make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) - var baseAccounts lruAccounts - var baseResources lruResources - var baseOnlineAccounts lruOnlineAccounts - var newacctsTotals map[basics.Address]ledgercore.AccountData - baseAccounts.init(nil, 100, 80) - baseResources.init(nil, 100, 80) - baseOnlineAccounts.init(nil, 100, 80) - for i := 1; i < 10; i++ { - var updates ledgercore.AccountDeltas - updates, newacctsTotals, _ = ledgertesting.RandomDeltasFull(20, accts, 0, &lastCreatableID) - totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals) - accts = applyPartialDeltas(accts, updates) - ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs, - expectedDbImage, numElementsPerSegment) - - oldBase := i - 1 - updatesCnt := makeCompactAccountDeltas([]ledgercore.StateDelta{{Accts: updates}}, basics.Round(oldBase), true, baseAccounts) - resourceUpdatesCnt := makeCompactResourceDeltas([]ledgercore.StateDelta{{Accts: updates}}, basics.Round(oldBase), true, baseAccounts, baseResources) - updatesOnlineCnt := makeCompactOnlineAccountDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(oldBase), baseOnlineAccounts) - - err = updatesCnt.accountsLoadOld(tx) - require.NoError(t, err) + // lastCreatableID stores asset or app max used index to get rid of conflicts + lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) + ctbsList, randomCtbs := randomCreatables(numElementsPerSegment) + expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) + var baseAccounts lruAccounts + var baseResources lruResources + var baseOnlineAccounts lruOnlineAccounts + var newacctsTotals map[basics.Address]ledgercore.AccountData + baseAccounts.init(nil, 100, 80) + baseResources.init(nil, 100, 80) + baseOnlineAccounts.init(nil, 100, 80) + for i := 1; i < 10; i++ { + var updates ledgercore.AccountDeltas + updates, newacctsTotals, _ = ledgertesting.RandomDeltasFull(20, accts, 0, &lastCreatableID) + totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals) + accts = applyPartialDeltas(accts, updates) + ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs, + expectedDbImage, numElementsPerSegment) + + oldBase := i - 1 + updatesCnt := makeCompactAccountDeltas([]ledgercore.StateDelta{{Accts: updates}}, basics.Round(oldBase), true, baseAccounts) + resourceUpdatesCnt := makeCompactResourceDeltas([]ledgercore.StateDelta{{Accts: updates}}, basics.Round(oldBase), true, baseAccounts, baseResources) + updatesOnlineCnt := makeCompactOnlineAccountDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(oldBase), baseOnlineAccounts) + + err = updatesCnt.accountsLoadOld(tx) + require.NoError(t, err) - err = updatesOnlineCnt.accountsLoadOld(tx) - require.NoError(t, err) + err = updatesOnlineCnt.accountsLoadOld(tx) + require.NoError(t, err) - knownAddresses := make(map[basics.Address]int64) - for _, delta := range updatesCnt.deltas { - knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid - } + knownAddresses := make(map[basics.Address]int64) + for _, delta := range updatesCnt.deltas { + knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid + } - err = resourceUpdatesCnt.resourcesLoadOld(tx, knownAddresses) - require.NoError(t, err) + err = resourceUpdatesCnt.resourcesLoadOld(tx, knownAddresses) + require.NoError(t, err) - err = arw.AccountsPutTotals(totals, false) - require.NoError(t, err) - onlineRoundParams := ledgercore.OnlineRoundParamsData{RewardsLevel: totals.RewardsLevel, OnlineSupply: 
totals.Online.Money.Raw, CurrentProtocol: protocol.ConsensusCurrentVersion} - err = arw.AccountsPutOnlineRoundParams([]ledgercore.OnlineRoundParamsData{onlineRoundParams}, basics.Round(i)) - require.NoError(t, err) - expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams) + err = arw.AccountsPutTotals(totals, false) + require.NoError(t, err) + onlineRoundParams := ledgercore.OnlineRoundParamsData{RewardsLevel: totals.RewardsLevel, OnlineSupply: totals.Online.Money.Raw, CurrentProtocol: protocol.ConsensusCurrentVersion} + err = arw.AccountsPutOnlineRoundParams([]ledgercore.OnlineRoundParamsData{onlineRoundParams}, basics.Round(i)) + require.NoError(t, err) + expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams) - updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i)) - require.NoError(t, err) - require.Equal(t, updatesCnt.len(), len(updatedAccts)) - numResUpdates := 0 - for _, rs := range updatesResources { - numResUpdates += len(rs) - } - require.Equal(t, resourceUpdatesCnt.len(), numResUpdates) - require.Empty(t, updatedKVs) + updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i)) + require.NoError(t, err) + require.Equal(t, updatesCnt.len(), len(updatedAccts)) + numResUpdates := 0 + for _, rs := range updatesResources { + numResUpdates += len(rs) + } + require.Equal(t, resourceUpdatesCnt.len(), numResUpdates) + require.Empty(t, updatedKVs) - updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i)) - require.NoError(t, err) + updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i)) + require.NoError(t, err) - err = arw.UpdateAccountsRound(basics.Round(i)) - require.NoError(t, err) + err = arw.UpdateAccountsRound(basics.Round(i)) + require.NoError(t, err) - // TODO: calculate exact number of updates? - // newly created online accounts + accounts went offline + voting data/stake modifed accounts - require.NotEmpty(t, updatedOnlineAccts) + // TODO: calculate exact number of updates? 
+ // newly created online accounts + accounts went offline + voting data/stake modified accounts
+ require.NotEmpty(t, updatedOnlineAccts)
- checkAccounts(t, tx, basics.Round(i), accts)
- checkCreatables(t, tx, i, expectedDbImage)
- }
+ checkAccounts(t, tx, basics.Round(i), accts)
+ arw.CheckCreatablesTest(t, i, expectedDbImage)
+ }
- // test the accounts totals
- var updates ledgercore.AccountDeltas
- for addr, acctData := range newacctsTotals {
- updates.Upsert(addr, acctData)
- }
+ // test the accounts totals
+ var updates ledgercore.AccountDeltas
+ for addr, acctData := range newacctsTotals {
+ updates.Upsert(addr, acctData)
+ }
- expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, nil, ledgercore.AccountTotals{})
- actualTotals, err := arw.AccountsTotals(context.Background(), false)
- require.NoError(t, err)
- require.Equal(t, expectedTotals, actualTotals)
+ expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, nil, ledgercore.AccountTotals{})
+ actualTotals, err := arw.AccountsTotals(context.Background(), false)
+ require.NoError(t, err)
+ require.Equal(t, expectedTotals, actualTotals)
- actualOnlineRoundParams, endRound, err := arw.AccountsOnlineRoundParams()
- require.NoError(t, err)
- require.Equal(t, expectedOnlineRoundParams, actualOnlineRoundParams)
- require.Equal(t, 9, int(endRound))
+ actualOnlineRoundParams, endRound, err := arw.AccountsOnlineRoundParams()
+ require.NoError(t, err)
+ require.Equal(t, expectedOnlineRoundParams, actualOnlineRoundParams)
+ require.Equal(t, 9, int(endRound))
- // check LoadAllFullAccounts
- loaded := make(map[basics.Address]basics.AccountData, len(accts))
- acctCb := func(addr basics.Address, data basics.AccountData) {
- loaded[addr] = data
- }
- count, err := arw.LoadAllFullAccounts(context.Background(), "accountbase", "resources", acctCb)
- require.NoError(t, err)
- require.Equal(t, count, len(accts))
- require.Equal(t, count, len(loaded))
- require.Equal(t, accts, loaded)
+ // check LoadAllFullAccounts
+ loaded := make(map[basics.Address]basics.AccountData, len(accts))
+ acctCb := func(addr basics.Address, data basics.AccountData) {
+ loaded[addr] = data
+ }
+ count, err := arw.LoadAllFullAccounts(context.Background(), "accountbase", "resources", acctCb)
+ require.NoError(t, err)
+ require.Equal(t, count, len(accts))
+ require.Equal(t, count, len(loaded))
+ require.Equal(t, accts, loaded)
+ return nil
+ })
 }

 // TestAccountDBInMemoryAcct checks in-memory only account modifications are handled correctly by
@@ -365,71 +365,70 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
 for i, test := range tests {
- dbs, _ := storetesting.DbOpenTest(t, true)
- storetesting.SetDbLogging(t, dbs)
+ dbs, _ := store.DbOpenTrackerTest(t, true)
+ dbs.SetLogger(logging.TestingLog(t))
 defer dbs.Close()
- tx, err := dbs.Wdb.Handle.Begin()
- require.NoError(t, err)
- defer tx.Rollback()
-
- accts := ledgertesting.RandomAccounts(1, true)
- store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
- addr := ledgertesting.RandomAddress()
-
- // lastCreatableID stores asset or app max used index to get rid of conflicts
- var baseAccounts lruAccounts
- var baseResources lruResources
- baseAccounts.init(nil, 100, 80)
- baseResources.init(nil, 100, 80)
-
- t.Run(fmt.Sprintf("test%d", i), func(t *testing.T) {
-
- stateDeltas, numAcctDeltas, numResDeltas := test(addr)
- lastRound := uint64(len(stateDeltas) + 1)
-
- outAccountDeltas := makeCompactAccountDeltas(stateDeltas, basics.Round(1), true,
baseAccounts) - require.Equal(t, 1, len(outAccountDeltas.deltas)) - require.Equal(t, accountDelta{newAcct: store.BaseAccountData{UpdateRound: lastRound}, nAcctDeltas: numAcctDeltas, address: addr}, outAccountDeltas.deltas[0]) - require.Equal(t, 1, len(outAccountDeltas.misses)) + dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + accts := ledgertesting.RandomAccounts(1, true) + tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + addr := ledgertesting.RandomAddress() + + // lastCreatableID stores asset or app max used index to get rid of conflicts + var baseAccounts lruAccounts + var baseResources lruResources + baseAccounts.init(nil, 100, 80) + baseResources.init(nil, 100, 80) + + t.Run(fmt.Sprintf("test%d", i), func(t *testing.T) { + + stateDeltas, numAcctDeltas, numResDeltas := test(addr) + lastRound := uint64(len(stateDeltas) + 1) + + outAccountDeltas := makeCompactAccountDeltas(stateDeltas, basics.Round(1), true, baseAccounts) + require.Equal(t, 1, len(outAccountDeltas.deltas)) + require.Equal(t, accountDelta{newAcct: store.BaseAccountData{UpdateRound: lastRound}, nAcctDeltas: numAcctDeltas, address: addr}, outAccountDeltas.deltas[0]) + require.Equal(t, 1, len(outAccountDeltas.misses)) + + outResourcesDeltas := makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources) + require.Equal(t, 1, len(outResourcesDeltas.deltas)) + require.Equal(t, + resourceDelta{ + oldResource: store.PersistedResourcesData{Aidx: 100}, newResource: store.MakeResourcesData(lastRound - 1), + nAcctDeltas: numResDeltas, address: addr, + }, + outResourcesDeltas.deltas[0], + ) + require.Equal(t, 1, len(outAccountDeltas.misses)) - outResourcesDeltas := makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources) - require.Equal(t, 1, len(outResourcesDeltas.deltas)) - require.Equal(t, - resourceDelta{ - oldResource: store.PersistedResourcesData{Aidx: 100}, newResource: store.MakeResourcesData(lastRound - 1), - nAcctDeltas: numResDeltas, address: addr, - }, - outResourcesDeltas.deltas[0], - ) - require.Equal(t, 1, len(outAccountDeltas.misses)) + err := outAccountDeltas.accountsLoadOld(tx) + require.NoError(t, err) - err = outAccountDeltas.accountsLoadOld(tx) - require.NoError(t, err) + knownAddresses := make(map[basics.Address]int64) + for _, delta := range outAccountDeltas.deltas { + knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid + } - knownAddresses := make(map[basics.Address]int64) - for _, delta := range outAccountDeltas.deltas { - knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid - } + err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses) + require.NoError(t, err) - err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses) - require.NoError(t, err) + updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound)) + require.NoError(t, err) + require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts + require.Equal(t, + store.PersistedAccountData{Addr: addr, Round: basics.Round(lastRound)}, + updatedAccts[0], + ) - updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound)) - require.NoError(t, err) - require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts - require.Equal(t, - store.PersistedAccountData{Addr: addr, Round: basics.Round(lastRound)}, - 
updatedAccts[0], - ) - - require.Equal(t, 1, len(updatesResources[addr])) // we store empty even for deleted resources - require.Equal(t, - store.PersistedResourcesData{Addrid: 0, Aidx: 100, Data: store.MakeResourcesData(0), Round: basics.Round(lastRound)}, - updatesResources[addr][0], - ) + require.Equal(t, 1, len(updatesResources[addr])) // we store empty even for deleted resources + require.Equal(t, + store.PersistedResourcesData{Addrid: 0, Aidx: 100, Data: store.MakeResourcesData(0), Round: basics.Round(lastRound)}, + updatesResources[addr][0], + ) - require.Empty(t, updatedKVs) + require.Empty(t, updatedKVs) + }) + return nil }) } } @@ -437,18 +436,17 @@ func TestAccountDBInMemoryAcct(t *testing.T) { func TestAccountStorageWithStateProofID(t *testing.T) { partitiontest.PartitionTest(t) - dbs, _ := storetesting.DbOpenTest(t, true) - storetesting.SetDbLogging(t, dbs) + dbs, _ := store.DbOpenTrackerTest(t, true) + dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(t, err) - defer tx.Rollback() - - accts := ledgertesting.RandomAccounts(20, false) - _ = store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) - checkAccounts(t, tx, 0, accts) - require.True(t, allAccountsHaveStateProofPKs(accts)) + dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + accts := ledgertesting.RandomAccounts(20, false) + _ = tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + checkAccounts(t, tx, 0, accts) + require.True(t, allAccountsHaveStateProofPKs(accts)) + return nil + }) } func allAccountsHaveStateProofPKs(accts map[basics.Address]basics.AccountData) bool { @@ -460,38 +458,6 @@ func allAccountsHaveStateProofPKs(accts map[basics.Address]basics.AccountData) b return true } -// checkCreatables compares the expected database image to the actual databse content -func checkCreatables(t *testing.T, - tx *sql.Tx, iteration int, - expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable) { - - stmt, err := tx.Prepare("SELECT asset, creator, ctype FROM assetcreators") - require.NoError(t, err) - - defer stmt.Close() - rows, err := stmt.Query() - if err != sql.ErrNoRows { - require.NoError(t, err) - } - defer rows.Close() - counter := 0 - for rows.Next() { - counter++ - mc := ledgercore.ModifiedCreatable{} - var buf []byte - var asset basics.CreatableIndex - err := rows.Scan(&asset, &buf, &mc.Ctype) - require.NoError(t, err) - copy(mc.Creator[:], buf) - - require.NotNil(t, expectedDbImage[asset]) - require.Equal(t, expectedDbImage[asset].Creator, mc.Creator) - require.Equal(t, expectedDbImage[asset].Ctype, mc.Ctype) - require.True(t, expectedDbImage[asset].Created) - } - require.Equal(t, len(expectedDbImage), counter) -} - // randomCreatableSampling sets elements to delete from previous iteration // It consideres 10 elements in an iteration. 
// loop 0: returns the first 10 elements @@ -631,15 +597,9 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A return } -func benchmarkInitBalances(b testing.TB, numAccounts int, dbs db.Pair, proto protocol.ConsensusVersion) (updates map[basics.Address]basics.AccountData) { - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(b, err) - +func benchmarkInitBalances(b testing.TB, numAccounts int, tx store.TransactionScope, proto protocol.ConsensusVersion) (updates map[basics.Address]basics.AccountData) { updates = generateRandomTestingAccountBalances(numAccounts) - - store.AccountsInitTest(b, tx, updates, proto) - err = tx.Commit() - require.NoError(b, err) + tx.AccountsInitTest(b, updates, proto) return } @@ -651,20 +611,25 @@ func cleanupTestDb(dbs db.Pair, dbName string, inMemory bool) { } func benchmarkReadingAllBalances(b *testing.B, inMemory bool) { - dbs, fn := storetesting.DbOpenTest(b, inMemory) - storetesting.SetDbLogging(b, dbs) - defer cleanupTestDb(dbs, fn, inMemory) + dbs, _ := store.DbOpenTrackerTest(b, true) + dbs.SetLogger(logging.TestingLog(b)) + defer dbs.Close() + bal := make(map[basics.Address]basics.AccountData) - benchmarkInitBalances(b, b.N, dbs, protocol.ConsensusCurrentVersion) - tx, err := dbs.Rdb.Handle.Begin() + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + benchmarkInitBalances(b, b.N, tx, protocol.ConsensusCurrentVersion) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + b.ResetTimer() + // read all the balances in the database. + var err2 error + bal, err2 = arw.AccountsAllTest() + require.NoError(b, err2) + return nil + }) require.NoError(b, err) - - b.ResetTimer() - // read all the balances in the database. - bal, err2 := accountsAll(tx) - require.NoError(b, err2) - tx.Commit() - prevHash := crypto.Digest{} for _, accountBalance := range bal { encodedAccountBalance := protocol.Encode(&accountBalance) @@ -682,31 +647,35 @@ func BenchmarkReadingAllBalancesDisk(b *testing.B) { } func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) { - dbs, fn := storetesting.DbOpenTest(b, inMemory) - storetesting.SetDbLogging(b, dbs) - defer cleanupTestDb(dbs, fn, inMemory) - - accounts := benchmarkInitBalances(b, b.N, dbs, protocol.ConsensusCurrentVersion) - - qs, err := store.AccountsInitDbQueries(dbs.Rdb.Handle) - require.NoError(b, err) - defer qs.Close() + dbs, fn := store.DbOpenTrackerTest(b, true) + dbs.SetLogger(logging.TestingLog(b)) + defer dbs.CleanupTest(fn, inMemory) - // read all the balances in the database, shuffled - addrs := make([]basics.Address, len(accounts)) - pos := 0 - for addr := range accounts { - addrs[pos] = addr - pos++ - } - rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + accounts := benchmarkInitBalances(b, b.N, tx, protocol.ConsensusCurrentVersion) - // only measure the actual fetch time - b.ResetTimer() - for _, addr := range addrs { - _, err = qs.LookupAccount(addr) + ar, err := dbs.MakeAccountsReader() require.NoError(b, err) - } + defer ar.Close() + + // read all the balances in the database, shuffled + addrs := make([]basics.Address, len(accounts)) + pos := 0 + for addr := range accounts { + addrs[pos] = addr + pos++ + } + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + + // only measure the actual fetch time + b.ResetTimer() + for _, addr := range addrs { 
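+ // each iteration performs a single point lookup through the accounts reader;
+ // b.ResetTimer above excluded the DB population, so only fetch time is measured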
+ _, err = ar.LookupAccount(addr) + require.NoError(b, err) + } + return nil + }) + require.NoError(b, err) } func BenchmarkReadingRandomBalancesRAM(b *testing.B) { @@ -716,129 +685,6 @@ func BenchmarkReadingRandomBalancesRAM(b *testing.B) { func BenchmarkReadingRandomBalancesDisk(b *testing.B) { benchmarkReadingRandomBalances(b, false) } -func BenchmarkWritingRandomBalancesDisk(b *testing.B) { - totalStartupAccountsNumber := 5000000 - batchCount := 1000 - startupAcct := 5 - initDatabase := func() (*sql.Tx, func(), error) { - dbs, fn := storetesting.DbOpenTest(b, false) - storetesting.SetDbLogging(b, dbs) - cleanup := func() { - cleanupTestDb(dbs, fn, false) - } - - benchmarkInitBalances(b, startupAcct, dbs, protocol.ConsensusCurrentVersion) - dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false) - - // insert 1M accounts data, in batches of 1000 - for batch := 0; batch <= batchCount; batch++ { - fmt.Printf("\033[M\r %d / %d accounts written", totalStartupAccountsNumber*batch/batchCount, totalStartupAccountsNumber) - - tx, err := dbs.Wdb.Handle.Begin() - - require.NoError(b, err) - - acctsData := generateRandomTestingAccountBalances(totalStartupAccountsNumber / batchCount) - replaceStmt, err := tx.Prepare("INSERT INTO accountbase (address, normalizedonlinebalance, data) VALUES (?, ?, ?)") - require.NoError(b, err) - defer replaceStmt.Close() - for addr, acctData := range acctsData { - _, err = replaceStmt.Exec(addr[:], uint64(0), protocol.Encode(&acctData)) - require.NoError(b, err) - } - - err = tx.Commit() - require.NoError(b, err) - } - dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeFull, true) - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(b, err) - fmt.Printf("\033[M\r") - return tx, cleanup, err - } - - selectAccounts := func(tx *sql.Tx) (accountsAddress [][]byte, accountsRowID []int) { - accountsAddress = make([][]byte, 0, totalStartupAccountsNumber+startupAcct) - accountsRowID = make([]int, 0, totalStartupAccountsNumber+startupAcct) - - // read all the accounts to obtain the addrs. - rows, err := tx.Query("SELECT rowid, address FROM accountbase") - require.NoError(b, err) - defer rows.Close() - for rows.Next() { - var addrbuf []byte - var rowid int - err = rows.Scan(&rowid, &addrbuf) - require.NoError(b, err) - accountsAddress = append(accountsAddress, addrbuf) - accountsRowID = append(accountsRowID, rowid) - } - return - } - - tx, cleanup, err := initDatabase() - require.NoError(b, err) - defer cleanup() - - accountsAddress, accountsRowID := selectAccounts(tx) - - b.Run("ByAddr", func(b *testing.B) { - preparedUpdate, err := tx.Prepare("UPDATE accountbase SET data = ? WHERE address = ?") - require.NoError(b, err) - defer preparedUpdate.Close() - // updates accounts by address - randomAccountData := make([]byte, 200) - crypto.RandBytes(randomAccountData) - updateOrder := rand.Perm(len(accountsRowID)) - b.ResetTimer() - startTime := time.Now() - for n := 0; n < b.N; n++ { - for _, acctIdx := range updateOrder { - res, err := preparedUpdate.Exec(randomAccountData[:], accountsAddress[acctIdx]) - require.NoError(b, err) - rowsAffected, err := res.RowsAffected() - require.NoError(b, err) - require.Equal(b, int64(1), rowsAffected) - n++ - if n == b.N { - break - } - } - - } - b.ReportMetric(float64(int(time.Since(startTime))/b.N), "ns/acct_update") - }) - - b.Run("ByRowID", func(b *testing.B) { - preparedUpdate, err := tx.Prepare("UPDATE accountbase SET data = ? 
WHERE rowid = ?") - require.NoError(b, err) - defer preparedUpdate.Close() - // updates accounts by address - randomAccountData := make([]byte, 200) - crypto.RandBytes(randomAccountData) - updateOrder := rand.Perm(len(accountsRowID)) - b.ResetTimer() - startTime := time.Now() - for n := 0; n < b.N; n++ { - for _, acctIdx := range updateOrder { - res, err := preparedUpdate.Exec(randomAccountData[:], accountsRowID[acctIdx]) - require.NoError(b, err) - rowsAffected, err := res.RowsAffected() - require.NoError(b, err) - require.Equal(b, int64(1), rowsAffected) - n++ - if n == b.N { - break - } - } - } - b.ReportMetric(float64(int(time.Since(startTime))/b.N), "ns/acct_update") - - }) - - err = tx.Commit() - require.NoError(b, err) -} // TestAccountsDbQueriesCreateClose tests to see that we can create the accountsDbQueries and close it. // it also verify that double-closing it doesn't create an issue. @@ -924,9 +770,12 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo normalizedAccountBalances, err := prepareNormalizedBalancesV6(chunk.Balances, proto) require.NoError(b, err) b.StartTimer() - err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) - err = crw.WriteCatchpointStagingBalances(ctx, normalizedAccountBalances) + err = l.trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + cw, err := tx.MakeCatchpointWriter() + if err != nil { + return err + } + err = cw.WriteCatchpointStagingBalances(ctx, normalizedAccountBalances) return }) @@ -963,14 +812,19 @@ func TestLookupKeysByPrefix(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - dbs, fn := storetesting.DbOpenTest(t, false) - storetesting.SetDbLogging(t, dbs) - defer cleanupTestDb(dbs, fn, false) + dbs, fn := store.DbOpenTrackerTest(t, false) + dbs.SetLogger(logging.TestingLog(t)) + defer dbs.CleanupTest(fn, false) - // return account data, initialize DB tables from AccountsInitTest - _ = benchmarkInitBalances(t, 1, dbs, protocol.ConsensusCurrentVersion) + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + // return account data, initialize DB tables from AccountsInitTest + _ = benchmarkInitBalances(t, 1, tx, protocol.ConsensusCurrentVersion) - qs, err := store.AccountsInitDbQueries(dbs.Rdb.Handle) + return nil + }) + require.NoError(t, err) + + qs, err := dbs.MakeAccountsReader() require.NoError(t, err) defer qs.Close() @@ -997,23 +851,23 @@ func TestLookupKeysByPrefix(t *testing.T) { {key: []byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`), value: []byte("random Bluh")}, } - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(t, err) + err = dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + // writer is only for kvstore + writer, err := tx.MakeAccountsOptimizedWriter(true, true, true, true) + if err != nil { + return + } - // writer is only for kvstore - writer, err := store.MakeAccountsSQLWriter(tx, true, true, true, true) - if err != nil { - return - } + for i := 0; i < len(kvPairDBPrepareSet); i++ { + err := writer.UpsertKvPair(string(kvPairDBPrepareSet[i].key), kvPairDBPrepareSet[i].value) + require.NoError(t, err) + } - for i := 0; i < len(kvPairDBPrepareSet); i++ { - err := writer.UpsertKvPair(string(kvPairDBPrepareSet[i].key), kvPairDBPrepareSet[i].value) - require.NoError(t, err) - } + writer.Close() - err = tx.Commit() + return nil + }) require.NoError(t, err) - writer.Close() testCases := []struct { prefix []byte @@ -1144,14 +998,19 @@ 
func TestLookupKeysByPrefix(t *testing.T) { func BenchmarkLookupKeyByPrefix(b *testing.B) { // learn something from BenchmarkWritingRandomBalancesDisk - dbs, fn := storetesting.DbOpenTest(b, false) - storetesting.SetDbLogging(b, dbs) - defer cleanupTestDb(dbs, fn, false) + dbs, fn := store.DbOpenTrackerTest(b, false) + dbs.SetLogger(logging.TestingLog(b)) + defer dbs.CleanupTest(fn, false) - // return account data, initialize DB tables from AccountsInitTest - _ = benchmarkInitBalances(b, 1, dbs, protocol.ConsensusCurrentVersion) + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + // return account data, initialize DB tables from AccountsInitTest + _ = benchmarkInitBalances(b, 1, tx, protocol.ConsensusCurrentVersion) - qs, err := store.AccountsInitDbQueries(dbs.Rdb.Handle) + return nil + }) + require.NoError(b, err) + + qs, err := dbs.MakeAccountsReader() require.NoError(b, err) defer qs.Close() @@ -1164,33 +1023,33 @@ func BenchmarkLookupKeyByPrefix(b *testing.B) { // from 2^1 -> 2^2 -> ... -> 2^22 sized DB for bIndex := 0; bIndex < 22; bIndex++ { - // make writer to DB - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(b, err) + var prefix string - // writer is only for kvstore - writer, err := store.MakeAccountsSQLWriter(tx, true, true, true, true) - if err != nil { - return - } + // make writer to DB + err = dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + // writer is only for kvstore + writer, err := tx.MakeAccountsOptimizedWriter(true, true, true, true) + if err != nil { + return + } - var prefix string - // how to write to dbs a bunch of stuffs? - for i := 0; i < nextDBSize-currentDBSize; i++ { - crypto.RandBytes(nameBuffer) - crypto.RandBytes(valueBuffer) - appID := basics.AppIndex(crypto.RandUint64()) - boxKey := apps.MakeBoxKey(uint64(appID), string(nameBuffer)) - err = writer.UpsertKvPair(boxKey, valueBuffer) - require.NoError(b, err) + // how to write to dbs a bunch of stuffs? 
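+ // grow the kvstore from currentDBSize to nextDBSize entries: each entry is a
+ // box key built from a random app ID and a random name, and the first app's
+ // empty-name box key is kept as the prefix to query in the benchmark below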
+ for i := 0; i < nextDBSize-currentDBSize; i++ { + crypto.RandBytes(nameBuffer) + crypto.RandBytes(valueBuffer) + appID := basics.AppIndex(crypto.RandUint64()) + boxKey := apps.MakeBoxKey(uint64(appID), string(nameBuffer)) + err = writer.UpsertKvPair(boxKey, valueBuffer) + require.NoError(b, err) - if i == 0 { - prefix = apps.MakeBoxKey(uint64(appID), "") + if i == 0 { + prefix = apps.MakeBoxKey(uint64(appID), "") + } } - } - err = tx.Commit() + writer.Close() + return nil + }) require.NoError(b, err) - writer.Close() // benchmark the query against large DB, see if we have O(log N) speed currentDBSize = nextDBSize @@ -2219,312 +2078,317 @@ func TestAccountOnlineQueries(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - dbs, _ := storetesting.DbOpenTest(t, true) - storetesting.SetDbLogging(t, dbs) + dbs, _ := store.DbOpenTrackerTest(t, true) + dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(t, err) - defer tx.Rollback() + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { - arw := store.NewAccountsSQLReaderWriter(tx) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } - var accts map[basics.Address]basics.AccountData - store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) - totals, err := arw.AccountsTotals(context.Background(), false) - require.NoError(t, err) + var accts map[basics.Address]basics.AccountData + tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + totals, err := arw.AccountsTotals(context.Background(), false) + require.NoError(t, err) - var baseAccounts lruAccounts - var baseResources lruResources - var baseOnlineAccounts lruOnlineAccounts - baseAccounts.init(nil, 100, 80) - baseResources.init(nil, 100, 80) - baseOnlineAccounts.init(nil, 100, 80) - - addrA := basics.Address(crypto.Hash([]byte("A"))) - addrB := basics.Address(crypto.Hash([]byte("B"))) - addrC := basics.Address(crypto.Hash([]byte("C"))) - - var voteIDA crypto.OneTimeSignatureVerifier - crypto.RandBytes(voteIDA[:]) - var voteIDB crypto.OneTimeSignatureVerifier - crypto.RandBytes(voteIDB[:]) - var voteIDC crypto.OneTimeSignatureVerifier - crypto.RandBytes(voteIDC[:]) - - dataA1 := ledgercore.AccountData{ - AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, - Status: basics.Online, - }, - VotingData: ledgercore.VotingData{ - VoteID: voteIDA, - }, - } + var baseAccounts lruAccounts + var baseResources lruResources + var baseOnlineAccounts lruOnlineAccounts + baseAccounts.init(nil, 100, 80) + baseResources.init(nil, 100, 80) + baseOnlineAccounts.init(nil, 100, 80) - dataB1 := ledgercore.AccountData{ - AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, - Status: basics.Online, - }, - VotingData: ledgercore.VotingData{ - VoteID: voteIDB, - }, - } + addrA := basics.Address(crypto.Hash([]byte("A"))) + addrB := basics.Address(crypto.Hash([]byte("B"))) + addrC := basics.Address(crypto.Hash([]byte("C"))) - dataC3 := ledgercore.AccountData{ - AccountBaseData: ledgercore.AccountBaseData{ - MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, - Status: basics.Online, - }, - VotingData: ledgercore.VotingData{ - VoteID: voteIDC, - }, - } + var voteIDA crypto.OneTimeSignatureVerifier + crypto.RandBytes(voteIDA[:]) + var voteIDB crypto.OneTimeSignatureVerifier + crypto.RandBytes(voteIDB[:]) + var voteIDC crypto.OneTimeSignatureVerifier + crypto.RandBytes(voteIDC[:]) - 
dataA2 := dataA1 - dataA2.Status = basics.Offline - dataA2.VoteID = crypto.OneTimeSignatureVerifier{} + dataA1 := ledgercore.AccountData{ + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + Status: basics.Online, + }, + VotingData: ledgercore.VotingData{ + VoteID: voteIDA, + }, + } - dataB2 := dataB1 - dataB2.Status = basics.Offline - dataB2.VoteID = crypto.OneTimeSignatureVerifier{} + dataB1 := ledgercore.AccountData{ + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, + Status: basics.Online, + }, + VotingData: ledgercore.VotingData{ + VoteID: voteIDB, + }, + } + + dataC3 := ledgercore.AccountData{ + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, + Status: basics.Online, + }, + VotingData: ledgercore.VotingData{ + VoteID: voteIDC, + }, + } - delta1 := ledgercore.AccountDeltas{} - delta1.Upsert(addrA, dataA1) - delta1.Upsert(addrB, dataB1) + dataA2 := dataA1 + dataA2.Status = basics.Offline + dataA2.VoteID = crypto.OneTimeSignatureVerifier{} - delta2 := ledgercore.AccountDeltas{} - delta2.Upsert(addrA, dataA2) + dataB2 := dataB1 + dataB2.Status = basics.Offline + dataB2.VoteID = crypto.OneTimeSignatureVerifier{} - delta3 := ledgercore.AccountDeltas{} - delta3.Upsert(addrB, dataB2) - delta3.Upsert(addrC, dataC3) + delta1 := ledgercore.AccountDeltas{} + delta1.Upsert(addrA, dataA1) + delta1.Upsert(addrB, dataB1) - addRound := func(rnd basics.Round, updates ledgercore.StateDelta) { - totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates.Accts, 0, proto, accts, totals) - accts = applyPartialDeltas(accts, updates.Accts) + delta2 := ledgercore.AccountDeltas{} + delta2.Upsert(addrA, dataA2) - oldBase := rnd - 1 - updatesCnt := makeCompactAccountDeltas([]ledgercore.StateDelta{updates}, oldBase, true, baseAccounts) - updatesOnlineCnt := makeCompactOnlineAccountDeltas([]ledgercore.AccountDeltas{updates.Accts}, oldBase, baseOnlineAccounts) + delta3 := ledgercore.AccountDeltas{} + delta3.Upsert(addrB, dataB2) + delta3.Upsert(addrC, dataC3) - err = updatesCnt.accountsLoadOld(tx) + addRound := func(rnd basics.Round, updates ledgercore.StateDelta) { + totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates.Accts, 0, proto, accts, totals) + accts = applyPartialDeltas(accts, updates.Accts) + + oldBase := rnd - 1 + updatesCnt := makeCompactAccountDeltas([]ledgercore.StateDelta{updates}, oldBase, true, baseAccounts) + updatesOnlineCnt := makeCompactOnlineAccountDeltas([]ledgercore.AccountDeltas{updates.Accts}, oldBase, baseOnlineAccounts) + + err = updatesCnt.accountsLoadOld(tx) + require.NoError(t, err) + + err = updatesOnlineCnt.accountsLoadOld(tx) + require.NoError(t, err) + + err = arw.AccountsPutTotals(totals, false) + require.NoError(t, err) + updatedAccts, _, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd) + require.NoError(t, err) + require.Equal(t, updatesCnt.len(), len(updatedAccts)) + + updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, rnd) + require.NoError(t, err) + require.NotEmpty(t, updatedOnlineAccts) + + err = arw.UpdateAccountsRound(rnd) + require.NoError(t, err) + } + + addRound(1, ledgercore.StateDelta{Accts: delta1}) + addRound(2, ledgercore.StateDelta{Accts: delta2}) + addRound(3, ledgercore.StateDelta{Accts: delta3}) + + queries, err := tx.MakeOnlineAccountsOptimizedReader() require.NoError(t, err) - err = updatesOnlineCnt.accountsLoadOld(tx) + // check 
round 1 + rnd := basics.Round(1) + online, err := arw.AccountsOnlineTop(rnd, 0, 10, proto) + require.NoError(t, err) + require.Equal(t, 2, len(online)) + require.NotContains(t, online, addrC) + + onlineAcctA, ok := online[addrA] + require.True(t, ok) + require.NotNil(t, onlineAcctA) + require.Equal(t, addrA, onlineAcctA.Address) + require.Equal(t, dataA1.AccountBaseData.MicroAlgos, onlineAcctA.MicroAlgos) + + onlineAcctB, ok := online[addrB] + require.True(t, ok) + require.NotNil(t, onlineAcctB) + require.Equal(t, addrB, onlineAcctB.Address) + require.Equal(t, dataB1.AccountBaseData.MicroAlgos, onlineAcctB.MicroAlgos) + + paod, err := queries.LookupOnline(addrA, rnd) require.NoError(t, err) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrA, paod.Addr) + require.Equal(t, dataA1.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) + require.Equal(t, voteIDA, paod.AccountData.VoteID) - err = arw.AccountsPutTotals(totals, false) + paod, err = queries.LookupOnline(addrB, rnd) require.NoError(t, err) - updatedAccts, _, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrB, paod.Addr) + require.Equal(t, dataB1.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) + require.Equal(t, voteIDB, paod.AccountData.VoteID) + + paod, err = queries.LookupOnline(addrC, rnd) require.NoError(t, err) - require.Equal(t, updatesCnt.len(), len(updatedAccts)) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrC, paod.Addr) + require.Empty(t, paod.AccountData) - updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, rnd) + // check round 2 + rnd = basics.Round(2) + online, err = arw.AccountsOnlineTop(rnd, 0, 10, proto) require.NoError(t, err) - require.NotEmpty(t, updatedOnlineAccts) + require.Equal(t, 1, len(online)) + require.NotContains(t, online, addrA) + require.NotContains(t, online, addrC) - err = arw.UpdateAccountsRound(rnd) + onlineAcctB, ok = online[addrB] + require.True(t, ok) + require.NotNil(t, onlineAcctB) + require.Equal(t, addrB, onlineAcctB.Address) + require.Equal(t, dataB1.AccountBaseData.MicroAlgos, onlineAcctB.MicroAlgos) + + paod, err = queries.LookupOnline(addrA, rnd) require.NoError(t, err) - } + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrA, paod.Addr) + require.Empty(t, paod.AccountData) - addRound(1, ledgercore.StateDelta{Accts: delta1}) - addRound(2, ledgercore.StateDelta{Accts: delta2}) - addRound(3, ledgercore.StateDelta{Accts: delta3}) + paod, err = queries.LookupOnline(addrB, rnd) + require.NoError(t, err) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrB, paod.Addr) + require.Equal(t, dataB1.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) + require.Equal(t, voteIDB, paod.AccountData.VoteID) - queries, err := store.OnlineAccountsInitDbQueries(tx) - require.NoError(t, err) + paod, err = queries.LookupOnline(addrC, rnd) + require.NoError(t, err) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrC, paod.Addr) + require.Empty(t, paod.AccountData) - // check round 1 - rnd := basics.Round(1) - online, err := arw.AccountsOnlineTop(rnd, 0, 10, proto) - require.NoError(t, err) - require.Equal(t, 2, len(online)) - require.NotContains(t, online, addrC) - - onlineAcctA, ok := online[addrA] - require.True(t, ok) - require.NotNil(t, onlineAcctA) - require.Equal(t, addrA, onlineAcctA.Address) - require.Equal(t, 
dataA1.AccountBaseData.MicroAlgos, onlineAcctA.MicroAlgos) - - onlineAcctB, ok := online[addrB] - require.True(t, ok) - require.NotNil(t, onlineAcctB) - require.Equal(t, addrB, onlineAcctB.Address) - require.Equal(t, dataB1.AccountBaseData.MicroAlgos, onlineAcctB.MicroAlgos) - - paod, err := queries.LookupOnline(addrA, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrA, paod.Addr) - require.Equal(t, dataA1.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) - require.Equal(t, voteIDA, paod.AccountData.VoteID) + // check round 3 + rnd = basics.Round(3) + online, err = arw.AccountsOnlineTop(rnd, 0, 10, proto) + require.NoError(t, err) + require.Equal(t, 1, len(online)) + require.NotContains(t, online, addrA) + require.NotContains(t, online, addrB) - paod, err = queries.LookupOnline(addrB, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrB, paod.Addr) - require.Equal(t, dataB1.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) - require.Equal(t, voteIDB, paod.AccountData.VoteID) + onlineAcctC, ok := online[addrC] + require.True(t, ok) + require.NotNil(t, onlineAcctC) + require.Equal(t, addrC, onlineAcctC.Address) + require.Equal(t, dataC3.AccountBaseData.MicroAlgos, onlineAcctC.MicroAlgos) - paod, err = queries.LookupOnline(addrC, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrC, paod.Addr) - require.Empty(t, paod.AccountData) + paod, err = queries.LookupOnline(addrA, rnd) + require.NoError(t, err) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrA, paod.Addr) + require.Empty(t, paod.AccountData) - // check round 2 - rnd = basics.Round(2) - online, err = arw.AccountsOnlineTop(rnd, 0, 10, proto) - require.NoError(t, err) - require.Equal(t, 1, len(online)) - require.NotContains(t, online, addrA) - require.NotContains(t, online, addrC) + paod, err = queries.LookupOnline(addrB, rnd) + require.NoError(t, err) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrB, paod.Addr) + require.Empty(t, paod.AccountData) - onlineAcctB, ok = online[addrB] - require.True(t, ok) - require.NotNil(t, onlineAcctB) - require.Equal(t, addrB, onlineAcctB.Address) - require.Equal(t, dataB1.AccountBaseData.MicroAlgos, onlineAcctB.MicroAlgos) + paod, err = queries.LookupOnline(addrC, rnd) + require.NoError(t, err) + require.Equal(t, basics.Round(3), paod.Round) + require.Equal(t, addrC, paod.Addr) + require.Equal(t, dataC3.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) + require.Equal(t, voteIDC, paod.AccountData.VoteID) - paod, err = queries.LookupOnline(addrA, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrA, paod.Addr) - require.Empty(t, paod.AccountData) + paods, err := arw.OnlineAccountsAll(0) + require.NoError(t, err) + require.Equal(t, 5, len(paods)) + + // expect: + // + // addr | rnd | status + // -----|-----|-------- + // B | 1 | 1 + // B | 3 | 0 + // C | 3 | 1 + // A | 1 | 1 + // A | 2 | 0 + + checkAddrB := func() { + require.Equal(t, int64(2), paods[0].Rowid) + require.Equal(t, basics.Round(1), paods[0].UpdRound) + require.Equal(t, addrB, paods[0].Addr) + require.Equal(t, int64(4), paods[1].Rowid) + require.Equal(t, basics.Round(3), paods[1].UpdRound) + require.Equal(t, addrB, paods[1].Addr) + } - paod, err = queries.LookupOnline(addrB, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, 
addrB, paod.Addr) - require.Equal(t, dataB1.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) - require.Equal(t, voteIDB, paod.AccountData.VoteID) + checkAddrC := func() { + require.Equal(t, int64(5), paods[2].Rowid) + require.Equal(t, basics.Round(3), paods[2].UpdRound) + require.Equal(t, addrC, paods[2].Addr) + } - paod, err = queries.LookupOnline(addrC, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrC, paod.Addr) - require.Empty(t, paod.AccountData) + checkAddrA := func() { + require.Equal(t, int64(1), paods[3].Rowid) + require.Equal(t, basics.Round(1), paods[3].UpdRound) + require.Equal(t, addrA, paods[3].Addr) + require.Equal(t, int64(3), paods[4].Rowid) + require.Equal(t, basics.Round(2), paods[4].UpdRound) + require.Equal(t, addrA, paods[4].Addr) + } - // check round 3 - rnd = basics.Round(3) - online, err = arw.AccountsOnlineTop(rnd, 0, 10, proto) - require.NoError(t, err) - require.Equal(t, 1, len(online)) - require.NotContains(t, online, addrA) - require.NotContains(t, online, addrB) + checkAddrB() + checkAddrC() + checkAddrA() - onlineAcctC, ok := online[addrC] - require.True(t, ok) - require.NotNil(t, onlineAcctC) - require.Equal(t, addrC, onlineAcctC.Address) - require.Equal(t, dataC3.AccountBaseData.MicroAlgos, onlineAcctC.MicroAlgos) + paods, err = arw.OnlineAccountsAll(3) + require.NoError(t, err) + require.Equal(t, 5, len(paods)) + checkAddrB() + checkAddrC() + checkAddrA() - paod, err = queries.LookupOnline(addrA, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrA, paod.Addr) - require.Empty(t, paod.AccountData) + paods, err = arw.OnlineAccountsAll(2) + require.NoError(t, err) + require.Equal(t, 3, len(paods)) + checkAddrB() + checkAddrC() - paod, err = queries.LookupOnline(addrB, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrB, paod.Addr) - require.Empty(t, paod.AccountData) + paods, err = arw.OnlineAccountsAll(1) + require.NoError(t, err) + require.Equal(t, 2, len(paods)) + checkAddrB() - paod, err = queries.LookupOnline(addrC, rnd) - require.NoError(t, err) - require.Equal(t, basics.Round(3), paod.Round) - require.Equal(t, addrC, paod.Addr) - require.Equal(t, dataC3.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos) - require.Equal(t, voteIDC, paod.AccountData.VoteID) + paods, rnd, err = queries.LookupOnlineHistory(addrA) + require.NoError(t, err) + require.Equal(t, basics.Round(3), rnd) + require.Equal(t, 2, len(paods)) + require.Equal(t, int64(1), paods[0].Rowid) + require.Equal(t, basics.Round(1), paods[0].UpdRound) + require.Equal(t, int64(3), paods[1].Rowid) + require.Equal(t, basics.Round(2), paods[1].UpdRound) - paods, err := arw.OnlineAccountsAll(0) - require.NoError(t, err) - require.Equal(t, 5, len(paods)) - - // expect: - // - // addr | rnd | status - // -----|-----|-------- - // B | 1 | 1 - // B | 3 | 0 - // C | 3 | 1 - // A | 1 | 1 - // A | 2 | 0 - - checkAddrB := func() { + paods, rnd, err = queries.LookupOnlineHistory(addrB) + require.NoError(t, err) + require.Equal(t, basics.Round(3), rnd) + require.Equal(t, 2, len(paods)) require.Equal(t, int64(2), paods[0].Rowid) require.Equal(t, basics.Round(1), paods[0].UpdRound) - require.Equal(t, addrB, paods[0].Addr) require.Equal(t, int64(4), paods[1].Rowid) require.Equal(t, basics.Round(3), paods[1].UpdRound) - require.Equal(t, addrB, paods[1].Addr) - } - - checkAddrC := func() { - require.Equal(t, int64(5), paods[2].Rowid) - 
require.Equal(t, basics.Round(3), paods[2].UpdRound) - require.Equal(t, addrC, paods[2].Addr) - } - - checkAddrA := func() { - require.Equal(t, int64(1), paods[3].Rowid) - require.Equal(t, basics.Round(1), paods[3].UpdRound) - require.Equal(t, addrA, paods[3].Addr) - require.Equal(t, int64(3), paods[4].Rowid) - require.Equal(t, basics.Round(2), paods[4].UpdRound) - require.Equal(t, addrA, paods[4].Addr) - } - checkAddrB() - checkAddrC() - checkAddrA() - - paods, err = arw.OnlineAccountsAll(3) - require.NoError(t, err) - require.Equal(t, 5, len(paods)) - checkAddrB() - checkAddrC() - checkAddrA() - - paods, err = arw.OnlineAccountsAll(2) - require.NoError(t, err) - require.Equal(t, 3, len(paods)) - checkAddrB() - checkAddrC() - - paods, err = arw.OnlineAccountsAll(1) - require.NoError(t, err) - require.Equal(t, 2, len(paods)) - checkAddrB() + paods, rnd, err = queries.LookupOnlineHistory(addrC) + require.NoError(t, err) + require.Equal(t, basics.Round(3), rnd) + require.Equal(t, 1, len(paods)) + require.Equal(t, int64(5), paods[0].Rowid) + require.Equal(t, basics.Round(3), paods[0].UpdRound) - paods, rnd, err = queries.LookupOnlineHistory(addrA) - require.NoError(t, err) - require.Equal(t, basics.Round(3), rnd) - require.Equal(t, 2, len(paods)) - require.Equal(t, int64(1), paods[0].Rowid) - require.Equal(t, basics.Round(1), paods[0].UpdRound) - require.Equal(t, int64(3), paods[1].Rowid) - require.Equal(t, basics.Round(2), paods[1].UpdRound) - - paods, rnd, err = queries.LookupOnlineHistory(addrB) - require.NoError(t, err) - require.Equal(t, basics.Round(3), rnd) - require.Equal(t, 2, len(paods)) - require.Equal(t, int64(2), paods[0].Rowid) - require.Equal(t, basics.Round(1), paods[0].UpdRound) - require.Equal(t, int64(4), paods[1].Rowid) - require.Equal(t, basics.Round(3), paods[1].UpdRound) - - paods, rnd, err = queries.LookupOnlineHistory(addrC) + return nil + }) require.NoError(t, err) - require.Equal(t, basics.Round(3), rnd) - require.Equal(t, 1, len(paods)) - require.Equal(t, int64(5), paods[0].Rowid) - require.Equal(t, basics.Round(3), paods[0].UpdRound) } type mockOnlineAccountsWriter struct { diff --git a/ledger/acctonline.go b/ledger/acctonline.go index ebc6174187..6a3d8fb390 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -19,7 +19,6 @@ package ledger import ( "container/heap" "context" - "database/sql" "errors" "fmt" "sort" @@ -35,7 +34,6 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/logging" - "github.com/algorand/go-algorand/util/db" "github.com/algorand/go-algorand/util/metrics" ) @@ -155,11 +153,15 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou ao.dbs = l.trackerDB() ao.log = l.trackerLog() - err = ao.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) error { - arw := store.NewAccountsSQLReaderWriter(tx) + err = ao.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) error { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + var err0 error var endRound basics.Round - ao.onlineRoundParamsData, endRound, err0 = arw.AccountsOnlineRoundParams() + ao.onlineRoundParamsData, endRound, err0 = ar.AccountsOnlineRoundParams() if err0 != nil { return err0 } @@ -167,7 +169,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou return fmt.Errorf("last onlineroundparams round %d does not match dbround %d", endRound, ao.cachedDBRoundOnline) } - onlineAccounts, err0 := 
arw.OnlineAccountsAll(onlineAccountsCacheMaxSize) + onlineAccounts, err0 := ar.OnlineAccountsAll(onlineAccountsCacheMaxSize) if err0 != nil { return err0 } @@ -179,7 +181,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou return } - ao.accountsq, err = ao.dbs.CreateOnlineAccountsReader() + ao.accountsq, err = ao.dbs.MakeOnlineAccountsReader() if err != nil { return } @@ -407,11 +409,11 @@ func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error { // commitRound closure is called within the same transaction for all trackers // it receives current offset and dbRound -func (ao *onlineAccounts) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) { +func (ao *onlineAccounts) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) (err error) { offset := dcc.offset dbRound := dcc.oldBase - _, err = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset))) + _, err = tx.ResetTransactionWarnDeadline(ctx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset))) if err != nil { return err } @@ -428,7 +430,10 @@ func (ao *onlineAccounts) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe return err } - arw := store.NewAccountsSQLReaderWriter(tx) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } err = arw.OnlineAccountsDelete(dcc.onlineAccountsForgetBefore) if err != nil { @@ -823,13 +828,17 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou var accts map[basics.Address]*ledgercore.OnlineAccount start := time.Now() ledgerAccountsonlinetopCount.Inc(nil) - err = ao.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - accts, err = arw.AccountsOnlineTop(rnd, batchOffset, batchSize, genesisProto) + err = ao.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + accts, err = ar.AccountsOnlineTop(rnd, batchOffset, batchSize, genesisProto) if err != nil { return } - dbRound, err = arw.AccountsRound() + dbRound, err = ar.AccountsRound() return }) ledgerAccountsonlinetopMicros.AddMicrosecondsSince(start, nil) diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index 14d92fb67f..03636f1129 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -78,8 +78,12 @@ func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracke err := lt.prepareCommit(dcc) require.NoError(t, err) } - err := ml.trackers.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err := ml.trackers.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + for _, lt := range ml.trackers.trackers { err0 := lt.commitRound(ctx, tx, dcc) if err0 != nil { @@ -804,9 +808,13 @@ func TestAcctOnlineRoundParamsCache(t *testing.T) { var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData var endRound basics.Round - err := ao.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - dbOnlineRoundParams, endRound, err = arw.AccountsOnlineRoundParams() + err := ao.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if 
err != nil { + return err + } + + dbOnlineRoundParams, endRound, err = ar.AccountsOnlineRoundParams() return err }) require.NoError(t, err) @@ -1289,9 +1297,13 @@ func TestAcctOnlineVotersLongerHistory(t *testing.T) { // DB has all the required history tho var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData var endRound basics.Round - err = oa.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - dbOnlineRoundParams, endRound, err = arw.AccountsOnlineRoundParams() + err = oa.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + dbOnlineRoundParams, endRound, err = ar.AccountsOnlineRoundParams() return err }) @@ -1599,100 +1611,6 @@ func TestAcctOnlineTopBetweenCommitAndPostCommit(t *testing.T) { } } -func TestAcctOnlineTopDBBehindMemRound(t *testing.T) { - partitiontest.PartitionTest(t) - a := require.New(t) - - const numAccts = 20 - allAccts := make([]basics.BalanceRecord, numAccts) - genesisAccts := []map[basics.Address]basics.AccountData{{}} - genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts) - - for i := 0; i < numAccts; i++ { - allAccts[i] = basics.BalanceRecord{ - Addr: ledgertesting.RandomAddress(), - AccountData: basics.AccountData{ - MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)}, - Status: basics.Online, - VoteLastValid: 1000, - VoteFirstValid: 0, - RewardsBase: 0}, - } - genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData - } - addSinkAndPoolAccounts(genesisAccts) - - ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts) - defer ml.Close() - - stallingTracker := &blockingTracker{ - postCommitUnlockedEntryLock: make(chan struct{}), - postCommitUnlockedReleaseLock: make(chan struct{}), - postCommitEntryLock: make(chan struct{}), - postCommitReleaseLock: make(chan struct{}), - alwaysLock: false, - shouldLockPostCommit: false, - } - - conf := config.GetDefaultLocal() - au, oa := newAcctUpdates(t, ml, conf) - defer oa.close() - ml.trackers.trackers = append([]ledgerTracker{stallingTracker}, ml.trackers.trackers...) - - proto := config.Consensus[protocol.ConsensusCurrentVersion] - top, _, err := oa.TopOnlineAccounts(0, 0, 5, &proto, 0) - a.NoError(err) - compareTopAccounts(a, top, allAccts) - - _, totals, err := au.LatestTotals() - require.NoError(t, err) - - // apply some rounds so the db round will make progress (not be 0) - i.e since the max lookback in memory is 8. deltas - // will get committed at round 9 - i := 1 - for ; i < 10; i++ { - var updates ledgercore.AccountDeltas - updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{ - AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}}) - newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa) - } - - stallingTracker.shouldLockPostCommit = true - - updateAccountsRoutine := func() { - var updates ledgercore.AccountDeltas - updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{ - AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}}) - newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa) - } - - // This go routine will trigger a commit producer. we added a special blockingTracker that will case our - // onlineAccoutsTracker to be "stuck" between commit and Post commit . - // thus, when we call onlineTop - it should wait for the post commit to happen. 
- // in a different go routine we will wait 2 sec and release the commit. - go updateAccountsRoutine() - - select { - case <-stallingTracker.postCommitEntryLock: - go func() { - time.Sleep(2 * time.Second) - // tweak the database to move backwards - err = oa.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - _, err = tx.Exec("update acctrounds set rnd = 1 WHERE id='acctbase' ") - return - }) - stallingTracker.postCommitReleaseLock <- struct{}{} - }() - - _, _, err = oa.TopOnlineAccounts(2, 2, 5, &proto, 0) - a.Error(err) - a.Contains(err.Error(), "is behind in-memory round") - - case <-time.After(1 * time.Minute): - a.FailNow("timedout while waiting for post commit") - } -} - func TestAcctOnlineTop_ChangeOnlineStake(t *testing.T) { partitiontest.PartitionTest(t) a := require.New(t) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index d3caf1ff60..8e08571667 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "errors" "fmt" "io" @@ -40,7 +39,6 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/util/db" "github.com/algorand/go-algorand/util/metrics" ) @@ -933,9 +931,13 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou start := time.Now() ledgerAccountsinitCount.Inc(nil) - err = au.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) error { - arw := store.NewAccountsSQLReaderWriter(tx) - totals, err0 := arw.AccountsTotals(ctx, false) + err = au.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) error { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + totals, err0 := ar.AccountsTotals(ctx, false) if err0 != nil { return err0 } @@ -949,7 +951,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou return } - au.accountsq, err = au.dbs.CreateAccountsReader() + au.accountsq, err = au.dbs.MakeAccountsReader() if err != nil { return } @@ -1667,7 +1669,7 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error { // commitRound is called within the same transaction for all trackers it // receives current offset and dbRound -func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) { +func (au *accountUpdates) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) (err error) { offset := dcc.offset dbRound := dcc.oldBase @@ -1679,7 +1681,7 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe } }() - _, err = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset))) + _, err = tx.ResetTransactionWarnDeadline(ctx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset))) if err != nil { return err } @@ -1687,7 +1689,6 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe if dcc.updateStats { dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) } - err = dcc.compactAccountDeltas.accountsLoadOld(tx) if err != nil { return err @@ -1707,7 +1708,10 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.OldAccountPreloadDuration } - arw := store.NewAccountsSQLReaderWriter(tx) + arw, err := 
tx.MakeAccountsReaderWriter() + if err != nil { + return err + } err = arw.AccountsPutTotals(dcc.roundTotals, false) if err != nil { diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 66d14f5943..10a49ef59e 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -19,7 +19,6 @@ package ledger import ( "bytes" "context" - "database/sql" "errors" "fmt" "os" @@ -39,7 +38,6 @@ import ( "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/ledger/store" - storetesting "github.com/algorand/go-algorand/ledger/store/testing" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -263,9 +261,13 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address return } - err = au.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) error { + err = au.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { var err0 error - bals, err0 = accountsAll(tx) + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + bals, err0 = arw.AccountsAllTest() return err0 }) if err != nil { @@ -565,9 +567,13 @@ func testAcctUpdates(t *testing.T, conf config.Local) { // check the account totals. var dbRound basics.Round - err := ml.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - dbRound, err = arw.AccountsRound() + err := ml.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + dbRound, err = ar.AccountsRound() return }) require.NoError(t, err) @@ -579,9 +585,13 @@ func testAcctUpdates(t *testing.T, conf config.Local) { expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{}) var actualTotals ledgercore.AccountTotals - err = ml.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - actualTotals, err = arw.AccountsTotals(ctx, false) + err = ml.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + actualTotals, err = ar.AccountsTotals(ctx, false) return }) require.NoError(t, err) @@ -986,62 +996,64 @@ func TestListCreatables(t *testing.T) { numElementsPerSegement := 25 // set up the database - dbs, _ := storetesting.DbOpenTest(t, true) - storetesting.SetDbLogging(t, dbs) + dbs, _ := store.DbOpenTrackerTest(t, true) + dblogger := logging.TestingLog(t) + dbs.SetLogger(dblogger) defer dbs.Close() - tx, err := dbs.Wdb.Handle.Begin() - require.NoError(t, err) - defer tx.Rollback() + err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + proto := config.Consensus[protocol.ConsensusCurrentVersion] - proto := config.Consensus[protocol.ConsensusCurrentVersion] + accts := make(map[basics.Address]basics.AccountData) + _ = tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + require.NoError(t, err) - accts := make(map[basics.Address]basics.AccountData) - _ = store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) - require.NoError(t, err) + au := &accountUpdates{} + au.accountsq, err = tx.MakeAccountsOptimizedReader() + require.NoError(t, err) - au := &accountUpdates{} - au.accountsq, err = 
store.AccountsInitDbQueries(tx) - require.NoError(t, err) + // ******* All results are obtained from the cache. Empty database ******* + // ******* No deletes ******* + // get random data. Initial batch, no deletes + ctbsList, randomCtbs := randomCreatables(numElementsPerSegement) + expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) + ctbsWithDeletes := randomCreatableSampling(1, ctbsList, randomCtbs, + expectedDbImage, numElementsPerSegement) + // set the cache + au.creatables = ctbsWithDeletes + listAndCompareComb(t, au, expectedDbImage) + + // ******* All results are obtained from the database. Empty cache ******* + // ******* No deletes ******* + // sync with the database + var updates compactAccountDeltas + var resUpdates compactResourcesDeltas + _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1)) + require.NoError(t, err) + // nothing left in cache + au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) + listAndCompareComb(t, au, expectedDbImage) + + // ******* Results are obtained from the database and from the cache ******* + // ******* No deletes in the database. ******* + // ******* Data in the database deleted in the cache ******* + au.creatables = randomCreatableSampling(2, ctbsList, randomCtbs, + expectedDbImage, numElementsPerSegement) + listAndCompareComb(t, au, expectedDbImage) + + // ******* Results are obtained from the database and from the cache ******* + // ******* Deletes are in the database and in the cache ******* + // sync with the database. This has deletes synced to the database. + _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1)) + require.NoError(t, err) + // get new creatables in the cache. There will be deleted in the cache from the previous batch. + au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs, + expectedDbImage, numElementsPerSegement) + listAndCompareComb(t, au, expectedDbImage) - // ******* All results are obtained from the cache. Empty database ******* - // ******* No deletes ******* - // get random data. Initial batch, no deletes - ctbsList, randomCtbs := randomCreatables(numElementsPerSegement) - expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) - ctbsWithDeletes := randomCreatableSampling(1, ctbsList, randomCtbs, - expectedDbImage, numElementsPerSegement) - // set the cache - au.creatables = ctbsWithDeletes - listAndCompareComb(t, au, expectedDbImage) - - // ******* All results are obtained from the database. Empty cache ******* - // ******* No deletes ******* - // sync with the database - var updates compactAccountDeltas - var resUpdates compactResourcesDeltas - _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1)) - require.NoError(t, err) - // nothing left in cache - au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) - listAndCompareComb(t, au, expectedDbImage) - - // ******* Results are obtained from the database and from the cache ******* - // ******* No deletes in the database. ******* - // ******* Data in the database deleted in the cache ******* - au.creatables = randomCreatableSampling(2, ctbsList, randomCtbs, - expectedDbImage, numElementsPerSegement) - listAndCompareComb(t, au, expectedDbImage) - - // ******* Results are obtained from the database and from the cache ******* - // ******* Deletes are in the database and in the cache ******* - // sync with the database. 
This has deletes synced to the database. - _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1)) + return + }) require.NoError(t, err) - // get new creatables in the cache. There will be deleted in the cache from the previous batch. - au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs, - expectedDbImage, numElementsPerSegement) - listAndCompareComb(t, au, expectedDbImage) } func TestBoxNamesByAppIDs(t *testing.T) { @@ -1345,51 +1357,6 @@ func TestKVCache(t *testing.T) { } } -func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - - rows, err := tx.Query("SELECT rowid, address, data FROM accountbase") - if err != nil { - return - } - defer rows.Close() - - bals = make(map[basics.Address]basics.AccountData) - for rows.Next() { - var addrbuf []byte - var buf []byte - var rowid sql.NullInt64 - err = rows.Scan(&rowid, &addrbuf, &buf) - if err != nil { - return - } - - var data store.BaseAccountData - err = protocol.Decode(buf, &data) - if err != nil { - return - } - - var addr basics.Address - if len(addrbuf) != len(addr) { - err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr)) - return - } - copy(addr[:], addrbuf) - - var ad basics.AccountData - ad, err = arw.LoadFullAccount(context.Background(), "resources", addr, rowid.Int64, data) - if err != nil { - return - } - - bals[addr] = ad - } - - err = rows.Err() - return -} - func BenchmarkLargeMerkleTrieRebuild(b *testing.B) { proto := config.Consensus[protocol.ConsensusCurrentVersion] @@ -1414,16 +1381,20 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) { i++ } - err := ml.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ml.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1)) return }) require.NoError(b, err) } - err := ml.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - return arw.UpdateAccountsHashRound(ctx, 1) + err := ml.dbs.Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + aw, err := tx.MakeAccountsWriter() + if err != nil { + return err + } + + return aw.UpdateAccountsHashRound(ctx, 1) }) require.NoError(b, err) @@ -2187,8 +2158,12 @@ func TestAcctUpdatesResources(t *testing.T) { err := au.prepareCommit(dcc) require.NoError(t, err) - err = ml.trackers.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + err = au.commitRound(ctx, tx, dcc) if err != nil { return err @@ -2470,8 +2445,12 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe err := au.prepareCommit(dcc) require.NoError(t, err) - err = ml.trackers.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + err = au.commitRound(ctx, tx, dcc) if err != nil { return err diff --git a/ledger/bulletin.go b/ledger/bulletin.go index 5968c7f4f0..19baf2820b 100644 --- 
a/ledger/bulletin.go +++ b/ledger/bulletin.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "sync/atomic" "github.com/algorand/go-deadlock" @@ -26,6 +25,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store" ) // notifier is a struct that encapsulates a single-shot channel; it will only be signaled once. @@ -116,7 +116,7 @@ func (b *bulletin) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (b *bulletin) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error { +func (b *bulletin) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index ecb0301b66..1f796efde5 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -45,7 +45,6 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/util/db" ) const ( @@ -211,9 +210,13 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic } } - return ct.dbs.Batch(func(ctx context.Context, tx *sql.Tx) error { - crw := store.NewCatchpointSQLReaderWriter(tx) - err := ct.recordFirstStageInfo(ctx, tx, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen) + return ct.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + + err = ct.recordFirstStageInfo(ctx, tx, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen) if err != nil { return err } @@ -313,7 +316,7 @@ func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round) error { func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Round) (err error) { ct.log = l.trackerLog() ct.dbs = l.trackerDB() - ct.catchpointStore, err = l.trackerDB().CreateCatchpointReaderWriter() + ct.catchpointStore, err = l.trackerDB().MakeCatchpointReaderWriter() if err != nil { return err } @@ -324,14 +327,14 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Rou ct.catchpointDataSlowWriting = make(chan struct{}, 1) close(ct.catchpointDataSlowWriting) - err = ct.dbs.Batch(func(ctx context.Context, tx *sql.Tx) error { + err = ct.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { return ct.initializeHashes(ctx, tx, dbRound) }) if err != nil { return err } - ct.accountsq, err = ct.dbs.CreateAccountsReader() + ct.accountsq, err = ct.dbs.MakeAccountsReader() if err != nil { return } @@ -479,7 +482,7 @@ func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) { +func (ct *catchpointTracker) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) (err error) { treeTargetRound := basics.Round(0) offset := dcc.offset dbRound := dcc.oldBase @@ -490,12 +493,18 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d } }() - crw := store.NewCatchpointSQLReaderWriter(tx) - arw := store.NewAccountsSQLReaderWriter(tx) + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + arw, err := 
tx.MakeAccountsReaderWriter() + if err != nil { + return err + } if ct.catchpointEnabled() { var mc store.MerkleCommitter - mc, err = store.MakeMerkleCommitter(tx, false) + mc, err = tx.MakeMerkleCommitter(false) if err != nil { return } @@ -768,8 +777,12 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound return err } - err = ct.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) + err = ct.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + err = ct.recordCatchpointFile(ctx, crw, round, relCatchpointFilePath, fileInfo.Size()) if err != nil { return err @@ -1088,7 +1101,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account var catchpointWriter *catchpointWriter start := time.Now() ledgerGeneratecatchpointCount.Inc(nil) - err = ct.dbs.BatchContext(ctx, func(dbCtx context.Context, tx *sql.Tx) (err error) { + err = ct.dbs.TransactionContext(ctx, func(dbCtx context.Context, tx store.TransactionScope) (err error) { catchpointWriter, err = makeCatchpointWriter(dbCtx, catchpointDataFilePath, tx, ResourcesPerCatchpointFileChunk) if err != nil { return @@ -1104,7 +1117,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account // we just wrote some data, but there is more to be written. // go to sleep for while. // before going to sleep, extend the transaction timeout so that we won't get warnings: - _, err0 := db.ResetTransactionWarnDeadline(dbCtx, tx, time.Now().Add(1*time.Second)) + _, err0 := tx.ResetTransactionWarnDeadline(dbCtx, time.Now().Add(1*time.Second)) if err0 != nil { ct.log.Warnf("catchpointTracker: generateCatchpoint: failed to reset transaction warn deadline : %v", err0) } @@ -1164,15 +1177,19 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil } -func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.Tx, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error { - arw := store.NewAccountsSQLReaderWriter(tx) +func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx store.TransactionScope, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + accountTotals, err := arw.AccountsTotals(ctx, false) if err != nil { return err } { - mc, err := store.MakeMerkleCommitter(tx, false) + mc, err := tx.MakeMerkleCommitter(false) if err != nil { return err } @@ -1191,7 +1208,11 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.T return err } - crw := store.NewCatchpointSQLReaderWriter(tx) + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + info := store.CatchpointFirstStageInfo{ Totals: accountTotals, TotalAccounts: totalAccounts, @@ -1254,9 +1275,13 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS ledgerGetcatchpointCount.Inc(nil) // TODO: we need to generalize this, check @cce PoC PR, he has something // somewhat broken for some KVs.. 
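// Illustrative sketch, not part of this patch: the migrated API hands out
// catchpoint readers at two levels. Inside a read-only snapshot the
// SnapshotScope makes the reader (as the hunk below does); outside any
// explicit transaction, ct.dbs.MakeCatchpointReaderWriter() plays the same
// role. `round` is a caller-supplied basics.Round, and the int64 file size
// return is an assumption about GetCatchpoint's signature.
func getCatchpointFileInfo(ct *catchpointTracker, round basics.Round) (name string, size int64, err error) {
	err = ct.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) error {
		cr, err0 := tx.MakeCatchpointReader()
		if err0 != nil {
			return err0
		}
		name, _, size, err0 = cr.GetCatchpoint(ctx, round)
		return err0
	})
	return
}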
- err := ct.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) - dbFileName, _, fileSize, err = crw.GetCatchpoint(ctx, round) + err := ct.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + cr, err := tx.MakeCatchpointReader() + if err != nil { + return err + } + + dbFileName, _, fileSize, err = cr.GetCatchpoint(ctx, round) return }) ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil) @@ -1274,7 +1299,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS if os.IsNotExist(err) { // the database told us that we have this file.. but we couldn't find it. // delete it from the database. - crw, err := ct.dbs.CreateCatchpointReaderWriter() + crw, err := ct.dbs.MakeCatchpointReaderWriter() if err != nil { return nil, err } @@ -1302,7 +1327,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS // we couldn't get the stat, so just return with the file. return &readCloseSizer{ReadCloser: file, size: -1}, nil } - crw, err := ct.dbs.CreateCatchpointReaderWriter() + crw, err := ct.dbs.MakeCatchpointReaderWriter() if err != nil { return nil, err } @@ -1321,8 +1346,11 @@ func (ct *catchpointTracker) catchpointEnabled() bool { // initializeHashes initializes account/resource/kv hashes. // as part of the initialization, it tests if a hash table matches to account base and updates the former. -func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, rnd basics.Round) error { - arw := store.NewAccountsSQLReaderWriter(tx) +func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx store.TransactionScope, rnd basics.Round) error { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } hashRound, err := arw.AccountsHashRound(ctx) if err != nil { return err @@ -1342,7 +1370,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, r } // create the merkle trie for the balances - committer, err := store.MakeMerkleCommitter(tx, false) + committer, err := tx.MakeMerkleCommitter(false) if err != nil { return fmt.Errorf("initializeHashes was unable to makeMerkleCommitter: %v", err) } @@ -1361,7 +1389,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, r if rootHash.IsZero() { ct.log.Infof("initializeHashes rebuilding merkle trie for round %d", rnd) - accountBuilderIt := store.MakeOrderedAccountsIter(tx, trieRebuildAccountChunkSize) + accountBuilderIt := tx.MakeOrderedAccountsIter(trieRebuildAccountChunkSize) defer accountBuilderIt.Close(ctx) startTrieBuildTime := time.Now() trieHashCount := 0 @@ -1432,7 +1460,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, r // Now add the kvstore hashes pendingTrieHashes = 0 - kvs, err := store.MakeKVsIter(ctx, tx) + kvs, err := tx.MakeKVsIter(ctx) if err != nil { return err } diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index e442c4030c..4fd95c1f8d 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "encoding/hex" "errors" "fmt" @@ -345,8 +344,11 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { // at this point, the database was created. 
We want to fill the accounts data accountsNumber := 6000000 * b.N - err = ml.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err = ml.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward var updates compactAccountDeltas @@ -564,7 +566,7 @@ func (bt *blockingTracker) prepareCommit(*deferredCommitContext) error { } // commitRound is not used by the blockingTracker -func (bt *blockingTracker) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error { +func (bt *blockingTracker) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { return nil } @@ -986,7 +988,7 @@ func TestFirstStagePersistence(t *testing.T) { defer ml2.Close() ml.Close() - cps2, err := ml2.dbs.CreateCatchpointReaderWriter() + cps2, err := ml2.dbs.MakeCatchpointReaderWriter() require.NoError(t, err) // Insert unfinished first stage record. @@ -1116,7 +1118,7 @@ func TestSecondStagePersistence(t *testing.T) { err = os.WriteFile(catchpointDataFilePath, catchpointData, 0644) require.NoError(t, err) - cps2, err := ml2.dbs.CreateCatchpointReaderWriter() + cps2, err := ml2.dbs.MakeCatchpointReaderWriter() require.NoError(t, err) // Restore the first stage database record. @@ -1308,7 +1310,7 @@ func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T) defer ml2.Close() ml.Close() - cps2, err := ml2.dbs.CreateCatchpointReaderWriter() + cps2, err := ml2.dbs.MakeCatchpointReaderWriter() require.NoError(t, err) // Sanity check: first stage record should be deleted. diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go index 839a2f24c0..bbb58da34a 100644 --- a/ledger/catchpointwriter.go +++ b/ledger/catchpointwriter.go @@ -19,7 +19,6 @@ package ledger import ( "archive/tar" "context" - "database/sql" "fmt" "io" "os" @@ -47,7 +46,7 @@ const ( // has the option of throttling the CPU utilization in between the calls. 
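// Illustrative sketch, not part of this patch: how a caller drives the writer
// defined below under the new scope API, mirroring generateCatchpointData
// above. `dbs` and `filePath` are placeholders; imports ("context", "time",
// ledger/store) are assumed.
func writeCatchpointData(ctx context.Context, dbs store.TrackerStore, filePath string) error {
	return dbs.TransactionContext(ctx, func(dbCtx context.Context, tx store.TransactionScope) error {
		w, err := makeCatchpointWriter(dbCtx, filePath, tx, ResourcesPerCatchpointFileChunk)
		if err != nil {
			return err
		}
		for {
			more, err := w.WriteStep(dbCtx)
			if err != nil || !more {
				return err
			}
			// a caller may pause between steps to throttle CPU; extend the
			// transaction warn deadline first so the pause does not trip
			// slow-transaction warnings
			if _, err0 := tx.ResetTransactionWarnDeadline(dbCtx, time.Now().Add(1*time.Second)); err0 != nil {
				return err0
			}
		}
	})
}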
type catchpointWriter struct { ctx context.Context - tx *sql.Tx + tx store.TransactionScope filePath string totalAccounts uint64 totalKVs uint64 @@ -71,7 +70,7 @@ type kvIter interface { } type accountsBatchIter interface { - Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) ([]encoded.BalanceRecordV6, uint64, error) + Next(ctx context.Context, accountCount int, resourceCount int) ([]encoded.BalanceRecordV6, uint64, error) Close() } @@ -92,8 +91,11 @@ func (chunk catchpointFileChunkV6) empty() bool { return len(chunk.Balances) == 0 && len(chunk.KVs) == 0 } -func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxResourcesPerChunk int) (*catchpointWriter, error) { - arw := store.NewAccountsSQLReaderWriter(tx) +func makeCatchpointWriter(ctx context.Context, filePath string, tx store.TransactionScope, maxResourcesPerChunk int) (*catchpointWriter, error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return nil, err + } totalAccounts, err := arw.TotalAccounts(ctx) if err != nil { @@ -128,7 +130,7 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxR file: file, compressor: compressor, tar: tar, - accountsIterator: store.MakeEncodedAccoutsBatchIter(), + accountsIterator: tx.MakeEncodedAccoutsBatchIter(), maxResourcesPerChunk: maxResourcesPerChunk, } return res, nil @@ -204,7 +206,7 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e } if cw.chunk.empty() { - err = cw.readDatabaseStep(cw.ctx, cw.tx) + err = cw.readDatabaseStep(cw.ctx) if err != nil { return } @@ -268,9 +270,9 @@ func (cw *catchpointWriter) asyncWriter(chunks chan catchpointFileChunkV6, respo // all of the account chunks first, and then the kv chunks. Even if the accounts // are evenly divisible by BalancesPerCatchpointFileChunk, it must not return an // empty chunk between accounts and kvs. 
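// Illustrative sketch, not part of this patch: with the batch iterator now
// created from the scope (tx.MakeEncodedAccoutsBatchIter()), iteration no
// longer threads a *sql.Tx through every Next call, which is what lets
// readDatabaseStep below drop its tx parameter. Assumes a
// store.TransactionScope in hand; treating an empty batch as "accounts
// exhausted" is an inference from the chunk-ordering invariant documented
// above, after which KV rows (tx.MakeKVsIter) would follow.
func iterateEncodedAccounts(ctx context.Context, tx store.TransactionScope) error {
	it := tx.MakeEncodedAccoutsBatchIter()
	defer it.Close()
	for {
		balances, _, err := it.Next(ctx, BalancesPerCatchpointFileChunk, ResourcesPerCatchpointFileChunk)
		if err != nil {
			return err
		}
		if len(balances) == 0 {
			return nil // accounts drained; a real writer switches to KV chunks here
		}
		// each balances entry is an encoded.BalanceRecordV6 ready to be
		// serialized into a catchpoint chunk
	}
}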
-func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) error { +func (cw *catchpointWriter) readDatabaseStep(ctx context.Context) error { if !cw.accountsDone { - balances, numAccounts, err := cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk) + balances, numAccounts, err := cw.accountsIterator.Next(ctx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk) if err != nil { return err } @@ -286,7 +288,7 @@ func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) er // Create the *Rows iterator JIT if cw.kvRows == nil { - rows, err := store.MakeKVsIter(ctx, tx) + rows, err := cw.tx.MakeKVsIter(ctx) if err != nil { return err } diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go index 37f76a15c4..fc689d5190 100644 --- a/ledger/catchpointwriter_test.go +++ b/ledger/catchpointwriter_test.go @@ -128,7 +128,7 @@ func TestBasicCatchpointWriter(t *testing.T) { au.close() fileName := filepath.Join(temporaryDirectory, "15.data") - err = ml.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { writer, err := makeCatchpointWriter(context.Background(), fileName, tx, ResourcesPerCatchpointFileChunk) if err != nil { return err @@ -193,13 +193,17 @@ func testWriteCatchpoint(t *testing.T, rdb store.TrackerStore, datapath string, maxResourcesPerChunk = ResourcesPerCatchpointFileChunk } - err := rdb.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { + err := rdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { writer, err := makeCatchpointWriter(context.Background(), datapath, tx, maxResourcesPerChunk) - arw := store.NewAccountsSQLReaderWriter(tx) + if err != nil { + return err + } + arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err } + for { more, err := writer.WriteStep(context.Background()) require.NoError(t, err) @@ -284,21 +288,28 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) { au.close() catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") - err = ml.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { + err = ml.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { expectedTotalAccounts := uint64(1) totalAccountsWritten := uint64(0) totalResources := 0 totalChunks := 0 - var expectedTotalResources int cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk) - err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources) + require.NoError(t, err) + + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + + expectedTotalResources, err := arw.TotalResources(ctx) if err != nil { return err } + // repeat this until read all accts for totalAccountsWritten < expectedTotalAccounts { cw.chunk.Balances = nil - err := cw.readDatabaseStep(cw.ctx, cw.tx) + err := cw.readDatabaseStep(cw.ctx) if err != nil { return err } @@ -318,7 +329,7 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) { return fmt.Errorf("expected more than one chunk due to overflow") } - if expectedTotalResources != totalResources { + if expectedTotalResources != uint64(totalResources) { return fmt.Errorf("total resources did not match: expected %d, actual %d", expectedTotalResources, totalResources) } @@ -370,25 +381,31 @@ func 
TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) { au.close() catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") - err = ml.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err = ml.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + expectedTotalAccounts, err := arw.TotalAccounts(ctx) if err != nil { return err } + + expectedTotalResources, err := arw.TotalResources(ctx) + if err != nil { + return err + } + totalAccountsWritten := uint64(0) totalResources := 0 - var expectedTotalResources int cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk) require.NoError(t, err) - err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources) - if err != nil { - return err - } + // repeat this until read all accts for totalAccountsWritten < expectedTotalAccounts { cw.chunk.Balances = nil - err := cw.readDatabaseStep(cw.ctx, cw.tx) + err := cw.readDatabaseStep(cw.ctx) if err != nil { return err } @@ -403,7 +420,7 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) { totalResources += numResources } - if expectedTotalResources != totalResources { + if expectedTotalResources != uint64(totalResources) { return fmt.Errorf("total resources did not match: expected %d, actual %d", expectedTotalResources, totalResources) } @@ -461,13 +478,13 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { ctx := context.Background() err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx store.TransactionScope) (err error) { - arw, err := tx.CreateAccountsReaderWriter() + arw, err := tx.MakeAccountsReaderWriter() if err != nil { return nil } // save the existing hash - committer, err := tx.CreateMerkleCommitter(false) + committer, err := tx.MakeMerkleCommitter(false) require.NoError(t, err) trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) require.NoError(t, err) @@ -481,7 +498,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.NoError(t, err) // rebuild the MT - committer, err = tx.CreateMerkleCommitter(false) + committer, err = tx.MakeMerkleCommitter(false) require.NoError(t, err) trie, err = merkletrie.MakeTrie(committer, store.TrieMemoryConfig) require.NoError(t, err) @@ -490,7 +507,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.NoError(t, err) require.Zero(t, h) - iter := tx.CreateOrderedAccountsIter(trieRebuildAccountChunkSize) + iter := tx.MakeOrderedAccountsIter(trieRebuildAccountChunkSize) defer iter.Close(ctx) for { accts, _, err := iter.Next(ctx) @@ -575,17 +592,20 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess store. 
err = accessor.BuildMerkleTrie(context.Background(), nil) require.NoError(t, err) - err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) error { - crw := store.NewCatchpointSQLReaderWriter(tx) - err := crw.ApplyCatchpointStagingBalances(ctx, 0, 0) - return err + err = l.trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) error { + cw, err := tx.MakeCatchpointWriter() + if err != nil { + return err + } + + return cw.ApplyCatchpointStagingBalances(ctx, 0, 0) }) require.NoError(t, err) balanceTrieStats := func(db store.TrackerStore) merkletrie.Stats { var stats merkletrie.Stats err = db.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { - committer, err := tx.CreateMerkleCommitter(false) + committer, err := tx.MakeMerkleCommitter(false) if err != nil { return err } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 88c57d392b..d24c3a158b 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -108,7 +108,7 @@ type stagingWriterImpl struct { func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store.NormalizedAccountBalance) error { return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { - crw, err := tx.CreateCatchpointReaderWriter() + crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err } @@ -118,7 +118,7 @@ func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store. func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecordV6) error { return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { - crw, err := tx.CreateCatchpointReaderWriter() + crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err } @@ -138,7 +138,7 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []store.NormalizedAccountBalance) error { return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error { - crw, err := tx.CreateCatchpointReaderWriter() + crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err } @@ -149,7 +149,7 @@ func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []stor func (w *stagingWriterImpl) writeHashes(ctx context.Context, balances []store.NormalizedAccountBalance) error { return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error { - crw, err := tx.CreateCatchpointReaderWriter() + crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err } @@ -217,7 +217,7 @@ type CatchupAccessorClientLedger interface { // MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor { - crw, _ := ledger.trackerDB().CreateCatchpointReaderWriter() + crw, _ := ledger.trackerDB().MakeCatchpointReaderWriter() return &catchpointCatchupAccessorImpl{ ledger: ledger, catchpointStore: crw, @@ -279,8 +279,12 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context } start := time.Now() ledgerResetstagingbalancesCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + crw, err := tx.MakeCatchpointWriter() + if err != nil { + return err + } + err = 
crw.ResetCatchpointStagingBalances(ctx, newCatchup) if err != nil { return fmt.Errorf("unable to reset catchpoint catchup balances : %v", err) @@ -368,21 +372,28 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex // TotalAccounts, TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound start := time.Now() ledgerProcessstagingcontentCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) - arw := store.NewAccountsSQLReaderWriter(tx) + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + cw, err := tx.MakeCatchpointWriter() + if err != nil { + return err + } + + aw, err := tx.MakeAccountsWriter() + if err != nil { + return err + } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound, uint64(fileHeader.BlocksRound)) + err = cw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound, uint64(fileHeader.BlocksRound)) if err != nil { return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupBlockRound, err) } if fileHeader.Version == CatchpointFileVersionV6 { - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound)) + err = cw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound)) if err != nil { return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupHashRound, err) } } - err = arw.AccountsPutTotals(fileHeader.Totals, true) + err = aw.AccountsPutTotals(fileHeader.Totals, true) return }) ledgerProcessstagingcontentMicros.AddMicrosecondsSince(start, nil) @@ -655,10 +666,18 @@ func countHashes(hashes [][]byte) (accountCount, kvCount uint64) { // BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error) { trackerdb := c.ledger.trackerDB() - err = trackerdb.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) + err = trackerdb.Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + crw, err := tx.MakeCatchpointWriter() + if err != nil { + return err + } + // creating the index can take a while, so ensure we don't generate false alerts for no good reason. 
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(120*time.Second)) + _, err = tx.ResetTransactionWarnDeadline(ctx, time.Now().Add(120*time.Second)) + if err != nil { + return err + } + return crw.CreateCatchpointStagingHashesIndex(ctx) }) if err != nil { @@ -678,8 +697,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro defer wg.Done() defer close(writerQueue) - err := trackerdb.Snapshot(func(transactionCtx context.Context, tx *sql.Tx) (err error) { - it := store.MakeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize, tx) + err := trackerdb.Snapshot(func(transactionCtx context.Context, tx store.SnapshotScope) (err error) { + it := tx.MakeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize) var hashes [][]byte for { hashes, err = it.Next(transactionCtx) @@ -716,9 +735,9 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro accountHashesWritten, kvHashesWritten := uint64(0), uint64(0) var mc store.MerkleCommitter - err := trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err := trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { // create the merkle trie for the balances - mc, err = store.MakeMerkleCommitter(tx, true) + mc, err = tx.MakeMerkleCommitter(true) if err != nil { return } @@ -745,8 +764,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro continue } - err = trackerdb.Snapshot(func(transactionCtx context.Context, tx *sql.Tx) (err error) { - mc, err = store.MakeMerkleCommitter(tx, true) + err = trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { + mc, err = tx.MakeMerkleCommitter(true) if err != nil { return } @@ -775,10 +794,13 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro } if uncommitedHashesCount >= trieRebuildCommitFrequency { - err = trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err = trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { // set a long 30-second window for the evict before warning is generated. - db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second)) - mc, err = store.MakeMerkleCommitter(tx, true) + _, err = tx.ResetTransactionWarnDeadline(transactionCtx, time.Now().Add(30*time.Second)) + if err != nil { + return + } + mc, err = tx.MakeMerkleCommitter(true) if err != nil { return } @@ -805,10 +827,13 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro return } if uncommitedHashesCount > 0 { - err = trackerdb.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { + err = trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { // set a long 30-second window for the evict before warning is generated. 
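Each commit batch below now follows the same scope-first shape: the warn-deadline reset and the merkle committer both come from the scope rather than from free functions. Condensed, with the trie work elided (a sketch, not the full commit logic):

	err = trackerdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error {
		// widen the slow-transaction warning window before a long trie commit;
		// the previous deadline is returned but not needed here
		if _, err := tx.ResetTransactionWarnDeadline(ctx, time.Now().Add(30*time.Second)); err != nil {
			return err
		}
		mc, err := tx.MakeMerkleCommitter(true) // staging trie
		if err != nil {
			return err
		}
		_ = mc // ... commit the pending trie pages through mc ...
		return nil
	})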
- db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second)) - mc, err = store.MakeMerkleCommitter(tx, true) + _, err = tx.ResetTransactionWarnDeadline(transactionCtx, time.Now().Add(30*time.Second)) + if err != nil { + return + } + mc, err = tx.MakeMerkleCommitter(true) if err != nil { return } @@ -865,10 +890,14 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl start := time.Now() ledgerVerifycatchpointCount.Inc(nil) - err = c.ledger.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + // create the merkle trie for the balances - mc, err0 := store.MakeMerkleCommitter(tx, true) + mc, err0 := tx.MakeMerkleCommitter(true) if err0 != nil { return fmt.Errorf("unable to make MerkleCommitter: %v", err0) } @@ -917,8 +946,12 @@ func (c *catchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, balancesRound := blk.Round() - basics.Round(catchpointLookback) start := time.Now() ledgerStorebalancesroundCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + crw, err := tx.MakeCatchpointWriter() + if err != nil { + return err + } + err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBalancesRound, uint64(balancesRound)) if err != nil { return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupBalancesRound, err) @@ -1013,9 +1046,16 @@ func (c *catchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (er func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err error) { start := time.Now() ledgerCatchpointFinishBalsCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - crw := store.NewCatchpointSQLReaderWriter(tx) - arw := store.NewAccountsSQLReaderWriter(tx) + err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } var balancesRound, hashRound uint64 var totals ledgercore.AccountTotals @@ -1062,7 +1102,7 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err DbPathPrefix: c.ledger.catchpoint.dbDirectory, BlockDb: c.ledger.blockDBs, } - _, err = store.RunMigrations(ctx, tx, tp, c.ledger.log, 6 /*target database version*/) + _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 6 /*target database version*/) if err != nil { return err } diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 9a001e7ecd..694d159eba 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -19,7 +19,6 @@ package ledger import ( "bytes" "context" - "database/sql" "errors" "fmt" "math/rand" @@ -2286,8 +2285,12 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { // reset tables and re-init again, similarly to the catchpoint apply code // since the ledger has only genesis accounts, this recreates them - err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) error
{ - arw := store.NewAccountsSQLReaderWriter(tx) + err = l.trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) error { + arw, err := tx.MakeAccountsWriter() + if err != nil { + return err + } + err0 := arw.AccountsReset(ctx) if err0 != nil { return err0 @@ -2301,12 +2304,12 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { DbPathPrefix: l.catchpoint.dbDirectory, BlockDb: l.blockDBs, } - _, err0 = store.RunMigrations(ctx, tx, tp, l.log, preReleaseDBVersion /*target database version*/) + _, err0 = tx.RunMigrations(ctx, tp, l.log, preReleaseDBVersion /*target database version*/) if err0 != nil { return err0 } - if err0 := store.AccountsUpdateSchemaTest(ctx, tx); err != nil { + if err0 := tx.AccountsUpdateSchemaTest(ctx); err != nil { return err0 } @@ -2342,21 +2345,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { // drop new tables // reloadLedger should migrate db properly - err = l.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) error { - var resetExprs = []string{ - `DROP TABLE IF EXISTS onlineaccounts`, - `DROP TABLE IF EXISTS txtail`, - `DROP TABLE IF EXISTS onlineroundparamstail`, - `DROP TABLE IF EXISTS catchpointfirststageinfo`, - } - for _, stmt := range resetExprs { - _, err0 := tx.ExecContext(ctx, stmt) - if err0 != nil { - return err0 - } - } - return nil - }) + err = l.trackerDBs.ResetToV6Test(context.Background()) require.NoError(t, err) err = l.reloadLedger() @@ -2465,8 +2454,8 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) { blockDB.Close() }() // create tables so online accounts can still be written - err = trackerDB.Batch(func(ctx context.Context, tx *sql.Tx) error { - if err := store.AccountsUpdateSchemaTest(ctx, tx); err != nil { + err = trackerDB.Batch(func(ctx context.Context, tx store.BatchScope) error { + if err := tx.AccountsUpdateSchemaTest(ctx); err != nil { return err } return nil @@ -2642,18 +2631,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) { cfg.MaxAcctLookback = shorterLookback store.AccountDBVersion = 7 // delete tables since we want to check they can be made from other data - err = trackerDB.Batch(func(ctx context.Context, tx *sql.Tx) error { - if _, err := tx.ExecContext(ctx, "DROP TABLE IF EXISTS onlineaccounts"); err != nil { - return err - } - if _, err := tx.ExecContext(ctx, "DROP TABLE IF EXISTS txtail"); err != nil { - return err - } - if _, err = tx.ExecContext(ctx, "DROP TABLE IF EXISTS onlineroundparamstail"); err != nil { - return err - } - return nil - }) + err = trackerDB.ResetToV6Test(context.Background()) require.NoError(t, err) l2, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) diff --git a/ledger/metrics.go b/ledger/metrics.go index 4700dfcd55..f4561edf5c 100644 --- a/ledger/metrics.go +++ b/ledger/metrics.go @@ -18,11 +18,11 @@ package ledger import ( "context" - "database/sql" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/util/metrics" ) @@ -70,7 +70,7 @@ func (mt *metricsTracker) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (mt *metricsTracker) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error { +func (mt *metricsTracker) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/notifier.go b/ledger/notifier.go index b9660abf9d..6154683ae5 100644 --- a/ledger/notifier.go +++ 
b/ledger/notifier.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "sync" "github.com/algorand/go-deadlock" @@ -26,6 +25,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store" ) type blockDeltaPair struct { @@ -113,7 +113,7 @@ func (bn *blockNotifier) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (bn *blockNotifier) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error { +func (bn *blockNotifier) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/store/accountsV2.go b/ledger/store/accountsV2.go index ecaaf5970f..5f20c23c63 100644 --- a/ledger/store/accountsV2.go +++ b/ledger/store/accountsV2.go @@ -22,6 +22,7 @@ import ( "database/sql" "fmt" "strings" + "testing" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" @@ -29,6 +30,7 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" + "github.com/stretchr/testify/require" ) type accountsV2Reader struct { @@ -45,7 +47,7 @@ type accountsV2ReaderWriter struct { accountsV2Writer } -// NewAccountsSQLReaderWriter creates a Catchpoint SQL reader+writer +// NewAccountsSQLReaderWriter creates an SQL reader+writer func NewAccountsSQLReaderWriter(e db.Executable) *accountsV2ReaderWriter { return &accountsV2ReaderWriter{ accountsV2Reader{q: e, preparedStatements: make(map[string]*sql.Stmt)}, @@ -84,6 +86,81 @@ func (r *accountsV2Reader) AccountsTotals(ctx context.Context, catchpointStaging return } +// AccountsAllTest iterates the account table and returns a map of the data +// It is meant only for testing purposes - it is heavy and has no production use case. 
+func (r *accountsV2Reader) AccountsAllTest() (bals map[basics.Address]basics.AccountData, err error) { + rows, err := r.q.Query("SELECT rowid, address, data FROM accountbase") + if err != nil { + return + } + defer rows.Close() + + bals = make(map[basics.Address]basics.AccountData) + for rows.Next() { + var addrbuf []byte + var buf []byte + var rowid sql.NullInt64 + err = rows.Scan(&rowid, &addrbuf, &buf) + if err != nil { + return + } + + var data BaseAccountData + err = protocol.Decode(buf, &data) + if err != nil { + return + } + + var addr basics.Address + if len(addrbuf) != len(addr) { + err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr)) + return + } + copy(addr[:], addrbuf) + + var ad basics.AccountData + ad, err = r.LoadFullAccount(context.Background(), "resources", addr, rowid.Int64, data) + if err != nil { + return + } + + bals[addr] = ad + } + + err = rows.Err() + return +} + +func (r *accountsV2Reader) CheckCreatablesTest(t *testing.T, + iteration int, + expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable) { + stmt, err := r.q.Prepare("SELECT asset, creator, ctype FROM assetcreators") + require.NoError(t, err) + + defer stmt.Close() + rows, err := stmt.Query() + if err != sql.ErrNoRows { + require.NoError(t, err) + } + defer rows.Close() + counter := 0 + for rows.Next() { + counter++ + mc := ledgercore.ModifiedCreatable{} + var buf []byte + var asset basics.CreatableIndex + err := rows.Scan(&asset, &buf, &mc.Ctype) + require.NoError(t, err) + copy(mc.Creator[:], buf) + + require.NotNil(t, expectedDbImage[asset]) + require.Equal(t, expectedDbImage[asset].Creator, mc.Creator) + require.Equal(t, expectedDbImage[asset].Ctype, mc.Ctype) + require.True(t, expectedDbImage[asset].Created) + } + require.Equal(t, len(expectedDbImage), counter) +} + // AccountsRound returns the tracker balances round number func (r *accountsV2Reader) AccountsRound() (rnd basics.Round, err error) { err = r.q.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&rnd) @@ -210,6 +287,17 @@ func (r *accountsV2Reader) OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnl return result, nil } +// TotalResources returns the total number of resources +func (r *accountsV2Reader) TotalResources(ctx context.Context) (total uint64, err error) { + err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM resources").Scan(&total) + if err == sql.ErrNoRows { + total = 0 + err = nil + return + } + return +} + // TotalAccounts returns the total number of accounts func (r *accountsV2Reader) TotalAccounts(ctx context.Context) (total uint64, err error) { err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM accountbase").Scan(&total) diff --git a/ledger/store/encodedAccountsIter.go b/ledger/store/encodedAccountsIter.go index 1dd5310489..60ce3a0ec6 100644 --- a/ledger/store/encodedAccountsIter.go +++ b/ledger/store/encodedAccountsIter.go @@ -27,6 +27,7 @@ import ( // encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table. type encodedAccountsBatchIter struct { + tx *sql.Tx accountsRows *sql.Rows resourcesRows *sql.Rows nextBaseRow pendingBaseRow @@ -43,21 +44,21 @@ type catchpointAccountResourceCounter struct { } // MakeEncodedAccoutsBatchIter creates an empty accounts batch iterator. 
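Because the iterator now captures its transaction at construction (the change just below), the catchpoint read loop no longer threads a tx through each call. Schematically, inside a TransactionScope closure, and assuming an empty batch means exhaustion as readDatabaseStep's check suggests:

	it := tx.MakeEncodedAccoutsBatchIter()
	defer it.Close()
	var written uint64
	for {
		bals, numAccounts, err := it.Next(ctx, BalancesPerCatchpointFileChunk, maxResourcesPerChunk)
		if err != nil {
			return err
		}
		if len(bals) == 0 {
			break // all accounts streamed
		}
		written += numAccounts
		// ... encode bals into the current catchpoint chunk ...
	}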
-func MakeEncodedAccoutsBatchIter() *encodedAccountsBatchIter { - return &encodedAccountsBatchIter{} +func MakeEncodedAccoutsBatchIter(tx *sql.Tx) *encodedAccountsBatchIter { + return &encodedAccountsBatchIter{tx: tx} } // Next returns an array containing the account data, in the same way it appears in the database // returning accountCount accounts data at a time. -func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) { +func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) { if iterator.accountsRows == nil { - iterator.accountsRows, err = tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid") + iterator.accountsRows, err = iterator.tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid") if err != nil { return } } if iterator.resourcesRows == nil { - iterator.resourcesRows, err = tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx") + iterator.resourcesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx") if err != nil { return } } diff --git a/ledger/store/interface.go b/ledger/store/interface.go index 98a46e485a..6a45de647f 100644 --- a/ledger/store/interface.go +++ b/ledger/store/interface.go @@ -18,9 +18,12 @@ package store import ( "context" + "testing" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/ledgercore" ) // AccountsWriter is the write interface for: @@ -45,12 +48,18 @@ type AccountsWriter interface { // AccountsWriterExt is the write interface used inside transactions and batch operations.
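The extended writer declared next gains AccountsReset plus the totals and online-round-params setters; in practice a batch caller pairs it with the scope-level migration hook, as the ledger tests further down do. Condensed (tp is a store.TrackerDBParams and log a logger, both assumed in scope):

	err = trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) error {
		aw, err := tx.MakeAccountsWriter()
		if err != nil {
			return err
		}
		if err := aw.AccountsReset(ctx); err != nil {
			return err
		}
		// rebuild the schema up to the current target version
		_, err = tx.RunMigrations(ctx, tp, log, store.AccountDBVersion)
		return err
	})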
type AccountsWriterExt interface { + AccountsReset(ctx context.Context) error ResetAccountHashes(ctx context.Context) (err error) TxtailNewRound(ctx context.Context, baseRound basics.Round, roundData [][]byte, forgetBeforeRound basics.Round) error UpdateAccountsRound(rnd basics.Round) (err error) + UpdateAccountsHashRound(ctx context.Context, hashRound basics.Round) (err error) + AccountsPutTotals(totals ledgercore.AccountTotals, catchpointStaging bool) error + OnlineAccountsDelete(forgetBefore basics.Round) (err error) + AccountsPutOnlineRoundParams(onlineRoundParamsData []ledgercore.OnlineRoundParamsData, startRound basics.Round) error + AccountsPruneOnlineRoundParams(deleteBeforeRound basics.Round) error } -// AccountsReader is the read interface for: +// AccountsReader is the "optimized" read interface for: // - accounts, resources, app kvs, creatables type AccountsReader interface { ListCreatables(maxIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) (results []basics.CreatableLocator, dbRound basics.Round, err error) @@ -68,11 +77,35 @@ type AccountsReader interface { Close() } +// AccountsReaderExt is the read interface for: +// - accounts, resources, app kvs, creatables +type AccountsReaderExt interface { + AccountsTotals(ctx context.Context, catchpointStaging bool) (totals ledgercore.AccountTotals, err error) + AccountsHashRound(ctx context.Context) (hashrnd basics.Round, err error) + LookupAccountAddressFromAddressID(ctx context.Context, addrid int64) (address basics.Address, err error) + LookupAccountDataByAddress(basics.Address) (rowid int64, data []byte, err error) + LookupAccountRowID(basics.Address) (addrid int64, err error) + LookupResourceDataByAddrID(addrid int64, aidx basics.CreatableIndex) (data []byte, err error) + TotalResources(ctx context.Context) (total uint64, err error) + TotalAccounts(ctx context.Context) (total uint64, err error) + TotalKVs(ctx context.Context) (total uint64, err error) + AccountsRound() (rnd basics.Round, err error) + AccountsAllTest() (bals map[basics.Address]basics.AccountData, err error) + CheckCreatablesTest(t *testing.T, iteration int, expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable) + LookupOnlineAccountDataByAddress(addr basics.Address) (rowid int64, data []byte, err error) + AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) + AccountsOnlineRoundParams() (onlineRoundParamsData []ledgercore.OnlineRoundParamsData, endRound basics.Round, err error) + OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error) + LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) + LoadAllFullAccounts(ctx context.Context, balancesTable string, resourcesTable string, acctCb func(basics.Address, basics.AccountData)) (count int, err error) +} + // AccountsReaderWriter is AccountsReader+AccountsWriter type AccountsReaderWriter interface { // AccountsReader // AccountsWriter AccountsWriterExt + AccountsReaderExt } // OnlineAccountsWriter is the write interface for: @@ -96,6 +129,8 @@ type OnlineAccountsReader interface { // CatchpointWriter is the write interface for: // - catchpoints type CatchpointWriter interface { + CreateCatchpointStagingHashesIndex(ctx context.Context) (err error) + StoreCatchpoint(ctx context.Context, round basics.Round, fileName string, catchpoint string, fileSize int64) (err error) 
WriteCatchpointStateUint64(ctx context.Context, stateName CatchpointState, setValue uint64) (err error) @@ -106,6 +141,9 @@ type CatchpointWriter interface { WriteCatchpointStagingCreatable(ctx context.Context, bals []NormalizedAccountBalance) error WriteCatchpointStagingHashes(ctx context.Context, bals []NormalizedAccountBalance) error + ApplyCatchpointStagingBalances(ctx context.Context, balancesRound basics.Round, merkleRootRound basics.Round) (err error) + ResetCatchpointStagingBalances(ctx context.Context, newCatchup bool) (err error) + InsertUnfinishedCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest) error DeleteUnfinishedCatchpoint(ctx context.Context, round basics.Round) error DeleteOldCatchpointFirstStageInfo(ctx context.Context, maxRoundToDelete basics.Round) error diff --git a/ledger/store/store.go b/ledger/store/store.go index e3e09345ae..5b1e580df5 100644 --- a/ledger/store/store.go +++ b/ledger/store/store.go @@ -19,8 +19,14 @@ package store import ( "context" "database/sql" + "os" + "testing" + "time" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) @@ -29,18 +35,59 @@ type trackerSQLStore struct { pair db.Pair } -type batchFn func(ctx context.Context, tx *sql.Tx) error +type batchFn func(ctx context.Context, tx BatchScope) error -type snapshotFn func(ctx context.Context, tx *sql.Tx) error +// BatchScope is the write scope to the store. +type BatchScope interface { + MakeCatchpointWriter() (CatchpointWriter, error) + MakeAccountsWriter() (AccountsWriterExt, error) + MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) + + RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) + ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) + + AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) + AccountsUpdateSchemaTest(ctx context.Context) (err error) +} +type sqlBatchScope struct { + tx *sql.Tx +} + +type snapshotFn func(ctx context.Context, tx SnapshotScope) error + +// SnapshotScope is the read scope to the store. +type SnapshotScope interface { + MakeAccountsReader() (AccountsReaderExt, error) + MakeCatchpointReader() (CatchpointReader, error) + + MakeCatchpointPendingHashesIterator(hashCount int) *catchpointPendingHashesIterator +} +type sqlSnapshotScope struct { + tx *sql.Tx +} type transactionFn func(ctx context.Context, tx TransactionScope) error -// TransactionScope read/write scope to the store. +// TransactionScope is the read/write scope to the store. 
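Taken together, the three scopes make intent explicit at the call site: Snapshot for reads, Batch for writes, Transaction (declared next) for mixed work. Two condensed examples drawn from this patch:

	// read-only: SnapshotScope only hands out readers
	err := dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) error {
		ar, err := tx.MakeAccountsReader()
		if err != nil {
			return err
		}
		rnd, err := ar.AccountsRound()
		_ = rnd
		return err
	})

	// write-only: BatchScope only hands out writers and batch helpers
	err = dbs.Batch(func(ctx context.Context, tx store.BatchScope) error {
		cw, err := tx.MakeCatchpointWriter()
		if err != nil {
			return err
		}
		return cw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound, 1000) // illustrative round
	})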
type TransactionScope interface { - CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) - CreateAccountsReaderWriter() (AccountsReaderWriter, error) - CreateMerkleCommitter(staging bool) (MerkleCommitter, error) - CreateOrderedAccountsIter(accountCount int) *orderedAccountsIter + MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) + MakeAccountsReaderWriter() (AccountsReaderWriter, error) + MakeAccountsOptimizedReader() (AccountsReader, error) + MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) + MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error) + MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error) + + MakeMerkleCommitter(staging bool) (MerkleCommitter, error) + + MakeOrderedAccountsIter(accountCount int) *orderedAccountsIter + MakeKVsIter(ctx context.Context) (*kvsIter, error) + MakeEncodedAccoutsBatchIter() *encodedAccountsBatchIter + + RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) + ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) + + AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) + AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) } type sqlTransactionScope struct { tx *sql.Tx @@ -61,13 +108,16 @@ type TrackerStore interface { Transaction(fn transactionFn) (err error) TransactionContext(ctx context.Context, fn transactionFn) (err error) - CreateAccountsReader() (AccountsReader, error) - CreateOnlineAccountsReader() (OnlineAccountsReader, error) + MakeAccountsReader() (AccountsReader, error) + MakeOnlineAccountsReader() (OnlineAccountsReader, error) - CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) + MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) Vacuum(ctx context.Context) (stats db.VacuumStats, err error) Close() + CleanupTest(dbName string, inMemory bool) + + ResetToV6Test(ctx context.Context) error } // OpenTrackerSQLStore opens the sqlite database store @@ -105,7 +155,7 @@ func (s *trackerSQLStore) Batch(fn batchFn) (err error) { func (s *trackerSQLStore) BatchContext(ctx context.Context, fn batchFn) (err error) { return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { - return fn(ctx, tx) + return fn(ctx, sqlBatchScope{tx}) }) } @@ -115,7 +165,7 @@ func (s *trackerSQLStore) Snapshot(fn snapshotFn) (err error) { func (s *trackerSQLStore) SnapshotContext(ctx context.Context, fn snapshotFn) (err error) { return s.pair.Rdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { - return fn(ctx, tx) + return fn(ctx, sqlSnapshotScope{tx}) }) } @@ -129,15 +179,15 @@ func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn transaction }) } -func (s *trackerSQLStore) CreateAccountsReader() (AccountsReader, error) { +func (s *trackerSQLStore) MakeAccountsReader() (AccountsReader, error) { return AccountsInitDbQueries(s.pair.Rdb.Handle) } -func (s *trackerSQLStore) CreateOnlineAccountsReader() (OnlineAccountsReader, error) { +func (s *trackerSQLStore) MakeOnlineAccountsReader() (OnlineAccountsReader, error) { return OnlineAccountsInitDbQueries(s.pair.Rdb.Handle) } -func (s *trackerSQLStore) CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) { 
+func (s *trackerSQLStore) MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) { w := NewCatchpointSQLReaderWriter(s.pair.Wdb.Handle) return w, nil } @@ -149,22 +199,128 @@ func (s *trackerSQLStore) Vacuum(ctx context.Context) (stats db.VacuumStats, err return } +func (s *trackerSQLStore) CleanupTest(dbName string, inMemory bool) { + s.pair.Close() + if !inMemory { + os.Remove(dbName) + } +} + +func (s *trackerSQLStore) ResetToV6Test(ctx context.Context) error { + var resetExprs = []string{ + `DROP TABLE IF EXISTS onlineaccounts`, + `DROP TABLE IF EXISTS txtail`, + `DROP TABLE IF EXISTS onlineroundparamstail`, + `DROP TABLE IF EXISTS catchpointfirststageinfo`, + } + + return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { + for _, stmt := range resetExprs { + _, err := tx.ExecContext(ctx, stmt) + if err != nil { + return err + } + } + return nil + }) +} + func (s *trackerSQLStore) Close() { s.pair.Close() } -func (txs sqlTransactionScope) CreateCatchpointReaderWriter() (CatchpointReaderWriter, error) { +func (txs sqlTransactionScope) MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) { return NewCatchpointSQLReaderWriter(txs.tx), nil } -func (txs sqlTransactionScope) CreateAccountsReaderWriter() (AccountsReaderWriter, error) { +func (txs sqlTransactionScope) MakeAccountsReaderWriter() (AccountsReaderWriter, error) { return NewAccountsSQLReaderWriter(txs.tx), nil } -func (txs sqlTransactionScope) CreateMerkleCommitter(staging bool) (MerkleCommitter, error) { +func (txs sqlTransactionScope) MakeAccountsOptimizedReader() (AccountsReader, error) { + return AccountsInitDbQueries(txs.tx) +} + +func (txs sqlTransactionScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) { + return MakeAccountsSQLWriter(txs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables) +} + +func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error) { + return MakeOnlineAccountsSQLWriter(txs.tx, hasAccounts) +} + +func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedReader() (r OnlineAccountsReader, err error) { + return OnlineAccountsInitDbQueries(txs.tx) +} + +func (txs sqlTransactionScope) MakeMerkleCommitter(staging bool) (MerkleCommitter, error) { return MakeMerkleCommitter(txs.tx, staging) } -func (txs sqlTransactionScope) CreateOrderedAccountsIter(accountCount int) *orderedAccountsIter { +func (txs sqlTransactionScope) MakeOrderedAccountsIter(accountCount int) *orderedAccountsIter { return MakeOrderedAccountsIter(txs.tx, accountCount) } + +func (txs sqlTransactionScope) MakeKVsIter(ctx context.Context) (*kvsIter, error) { + return MakeKVsIter(ctx, txs.tx) +} + +func (txs sqlTransactionScope) MakeEncodedAccoutsBatchIter() *encodedAccountsBatchIter { + return MakeEncodedAccoutsBatchIter(txs.tx) +} + +func (txs sqlTransactionScope) RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) { + return RunMigrations(ctx, txs.tx, params, log, targetVersion) +} + +func (txs sqlTransactionScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) { + return db.ResetTransactionWarnDeadline(ctx, txs.tx, deadline) +} + +func (txs sqlTransactionScope) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) { + return AccountsInitTest(tb, 
txs.tx, initAccounts, proto) +} + +func (txs sqlTransactionScope) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { + return AccountsInitLightTest(tb, txs.tx, initAccounts, proto) +} + +func (bs sqlBatchScope) MakeCatchpointWriter() (CatchpointWriter, error) { + return NewCatchpointSQLReaderWriter(bs.tx), nil +} + +func (bs sqlBatchScope) MakeAccountsWriter() (AccountsWriterExt, error) { + return NewAccountsSQLReaderWriter(bs.tx), nil +} + +func (bs sqlBatchScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) { + return MakeAccountsSQLWriter(bs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables) +} + +func (bs sqlBatchScope) RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) { + return RunMigrations(ctx, bs.tx, params, log, targetVersion) +} + +func (bs sqlBatchScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) { + return db.ResetTransactionWarnDeadline(ctx, bs.tx, deadline) +} + +func (bs sqlBatchScope) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) { + return AccountsInitTest(tb, bs.tx, initAccounts, proto) +} + +func (bs sqlBatchScope) AccountsUpdateSchemaTest(ctx context.Context) (err error) { + return AccountsUpdateSchemaTest(ctx, bs.tx) +} + +func (ss sqlSnapshotScope) MakeAccountsReader() (AccountsReaderExt, error) { + return NewAccountsSQLReaderWriter(ss.tx), nil +} + +func (ss sqlSnapshotScope) MakeCatchpointReader() (CatchpointReader, error) { + return NewCatchpointSQLReaderWriter(ss.tx), nil +} + +func (ss sqlSnapshotScope) MakeCatchpointPendingHashesIterator(hashCount int) *catchpointPendingHashesIterator { + return MakeCatchpointPendingHashesIterator(hashCount, ss.tx) +} diff --git a/ledger/store/testing.go b/ledger/store/testing.go index babeca5094..507bb48c35 100644 --- a/ledger/store/testing.go +++ b/ledger/store/testing.go @@ -26,6 +26,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" "github.com/stretchr/testify/require" @@ -41,6 +42,12 @@ func DbOpenTrackerTest(t testing.TB, inMemory bool) (TrackerStore, string) { return &trackerSQLStore{dbs}, fn } +// SetDbTrackerTestLogging sets a testing logger on a database. +func SetDbTrackerTestLogging(t testing.TB, dbs TrackerStore) { + dblogger := logging.TestingLog(t) + dbs.SetLogger(dblogger) +} + // AccountsInitLightTest initializes an empty database for testing without the extra methods being called. 
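With the helpers hanging off the scopes, a tracker test bootstraps its database without ever touching a *sql.Tx, roughly as the txtail test below ends up doing:

	trackerDBs, _ := store.DbOpenTrackerTest(t, true /* in memory */)
	err := trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) error {
		accts := ledgertesting.RandomAccounts(20, true)
		newDB := tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
		require.True(t, newDB)
		return nil
	})
	require.NoError(t, err)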
func AccountsInitLightTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { newDB, err := accountsInit(tx, initAccounts, proto) diff --git a/ledger/tracker.go b/ledger/tracker.go index caf59f27e6..45e14a63e8 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "errors" "fmt" "reflect" @@ -108,7 +107,7 @@ type ledgerTracker interface { // commitRound is called for each of the trackers after a deferredCommitContext was agreed upon // by all the prepareCommit calls. The commitRound is being executed within a single transactional // context, and so, if any of the tracker's commitRound calls fails, the transaction is rolled back. - commitRound(context.Context, *sql.Tx, *deferredCommitContext) error + commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error // postCommit is called only on a successful commitRound. In that case, each of the trackers have // the chance to update it's internal data structures, knowing that the given deferredCommitContext // has completed. An optional context is provided for long-running operations. @@ -282,9 +281,13 @@ func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTrack tr.dbs = l.trackerDB() tr.log = l.trackerLog() - err = tr.dbs.Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - tr.dbRound, err = arw.AccountsRound() + err = tr.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + tr.dbRound, err = ar.AccountsRound() return err }) @@ -512,8 +515,12 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { start := time.Now() ledgerCommitroundCount.Inc(nil) - err := tr.dbs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err := tr.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } + for _, lt := range tr.trackers { err0 := lt.commitRound(ctx, tx, dcc) if err0 != nil { diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 8be223d6f5..908510893e 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -19,7 +19,6 @@ package ledger import ( "bytes" "context" - "database/sql" "sync" "testing" "time" @@ -32,6 +31,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -180,7 +180,7 @@ func (bt *producePrepareBlockingTracker) prepareCommit(*deferredCommitContext) e } // commitRound is not used by the blockingTracker -func (bt *producePrepareBlockingTracker) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error { +func (bt *producePrepareBlockingTracker) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go index e65b7ba2cb..a6809e0d71 100644 --- a/ledger/trackerdb.go +++ b/ledger/trackerdb.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "fmt" 
"github.com/algorand/go-algorand/ledger/store" @@ -39,8 +38,11 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi return } - err = dbs.Batch(func(ctx context.Context, tx *sql.Tx) error { - arw := store.NewAccountsSQLReaderWriter(tx) + err = dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } tp := store.TrackerDBParams{ InitAccounts: l.GenesisAccounts(), @@ -52,7 +54,7 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi BlockDb: bdbs, } var err0 error - mgr, err0 = store.RunMigrations(ctx, tx, tp, log, store.AccountDBVersion) + mgr, err0 = tx.RunMigrations(ctx, tp, log, store.AccountDBVersion) if err0 != nil { return err0 } @@ -67,7 +69,7 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi if err0 != nil { return err0 } - mgr, err0 = store.RunMigrations(ctx, tx, tp, log, store.AccountDBVersion) + mgr, err0 = tx.RunMigrations(ctx, tp, log, store.AccountDBVersion) if err0 != nil { return err0 } diff --git a/ledger/txtail.go b/ledger/txtail.go index cbd1b5266b..5262632245 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "fmt" "github.com/algorand/go-deadlock" @@ -97,9 +96,13 @@ func (t *txTail) loadFromDisk(l ledgerForTracker, dbRound basics.Round) error { var roundTailHashes []crypto.Digest var baseRound basics.Round if dbRound > 0 { - err := l.trackerDB().Snapshot(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) - roundData, roundTailHashes, baseRound, err = arw.LoadTxTail(ctx, dbRound) + err := l.trackerDB().Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + ar, err := tx.MakeAccountsReader() + if err != nil { + return err + } + + roundData, roundTailHashes, baseRound, err = ar.LoadTxTail(ctx, dbRound) return }) if err != nil { @@ -269,8 +272,11 @@ func (t *txTail) prepareCommit(dcc *deferredCommitContext) (err error) { return } -func (t *txTail) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) error { - arw := store.NewAccountsSQLReaderWriter(tx) +func (t *txTail) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) error { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return err + } // determine the round to remove data // the formula is similar to the committedUpTo: rnd + 1 - retain size diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index 1267c8350b..954139e54f 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -18,7 +18,6 @@ package ledger import ( "context" - "database/sql" "errors" "fmt" "testing" @@ -154,12 +153,15 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse t.trackerDBs, _ = store.DbOpenTrackerTest(ts, inMemory) t.protoVersion = protoVersion - err := t.trackerDBs.Batch(func(transactionCtx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + err := t.trackerDBs.Batch(func(transactionCtx context.Context, tx store.BatchScope) (err error) { + arw, err := tx.MakeAccountsWriter() + if err != nil { + return err + } accts := ledgertesting.RandomAccounts(20, true) proto := config.Consensus[protoVersion] - newDB := store.AccountsInitTest(ts, tx, accts, protoVersion) + newDB := tx.AccountsInitTest(ts, accts, protoVersion) require.True(ts, newDB) roundData := make([][]byte, 
0, proto.MaxTxnLife) @@ -298,7 +300,7 @@ func TestTxTailDeltaTracking(t *testing.T) { err = txtail.prepareCommit(dcc) require.NoError(t, err) - err := ledger.trackerDBs.Batch(func(ctx context.Context, tx *sql.Tx) (err error) { + err := ledger.trackerDBs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { err = txtail.commitRound(context.Background(), tx, dcc) require.NoError(t, err) return nil diff --git a/util/db/dbutil.go b/util/db/dbutil.go index 7e62dfee61..c896c53771 100644 --- a/util/db/dbutil.go +++ b/util/db/dbutil.go @@ -321,7 +321,7 @@ func (db *Accessor) AtomicContext(ctx context.Context, fn idemFn, extras ...inte // however, the transaction context and transaction object can be used to uniquely associate the request // with a particular deadline. // the function fails if the given transaction is not on the stack of the provided context. -func ResetTransactionWarnDeadline(ctx context.Context, tx *sql.Tx, deadline time.Time) (prevDeadline time.Time, err error) { +func ResetTransactionWarnDeadline(ctx context.Context, tx interface{}, deadline time.Time) (prevDeadline time.Time, err error) { txContextData, ok := ctx.Value(tx).(*txExecutionContext) if !ok { // it's not a valid call. just return an error. From 070154360cea7e48b9fbb6370ed161d3f1a65f14 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Tue, 21 Feb 2023 12:13:36 -0500 Subject: [PATCH 47/81] network: add OutgoingMessage disconnectReason field (#5147) --- network/netidentity.go | 11 +++------ network/wsNetwork.go | 9 +++++-- network/wsNetwork_test.go | 51 +++++++++++++++++++++++++++++++++------ 3 files changed, 55 insertions(+), 16 deletions(-) diff --git a/network/netidentity.go b/network/netidentity.go index dd8abceff0..9fce21fcb0 100644 --- a/network/netidentity.go +++ b/network/netidentity.go @@ -336,20 +336,17 @@ func identityVerificationHandler(message IncomingMessage) OutgoingMessage { if err != nil { networkPeerIdentityError.Inc(nil) peer.net.log.With("err", err).With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification could not be decoded, disconnecting") - peer.net.disconnect(peer, disconnectBadIdentityData) - return OutgoingMessage{} + return OutgoingMessage{Action: Disconnect, reason: disconnectBadIdentityData} } if peer.identityChallenge != msg.Msg.ResponseChallenge { networkPeerIdentityError.Inc(nil) peer.net.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification challenge does not match, disconnecting") - peer.net.disconnect(peer, disconnectBadIdentityData) - return OutgoingMessage{} + return OutgoingMessage{Action: Disconnect, reason: disconnectBadIdentityData} } if !msg.Verify(peer.identity) { networkPeerIdentityError.Inc(nil) peer.net.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification is incorrectly signed, disconnecting") - peer.net.disconnect(peer, disconnectBadIdentityData) - return OutgoingMessage{} + return OutgoingMessage{Action: Disconnect, reason: disconnectBadIdentityData} } atomic.StoreUint32(&peer.identityVerified, 1) // if the identity could not be claimed by this peer, it means the identity is in use @@ -359,7 +356,7 @@ func identityVerificationHandler(message IncomingMessage) OutgoingMessage { if !ok { networkPeerIdentityDisconnect.Inc(nil) peer.net.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity already in use, disconnecting") - peer.net.disconnect(peer, 
disconnectDuplicateConnection) + return OutgoingMessage{Action: Disconnect, reason: disconnectDuplicateConnection} } return OutgoingMessage{} } diff --git a/network/wsNetwork.go b/network/wsNetwork.go index b537818c65..e04e04b0f6 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -263,6 +263,7 @@ type OutgoingMessage struct { Tag Tag Payload []byte Topics Topics + reason disconnectReason // used when Action == Disconnect } // ForwardingPolicy is an enum indicating to whom we should send a message @@ -309,7 +310,7 @@ type TaggedMessageHandler struct { // Propagate is a convenience function to save typing in the common case of a message handler telling us to propagate an incoming message // "return network.Propagate(msg)" instead of "return network.OutgoingMsg{network.Broadcast, msg.Tag, msg.Data}" func Propagate(msg IncomingMessage) OutgoingMessage { - return OutgoingMessage{Broadcast, msg.Tag, msg.Data, nil} + return OutgoingMessage{Action: Broadcast, Tag: msg.Tag, Payload: msg.Data, Topics: nil} } // GossipNetworkPath is the URL path to connect to the websocket gossip node at. @@ -1286,7 +1287,11 @@ func (wn *WebsocketNetwork) messageHandlerThread(peersConnectivityCheckCh <-chan switch outmsg.Action { case Disconnect: wn.wg.Add(1) - go wn.disconnectThread(msg.Sender, disconnectBadData) + reason := disconnectBadData + if outmsg.reason != disconnectReasonNone { + reason = outmsg.reason + } + go wn.disconnectThread(msg.Sender, reason) case Broadcast: err := wn.Broadcast(wn.ctx, msg.Tag, msg.Data, false, msg.Sender) if err != nil && err != errBcastQFull { diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index c86eed11a5..7cd281dea5 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -3255,21 +3255,43 @@ func TestWebsocketNetworkTXMessageOfInterestPN(t *testing.T) { // Plan: // Network A will be sending messages to network B. // Network B will respond with another message for the first 4 messages. When it receive the 5th message, it would close the connection. 
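The netidentity and wsNetwork changes above invert the disconnect flow: a handler no longer calls peer.net.disconnect itself, it returns OutgoingMessage{Action: Disconnect} with an optional reason, and messageHandlerThread performs the disconnect, defaulting to disconnectBadData when no reason was set. A minimal self-contained sketch of that dispatch pattern; the types here are local stand-ins for illustration, not the real network package:

package main

import "fmt"

type disconnectReason string

const (
	disconnectReasonNone  disconnectReason = ""
	disconnectBadData     disconnectReason = "BadDataReceived"
	disconnectBadIdentity disconnectReason = "BadIdentityData"
)

type forwardingPolicy int

const (
	Ignore forwardingPolicy = iota
	Disconnect
)

// OutgoingMessage mirrors the shape in the diff: the handler states the
// action and, optionally, why the peer should be dropped.
type OutgoingMessage struct {
	Action forwardingPolicy
	reason disconnectReason
}

func dispatch(out OutgoingMessage) {
	if out.Action != Disconnect {
		return
	}
	// same fallback as messageHandlerThread: use the handler-provided
	// reason when present, otherwise the generic disconnectBadData
	reason := disconnectBadData
	if out.reason != disconnectReasonNone {
		reason = out.reason
	}
	fmt.Println("disconnecting peer:", reason)
}

func main() {
	dispatch(OutgoingMessage{Action: Disconnect})                                // -> BadDataReceived
	dispatch(OutgoingMessage{Action: Disconnect, reason: disconnectBadIdentity}) // -> BadIdentityData
}

Because the reason field stays unexported, only handlers inside the network package can attach a specific disconnect reason; external handlers that return Disconnect still get the disconnectBadData default.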
-// We want to get an event with disconnectRequestReceived func TestWebsocketDisconnection(t *testing.T) { partitiontest.PartitionTest(t) + // We want to get an event with disconnectRequestReceived from netA + testWebsocketDisconnection(t, func(wn *WebsocketNetwork, _ *OutgoingMessage) { + wn.DisconnectPeers() + }, nil) + + // We want to get an event with the default reason from netB + defaultReason := disconnectBadData + testWebsocketDisconnection(t, func(_ *WebsocketNetwork, out *OutgoingMessage) { + out.Action = Disconnect + }, &defaultReason) + + // We want to get an event with the provided reason from netB + customReason := disconnectReason("MyCustomDisconnectReason") + testWebsocketDisconnection(t, func(_ *WebsocketNetwork, out *OutgoingMessage) { + out.Action = Disconnect + out.reason = customReason + }, &customReason) +} + +func testWebsocketDisconnection(t *testing.T, disconnectFunc func(wn *WebsocketNetwork, out *OutgoingMessage), expectedNetBReason *disconnectReason) { netA := makeTestWebsocketNode(t) netA.config.GossipFanout = 1 netA.config.EnablePingHandler = false - dl := eventsDetailsLogger{Logger: logging.TestingLog(t), eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.DisconnectPeerEvent} - netA.log = dl + dlNetA := eventsDetailsLogger{Logger: logging.TestingLog(t), eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.DisconnectPeerEvent} + netA.log = dlNetA netA.Start() defer netStop(t, netA, "A") netB := makeTestWebsocketNode(t) netB.config.GossipFanout = 1 netB.config.EnablePingHandler = false + dlNetB := eventsDetailsLogger{Logger: logging.TestingLog(t), eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.DisconnectPeerEvent} + netB.log = dlNetB + addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) @@ -3289,7 +3311,7 @@ func TestWebsocketDisconnection(t *testing.T) { msgHandlerB := func(msg IncomingMessage) (out OutgoingMessage) { if atomic.AddUint32(&msgCounterNetB, 1) == 5 { // disconnect - netB.DisconnectPeers() + disconnectFunc(netB, &out) } else { // if we received a message, send a message back. 
netB.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{msg.Data[0] + 1}, true, nil) @@ -3331,16 +3353,31 @@ func TestWebsocketDisconnection(t *testing.T) { } select { - case eventDetails := <-dl.eventReceived: + case eventDetails := <-dlNetA.eventReceived: switch disconnectPeerEventDetails := eventDetails.(type) { case telemetryspec.DisconnectPeerEventDetails: - require.Equal(t, disconnectPeerEventDetails.Reason, string(disconnectRequestReceived)) + require.Equal(t, string(disconnectRequestReceived), disconnectPeerEventDetails.Reason) default: require.FailNow(t, "Unexpected event was send : %v", eventDetails) } default: - require.FailNow(t, "The DisconnectPeerEvent was missing") + require.FailNow(t, "The NetA DisconnectPeerEvent was missing") + } + + if expectedNetBReason != nil { + select { + case eventDetails := <-dlNetB.eventReceived: + switch disconnectPeerEventDetails := eventDetails.(type) { + case telemetryspec.DisconnectPeerEventDetails: + require.Equal(t, string(*expectedNetBReason), disconnectPeerEventDetails.Reason) + default: + require.FailNow(t, "Unexpected event was send : %v", eventDetails) + } + + default: + require.FailNow(t, "The NetB DisconnectPeerEvent was missing") + } } } From 02b6186a13ba0f685e581d958339728b17c7ee13 Mon Sep 17 00:00:00 2001 From: AlgoAxel <113933518+AlgoAxel@users.noreply.github.com> Date: Tue, 21 Feb 2023 13:50:25 -0500 Subject: [PATCH 48/81] tests: use assert.Eventually to wait for peering to settle (#5150) --- network/wsNetwork_test.go | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 7cd281dea5..c08042c465 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -1318,10 +1318,18 @@ func TestPeeringWithIdentityChallenge(t *testing.T) { // still just one A->B connection assert.Equal(t, 0, len(netA.GetPeers(PeersConnectedIn))) assert.Equal(t, 1, len(netA.GetPeers(PeersConnectedOut))) - assert.Equal(t, 1, len(netB.GetPeers(PeersConnectedIn))) assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) assert.Equal(t, 2, netA.identityTracker.(*mockIdentityTracker).getSetCount()) assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getInsertCount()) + // it is possible for NetB to be in the process of doing addPeer while + // the underlying connection is being closed. In this case, the read loop + // on the peer will detect and close the peer. Since this is asynchronous, + // we wait and check regularly to allow the connection to settle + assert.Eventually( + t, + func() bool { return len(netB.GetPeers(PeersConnectedIn)) == 1 }, + 5*time.Second, + 100*time.Millisecond) // Now have A connect to node C, which has the same PublicAddress as B (e.g., because it shares the // same public load balancer endpoint). C will have a different identity keypair and so will not be @@ -1869,8 +1877,16 @@ func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) { } assert.Equal(t, tc.totalInA, len(netA.GetPeers(PeersConnectedIn))) assert.Equal(t, tc.totalOutA, len(netA.GetPeers(PeersConnectedOut))) - assert.Equal(t, tc.totalInB, len(netB.GetPeers(PeersConnectedIn))) assert.Equal(t, tc.totalOutB, len(netB.GetPeers(PeersConnectedOut))) + // it is possible for NetB to be in the process of doing addPeer while + // the underlying connection is being closed. In this case, the read loop + // on the peer will detect and close the peer. 
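The comment above is the heart of this patch: peer teardown is asynchronous, so a one-shot equality assertion on GetPeers is racy. testify's assert.Eventually re-evaluates the condition on a tick until it holds or the deadline passes. A tiny self-contained illustration of the same polling idiom, with a toy condition and hypothetical test name:

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestConnectionSettles shows the polling idiom used in this patch:
// re-check the condition every 100ms for up to 5s instead of asserting
// a racy count once.
func TestConnectionSettles(t *testing.T) {
	var connectedIn int64
	go func() {
		// simulate the asynchronous peer teardown settling after a while
		time.Sleep(300 * time.Millisecond)
		atomic.StoreInt64(&connectedIn, 1)
	}()
	assert.Eventually(t,
		func() bool { return atomic.LoadInt64(&connectedIn) == 1 },
		5*time.Second,
		100*time.Millisecond)
}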
Since this is asynchronous, + // we wait and check regularly to allow the connection to settle + assert.Eventually( + t, + func() bool { return len(netB.GetPeers(PeersConnectedIn)) == tc.totalInB }, + 5*time.Second, + 100*time.Millisecond) } } @@ -2020,8 +2036,16 @@ func TestPeeringWithBadIdentityVerification(t *testing.T) { assert.Equal(t, tc.totalInA, len(netA.GetPeers(PeersConnectedIn))) assert.Equal(t, tc.totalOutA, len(netA.GetPeers(PeersConnectedOut))) - assert.Equal(t, tc.totalInB, len(netB.GetPeers(PeersConnectedIn))) assert.Equal(t, tc.totalOutB, len(netB.GetPeers(PeersConnectedOut))) + // it is possible for NetB to be in the process of doing addPeer while + // the underlying connection is being closed. In this case, the read loop + // on the peer will detect and close the peer. Since this is asynchronous, + // we wait and check regularly to allow the connection to settle + assert.Eventually( + t, + func() bool { return len(netB.GetPeers(PeersConnectedIn)) == tc.totalInB }, + 5*time.Second, + 100*time.Millisecond) } } From 67d0d2c838419b8dbde0c626105791507218365e Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:17:55 -0500 Subject: [PATCH 49/81] CI: upgrade test_nightly infra to large (#5054) --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5508099457..d98d70c1de 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -251,7 +251,7 @@ jobs: parameters: platform: type: string - executor: << parameters.platform >>_medium + executor: << parameters.platform >>_large working_directory: << pipeline.parameters.build_dir >>/project parallelism: 4 steps: From 9bdf91f1e28f49c34a94f660a55c52c1394166b0 Mon Sep 17 00:00:00 2001 From: Shant Karakashian <55754073+algonautshant@users.noreply.github.com> Date: Wed, 22 Feb 2023 14:52:57 -0500 Subject: [PATCH 50/81] tests: fix TestStreamVerifierPoolShutdown (#5152) --- data/transactions/verify/streamverifier_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/data/transactions/verify/streamverifier_test.go b/data/transactions/verify/streamverifier_test.go index ea20f1d5fd..d926650cd7 100644 --- a/data/transactions/verify/streamverifier_test.go +++ b/data/transactions/verify/streamverifier_test.go @@ -386,13 +386,15 @@ func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not // Shutdown will block until all tasks held by holdTasks is released verificationPool.Shutdown() }() - // Send more tasks to break the backlog worker after b.pool.Enqueue returns the error + // release the tasks + close(holdTasks) + wg.Wait() + + // Send more tasks to fill the queueof the backlog worker after the consumer shuts down for x := 0; x < 100; x++ { verificationPool.EnqueueBacklog(context.Background(), func(arg interface{}) interface{} { <-holdTasks; return nil }, nil, nil) } - // release the tasks - close(holdTasks) // make sure the EnqueueBacklogis returning err for x := 0; x < 10; x++ { @@ -444,6 +446,7 @@ func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not require.ErrorIs(t, err, errShuttingDownError) } require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled") + wg.Wait() } // TestStreamVerifierRestart tests what happens when the context is canceled From 8bb071f3a934a64559ca51aa619a18d5a27cfd45 Mon Sep 17 00:00:00 2001 From: Shant 
Karakashian <55754073+algonautshant@users.noreply.github.com> Date: Thu, 23 Feb 2023 16:07:26 -0500 Subject: [PATCH 51/81] perf: Separate the txn specific operations from StreamVerifier (#5132) Co-authored-by: chris erway --- data/transactions/verify/streamverifier.go | 367 ++++-------------- data/transactions/verify/txn.go | 2 - data/transactions/verify/txnBatch.go | 270 +++++++++++++ ...treamverifier_test.go => txnBatch_test.go} | 166 ++++---- data/txHandler.go | 18 +- data/txHandler_test.go | 8 +- 6 files changed, 465 insertions(+), 366 deletions(-) create mode 100644 data/transactions/verify/txnBatch.go rename data/transactions/verify/{streamverifier_test.go => txnBatch_test.go} (80%) diff --git a/data/transactions/verify/streamverifier.go b/data/transactions/verify/streamverifier.go index 0a3e075e91..a5a5fe512f 100644 --- a/data/transactions/verify/streamverifier.go +++ b/data/transactions/verify/streamverifier.go @@ -20,219 +20,128 @@ import ( "context" "errors" "sync" - "sync/atomic" "time" - "github.com/algorand/go-algorand/crypto" - "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/data/transactions" - "github.com/algorand/go-algorand/data/transactions/logic" - "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/execpool" ) -// batchSizeBlockLimit is the limit when the batch exceeds, will be added to the exec pool, even if the pool is saturated -// and the batch verifier will block until the exec pool accepts the batch -const batchSizeBlockLimit = 1024 - -// waitForNextTxnDuration is the time to wait before sending the batch to the exec pool -// If the incoming txn rate is low, a txn in the batch may wait no less than -// waitForNextTxnDuration before it is set for verification. -// This can introduce a latency to the propagation of a transaction in the network, -// since every relay will go through this wait time before broadcasting the txn. -// However, when the incoming txn rate is high, the batch will fill up quickly and will send -// for signature evaluation before waitForNextTxnDuration. 
-const waitForNextTxnDuration = 2 * time.Millisecond - -// UnverifiedElement is the element passed to the Stream verifier -// BacklogMessage is a *txBacklogMsg from data/txHandler.go which needs to be -// passed back to that context -type UnverifiedElement struct { - TxnGroup []transactions.SignedTxn - BacklogMessage interface{} -} - -// VerificationResult is the result of the txn group verification -// BacklogMessage is the reference associated with the txn group which was -// initially passed to the stream verifier -type VerificationResult struct { - TxnGroup []transactions.SignedTxn - BacklogMessage interface{} - Err error -} +// ErrShuttingDownError is the error returned when a job is not processed because the service is shutting down +var ErrShuttingDownError = errors.New("not processed, execpool service is shutting down") -// StreamVerifier verifies txn groups received through the stxnChan channel, and returns the -// results through the resultChan -type StreamVerifier struct { - resultChan chan<- *VerificationResult - droppedChan chan<- *UnverifiedElement - stxnChan <-chan *UnverifiedElement - verificationPool execpool.BacklogPool - ctx context.Context - cache VerifiedTransactionCache - activeLoopWg sync.WaitGroup - nbw *NewBlockWatcher - ledger logic.LedgerForSignature -} - -// NewBlockWatcher is a struct used to provide a new block header to the -// stream verifier -type NewBlockWatcher struct { - blkHeader atomic.Value -} - -// MakeNewBlockWatcher construct a new block watcher with the initial blkHdr -func MakeNewBlockWatcher(blkHdr bookkeeping.BlockHeader) (nbw *NewBlockWatcher) { - nbw = &NewBlockWatcher{} - nbw.blkHeader.Store(&blkHdr) - return nbw -} - -// OnNewBlock implements the interface to subscribe to new block notifications from the ledger -func (nbw *NewBlockWatcher) OnNewBlock(block bookkeeping.Block, delta ledgercore.StateDelta) { - bh := nbw.blkHeader.Load().(*bookkeeping.BlockHeader) - if bh.Round >= block.BlockHeader.Round { - return - } - nbw.blkHeader.Store(&block.BlockHeader) -} +// waitForNextJobDuration is the time to wait before sending the batch to the exec pool +// If the incoming rate is low, an input job in the batch may wait no less than +// waitForNextJobDuration before it is sent for processing. +// This can introduce a latency to the propagation in the network (e.g. sigs in txn or vote), +// since every relay will go through this wait time before broadcasting the result. +// However, when the incoming rate is high, the batch will fill up quickly and will send +// for processing before waitForNextJobDuration. 
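The renamed comment spells out the batching tradeoff: a 2ms ticker bounds how long a lone job can sit in a partial batch, while a size threshold flushes full batches immediately under load. A stripped-down sketch of such a ticker-plus-threshold loop; toy types, and unlike the real batchingLoop below it omits exec-pool backpressure and context cancellation:

package main

import (
	"fmt"
	"time"
)

// flushLoop batches items from in, flushing either when the batch holds more
// than threshold items or when the 2ms ticker fires, mirroring the
// latency/throughput tradeoff described above.
func flushLoop(in <-chan int, threshold int) {
	ticker := time.NewTicker(2 * time.Millisecond)
	defer ticker.Stop()
	batch := make([]int, 0, 8)
	for {
		select {
		case item, ok := <-in:
			if !ok {
				return
			}
			batch = append(batch, item)
			if len(batch) > threshold {
				fmt.Println("flushing full batch of", len(batch)) // enough work: no need to wait
				batch = batch[:0]
			}
		case <-ticker.C:
			if len(batch) > 0 {
				fmt.Println("timer flush of", len(batch)) // bound the wait on a sparse stream
				batch = batch[:0]
			}
		}
	}
}

func main() {
	in := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			in <- i
			time.Sleep(time.Millisecond)
		}
		close(in)
	}()
	flushLoop(in, 3)
}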
+const waitForNextJobDuration = 2 * time.Millisecond -func (nbw *NewBlockWatcher) getBlockHeader() (bh *bookkeeping.BlockHeader) { - return nbw.blkHeader.Load().(*bookkeeping.BlockHeader) -} - -type batchLoad struct { - txnGroups [][]transactions.SignedTxn - groupCtxs []*GroupContext - elementBacklogMessage []interface{} - messagesForTxn []int -} +// batchSizeBlockLimit is the limit when the batch exceeds, will be added to the exec pool, even if the pool is saturated +// and the stream will be blocked until the exec pool accepts the batch +const batchSizeBlockLimit = 1024 -func makeBatchLoad(l int) (bl batchLoad) { - bl.txnGroups = make([][]transactions.SignedTxn, 0, l) - bl.groupCtxs = make([]*GroupContext, 0, l) - bl.elementBacklogMessage = make([]interface{}, 0, l) - bl.messagesForTxn = make([]int, 0, l) - return bl +// InputJob is the interface the incoming jobs need to implement +type InputJob interface { + GetNumberOfBatchableItems() (count uint64, err error) } -func (bl *batchLoad) addLoad(txngrp []transactions.SignedTxn, gctx *GroupContext, backlogMsg interface{}, numBatchableSigs int) { - bl.txnGroups = append(bl.txnGroups, txngrp) - bl.groupCtxs = append(bl.groupCtxs, gctx) - bl.elementBacklogMessage = append(bl.elementBacklogMessage, backlogMsg) - bl.messagesForTxn = append(bl.messagesForTxn, numBatchableSigs) - +// BatchProcessor is the interface of the functions needed to prepare a batch from the stream, +// process and return the results +type BatchProcessor interface { + // ProcessBatch processes a batch packed from the stream in the execpool + ProcessBatch(jobs []InputJob) + // GetErredUnprocessed returns an unprocessed jobs because of an err + GetErredUnprocessed(ue InputJob, err error) + // Cleanup called on the unprocessed jobs when the service shuts down + Cleanup(ue []InputJob, err error) } -// LedgerForStreamVerifier defines the ledger methods used by the StreamVerifier. 
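The two interfaces introduced above are the whole contract between the generic stream and its users: an InputJob reports how many batchable items it carries, and a BatchProcessor consumes the packed batches. A minimal toy implementation pair, with the interfaces re-declared locally so the sketch stands alone; the real ones live in data/transactions/verify:

package main

import "fmt"

// Local re-declarations so the toy compiles on its own.
type InputJob interface {
	GetNumberOfBatchableItems() (count uint64, err error)
}

type BatchProcessor interface {
	ProcessBatch(jobs []InputJob)
	GetErredUnprocessed(ue InputJob, err error)
	Cleanup(ue []InputJob, err error)
}

// countJob is a toy job carrying n batchable items (think: n signatures).
type countJob struct{ n uint64 }

func (c countJob) GetNumberOfBatchableItems() (uint64, error) { return c.n, nil }

// printProcessor just reports what it is handed; a real processor runs the
// batch on the exec pool, as txnSigBatchProcessor does later in this patch.
type printProcessor struct{}

func (printProcessor) ProcessBatch(jobs []InputJob) {
	fmt.Println("processing batch of", len(jobs), "jobs")
}

func (printProcessor) GetErredUnprocessed(ue InputJob, err error) {
	fmt.Println("rejected job:", err)
}

func (printProcessor) Cleanup(ue []InputJob, err error) {
	fmt.Println("dropping", len(ue), "jobs:", err)
}

func main() {
	var bp BatchProcessor = printProcessor{}
	bp.ProcessBatch([]InputJob{countJob{n: 1}, countJob{n: 3}})
}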
-type LedgerForStreamVerifier interface { - logic.LedgerForSignature - RegisterBlockListeners([]ledgercore.BlockListener) - Latest() basics.Round - BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) +// StreamToBatch makes batches from incoming stream of jobs, and submits the batches to the exec pool +type StreamToBatch struct { + inputChan <-chan InputJob + executionPool execpool.BacklogPool + ctx context.Context + activeLoopWg sync.WaitGroup + batchProcessor BatchProcessor } -// MakeStreamVerifier creates a new stream verifier and returns the chans used to send txn groups -// to it and obtain the txn signature verification result from -func MakeStreamVerifier(stxnChan <-chan *UnverifiedElement, resultChan chan<- *VerificationResult, - droppedChan chan<- *UnverifiedElement, ledger LedgerForStreamVerifier, - verificationPool execpool.BacklogPool, cache VerifiedTransactionCache) (*StreamVerifier, error) { +// MakeStreamToBatch creates a new stream to batch converter +func MakeStreamToBatch(inputChan <-chan InputJob, execPool execpool.BacklogPool, + batchProcessor BatchProcessor) *StreamToBatch { - latest := ledger.Latest() - latestHdr, err := ledger.BlockHdr(latest) - if err != nil { - return nil, errors.New("MakeStreamVerifier: Could not get header for previous block") + return &StreamToBatch{ + inputChan: inputChan, + executionPool: execPool, + batchProcessor: batchProcessor, } - - nbw := MakeNewBlockWatcher(latestHdr) - ledger.RegisterBlockListeners([]ledgercore.BlockListener{nbw}) - - return &StreamVerifier{ - resultChan: resultChan, - stxnChan: stxnChan, - droppedChan: droppedChan, - verificationPool: verificationPool, - cache: cache, - nbw: nbw, - ledger: ledger, - }, nil } -// Start is called when the verifier is created and whenever it needs to restart after +// Start is called when the StreamToBatch is created and whenever it needs to restart after // the ctx is canceled -func (sv *StreamVerifier) Start(ctx context.Context) { +func (sv *StreamToBatch) Start(ctx context.Context) { sv.ctx = ctx sv.activeLoopWg.Add(1) go sv.batchingLoop() } // WaitForStop waits until the batching loop terminates afer the ctx is canceled -func (sv *StreamVerifier) WaitForStop() { +func (sv *StreamToBatch) WaitForStop() { sv.activeLoopWg.Wait() } -func (sv *StreamVerifier) cleanup(pending []*UnverifiedElement) { - // report an error for the unchecked txns - // drop the messages without reporting if the receiver does not consume - for _, uel := range pending { - sv.sendResult(uel.TxnGroup, uel.BacklogMessage, errShuttingDownError) - } -} - -func (sv *StreamVerifier) batchingLoop() { +func (sv *StreamToBatch) batchingLoop() { defer sv.activeLoopWg.Done() - timer := time.NewTicker(waitForNextTxnDuration) + timer := time.NewTicker(waitForNextJobDuration) defer timer.Stop() var added bool - var numberOfSigsInCurrent uint64 + var numberOfJobsInCurrent uint64 var numberOfBatchAttempts uint64 - ue := make([]*UnverifiedElement, 0, 8) - defer func() { sv.cleanup(ue) }() + uJobs := make([]InputJob, 0, 8) + defer func() { sv.batchProcessor.Cleanup(uJobs, ErrShuttingDownError) }() for { select { - case stx := <-sv.stxnChan: - numberOfBatchableSigsInGroup, err := getNumberOfBatchableSigsInGroup(stx.TxnGroup) + case job := <-sv.inputChan: + numberOfBatchable, err := job.GetNumberOfBatchableItems() if err != nil { - // wrong number of signatures - sv.sendResult(stx.TxnGroup, stx.BacklogMessage, err) + sv.batchProcessor.GetErredUnprocessed(job, err) continue } - // if no batchable signatures here, send 
this as a task of its own - if numberOfBatchableSigsInGroup == 0 { - err := sv.addVerificationTaskToThePoolNow([]*UnverifiedElement{stx}) + // if no batchable items here, send this as a task of its own + if numberOfBatchable == 0 { + err := sv.addBatchToThePoolNow([]InputJob{job}) if err != nil { return } - continue // stx is handled, continue + continue // job is handled, continue } - // add this txngrp to the list of batchable txn groups - numberOfSigsInCurrent = numberOfSigsInCurrent + numberOfBatchableSigsInGroup - ue = append(ue, stx) - if numberOfSigsInCurrent > txnPerWorksetThreshold { - // enough transaction in the batch to efficiently verify + // add this job to the list of batchable jobs + numberOfJobsInCurrent = numberOfJobsInCurrent + numberOfBatchable + uJobs = append(uJobs, job) + if numberOfJobsInCurrent > txnPerWorksetThreshold { + // enough jobs in the batch to efficiently process - if numberOfSigsInCurrent > batchSizeBlockLimit { - // do not consider adding more txns to this batch. + if numberOfJobsInCurrent > batchSizeBlockLimit { + // do not consider adding more jobs to this batch. // bypass the exec pool situation and queue anyway // this is to prevent creation of very large batches - err := sv.addVerificationTaskToThePoolNow(ue) + err := sv.addBatchToThePoolNow(uJobs) if err != nil { return } added = true } else { - added, err = sv.tryAddVerificationTaskToThePool(ue) + added, err = sv.tryAddBatchToThePool(uJobs) if err != nil { return } } if added { - numberOfSigsInCurrent = 0 - ue = make([]*UnverifiedElement, 0, 8) + numberOfJobsInCurrent = 0 + uJobs = make([]InputJob, 0, 8) numberOfBatchAttempts = 0 } else { // was not added because of the exec pool buffer length @@ -241,29 +150,29 @@ func (sv *StreamVerifier) batchingLoop() { } case <-timer.C: // timer ticked. it is time to send the batch even if it is not full - if numberOfSigsInCurrent == 0 { + if numberOfJobsInCurrent == 0 { // nothing batched yet... wait some more continue } var err error if numberOfBatchAttempts > 1 { // bypass the exec pool situation and queue anyway - // this is to prevent long delays in transaction propagation - // at least one transaction here has waited 3 x waitForNextTxnDuration - err = sv.addVerificationTaskToThePoolNow(ue) + // this is to prevent long delays in the propagation (sigs txn/vote) + // at least one job has waited 3 x waitForNextJobDuration + err = sv.addBatchToThePoolNow(uJobs) added = true } else { - added, err = sv.tryAddVerificationTaskToThePool(ue) + added, err = sv.tryAddBatchToThePool(uJobs) } if err != nil { return } if added { - numberOfSigsInCurrent = 0 - ue = make([]*UnverifiedElement, 0, 8) + numberOfJobsInCurrent = 0 + uJobs = make([]InputJob, 0, 8) numberOfBatchAttempts = 0 } else { - // was not added because of the exec pool buffer length. wait for some more txns + // was not added because of the exec pool buffer length. wait for some more numberOfBatchAttempts++ } case <-sv.ctx.Done(): @@ -272,30 +181,16 @@ func (sv *StreamVerifier) batchingLoop() { } } -func (sv *StreamVerifier) sendResult(veTxnGroup []transactions.SignedTxn, veBacklogMessage interface{}, err error) { - // send the txn result out the pipe - select { - case sv.resultChan <- &VerificationResult{ - TxnGroup: veTxnGroup, - BacklogMessage: veBacklogMessage, - Err: err, - }: - default: - // we failed to write to the output queue, since the queue was full. 
- sv.droppedChan <- &UnverifiedElement{veTxnGroup, veBacklogMessage} - } -} - -func (sv *StreamVerifier) tryAddVerificationTaskToThePool(ue []*UnverifiedElement) (added bool, err error) { +func (sv *StreamToBatch) tryAddBatchToThePool(uJobs []InputJob) (added bool, err error) { // if the exec pool buffer is full, can go back and collect - // more signatures instead of waiting in the exec pool buffer - // more signatures to the batch do not harm performance but introduce latency when delayed (see crypto.BenchmarkBatchVerifierBig) + // more jobs instead of waiting in the exec pool buffer + // e.g. more signatures to the batch do not harm performance but introduce latency when delayed (see crypto.BenchmarkBatchVerifierBig) // if the buffer is full - if l, c := sv.verificationPool.BufferSize(); l == c { + if l, c := sv.executionPool.BufferSize(); l == c { return false, nil } - err = sv.addVerificationTaskToThePoolNow(ue) + err = sv.addBatchToThePoolNow(uJobs) if err != nil { // An error is returned when the context of the pool expires return false, err @@ -303,119 +198,27 @@ func (sv *StreamVerifier) tryAddVerificationTaskToThePool(ue []*UnverifiedElemen return true, nil } -func (sv *StreamVerifier) addVerificationTaskToThePoolNow(ue []*UnverifiedElement) error { +func (sv *StreamToBatch) addBatchToThePoolNow(unprocessed []InputJob) error { // if the context is canceled when the task is in the queue, it should be canceled - // copy the ctx here so that when the StreamVerifier is started again, and a new context + // copy the ctx here so that when the StreamToBatch is started again, and a new context // is created, this task still gets canceled due to the ctx at the time of this task taskCtx := sv.ctx function := func(arg interface{}) interface{} { + uJobs := arg.([]InputJob) if taskCtx.Err() != nil { // ctx is canceled. 
the results will be returned - sv.cleanup(ue) - return nil - } - - ue := arg.([]*UnverifiedElement) - batchVerifier := crypto.MakeBatchVerifier() - - bl := makeBatchLoad(len(ue)) - // TODO: separate operations here, and get the sig verification inside the LogicSig to the batch here - blockHeader := sv.nbw.getBlockHeader() - for _, ue := range ue { - groupCtx, err := txnGroupBatchPrep(ue.TxnGroup, blockHeader, sv.ledger, batchVerifier, nil) - if err != nil { - // verification failed, no need to add the sig to the batch, report the error - sv.sendResult(ue.TxnGroup, ue.BacklogMessage, err) - continue - } - totalBatchCount := batchVerifier.GetNumberOfEnqueuedSignatures() - bl.addLoad(ue.TxnGroup, groupCtx, ue.BacklogMessage, totalBatchCount) - } - - failed, err := batchVerifier.VerifyWithFeedback() - // this error can only be crypto.ErrBatchHasFailedSigs - if err == nil { // success, all signatures verified - for i := range bl.txnGroups { - sv.sendResult(bl.txnGroups[i], bl.elementBacklogMessage[i], nil) - } - sv.cache.AddPayset(bl.txnGroups, bl.groupCtxs) + sv.batchProcessor.Cleanup(uJobs, ErrShuttingDownError) return nil } - verifiedTxnGroups := make([][]transactions.SignedTxn, 0, len(bl.txnGroups)) - verifiedGroupCtxs := make([]*GroupContext, 0, len(bl.groupCtxs)) - failedSigIdx := 0 - for txgIdx := range bl.txnGroups { - txGroupSigFailed := false - for failedSigIdx < bl.messagesForTxn[txgIdx] { - if failed[failedSigIdx] { - // if there is a failed sig check, then no need to check the rest of the - // sigs for this txnGroup - failedSigIdx = bl.messagesForTxn[txgIdx] - txGroupSigFailed = true - } else { - // proceed to check the next sig belonging to this txnGroup - failedSigIdx++ - } - } - var result error - if !txGroupSigFailed { - verifiedTxnGroups = append(verifiedTxnGroups, bl.txnGroups[txgIdx]) - verifiedGroupCtxs = append(verifiedGroupCtxs, bl.groupCtxs[txgIdx]) - } else { - result = err - } - sv.sendResult(bl.txnGroups[txgIdx], bl.elementBacklogMessage[txgIdx], result) - } - // loading them all at once by locking the cache once - sv.cache.AddPayset(verifiedTxnGroups, verifiedGroupCtxs) + sv.batchProcessor.ProcessBatch(uJobs) return nil } // EnqueueBacklog returns an error when the context is canceled - err := sv.verificationPool.EnqueueBacklog(sv.ctx, function, ue, nil) + err := sv.executionPool.EnqueueBacklog(sv.ctx, function, unprocessed, nil) if err != nil { - logging.Base().Infof("addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: %v", err) + logging.Base().Infof("addBatchToThePoolNow: EnqueueBacklog returned an error and StreamToBatch will stop: %v", err) } return err } - -func getNumberOfBatchableSigsInGroup(stxs []transactions.SignedTxn) (batchSigs uint64, err error) { - batchSigs = 0 - for i := range stxs { - count, err := getNumberOfBatchableSigsInTxn(&stxs[i]) - if err != nil { - return 0, err - } - batchSigs = batchSigs + count - } - return -} - -func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn) (uint64, error) { - sigType, err := checkTxnSigTypeCounts(stx) - if err != nil { - return 0, err - } - switch sigType { - case regularSig: - return 1, nil - case multiSig: - sig := stx.Msig - batchSigs := uint64(0) - for _, subsigi := range sig.Subsigs { - if (subsigi.Sig != crypto.Signature{}) { - batchSigs++ - } - } - return batchSigs, nil - case logicSig: - // Currently the sigs in here are not batched. Something to consider later. 
- return 0, nil - case stateProofTxn: - return 0, nil - default: - // this case is impossible - return 0, nil - } -} diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 71f75dc0c0..683a2701c8 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -44,8 +44,6 @@ var msigLsigLessOrEqual4 = metrics.MakeCounter(metrics.MetricName{Name: "algod_v var msigLsigLessOrEqual10 = metrics.MakeCounter(metrics.MetricName{Name: "algod_verify_msig_lsig_5_10", Description: "Total transaction scripts with 5-10 msigs"}) var msigLsigMore10 = metrics.MakeCounter(metrics.MetricName{Name: "algod_verify_msig_lsig_10", Description: "Total transaction scripts with 11+ msigs"}) -var errShuttingDownError = errors.New("not verified, verifier is shutting down") - // The PaysetGroups is taking large set of transaction groups and attempt to verify their validity using multiple go-routines. // When doing so, it attempts to break these into smaller "worksets" where each workset takes about 2ms of execution time in order // to avoid context switching overhead while providing good validation cancellation responsiveness. Each one of these worksets is diff --git a/data/transactions/verify/txnBatch.go b/data/transactions/verify/txnBatch.go new file mode 100644 index 0000000000..206ee221fd --- /dev/null +++ b/data/transactions/verify/txnBatch.go @@ -0,0 +1,270 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package verify + +import ( + "errors" + "sync/atomic" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/ledgercore" +) + +// UnverifiedTxnSigJob is the sig verification job passed to the Stream verifier +// It represents an unverified txn whose signatures will be verified +// BacklogMessage is a *txBacklogMsg from data/txHandler.go which needs to be +// passed back to that context +// Implements UnverifiedSigJob +type UnverifiedTxnSigJob struct { + TxnGroup []transactions.SignedTxn + BacklogMessage interface{} +} + +// VerificationResult is the result of the txn group verification +// BacklogMessage is the reference associated with the txn group which was +// initially passed to the stream verifier +type VerificationResult struct { + TxnGroup []transactions.SignedTxn + BacklogMessage interface{} + Err error +} + +// NewBlockWatcher is a struct used to provide a new block header to the +// stream verifier +type NewBlockWatcher struct { + blkHeader atomic.Value +} + +// MakeNewBlockWatcher construct a new block watcher with the initial blkHdr +func MakeNewBlockWatcher(blkHdr bookkeeping.BlockHeader) (nbw *NewBlockWatcher) { + nbw = &NewBlockWatcher{} + nbw.blkHeader.Store(&blkHdr) + return nbw +} + +// OnNewBlock implements the interface to subscribe to new block notifications from the ledger +func (nbw *NewBlockWatcher) OnNewBlock(block bookkeeping.Block, delta ledgercore.StateDelta) { + bh := nbw.blkHeader.Load().(*bookkeeping.BlockHeader) + if bh.Round >= block.BlockHeader.Round { + return + } + nbw.blkHeader.Store(&block.BlockHeader) +} + +func (nbw *NewBlockWatcher) getBlockHeader() (bh *bookkeeping.BlockHeader) { + return nbw.blkHeader.Load().(*bookkeeping.BlockHeader) +} + +type batchLoad struct { + txnGroups [][]transactions.SignedTxn + groupCtxs []*GroupContext + backlogMessage []interface{} + messagesForTxn []int +} + +func makeBatchLoad(l int) (bl *batchLoad) { + return &batchLoad{ + txnGroups: make([][]transactions.SignedTxn, 0, l), + groupCtxs: make([]*GroupContext, 0, l), + backlogMessage: make([]interface{}, 0, l), + messagesForTxn: make([]int, 0, l), + } +} + +func (bl *batchLoad) addLoad(txngrp []transactions.SignedTxn, gctx *GroupContext, backlogMsg interface{}, numBatchableSigs int) { + bl.txnGroups = append(bl.txnGroups, txngrp) + bl.groupCtxs = append(bl.groupCtxs, gctx) + bl.backlogMessage = append(bl.backlogMessage, backlogMsg) + bl.messagesForTxn = append(bl.messagesForTxn, numBatchableSigs) + +} + +type txnSigBatchProcessor struct { + cache VerifiedTransactionCache + nbw *NewBlockWatcher + ledger logic.LedgerForSignature + resultChan chan<- *VerificationResult + droppedChan chan<- *UnverifiedTxnSigJob +} + +// LedgerForStreamVerifier defines the ledger methods used by the StreamVerifier. 
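NewBlockWatcher, moved above into txnBatch.go, keeps the latest block header behind an atomic.Value so the verification hot path can read it without taking a lock; OnNewBlock drops stale notifications and publishes a newer header with a single atomic store. The same pattern in a stripped-down, runnable form, where header is a stand-in for bookkeeping.BlockHeader:

package main

import (
	"fmt"
	"sync/atomic"
)

type header struct{ Round uint64 } // stand-in for bookkeeping.BlockHeader

type watcher struct{ hdr atomic.Value }

func newWatcher(h header) *watcher {
	w := &watcher{}
	w.hdr.Store(&h)
	return w
}

// onNewBlock mirrors OnNewBlock above: ignore stale notifications,
// otherwise publish the newer header atomically.
func (w *watcher) onNewBlock(h header) {
	if w.hdr.Load().(*header).Round >= h.Round {
		return
	}
	w.hdr.Store(&h)
}

// get is the lock-free read used on the hot path.
func (w *watcher) get() *header { return w.hdr.Load().(*header) }

func main() {
	w := newWatcher(header{Round: 10})
	w.onNewBlock(header{Round: 9})  // stale, dropped
	w.onNewBlock(header{Round: 11}) // newer, stored
	fmt.Println(w.get().Round)      // prints 11
}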
+type LedgerForStreamVerifier interface { + logic.LedgerForSignature + RegisterBlockListeners([]ledgercore.BlockListener) + Latest() basics.Round + BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) +} + +func (tbp *txnSigBatchProcessor) Cleanup(pending []InputJob, err error) { + // report an error for the unchecked txns + // drop the messages without reporting if the receiver does not consume + for i := range pending { + uelt := pending[i].(*UnverifiedTxnSigJob) + tbp.sendResult(uelt.TxnGroup, uelt.BacklogMessage, err) + } +} + +func (tbp txnSigBatchProcessor) GetErredUnprocessed(ue InputJob, err error) { + uelt := ue.(*UnverifiedTxnSigJob) + tbp.sendResult(uelt.TxnGroup, uelt.BacklogMessage, err) +} + +func (tbp txnSigBatchProcessor) sendResult(veTxnGroup []transactions.SignedTxn, veBacklogMessage interface{}, err error) { + // send the txn result out the pipe + select { + case tbp.resultChan <- &VerificationResult{ + TxnGroup: veTxnGroup, + BacklogMessage: veBacklogMessage, + Err: err, + }: + default: + // we failed to write to the output queue, since the queue was full. + tbp.droppedChan <- &UnverifiedTxnSigJob{veTxnGroup, veBacklogMessage} + } +} + +// MakeSigVerifyJobProcessor returns the object implementing the stream verifier Helper interface +func MakeSigVerifyJobProcessor(ledger LedgerForStreamVerifier, cache VerifiedTransactionCache, + resultChan chan<- *VerificationResult, droppedChan chan<- *UnverifiedTxnSigJob) (svp BatchProcessor, err error) { + latest := ledger.Latest() + latestHdr, err := ledger.BlockHdr(latest) + if err != nil { + return nil, errors.New("MakeStreamVerifier: Could not get header for previous block") + } + + nbw := MakeNewBlockWatcher(latestHdr) + ledger.RegisterBlockListeners([]ledgercore.BlockListener{nbw}) + + return &txnSigBatchProcessor{ + cache: cache, + nbw: nbw, + ledger: ledger, + droppedChan: droppedChan, + resultChan: resultChan, + }, nil +} + +func (tbp *txnSigBatchProcessor) ProcessBatch(txns []InputJob) { + batchVerifier, ctx := tbp.preProcessUnverifiedTxns(txns) + failed, err := batchVerifier.VerifyWithFeedback() + // this error can only be crypto.ErrBatchHasFailedSigs + tbp.postProcessVerifiedJobs(ctx, failed, err) +} + +func (tbp *txnSigBatchProcessor) preProcessUnverifiedTxns(uTxns []InputJob) (batchVerifier *crypto.BatchVerifier, ctx interface{}) { + batchVerifier = crypto.MakeBatchVerifier() + bl := makeBatchLoad(len(uTxns)) + // TODO: separate operations here, and get the sig verification inside the LogicSig to the batch here + blockHeader := tbp.nbw.getBlockHeader() + + for i := range uTxns { + ut := uTxns[i].(*UnverifiedTxnSigJob) + groupCtx, err := txnGroupBatchPrep(ut.TxnGroup, blockHeader, tbp.ledger, batchVerifier, nil) + if err != nil { + // verification failed, no need to add the sig to the batch, report the error + tbp.sendResult(ut.TxnGroup, ut.BacklogMessage, err) + continue + } + totalBatchCount := batchVerifier.GetNumberOfEnqueuedSignatures() + bl.addLoad(ut.TxnGroup, groupCtx, ut.BacklogMessage, totalBatchCount) + } + return batchVerifier, bl +} + +// GetNumberOfBatchableItems returns the number of batchable signatures in the txn group +func (ue UnverifiedTxnSigJob) GetNumberOfBatchableItems() (batchSigs uint64, err error) { + batchSigs = 0 + for i := range ue.TxnGroup { + count, err := getNumberOfBatchableSigsInTxn(&ue.TxnGroup[i]) + if err != nil { + return 0, err + } + batchSigs = batchSigs + count + } + return +} + +func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn) (uint64, error) { + 
sigType, err := checkTxnSigTypeCounts(stx) + if err != nil { + return 0, err + } + switch sigType { + case regularSig: + return 1, nil + case multiSig: + sig := stx.Msig + batchSigs := uint64(0) + for _, subsigi := range sig.Subsigs { + if (subsigi.Sig != crypto.Signature{}) { + batchSigs++ + } + } + return batchSigs, nil + case logicSig: + // Currently the sigs in here are not batched. Something to consider later. + return 0, nil + case stateProofTxn: + return 0, nil + default: + // this case is impossible + return 0, nil + } +} + +func (tbp *txnSigBatchProcessor) postProcessVerifiedJobs(ctx interface{}, failed []bool, err error) { + bl := ctx.(*batchLoad) + if err == nil { // success, all signatures verified + for i := range bl.txnGroups { + tbp.sendResult(bl.txnGroups[i], bl.backlogMessage[i], nil) + } + tbp.cache.AddPayset(bl.txnGroups, bl.groupCtxs) + return + } + + verifiedTxnGroups := make([][]transactions.SignedTxn, 0, len(bl.txnGroups)) + verifiedGroupCtxs := make([]*GroupContext, 0, len(bl.groupCtxs)) + failedSigIdx := 0 + for txgIdx := range bl.txnGroups { + txGroupSigFailed := false + for failedSigIdx < bl.messagesForTxn[txgIdx] { + if failed[failedSigIdx] { + // if there is a failed sig check, then no need to check the rest of the + // sigs for this txnGroup + failedSigIdx = bl.messagesForTxn[txgIdx] + txGroupSigFailed = true + } else { + // proceed to check the next sig belonging to this txnGroup + failedSigIdx++ + } + } + var result error + if !txGroupSigFailed { + verifiedTxnGroups = append(verifiedTxnGroups, bl.txnGroups[txgIdx]) + verifiedGroupCtxs = append(verifiedGroupCtxs, bl.groupCtxs[txgIdx]) + } else { + result = err + } + tbp.sendResult(bl.txnGroups[txgIdx], bl.backlogMessage[txgIdx], result) + } + // loading them all at once by locking the cache once + tbp.cache.AddPayset(verifiedTxnGroups, verifiedGroupCtxs) +} diff --git a/data/transactions/verify/streamverifier_test.go b/data/transactions/verify/txnBatch_test.go similarity index 80% rename from data/transactions/verify/streamverifier_test.go rename to data/transactions/verify/txnBatch_test.go index d926650cd7..a196a9dcd2 100644 --- a/data/transactions/verify/streamverifier_test.go +++ b/data/transactions/verify/txnBatch_test.go @@ -46,7 +46,7 @@ import ( var droppedFromPool = metrics.MakeCounter(metrics.MetricName{Name: "test_streamVerifierTestCore_messages_dropped_pool", Description: "Test streamVerifierTestCore messages dropped from pool"}) func streamVerifierTestCore(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{}, - expectedError error, t *testing.T) (sv *StreamVerifier) { + expectedError error, t *testing.T) (sv *StreamToBatch) { numOfTxnGroups := len(txnGroups) verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t) @@ -57,11 +57,12 @@ func streamVerifierTestCore(txnGroups [][]transactions.SignedTxn, badTxnGroups m defer cancel() - stxnChan := make(chan *UnverifiedElement) + inputChan := make(chan InputJob) resultChan := make(chan *VerificationResult, txBacklogSize) - droppedChan := make(chan *UnverifiedElement) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + droppedChan := make(chan *UnverifiedTxnSigJob) + ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan) require.NoError(t, err) + sv = MakeStreamToBatch(inputChan, verificationPool, ep) sv.Start(ctx) wg := sync.WaitGroup{} @@ -78,7 +79,7 @@ func streamVerifierTestCore(txnGroups 
[][]transactions.SignedTxn, badTxnGroups m go func() { defer wg.Done() for _, tg := range txnGroups { - stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil} + inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil} } }() @@ -165,8 +166,8 @@ func getSignedTransactions(numOfTxns, maxGrpSize, noteOffset int, badTxnProb flo } -// TestStreamVerifier tests the basic functionality -func TestStreamVerifier(t *testing.T) { +// TestStreamToBatch tests the basic functionality +func TestStreamToBatch(t *testing.T) { partitiontest.PartitionTest(t) numOfTxns := 4000 @@ -176,8 +177,8 @@ func TestStreamVerifier(t *testing.T) { sv.WaitForStop() } -// TestStreamVerifierCases tests various valid and invalid transaction signature cases -func TestStreamVerifierCases(t *testing.T) { +// TestStreamToBatchCases tests various valid and invalid transaction signature cases +func TestStreamToBatchCases(t *testing.T) { partitiontest.PartitionTest(t) numOfTxns := 10 @@ -278,8 +279,8 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= sv.WaitForStop() } -// TestStreamVerifierIdel starts the verifer and sends nothing, to trigger the timer, then sends a txn -func TestStreamVerifierIdel(t *testing.T) { +// TestStreamToBatchIdel starts the verifer and sends nothing, to trigger the timer, then sends a txn +func TestStreamToBatchIdel(t *testing.T) { partitiontest.PartitionTest(t) numOfTxns := 1 @@ -298,13 +299,13 @@ func TestGetNumberOfBatchableSigsInGroup(t *testing.T) { // txn with 0 sigs txnGroups[mod][0].Sig = crypto.Signature{} - batchSigs, err := getNumberOfBatchableSigsInGroup(txnGroups[mod]) + batchSigs, err := UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.ErrorIs(t, err, errTxnSigHasNoSig) mod++ _, signedTxns, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50) txnGroups = generateTransactionGroups(1, signedTxns, secrets, addrs) - batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[0]) + batchSigs, err = UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.NoError(t, err) require.Equal(t, uint64(1), batchSigs) @@ -312,14 +313,15 @@ func TestGetNumberOfBatchableSigsInGroup(t *testing.T) { txnGroups[mod][0].Sig = crypto.Signature{} txnGroups[mod][0].Txn.Type = protocol.StateProofTx txnGroups[mod][0].Txn.Header.Sender = transactions.StateProofSender - batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod]) + batchSigs, err = UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.NoError(t, err) require.Equal(t, uint64(0), batchSigs) mod++ // multisig _, mSigTxn, _, _ := generateMultiSigTxn(1, 6, 50, t) - batchSigs, err = getNumberOfBatchableSigsInGroup(mSigTxn) + + batchSigs, err = UnverifiedTxnSigJob{TxnGroup: mSigTxn}.GetNumberOfBatchableItems() require.NoError(t, err) require.Equal(t, uint64(2), batchSigs) mod++ @@ -340,7 +342,7 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= txnGroups[mod][0].Lsig.Logic = op.Program program := logic.Program(op.Program) txnGroups[mod][0].Lsig.Sig = secrets[s].Sign(program) - batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod]) + batchSigs, err = UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.NoError(t, err) require.Equal(t, uint64(0), batchSigs) mod++ @@ -349,12 +351,12 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= _, signedTxn, secrets, addrs = generateTestObjects(numOfTxns, 20, 0, 50) txnGroups = generateTransactionGroups(1, signedTxn, secrets, addrs) 
txnGroups[mod][0].Msig = mSigTxn[0].Msig - batchSigs, err = getNumberOfBatchableSigsInGroup(txnGroups[mod]) + batchSigs, err = UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.ErrorIs(t, err, errTxnSigNotWellFormed) } -// TestStreamVerifierPoolShutdown tests what happens when the exec pool shuts down -func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger +// TestStreamToBatchPoolShutdown tests what happens when the exec pool shuts down +func TestStreamToBatchPoolShutdown(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger partitiontest.PartitionTest(t) // only one transaction should be sufficient for the batch verifier @@ -406,11 +408,12 @@ func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not ctx, cancel := context.WithCancel(context.Background()) cache := MakeVerifiedTransactionCache(50000) - stxnChan := make(chan *UnverifiedElement) + inputChan := make(chan InputJob) resultChan := make(chan *VerificationResult, txBacklogSize) - droppedChan := make(chan *UnverifiedElement) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + droppedChan := make(chan *UnverifiedTxnSigJob) + ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan) require.NoError(t, err) + sv := MakeStreamToBatch(inputChan, verificationPool, ep) sv.Start(ctx) errChan := make(chan error) @@ -438,19 +441,21 @@ func TestStreamVerifierPoolShutdown(t *testing.T) { //nolint:paralleltest // Not select { case <-ctx.Done(): break - case stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}: + case inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil}: } } }() for err := range errChan { - require.ErrorIs(t, err, errShuttingDownError) + require.ErrorIs(t, err, ErrShuttingDownError) } - require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled") + require.Contains(t, logBuffer.String(), "addBatchToThePoolNow: EnqueueBacklog returned an error and StreamToBatch will stop: context canceled") wg.Wait() + + verifyResults(txnGroups, badTxnGroups, cache, badSigResultCounter, goodSigResultCounter, t) } -// TestStreamVerifierRestart tests what happens when the context is canceled -func TestStreamVerifierRestart(t *testing.T) { +// TestStreamToBatchRestart tests what happens when the context is canceled +func TestStreamToBatchRestart(t *testing.T) { partitiontest.PartitionTest(t) numOfTxns := 1000 @@ -463,13 +468,14 @@ func TestStreamVerifierRestart(t *testing.T) { cache := MakeVerifiedTransactionCache(50) - stxnChan := make(chan *UnverifiedElement) + inputChan := make(chan InputJob) resultChan := make(chan *VerificationResult, txBacklogSize) - droppedChan := make(chan *UnverifiedElement) + droppedChan := make(chan *UnverifiedTxnSigJob) ctx, cancel := context.WithCancel(context.Background()) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan) require.NoError(t, err) + sv := MakeStreamToBatch(inputChan, verificationPool, ep) sv.Start(ctx) errChan := make(chan error) @@ -497,21 +503,23 @@ func TestStreamVerifierRestart(t *testing.T) { select { case <-ctx2.Done(): break - case 
stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}: + case inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil}: } } cancel() }() for err := range errChan { - require.ErrorIs(t, err, errShuttingDownError) + require.ErrorIs(t, err, ErrShuttingDownError) } wg.Wait() sv.WaitForStop() cancel2() // not necessary, but the golint will want to see this + + verifyResults(txnGroups, badTxnGroups, cache, badSigResultCounter, goodSigResultCounter, t) } // TestBlockWatcher runs multiple goroutines to check the concurency and correctness of the block watcher -func TestStreamVerifierBlockWatcher(t *testing.T) { +func TestStreamToBatchBlockWatcher(t *testing.T) { partitiontest.PartitionTest(t) blkHdr := createDummyBlockHeader() nbw := MakeNewBlockWatcher(blkHdr) @@ -568,23 +576,24 @@ func getSaturatedExecPool(t *testing.T) (execpool.BacklogPool, chan interface{}) return verificationPool, holdTasks } -// TestStreamVerifierCtxCancel tests the termination when the ctx is canceled +// TestStreamToBatchCtxCancel tests the termination when the ctx is canceled // To make sure that the batchingLoop is still working on a batch when the // ctx is cancled, this test first saturates the exec pool buffer, then // sends a txn and immediately cancels the ctx so that the batch is not // passed to the exec pool yet, but is in batchingLoop -func TestStreamVerifierCtxCancel(t *testing.T) { +func TestStreamToBatchCtxCancel(t *testing.T) { partitiontest.PartitionTest(t) verificationPool, holdTasks := getSaturatedExecPool(t) defer verificationPool.Shutdown() ctx, cancel := context.WithCancel(context.Background()) cache := MakeVerifiedTransactionCache(50) - stxnChan := make(chan *UnverifiedElement) + inputChan := make(chan InputJob) resultChan := make(chan *VerificationResult, txBacklogSize) - droppedChan := make(chan *UnverifiedElement) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + droppedChan := make(chan *UnverifiedTxnSigJob) + ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan) require.NoError(t, err) + sv := MakeStreamToBatch(inputChan, verificationPool, ep) sv.Start(ctx) var result *VerificationResult @@ -600,7 +609,7 @@ func TestStreamVerifierCtxCancel(t *testing.T) { // send batchSizeBlockLimit after the exec pool buffer is full numOfTxns := 1 txnGroups, _ := getSignedTransactions(numOfTxns, 1, 0, 0.5) - stxnChan <- &UnverifiedElement{TxnGroup: txnGroups[0], BacklogMessage: nil} + inputChan <- &UnverifiedTxnSigJob{TxnGroup: txnGroups[0], BacklogMessage: nil} // cancel the ctx before the sig is sent to the exec pool cancel() @@ -611,17 +620,17 @@ func TestStreamVerifierCtxCancel(t *testing.T) { close(holdTasks) wg.Wait() - require.ErrorIs(t, result.Err, errShuttingDownError) + require.ErrorIs(t, result.Err, ErrShuttingDownError) } -// TestStreamVerifierCtxCancelPoolQueue tests the termination when the ctx is canceled +// TestStreamToBatchCtxCancelPoolQueue tests the termination when the ctx is canceled // To make sure that the batchingLoop is still working on a batch when the // ctx is cancled, this test first saturates the exec pool buffer, then // sends a txn and cancels the ctx after multiple waitForNextTxnDuration // so that the batch is sent to the pool. 
Since the pool is saturated, // the task will be stuck waiting to be queued when the context is canceled // everything should be gracefully terminated -func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger +func TestStreamToBatchCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest // Not parallel because it depends on the default logger partitiontest.PartitionTest(t) verificationPool, holdTasks := getSaturatedExecPool(t) @@ -634,11 +643,12 @@ func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest ctx, cancel := context.WithCancel(context.Background()) cache := MakeVerifiedTransactionCache(50) - stxnChan := make(chan *UnverifiedElement) + inputChan := make(chan InputJob) resultChan := make(chan *VerificationResult, txBacklogSize) - droppedChan := make(chan *UnverifiedElement) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + droppedChan := make(chan *UnverifiedTxnSigJob) + ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan) require.NoError(t, err) + sv := MakeStreamToBatch(inputChan, verificationPool, ep) sv.Start(ctx) var result *VerificationResult @@ -648,8 +658,8 @@ func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest defer wg.Done() for { result = <-resultChan - // at least one errShuttingDownError is expected - if result.Err != errShuttingDownError { + // at least one ErrShuttingDownError is expected + if result.Err != ErrShuttingDownError { continue } break @@ -671,7 +681,7 @@ func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest // the single transaction slips through and passes the batch verifier before the exec pool shuts down. // this happens when close(holdTasks) runs and frees the exec pool, and lets the txns get verified, before // verificationPool.Shutdown() executes. 
- case stxnChan <- &UnverifiedElement{TxnGroup: txnGroups[0], BacklogMessage: nil}: + case inputChan <- &UnverifiedTxnSigJob{TxnGroup: txnGroups[0], BacklogMessage: nil}: case <-ctx.Done(): return } @@ -680,7 +690,7 @@ func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest // cancel the ctx as the sig is not yet sent to the exec pool // the test might sporadically fail if between sending the txn above // and the cancelation, 2 x waitForNextTxnDuration elapses (10ms) - time.Sleep(6 * waitForNextTxnDuration) + time.Sleep(6 * waitForNextJobDuration) go func() { // wait a bit before releasing the tasks, so that the verificationPool ctx first gets canceled time.Sleep(20 * time.Millisecond) @@ -693,13 +703,13 @@ func TestStreamVerifierCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest cancel() wg.Wait() - require.ErrorIs(t, result.Err, errShuttingDownError) - require.Contains(t, logBuffer.String(), "addVerificationTaskToThePoolNow: EnqueueBacklog returned an error and StreamVerifier will stop: context canceled") + require.ErrorIs(t, result.Err, ErrShuttingDownError) + require.Contains(t, logBuffer.String(), "addBatchToThePoolNow: EnqueueBacklog returned an error and StreamToBatch will stop: context canceled") } -// TestStreamVerifierPostVBlocked tests the behavior when the return channel (result chan) of verified +// TestStreamToBatchPostVBlocked tests the behavior when the return channel (result chan) of verified // transactions is blocked, and checks droppedFromPool counter to confirm the drops -func TestStreamVerifierPostVBlocked(t *testing.T) { +func TestStreamToBatchPostVBlocked(t *testing.T) { partitiontest.PartitionTest(t) // prepare the stream verifier @@ -715,11 +725,12 @@ func TestStreamVerifierPostVBlocked(t *testing.T) { txBacklogSizeMod := txBacklogSize / 20 - stxnChan := make(chan *UnverifiedElement) + inputChan := make(chan InputJob) resultChan := make(chan *VerificationResult, txBacklogSizeMod) - droppedChan := make(chan *UnverifiedElement) - sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache) + droppedChan := make(chan *UnverifiedTxnSigJob) + ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan) require.NoError(t, err) + sv := MakeStreamToBatch(inputChan, verificationPool, ep) defer close(droppedChan) go func() { @@ -736,7 +747,7 @@ func TestStreamVerifierPostVBlocked(t *testing.T) { txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5) numOfTxnGroups := len(txnGroups) for _, tg := range txnGroups { - stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil} + inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil} } var droppedPool uint64 @@ -757,7 +768,7 @@ func TestStreamVerifierPostVBlocked(t *testing.T) { go processResults(ctx, errChan, resultChan, numOfTxnGroups-overflow, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg) for err := range errChan { - require.ErrorIs(t, err, errShuttingDownError) + require.ErrorIs(t, err, ErrShuttingDownError) fmt.Println(badTxnGroups) } @@ -774,26 +785,26 @@ func TestStreamVerifierPostVBlocked(t *testing.T) { go processResults(ctx, errChan, resultChan, numOfTxnGroups, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg) for _, tg := range txnGroups { - stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil} + inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil} } for err := range errChan { - require.ErrorIs(t, err, 
errShuttingDownError)
+		require.ErrorIs(t, err, ErrShuttingDownError)
 		fmt.Println(badTxnGroups)
 	}
 	wg.Wait()
 }

-func TestStreamVerifierMakeStreamVerifierErr(t *testing.T) {
+func TestStreamToBatchMakeStreamToBatchErr(t *testing.T) {
 	partitiontest.PartitionTest(t)
-	_, err := MakeStreamVerifier(nil, nil, nil, &DummyLedgerForSignature{badHdr: true}, nil, nil)
+	_, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{badHdr: true}, nil, nil, nil)
 	require.Error(t, err)
 }

-// TestStreamVerifierCancelWhenPooled tests the case where the ctx is canceled after the verification
+// TestStreamToBatchCancelWhenPooled tests the case where the ctx is canceled
 // task is queued to the exec pool and before the task is executed in the pool
-func TestStreamVerifierCancelWhenPooled(t *testing.T) {
+func TestStreamToBatchCancelWhenPooled(t *testing.T) {
 	partitiontest.PartitionTest(t)
 	numOfTxns := 1000
 	txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, 1, 0, 0.5)
@@ -807,12 +818,13 @@ func TestStreamVerifierCancelWhenPooled(t *testing.T) {
 	cache := MakeVerifiedTransactionCache(50)

-	stxnChan := make(chan *UnverifiedElement)
+	inputChan := make(chan InputJob)
 	resultChan := make(chan *VerificationResult, txBacklogSize)
-	droppedChan := make(chan *UnverifiedElement)
+	droppedChan := make(chan *UnverifiedTxnSigJob)
 	ctx, cancel := context.WithCancel(context.Background())
-	sv, err := MakeStreamVerifier(stxnChan, resultChan, droppedChan, &DummyLedgerForSignature{}, verificationPool, cache)
+	ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
 	require.NoError(t, err)
+	sv := MakeStreamToBatch(inputChan, verificationPool, ep)
 	sv.Start(ctx)
 	errChan := make(chan error)
@@ -831,15 +843,31 @@ func TestStreamVerifierCancelWhenPooled(t *testing.T) {
 	go func() {
 		defer wg.Done()
 		for _, tg := range txnGroups {
-			stxnChan <- &UnverifiedElement{TxnGroup: tg, BacklogMessage: nil}
+			inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil}
 		}
 		// cancel the ctx, and expect at least one task queued to the pool but not yet executed
 		cancel()
 	}()

 	for err := range errChan {
-		require.ErrorIs(t, err, errShuttingDownError)
+		require.ErrorIs(t, err, ErrShuttingDownError)
 	}
 	wg.Wait()
 	sv.WaitForStop()
 	cancel2() // not necessary, but the golint will want to see this
+
+	verifyResults(txnGroups, badTxnGroups, cache, badSigResultCounter, goodSigResultCounter, t)
+}
+
+func TestGetErredUnprocessed(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	droppedChan := make(chan *UnverifiedTxnSigJob, 1)
+	svh := txnSigBatchProcessor{
+		resultChan:  make(chan<- *VerificationResult, 0),
+		droppedChan: droppedChan,
+	}
+
+	svh.GetErredUnprocessed(&UnverifiedTxnSigJob{}, nil)
+	dropped := <-droppedChan
+	require.Equal(t, *dropped, UnverifiedTxnSigJob{})
 }
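Taken together, the hunks above split the old StreamVerifier into two cooperating pieces: a txnSigBatchProcessor built by MakeSigVerifyJobProcessor, which owns signature checking and the result/dropped channels, and a generic StreamToBatch, which batches InputJob values from a channel onto the exec pool. Below is a minimal sketch of that wiring, using only identifiers that appear in this patch and written as if it sat next to the tests in package verify; the execpool.BacklogPool parameter type and the [][]transactions.SignedTxn element type are assumptions based on the surrounding code, and the channel sizes are illustrative.

// Sketch only: assumes the verify package's test scaffolding
// (DummyLedgerForSignature, txBacklogSize) and imports of
// context, execpool, and transactions.
func exampleStreamToBatch(ctx context.Context, verificationPool execpool.BacklogPool, txnGroups [][]transactions.SignedTxn) error {
	cache := MakeVerifiedTransactionCache(50)
	inputChan := make(chan InputJob)
	resultChan := make(chan *VerificationResult, txBacklogSize)
	droppedChan := make(chan *UnverifiedTxnSigJob)

	// the job processor owns verification and the two output channels
	ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
	if err != nil {
		return err
	}

	// StreamToBatch batches incoming jobs and hands them to the exec pool
	sv := MakeStreamToBatch(inputChan, verificationPool, ep)
	sv.Start(ctx)

	// producers submit unverified transaction groups as signature jobs
	for _, tg := range txnGroups {
		inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil}
	}

	// once the caller cancels ctx, WaitForStop blocks until the stream exits
	sv.WaitForStop()
	return nil
}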
diff --git a/data/txHandler.go b/data/txHandler.go
index 66cac800af..9f18044442 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -125,9 +125,9 @@ type TxHandler struct {
 	cacheConfig           txHandlerConfig
 	ctx                   context.Context
 	ctxCancel             context.CancelFunc
-	streamVerifier        *verify.StreamVerifier
-	streamVerifierChan    chan *verify.UnverifiedElement
-	streamVerifierDropped chan *verify.UnverifiedElement
+	streamVerifier        *verify.StreamToBatch
+	streamVerifierChan    chan verify.InputJob
+	streamVerifierDropped chan *verify.UnverifiedTxnSigJob
 	erl                   *util.ElasticRateLimiter
 }

@@ -177,8 +177,8 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {
 		msgCache:         makeSaltedCache(2 * txBacklogSize),
 		txCanonicalCache: makeDigestCache(2 * txBacklogSize),
 		cacheConfig:      txHandlerConfig{opts.Config.TxFilterRawMsgEnabled(), opts.Config.TxFilterCanonicalEnabled()},
-		streamVerifierChan:    make(chan *verify.UnverifiedElement),
-		streamVerifierDropped: make(chan *verify.UnverifiedElement),
+		streamVerifierChan:    make(chan verify.InputJob),
+		streamVerifierDropped: make(chan *verify.UnverifiedTxnSigJob),
 	}

 	if opts.Config.EnableTxBacklogRateLimiting {
@@ -193,12 +193,12 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {

 	// prepare the transaction stream verifier
 	var err error
-	handler.streamVerifier, err = verify.MakeStreamVerifier(handler.streamVerifierChan,
-		handler.postVerificationQueue, handler.streamVerifierDropped, handler.ledger,
-		handler.txVerificationPool, handler.ledger.VerifiedTransactionCache())
+	txnElementProcessor, err := verify.MakeSigVerifyJobProcessor(handler.ledger, handler.ledger.VerifiedTransactionCache(),
+		handler.postVerificationQueue, handler.streamVerifierDropped)
 	if err != nil {
 		return nil, err
 	}
+	handler.streamVerifier = verify.MakeStreamToBatch(handler.streamVerifierChan, handler.txVerificationPool, txnElementProcessor)
 	go handler.droppedTxnWatcher()
 	return handler, nil
 }
@@ -308,7 +308,7 @@ func (handler *TxHandler) backlogWorker() {
 			}
 			// handler.streamVerifierChan does not receive if ctx is canceled
 			select {
-			case handler.streamVerifierChan <- &verify.UnverifiedElement{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}:
+			case handler.streamVerifierChan <- &verify.UnverifiedTxnSigJob{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}:
 			case <-handler.ctx.Done():
 				transactionMessagesDroppedFromBacklog.Inc(nil)
 				return
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 275a8e9e3f..f0ea2d5b40 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -1053,7 +1053,7 @@ loop:
 	handler.streamVerifier.Start(handler.ctx)
 	defer handler.streamVerifier.WaitForStop()
 	defer handler.ctxCancel()
-	handler.streamVerifierChan <- &verify.UnverifiedElement{
+	handler.streamVerifierChan <- &verify.UnverifiedTxnSigJob{
 		TxnGroup: msg.unverifiedTxGroup, BacklogMessage: msg}
 	var currentCount uint64
 	for x := 0; x < 1000; x++ {
@@ -1213,7 +1213,7 @@ func incomingTxHandlerProcessing(maxGroupSize, numberOfTransactionGroups int, t
 				// this is not expected during the test
 				continue
 			}
-			handler.streamVerifierChan <- &verify.UnverifiedElement{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}
+			handler.streamVerifierChan <- &verify.UnverifiedTxnSigJob{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}
 		case wi, ok := <-handler.postVerificationQueue:
 			if !ok {
 				return
@@ -1739,7 +1739,7 @@ func runHandlerBenchmarkWithBacklog(b *testing.B, txGen txGenIf, tps int, useBac
 				// this is not expected during the test
 				continue
 			}
-			handler.streamVerifierChan <- &verify.UnverifiedElement{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}
+			handler.streamVerifierChan <- &verify.UnverifiedTxnSigJob{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}
 		case wi, ok := <-handler.postVerificationQueue:
 			if !ok {
 				return
@@ -1890,7 +1890,7 @@ func runHandlerBenchmarkWithBacklog(b *testing.B, txGen txGenIf, tps int, useBac
 			} else {
 				stxngrp := signedTransactionGroups[i]
 				blm := txBacklogMsg{rawmsg: nil, unverifiedTxGroup: stxngrp}
-				handler.streamVerifierChan <- &verify.UnverifiedElement{TxnGroup: stxngrp, BacklogMessage: &blm}
+				handler.streamVerifierChan <- &verify.UnverifiedTxnSigJob{TxnGroup: stxngrp, BacklogMessage: &blm}
 			}
 			c++
 			if c == b.N {
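One further API change running through this patch is worth noting before the next one: the shutdown sentinel errShuttingDownError is now exported as ErrShuttingDownError, so code outside the verify package can distinguish an orderly shutdown from a genuine verification failure when draining results. A hedged sketch of such a consumer loop follows; the acceptGroup/rejectGroup helpers are hypothetical, and the TxnGroup field on VerificationResult is inferred from the job definitions above.

// Sketch only: resultChan is the *verify.VerificationResult channel
// handed to MakeSigVerifyJobProcessor; acceptGroup/rejectGroup are
// hypothetical placeholders for whatever the caller does next.
for res := range resultChan {
	if errors.Is(res.Err, verify.ErrShuttingDownError) {
		break // the stream is stopping; not a bad signature
	}
	if res.Err != nil {
		rejectGroup(res.TxnGroup) // hypothetical helper
		continue
	}
	acceptGroup(res.TxnGroup) // hypothetical helper
}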
From 292b8072e4ddedc13d19113a589ebe7a92d40d0c Mon Sep 17 00:00:00 2001
From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>
Date: Fri, 24 Feb 2023 11:22:56 -0500
Subject: [PATCH 52/81] tests: fix debug output in expect test harness (#5158)

---
 test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index e21d3e9ec1..d8ed3f4e4d 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -53,10 +53,10 @@ proc ::AlgorandGoal::Abort { ERROR } {
     puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"

     # dump system and processes information to check memory consumption and if algod procs are still alive
-    if { $tcl_platform(os) == "Darwin" } {
-        exec top -l 1
-    } elseif { $tcl_platform(os) == "Linux" } {
-        exec top -n 1
+    if { $::tcl_platform(os) == "Darwin" } {
+        exec >@stdout 2>@stderr top -l 1
+    } elseif { $::tcl_platform(os) == "Linux" } {
+        exec >@stdout 2>@stderr top -n 1
     } else {
         # no logging for other platforms
     }

From b41ae08bf07b9ced507546654a705f20a5946d99 Mon Sep 17 00:00:00 2001
From: Jacob Daitzman
Date: Fri, 24 Feb 2023 12:18:18 -0500
Subject: [PATCH 53/81] Algod: Additional simulation result information (#4439)

Co-authored-by: Michael Diamant
Co-authored-by: Jason Paulos
---
 daemon/algod/api/algod.oas2.json | 80 +-
 daemon/algod/api/algod.oas3.yml | 185 +-
 .../api/server/v2/generated/data/routes.go | 330 ++-
 .../v2/generated/experimental/routes.go | 339 ++-
 .../api/server/v2/generated/model/types.go | 72 +-
 .../nonparticipating/private/routes.go | 325 ++-
 .../nonparticipating/public/routes.go | 438 ++--
 .../generated/participating/private/routes.go | 328 ++-
 .../generated/participating/public/routes.go | 343 ++-
 daemon/algod/api/server/v2/handlers.go | 46 +-
 daemon/algod/api/server/v2/handlers_test.go | 168 ++
 .../algod/api/server/v2/test/handlers_test.go | 366 +++-
 daemon/algod/api/server/v2/test/helpers.go | 14 +-
 daemon/algod/api/server/v2/utils.go | 55 +-
 data/transactions/logic/debugger_eval_test.go | 14 +-
 data/transactions/logic/eval.go | 16 +
 .../logic/mocktracer/scenarios.go | 171 +-
 data/transactions/verify/txn.go | 18 +-
 ledger/simulation/simulation_eval_test.go | 1531 +++++++++++++++++
 ledger/simulation/simulator.go | 79 +-
 ledger/simulation/simulator_test.go | 713 +-------
 ledger/simulation/testing/utils.go | 135 ++
 ledger/simulation/trace.go | 94 +
 ledger/simulation/tracer.go | 193 +++
 ledger/simulation/tracer_test.go | 257 +++
 node/follower_node.go | 3 +-
 node/follower_node_test.go | 2 +-
 node/node.go | 2 +-
 28 files changed, 4474 insertions(+), 1843 deletions(-)
 create mode 100644 ledger/simulation/simulation_eval_test.go
 create mode 100644 ledger/simulation/testing/utils.go
 create mode 100644 ledger/simulation/trace.go
 create mode 100644 ledger/simulation/tracer.go
 create mode 100644 ledger/simulation/tracer_test.go

diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 014539ce6a..70b9981a8b 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -1215,6 +1215,7 @@
         "application/x-binary"
       ],
       "produces": [
+        "application/json",
         "application/msgpack"
       ],
       "schemes": [
@@ -1232,11 +1233,14 @@
           "type": "string",
           "format": "binary"
         }
+      },
+      {
+        "$ref": "#/parameters/format"
       }
     ],
     "responses": {
       "200": {
-        "$ref": "#/responses/SimulationResponse"
+        "$ref": "#/responses/SimulateResponse"
       },
       "400": {
         "description": "Bad Request - 
Malformed Algorand transaction", @@ -3321,6 +3325,49 @@ } } }, + "SimulateTransactionGroupResult": { + "description": "Simulation result for an atomic transaction group", + "type": "object", + "required": [ + "txn-results" + ], + "properties": { + "txn-results": { + "description": "Simulation result for individual transactions", + "type": "array", + "items": { + "$ref": "#/definitions/SimulateTransactionResult" + } + }, + "failure-message": { + "description": "If present, indicates that the transaction group failed and specifies why that happened", + "type": "string" + }, + "failed-at": { + "description": "If present, indicates which transaction in this group caused the failure. This array represents the path to the failing transaction. Indexes are zero based, the first element indicates the top-level transaction, and successive elements indicate deeper inner transactions.", + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "SimulateTransactionResult": { + "description": "Simulation result for an individual transaction", + "type": "object", + "required": [ + "txn-result" + ], + "properties": { + "txn-result": { + "$ref": "#/definitions/PendingTransactionResponse" + }, + "missing-signature": { + "description": "A boolean indicating whether this transaction is missing signatures", + "type": "boolean" + } + } + }, "StateProof": { "description": "Represents a state proof and its corresponding message", "type": "object", @@ -3485,7 +3532,7 @@ "msgpack" ], "type": "string", - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "name": "format", "in": "query" }, @@ -3964,7 +4011,7 @@ } } }, - "SimulationResponse": { + "SimulateResponse": { "description": "Result of a transaction group simulation.", "tags": [ "experimental" @@ -3972,16 +4019,29 @@ "schema": { "type": "object", "required": [ - "failure-message", - "missing-signatures" + "version", + "last-round", + "txn-groups", + "would-succeed" ], "properties": { - "failure-message": { - "description": "\\[fm\\] Failure message, if the transaction would have failed during a live broadcast.", - "type": "string" + "version": { + "description": "The version of this response object.", + "type": "integer" + }, + "last-round": { + "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", + "type": "integer" + }, + "txn-groups": { + "description": "A result object for each transaction group that was simulated.", + "type": "array", + "items": { + "$ref": "#/definitions/SimulateTransactionGroupResult" + } }, - "missing-signatures": { - "description": "\\[ms\\] Whether any transactions would have failed during a live broadcast because they were missing signatures.", + "would-succeed": { + "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.", "type": "boolean" } } diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index 3e5947b61b..57774cc1e0 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -103,7 +103,7 @@ } }, "format": { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. 
If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -753,23 +753,36 @@ }, "description": "Transaction ID of the submission." }, - "SimulationResponse": { + "SimulateResponse": { "content": { "application/json": { "schema": { "properties": { - "failure-message": { - "description": "\\[fm\\] Failure message, if the transaction would have failed during a live broadcast.", - "type": "string" + "last-round": { + "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", + "type": "integer" + }, + "txn-groups": { + "description": "A result object for each transaction group that was simulated.", + "items": { + "$ref": "#/components/schemas/SimulateTransactionGroupResult" + }, + "type": "array" }, - "missing-signatures": { - "description": "\\[ms\\] Whether any transactions would have failed during a live broadcast because they were missing signatures.", + "version": { + "description": "The version of this response object.", + "type": "integer" + }, + "would-succeed": { + "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.", "type": "boolean" } }, "required": [ - "failure-message", - "missing-signatures" + "last-round", + "txn-groups", + "version", + "would-succeed" ], "type": "object" } @@ -1822,6 +1835,49 @@ ], "type": "object" }, + "SimulateTransactionGroupResult": { + "description": "Simulation result for an atomic transaction group", + "properties": { + "failed-at": { + "description": "If present, indicates which transaction in this group caused the failure. This array represents the path to the failing transaction. Indexes are zero based, the first element indicates the top-level transaction, and successive elements indicate deeper inner transactions.", + "items": { + "type": "integer" + }, + "type": "array" + }, + "failure-message": { + "description": "If present, indicates that the transaction group failed and specifies why that happened", + "type": "string" + }, + "txn-results": { + "description": "Simulation result for individual transactions", + "items": { + "$ref": "#/components/schemas/SimulateTransactionResult" + }, + "type": "array" + } + }, + "required": [ + "txn-results" + ], + "type": "object" + }, + "SimulateTransactionResult": { + "description": "Simulation result for an individual transaction", + "properties": { + "missing-signature": { + "description": "A boolean indicating whether this transaction is missing signatures", + "type": "boolean" + }, + "txn-result": { + "$ref": "#/components/schemas/PendingTransactionResponse" + } + }, + "required": [ + "txn-result" + ], + "type": "object" + }, "StateDelta": { "description": "Application state delta.", "items": { @@ -2087,7 +2143,7 @@ "operationId": "AccountInformation", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -2200,7 +2256,7 @@ "operationId": "AccountApplicationInformation", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. 
If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -2340,7 +2396,7 @@ "operationId": "AccountAssetInformation", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -2498,7 +2554,7 @@ } }, { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -2973,7 +3029,7 @@ "operationId": "GetBlock", "parameters": [ { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -3328,7 +3384,7 @@ } }, { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -3635,7 +3691,7 @@ } }, { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -5367,7 +5423,7 @@ } }, { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -5508,7 +5564,7 @@ } }, { - "description": "Configures whether the response object is JSON or MessagePack encoded.", + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", "in": "query", "name": "format", "schema": { @@ -5596,6 +5652,20 @@ "/v2/transactions/simulate": { "post": { "operationId": "SimulateTransaction", + "parameters": [ + { + "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.", + "in": "query", + "name": "format", + "schema": { + "enum": [ + "json", + "msgpack" + ], + "type": "string" + } + } + ], "requestBody": { "content": { "application/x-binary": { @@ -5611,21 +5681,66 @@ "responses": { "200": { "content": { + "application/json": { + "schema": { + "properties": { + "last-round": { + "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", + "type": "integer" + }, + "txn-groups": { + "description": "A result object for each transaction group that was simulated.", + "items": { + "$ref": "#/components/schemas/SimulateTransactionGroupResult" + }, + "type": "array" + }, + "version": { + "description": "The version of this response object.", + "type": "integer" + }, + "would-succeed": { + "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. 
If any transaction fails or is missing a signature, this will be false.", + "type": "boolean" + } + }, + "required": [ + "last-round", + "txn-groups", + "version", + "would-succeed" + ], + "type": "object" + } + }, "application/msgpack": { "schema": { "properties": { - "failure-message": { - "description": "\\[fm\\] Failure message, if the transaction would have failed during a live broadcast.", - "type": "string" + "last-round": { + "description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.", + "type": "integer" + }, + "txn-groups": { + "description": "A result object for each transaction group that was simulated.", + "items": { + "$ref": "#/components/schemas/SimulateTransactionGroupResult" + }, + "type": "array" + }, + "version": { + "description": "The version of this response object.", + "type": "integer" }, - "missing-signatures": { - "description": "\\[ms\\] Whether any transactions would have failed during a live broadcast because they were missing signatures.", + "would-succeed": { + "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.", "type": "boolean" } }, "required": [ - "failure-message", - "missing-signatures" + "last-round", + "txn-groups", + "version", + "would-succeed" ], "type": "object" } @@ -5635,6 +5750,11 @@ }, "400": { "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } + }, "application/msgpack": { "schema": { "$ref": "#/components/schemas/ErrorResponse" @@ -5645,6 +5765,11 @@ }, "401": { "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } + }, "application/msgpack": { "schema": { "$ref": "#/components/schemas/ErrorResponse" @@ -5655,6 +5780,11 @@ }, "500": { "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } + }, "application/msgpack": { "schema": { "$ref": "#/components/schemas/ErrorResponse" @@ -5665,6 +5795,11 @@ }, "503": { "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + } + }, "application/msgpack": { "schema": { "$ref": "#/components/schemas/ErrorResponse" diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go index b4512779fb..4d178d1317 100644 --- a/daemon/algod/api/server/v2/generated/data/routes.go +++ b/daemon/algod/api/server/v2/generated/data/routes.go @@ -145,168 +145,174 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3PctpLoX0HNbpUfdziSXzmxqlJ7FTvJ0cZxXJaSc3dt3wRD9szgiAQYAJRm4qv/", - "fgsNgARJkEM9Yidb55OtIR6NRqPRb3ycpaIoBQeu1ezo46ykkhagQeJfNE1FxXXCMvNXBiqVrNRM8NmR", - "/0aUloyvZ/MZM7+WVG9m8xmnBTRtTP/5TMJvFZOQzY60rGA+U+kGCmoG1rvStK5H2iZrkbghju0QJy9n", - "VyMfaJZJUKoP5Y883xHG07zKgGhJuaKp+aTIJdMbojdMEdeZME4EByJWRG9ajcmKQZ6phV/kbxXIXbBK", - "N/nwkq4aEBMpcujD+UIUS8bBQwU1UPWGEC1IBitstKGamBkMrL6hFkQBlemGrITcA6oFIoQXeFXMjt7N", - "FPAMJO5WCuwC/7uSAL9Doqlcg559mMcWt9IgE82KyNJOHPYlqCrXimBbXOOaXQAnpteC/FApTZZAKCdv", - "v31Bnjx58twspKBaQ+aIbHBVzezhmmz32dEsoxr85z6t0XwtJOVZUrd/++0LnP/ULXBqK6oUxA/LsflC", - "Tl4OLcB3jJAQ4xrWuA8t6jc9Ioei+XkJKyFh4p7Yxne6KeH8n3VXUqrTTSkY15F9IfiV2M9RHhZ0H+Nh", - 
"NQCt9qXBlDSDvjtMnn/4+Gj+6PDq394dJ//t/nz25Gri8l/U4+7BQLRhWkkJPN0lawkUT8uG8j4+3jp6", - "UBtR5RnZ0AvcfFogq3d9ielrWecFzStDJyyV4jhfC0WoI6MMVrTKNfETk4rnhk2Z0Ry1E6ZIKcUFyyCb", - "G+57uWHphqRU2SGwHblkeW5osFKQDdFafHUjh+kqRImB60b4wAX9eZHRrGsPJmCL3CBJc6Eg0WLP9eRv", - "HMozEl4ozV2lrndZkbMNEJzcfLCXLeKOG5rO8x3RuK8ZoYpQ4q+mOWErshMVucTNydk59nerMVgriEEa", - "bk7rHjWHdwh9PWREkLcUIgfKEXn+3PVRxldsXUlQ5HIDeuPuPAmqFFwBEct/QqrNtv/n6Y+viZDkB1CK", - "ruENTc8J8FRkw3vsJo3d4P9Uwmx4odYlTc/j13XOChYB+Qe6ZUVVEF4VS5Bmv/z9oAWRoCvJhwCyI+6h", - "s4Ju+5OeyYqnuLnNtC1BzZASU2VOdwtysiIF3X51OHfgKELznJTAM8bXRG/5oJBm5t4PXiJFxbMJMow2", - "GxbcmqqElK0YZKQeZQQSN80+eBi/HjyNZBWA4wcZBKeeZQ84HLYRmjFH13whJV1DQDIL8pPjXPhVi3Pg", - "NYMjyx1+KiVcMFGputMAjDj1uHjNhYaklLBiERo7degw3MO2cey1cAJOKrimjENmOC8CLTRYTjQIUzDh", - "uDLTv6KXVMEXT4cu8ObrxN1fie6uj+74pN3GRok9kpF70Xx1BzYuNrX6T1D+wrkVWyf2595GsvWZuUpW", - "LMdr5p9m/zwaKoVMoIUIf/EotuZUVxKO3vOH5i+SkFNNeUZlZn4p7E8/VLlmp2xtfsrtT6/EmqWnbD2A", - "zBrWqDaF3Qr7jxkvzo71Nqo0vBLivCrDBaUtrXS5IycvhzbZjnldwjyuVdlQqzjbek3juj30tt7IASAH", - "cVdS0/AcdhIMtDRd4T/bFdITXcnfzT9lmZveulzFUGvo2N23aBtwNoPjssxZSg0S37rP5qthAmC1BNq0", - "OMAL9ehjAGIpRQlSMzsoLcskFynNE6WpxpH+XcJqdjT7t4PGuHJgu6uDYPJXptcpdjLyqJVxElqW1xjj", - "jZFr1AizMAwaPyGbsGwPJSLG7SYaUmKGBedwQbleNPpIix/UB/idm6nBtxVlLL47+tUgwoltuARlxVvb", - "8J4iAeoJopUgWlHaXOdiWf9w/7gsGwzi9+OytPhA0RAYSl2wZUqrB7h82pykcJ6TlwvyXTg2ytmC5ztz", - "OVhRw9wNK3druVusNhy5NTQj3lMEt1PIhdkajwYjw98FxaHOsBG5kXr20opp/HfXNiQz8/ukzn8NEgtx", - "O0xcqEU5zFkFBn8JNJf7HcrpE46z5SzIcbfvzcjGjBInmBvRyuh+2nFH8Fij8FLS0gLovti7lHHUwGwj", - "C+stuelERheFOTjDAa0hVDc+a3vPQxQSJIUODF/nIj3/O1WbOzjzSz9W//jhNGQDNANJNlRtFrOYlBEe", - "r2a0KUfMNETtnSyDqRb1Eu9qeXuWllFNg6U5eONiiUU99kOmBzKiu/yI/6E5MZ/N2Tas3w67IGfIwJQ9", - "zs6DkBlV3ioIdibTAE0MghRWeydG674WlC+ayeP7NGmPvrEGA7dDbhG4Q2J758fga7GNwfC12PaOgNiC", - "ugv6MOOgGKmhUBPge+kgE7j/Dn1USrrrIxnHnoJks0Ajuio8DTy88c0sjeX1eCnkzbhPh61w0tiTCTWj", - "Bsx33kESNq3KxJFixCZlG3QGalx440yjO3wMYy0snGr6B2BBmVHvAgvtge4aC6IoWQ53QPqbKNNfUgVP", - "HpPTvx8/e/T4l8fPvjAkWUqxlrQgy50GRe473YwovcvhQX9lqB1VuY6P/sVTb4VsjxsbR4lKplDQsj+U", - "tW5aEcg2I6ZdH2ttNOOqawCnHM4zMJzcop1Yw70B7SVTRsIqlneyGUMIy5pZMuIgyWAvMV13ec00u3CJ", - "cieru1BlQUohI/Y1PGJapCJPLkAqJiKukjeuBXEtvHhbdn+30JJLqoiZG02/FUeBIkJZesun83079NmW", - "N7gZ5fx2vZHVuXmn7Esb+d6SqEgJMtFbTjJYVuuWJrSSoiCUZNgR7+jvQJ/ueIpWtbsg0mE1rWAcTfxq", - "x9NAZzMblUO2bm3C7XWzLla8fc5OdU9FwDHoeIWfUa1/Cbmmdy6/dCeIwf7Cb6QFlmSmIWrBr9h6owMB", - "840UYnX3MMZmiQGKH6x4nps+fSH9tcjALLZSd3AZN4M1tG72NKRwuhSVJpRwkQFaVCoVv6YH3PLoD0Q3", - "pg5vfr2xEvcSDCGltDKrrUqCTroe52g6JjS11JsgatSAF6N2P9lWdjrr8s0l0Mxo9cCJWDpXgXNi4CIp", - "ehi1v+ickBA5Sy24SilSUAqyxJko9oLm21kmokfwhIAjwPUsRAmyovLWwJ5f7IXzHHYJ+sMVuf/9z+rB", - "Z4BXC03zPYjFNjH01gqf8wf1oZ42/RjBdScPyY5KIJ7nGu3SMIgcNAyh8Fo4Gdy/LkS9Xbw9Wi5Aomfm", - "D6V4P8ntCKgG9Q+m99tCW5UDUV5O0TljBdrtOOVCQSp4pqKD5VTpZB9bNo1a2phZQcAJY5wYBx4QSl5R", - "pa03kfEMjSD2OsF5rIBiphgGeFAgNSP/7GXR/tipuQe5qlQtmKqqLIXUkMXWwGE7Mtdr2NZziVUwdi39", - "akEqBftGHsJSML5Dll2JRRDVtdHdudv7i0PTtLnnd1FUtoBoEDEGyKlvFWA3jHQZAISpBtGWcJjqUE4d", - "XjOfKS3K0nALnVS87jeEplPb+lj/1LTtExfVzb2dCTCzaw+Tg/zSYtbGOG2oUaFxZFLQcyN7oEJs3Z59", - "mM1hTBTjKSRjlG+O5alpFR6BvYe0KteSZpBkkNNdf9Cf7GdiP48NgDveKD5CQ2LjWeKb3lCyDx8YGVrg", - "eComPBL8QlJzBI3m0RCI671n5Axw7BhzcnR0rx4K54pukR8Pl223OjIi3oYXQpsdt+SAEDuGPgXeATTU", - "I98cE9g5adSy7hT/BcpNUIsR159kB2poCc3411rAgDHNhQEHx6XD3TsMOMo1B7nYHjYydGIHLHtvqNQs", - "ZSWqOt/D7s41v+4EUX8TyUBTlkNGgg9WCyzD/sQGYnTHvJkmOMkI0we/Z4WJLCdnCiWeNvDnsEOV+42N", - "8DsL4gLvQJWNjGquJ8oJAurjhowEHjaBLU11vjNymt7AjlyCBKKqZcG0tiGbbU1XizIJB4gauEdmdN4c", - 
"Gx3nd2CKe+kUhwqW19+K+cyqBOPwnXX0ghY6nCpQCpFPMB71kBGFYJLjn5TC7DpzEcI+jNRTUgtIx7TR", - "lVff/vdUC824AvJfoiIp5ahxVRpqkUZIlBNQfjQzGAmsntO5+BsMQQ4FWEUSvzx82F34w4duz5kiK7j0", - "YfWmYRcdDx+iGeeNULp1uO7AVGiO20nk+kDLP957Lnihw1P2u5jdyFN28k1n8NpdYM6UUo5wzfJvzQA6", - "J3M7Ze0hjUxzr+O4k4z6wdCxdeO+n7Kiyu9qw1eU5ZWEYe/Y+/fvVsX79x/It7ald2zPPZGH6Lhs0iJW", - "7jaqJIbWkJwZ/VYKmhkBIWrbx0XydVIHZ6ooOIUy4PzDnUPKd51EvqkwkCWktLJRyY5rOwia8FC1iMiL", - "nd3tojC6kInm8SrX9tIOsbqWoiqJqrfdUoGmGv4YU3MzdAzK/sRBbFDzcSg8yKiJ+e4Obms7EJFQSlDI", - "W0PzirJfxSrMv3HMV+2UhqJvgbZdfxnQz94O6jmC54xDUggOu2jKKePwA36M9bb8faAz3rRDfbvCcwv+", - "DljteaZQ423xi7sdMLQ3dVzcHWx+d9yO8yHMPELjGuQloSTNGZreBFdaVql+zykq98Fhi8QPeDVm2Nzz", - "wjeJ25ci5h831HtOMXakVvmjfHEFEb78LYC3+qhqvQalO1LiCuA9d60YJxVnGucqzH4ldsNKkOjEX9iW", - "Bd2RFc3ROvU7SEGWlW4zV0yQUJrlufOEmGmIWL3nVJMcDFf9gfGzLQ7nPYmeZjjoSyHPaywsoudhDRwU", - "U0k8zuE7+xVD0NzyNy4cDbNV7WdrOzfjN1kUO9T9mwzM/3v/P47eHSf/TZPfD5Pn/+vgw8enVw8e9n58", - "fPXVV/+v/dOTq68e/Me/x3bKwx4L33eQn7x0OsXJSxQcG+N5D/ZPZjgtGE+iRBa6iDu0Re4b8dcT0IO2", - "WUFv4D3XW24I6YLmLKP6ZuTQZXG9s2hPR4dqWhvRMSP4tV5THLsFlyERJtNhjTe+xvuhQfFEGfTmuNwX", - "PC+ritutrJTzKGEcuA/REKt5nQxliyAcEcyU2VAfX+T+fPzsi9m8yXCpv8/mM/f1Q4SSWbaN5TFlsI1J", - "2e6A4MG4p0hJdwp0nHsg7NFoFOsUD4ctwKhnasPKT88plGbLOIfz0bVOW9/yE27DXs35Qd/QzpmcxerT", - "w60lQAal3sSSo1uSArZqdhOg468vpbgAPidsAYuutpytQfm4mBzoCpN00b8hpmQL1OfAEpqnigDr4UIm", - "qaQx+kHh1nHrq/nMXf7qzuVxN3AMru6ctSPI/60FuffdN2fkwDFMdc+m1NmhgySoiBXKxfm3IjkMN7Ml", - "IWxO4Xv+nr+EFePMfD96zzOq6cGSKpaqg0qB/JrmlKewWAty5FMHXlJN3/OepDVYtSVI2iBltcxZSs5D", - "ibghT5uJH1Ubab4WRnHsOrX78qubKspf7ATJJdMbUenEpRonEi6pjDkNVJ1qiiPbQgFjs86JG9uyYpfK", - "7MaP8zxalqqbctZfflnmZvkBGSqXUGW2jCgtpJdFjIBiocH9fS3cxSDppc9TrxQo8mtBy3eM6w8keV8d", - "Hj4B0srB+tVd+YYmdyW07JU3Sonr2ipx4Vavga2WNCnpesBooIGWuPsoLxeoZOc5wW6t3C8f24pDNQvw", - "+BjeAAvHtfNYcHGntpevGRNfAn7CLcQ2RtxoPKY33a8gG+zG29XJKOvtUqU3iTnb0VUpQ+J+Z+pSEmsj", - "ZHk3tmJrDBV0VTeWQNINpOeQYQEAKEq9m7e6+0gJJ2h61sGULZRhczkwmxtNu0sgVZlRJ4p3DEoGwwq0", - "9rGKb+EcdmeiSQa/Th5tO61TDR1UpNRAujTEGh5bN0Z38104Dtq6ytJnR2KajCeLo5oufJ/hg2xF3js4", - "xDGiaKUdDiGCyggiLPEPoOAGCzXj3Yr0Y8szWsbS3nyRuhqe9xPXpFGeXORMuBrMprTfC8CqO+JSkSU1", - "crtwBWNs6mLAxSpF1zAgIYfW9YkJgi2LPA6y796L3nRi1b3QevdNFGTbODFrjlIKmC+GVFCZ6cRL+Zms", - "A8caUAnWgXMIW+YoJtWBZZbpUNnyctjCVkOgxQkYJG8EDg9GGyOhZLOhyteywZI//ixPkgH+wFTcsQIM", - "J0GoT1DXpzZ8e57bPac97dKVYfC1F3zBhVC1nFA8wUj4GF0c2w7BUQDKIIe1Xbht7AmlSQtuNsjA8eNq", - "lTMOJIlFDVGlRMpsMaLmmnFzgJGPHxJiTcBk8ggxMg7ARsckDkxei/Bs8vV1gOQurZn6sdGlGfwN8QwM", - "G0drRB5RGhbO+EDEtucA1IWa1fdXJ+ARhyGMz4lhcxc0N2zOaXzNIL06ACi2drL+nWv8wZA4O2KBtxfL", - "tdZkr6KbrCaUmTzQcYFuBOKl2CY2BSsq8S63S0Pv0dBiTAiLHUxbceGeIkuxxXALvFpsKOseWIbh8GAE", - "Gv6WKaRX7Dd0m1tgxqYdl6ZiVKiQZJw5ryaXIXFiytQDEswQudwPiijcCICOsaMpN+qU371Kals86V/m", - "za02b4oD+ayN2PEfOkLRXRrAX98KU5c9eNOVWKJ2inbUQLviQyBCxojesIm+k6bvClKQAyoFSUuISs5j", - "rjuj2wDeOKe+W2C8wLoSlO8eBKEoEtZMaWiM6OZi9l6hT22epFjOSojV8Op0KVdmfW+FqK8pWy8FO7aW", - "+clXgKGcKyaVTtADEV2CafStQqX6W9M0Liu1g11sZUeWxXkDTnsOuyRjeRWnVzfv9y/NtK9rlqiqJfJb", - "xgnQdEOWWIk0GgI3MrWNkhxd8Cu74Ff0ztY77TSYpmZiacilPcdf5Fx0OO8YO4gQYIw4+rs2iNIRBhlk", - "Lva5YyA32cOJmYuLMetr7zBlfuy9YSM+f3LojrIjRdcSGAxGV8HQTWTEEqaDQp79lMKBM0DLkmXbji3U", - "jjqoMdNrGTx8haQOFnB33WB7MBDYPWNZDRJUuxhWI+DbkqytWhSLSZg5a5esChlCOBVTvqB4H1F11tM+", - "XJ0Bzb+H3c+mLS5ndjWf3c50GsO1G3EPrt/U2xvFM7rmrSmt5Qm5JsppWUpxQfPEGZiHSFOKC0ea2Nzb", - "oz8xq4ubMc++OX71xoF/NZ+lOVCZ1KLC4KqwXfmXWZWtuzVwQHzBYqPzeZndipLB5tfFgkKj9OUGXHHY", - "QBrtVbFrHA7BUXRG6lU8Qmivydn5RuwSR3wkUNYuksZ8Zz0kba8IvaAs93YzD+1ANA8ublopxChXCAe4", - 
"tXclcJIld8pueqc7fjoa6trDk8K5RsrXFrZCsyKCd13oRoREcxySakGxBp21ivSZE68KtCQkKmdp3MbK", - "lxh2y63vzDQm2HhAGDUjVmzAFcsrFoxlmqkJim4HyGCOKDJ9PcMh3C2Fe1qj4uy3CgjLgGvzSeKp7BxU", - "LPrnrO3969TIDv253MDWQt8MfxsZI6y/2L3xEIhxASP01PXAfVmrzH6htUXK/BC4JK7h8A9n7F2JI856", - "Rx+Omm3w4qbtcQtfwujzP0MYtmry/mc4vPLqCkEOzBF9VoOpZCXF7xDX81A9jmSM+IqTDKNcfgc+Icy8", - "se40r4M0sw9u95B0E1qh2kEKA1SPOx+45bD0nbdQU2632la5b8W6xQkmjCo9sOM3BONg7kXi5vRySWN1", - "AY2QYWA6bhzALVu6FsR39rh3Zn/mioAuSOBLrtsymwxcgmySufqFRW4oMNhpJ4sKjWSAVBvKBHPr/8uV", - "iAxT8UvK7WMJpp89Sq63Amv8Mr0uhcRUfhU3+2eQsoLmcckhS/sm3oytmX0qoFIQ1KJ3A9k3ViwVuXr+", - "1sXeoOZkRQ7nwWsXbjcydsEUW+aALR7ZFkuqkJPXhqi6i1kecL1R2PzxhOabimcSMr1RFrFKkFqoQ/Wm", - "dl4tQV8CcHKI7R49J/fRbafYBTwwWHT38+zo0XM0uto/DmMXgHsTZIybZKsw8SVOx+i3tGMYxu1GXUSz", - "nu1DTsOMa+Q02a5TzhK2dLxu/1kqKKdriEeKFHtgsn1xN9GQ1sELz+wrJEpLsSNsIAUJNDX8aSD63LA/", - "CwZJRVEwXTjnjhKFoaem0Lyd1A9nnzRxNUI9XP4j+khL7yLqKJGf1mhq77fYqtGT/ZoW0EbrnFBbvyFn", - "TfSCr1xMTnx5GCyaWtdKtbgxc5mlo5iDwQwrUkrGNSoWlV4lX5J0QyVNDftbDIGbLL94GikU2y5YyK8H", - "+CfHuwQF8iKOejlA9l6GcH3JfS54UhiOkj1osj2CUznozI277YZ8h+NDTxXKzCjJILlVLXKjAae+FeHx", - "kQFvSYr1eq5Fj9de2SenzErGyYNWZod+evvKSRmFkLGab81xdxKHBC0ZXGDsXnyTzJi33AuZT9qF20D/", - "eT0PXuQMxDJ/lmOKwNciop364sW1Jd3FqkesA0PH1HwwZLB0Q81Ju1Dsp3f6eeNz3/lkvnhY8Y8usJ95", - "SxHJfgUDmxgUsY5uZ1Z/D/zflHwttlM3tXNC/Mb+CVATRUnF8uznJiuzUyNcUp5uov6spen4S/OaUb04", - "ez9FS6ttKOeQR4ezsuAvXmaMSLX/FFPnKRif2LZbttwut7O4BvA2mB4oP6FBL9O5mSDEajvhrQ6oztci", - "IzhPU8er4Z79cvdBUeLfKlA6ljyEH2xQF9otjb5ra+IS4BlqiwvynX2NdAOkVaUFtTSbHw+Zr9BqDepV", - "mQuazYkZ5+yb41fEzmr72Dc5bE3eNSop7VV07FVBicJp4cH+eY146sL0ccZjqc2qlcaiSUrToowlh5oW", - "Z74BZqCGNnxUX0LsLMhLqzkqr5fYSQw9rJgsjMZVj2ZlF6QJ8x+tabpBlazFUodJfnoxaU+VKnjArX6I", - "pa7bh+fOwO3qSdty0nMijN58yZR9hBIuoJ2PWidnO5OAz09tL09WnFtKicoeY8UDboJ2D5wN1PBm/ihk", - "HcRfUyC3tdivW1v7FHtF6wh1C3X3Xm6z2Y31Axv+ceGUcsFZilV8Yleze9Byig9sQsGjrpHVH3F3QiOH", - "K1oevA6Tc1gcLBjuGaFDXN8IH3w1m2qpw/6p8eXEDdVkDVo5zgbZ3Fe5d3ZAxhW4Ooz4tmnAJ4Vs+RWR", - "Q0Zd1Unt0rgmGWFazIBi96359tqp/Rgvfs44CvgObS403Vrq8L09bbQCpslagHLraecGq3emzwLTZDPY", - "flj49/lwDOuWM8u2Puj+UMfeI+08wKbtC9PWljJpfm5FINtJj8vSTTr8BkJUHtBbPojgiGcx8a6dALn1", - "+OFoI+Q2GkqC96khNLhARzSUeA/3CKN+D6Dz1owRWi1FYQtiQ7iiFQwYj4DxinFoXo+MXBBp9ErAjcHz", - "OtBPpZJqKwJO4mlnQHP0PscYmtLO9XDboTobjCjBNfo5hrexecpggHHUDRrBjfJd/Wiloe5AmHiBr+U6", - "RPYfJkCpyglRGWYUdJ4qiDEOw7h9KaT2BdA/Bn2ZyHbXktqTc52baChJdFlla9AJzbJYXcyv8SvBr75Q", - "FGwhrer6iWVJUqyJ0i4S06c2N1EquKqKkbl8g1tOF7z9EaGG8P0Rv8OYhLLc4b+x4oHDO+OCMK4dBugj", - "LtxjCdeUm9sj9aReQ9OJYutkOibwTrk9Opqpb0boTf87pfRcrNuAfOLSEGNcLtyjGH/7xlwcYeWEXkVM", - "e7XUhQ0w6E74F9tQbaxTcttcCa+yXolMdPbUNe/GDRDDbzvN8fIbCL0NCmJQe79a7+FQAG46GC9Otctc", - "05SMsqDBbCAbvWPzfhCKuOV0KGLHBuyYz73e0yTDnpyNY48i1IeC9QH63seZkpIy5xpvmEUfsy4ifdhc", - "OHbomg3uLsLFeQ9a7L6/GIrJJorxdQ4Ev3dfwzkHl85eP4du1+qjkrxKaH91r5Ha8eqo+Oj6+9EJONXn", - "NYMOGm3PXOV1u0ynk3//s41hI8C13P0JTLi9Te+9JdSXdq15qmlC6qq9k6r4tm7F+LNAw/WPmppHSE+l", - "UKypFB17L2hirNsZPvkT1G/qj+UDTS4g1VgevHGgS4DrVHMykwVv0f2rDtKA7liHBLryR2M1j/o1wfdc", - "aL20pCC1ztZTXkyv8HNch0khU8LX4NbA3XNw7YSDyWHPqxWkml3sSQP7xwZ4kGI090YI+6xrkBXG6jBa", - "rCJyfRNbA9BYltYoPEE1v1uDM5QEcg67e4q0qCFa4Hnu75WbFJBADCB3SAyJCBULQ7BWU+cZZqqmDMSC", - "D/ux3aEpxTX4NEyQ1HjDuTxJmhu3SXQcmTL+NsWkuUzXa6X/YkToUKZYv7b9sLD9Ep8SUPWzbb4ARaiS", - "kpNI9WdXwAKT9mpHgS9lAcr/5jN07Sw5O4fw8Rp0y1xSmfkWUTuDN2EkI/dRL70rWrKaKhtE6fzgdZBm", - "P6EnUvgJQ3HTXGC556F45nZcZPjGO0Z/4HWA5acRrhVI98gXCnu5UJBo4YM6x+AYQ4V7j/wmSFCDxRYt", - "cIMlUN42NV6w6CzFkifURbaECyQSCmqgk0ElluE5x5D9wn73GSy+6Ohec0pNr8neUio+PJepHhJDql8R", - 
"d1vuz4y5iWWFcW6fFFWxsizcoDI0/ZdSZFVqL+jwYNTWp8lFj0ZYSdQokfZX2dMvcywB9irIMzyH3YEV", - "/dMN5U0ttvaxtiKUXUOQ19/Z7Ts1OsX163xtF7C+Ezg/p+FmPiuFyJMBW/9Jv7pM9wycs/QcMmLuDh/Y", - "NvC6BrmPJubamXu52flqKmUJHLIHC0KOuQ0l9n7ddnnjzuT8nh6bf4uzZpUt+ORsSov3PB6TiaWY5C35", - "mx9mnKspMMzvllPZQfbULtkOVLaR9DLy1sxiqlLa97R23/9oiMpCEZNSbpjIPul89+1KEdIPnj4Y137C", - "OhdNAJ205kmUlprnINrCyw+N1XHaIwy+wx7wQqU4eIbBcyMHzmeOcvuhRkqwlEFKaC1/n57tFtjwpWCL", - "FKZFmGXaqkM2QqK9L4ERRb2obRNxPPdNGFjUQnAs9NM3fSg0V2O94JBwzLmUFzT/9OYLrHZyjPhwTyLG", - "FxrqvyGSLSrVzUJNXtFJcwe67t1Nzd+gueUfYPYo6mdwQzm7Y/38hbfOYl07mpNcNI8h4ZDkEse0jolH", - "X5ClC5MvJaRMsU4G0aUvZVqre1jZu3kpc1y/3LfOn4W+BRk7BUGU5HVTFlELvB8aCJsj+pmZysDJjVJ5", - "jPp6ZBHBX4xHhfnqe66L85bHwpaZ7YTiCAl37LkIYhCu6bnoZ+JPXZ61zptLp1LQX+fk27qF28hF3axt", - "qtutj9yx2nlTvGXxkpimO7rrLEKwnixBUMmvj34lElb4YIQgDx/iBA8fzl3TXx+3P5vj/PBh/EXOT+Wo", - "szhyY7h5YxTz81Dopg1PHIgS7uxHxfJsH2G0Yr6bJ1cwqvkXl/XxWR59+cXaU/tH1RXev06IQHcTEDGR", - "tbYmD6YKorknBHK7bpGwbdRM0koyvcNiFN78xn6JuhS/qy32zuNTpy+7u0+Lc6jLmTT2/Ur52/U7QXO8", - "j4xMjQEaGl9h/GZLizIHd1C+urf8Gzz58ml2+OTR35ZfHj47TOHps+eHh/T5U/ro+ZNH8PjLZ08P4dHq", - "i+fLx9njp4+XTx8//eLZ8/TJ00fLp188/9s9w4cMyBbQmU99nP0ffBkpOX5zkpwZYBuc0JLVj68aMvbP", - "O9AUTyIUlOWzI//T//YnbJGKohne/zpzmVWzjdalOjo4uLy8XIRdDtZo0Eu0qNLNgZ+n/+jlm5M6Ot66", - "gnFHbeCzIQXcVEcKx/jt7TenZ+T4zcmiIZjZ0exwcbh4hI+ZlcBpyWZHsyf4E56eDe77gSO22dHHq/ns", - "YAM0R/+X+aMALVnqP6lLul6DXLh3LsxPF48PvChx8NEZM6/Gvh2EJWMPPrZsvtmenlhS8uCjr5Qw3rpV", - "isDZuoMOE6EYa3awxASsqU1BBY2Hl4IKhjr4iCLy4O8HLisl/hFVFXsGDrxjJN6yhaWPemtg7fRwrzcf", - "fMT/IE0GYNkYsADc2TpWoOU70D6Bw/ZwEQ51aENN2yeZbd7zuLsaKLYo3NG7aTWxwU9nNMUMFHOFcpBL", - "mCPQHGIf7N2waC0rCAuYjaX6X80jz8et2LqSnQfz61An94QDU+Q/T398TYQkTid+Q9PzMPYBYf2tArlr", - "gHV3YQidr6jtIukLtS7bYae1nP0BU5MRCuQAjw8P7+xVm962XdkQ0Ho4D9dtRuwxzBf+GmzRmGF8Tw8f", - "3dna2tFst15Yd7jeqk44OpMNWyf22sIFPf3LLugFqsdcaLJiPLMlyTXFA2tPH67vy7/s+jQrvIma4+sO", - "oPB2f3aHB+zTE6ERsmlOsKVdzZO/7GpOQV6wFMgZFKWQVLJ8R37idfpWULKlf4f9xM+5uOQeEUaKroqC", - "yp273yjpsirP4+1dFzywZcQ3ulZoQcciubO5DWj9cOXuVcvIDuyDrM1163/ecZc8kUMsFOEnrsC9Yu6y", - "Jnc8HbpssfHpjqdv6xuwdzngwfzjiLi/TzW8yC7QV/0Hc/Np7PfZp8TCpz17n+ywvIVCXICq3yBtiNNI", - "R0bjsM+RSlEENLwYOTTzQanTeRH6M3kPSjN4TwTdcyZu+p7nSCTCJDj3hA7Z4Se+TG5F0U6osp3qXmyD", - "Zv9iBP9iBHfICHQl+eARDe4vDKeD0kYKkJSmG1hMv0R3PA011FLEahWcjjALl6E9xCtO27ziT62nfvhT", - "3O8vKPfnubXjNn6DypyBrKmA8n7S/L+4wP8YLmCrfzhb0JxoyHMVnn0t8OzbEAAXJc1taMZEPtB9HC/2", - "88HH9uMMLaOc2lQ6E5dBX3Tk2iiEvq2ufq6s9ffBJWU6WQnpIqSxeGa/swaaH7jc/86vTbpd7wvmEAY/", - "Bna9+K8HdW3i6MeuwTT21RkMBxr5yi3+c+MwCR0QyCFr18O7D4Y/YeU7xzwbe/rRwQFGHW6E0gezq/nH", - "jq09/PihJglfEmlWSnaBGZYfrv5/AAAA//9vX8MwD78AAA==", + "H4sIAAAAAAAC/+x9a3PctpLoX0HNbpUfdyjJr5xYVam9ip3kaOM4LkvJubu2b4Ihe2ZwRAIMAI5m4qv/", + "fgsNgARJkEM9Yidb55OtIdBoNBqNRr/wcZaKohQcuFaz44+zkkpagAaJf9E0FRXXCcvMXxmoVLJSM8Fn", + "x/4bUVoyvprNZ8z8WlK9ns1nnBbQtDH95zMJv1VMQjY71rKC+UylayioAax3pWldQ9omK5E4ECcWxOnL", + "2dXIB5plEpTqY/kjz3eE8TSvMiBaUq5oaj4pcsn0mug1U8R1JowTwYGIJdHrVmOyZJBn6sBP8rcK5C6Y", + "pRt8eEpXDYqJFDn08XwhigXj4LGCGql6QYgWJIMlNlpTTcwIBlffUAuigMp0TZZC7kHVIhHiC7wqZsfv", + "Zgp4BhJXKwW2wf8uJcDvkGgqV6BnH+axyS01yESzIjK1U0d9CarKtSLYFue4YhvgxPQ6ID9USpMFEMrJ", + "229fkCdPnjw3Eymo1pA5JhucVTN6OCfbfXY8y6gG/7nPazRfCUl5ltTt3377Asc/cxOc2ooqBfHNcmK+", + "kNOXQxPwHSMsxLiGFa5Di/tNj8imaH5ewFJImLgmtvGdLko4/mddlZTqdF0KxnVkXQh+JfZzVIYF3cdk", + "WI1Aq31pKCUN0HdHyfMPHx/NHx1d/du7k+S/3Z/PnlxNnP6LGu4eCkQbppWUwNNdspJAcbesKe/T463j", + "B7UWVZ6RNd3g4tMCRb3rS0xfKzo3NK8Mn7BUipN8JRShjo0yWNIq18QPTCqeGzFloDluJ0yRUooNyyCb", + 
"G+l7uWbpmqRUWRDYjlyyPDc8WCnIhngtPruRzXQVksTgdSN64IT+vMRo5rWHErBFaZCkuVCQaLHnePIn", + "DuUZCQ+U5qxS1zusyPkaCA5uPtjDFmnHDU/n+Y5oXNeMUEUo8UfTnLAl2YmKXOLi5OwC+7vZGKoVxBAN", + "F6d1jprNO0S+HjEixFsIkQPlSDy/7/ok40u2qiQocrkGvXZnngRVCq6AiMU/IdVm2f/z7MfXREjyAyhF", + "V/CGphcEeCoyyA7I6ZJwoQPWcLyENDQ9h+bh8Iod8v9UwvBEoVYlTS/iJ3rOChaZ1Q90y4qqILwqFiDN", + "kvojRAsiQVeSDyFkIe5hxYJu+4Oey4qnuP7NsC1dznAbU2VOd0iwgm6/Opo7dBSheU5K4BnjK6K3fFCP", + "M2PvRy+RouLZBDVHmzUNDlZVQsqWDDJSQxnBxA2zDx/Gr4dPo3wF6Hggg+jUo+xBh8M2wjNmd5svpKQr", + "CFjmgPzkhBt+1eICeM3oZLHDT6WEDROVqjsN4IhDj2vgXGhISglLFuGxM0cOI2BsGyeBC6cDpYJryjhk", + "Rjgj0kKDFVaDOAUDjt93+qf4gir44unQGd98nbj6S9Fd9dEVn7Ta2CixWzJydJqvbsPGNatW/wn3w3Bs", + "xVaJ/bm3kGx1bk6bJcvxJPqnWT9PhkqhEGgRwp9Niq041ZWE4/f8ofmLJORMU55RmZlfCvvTD1Wu2Rlb", + "mZ9y+9MrsWLpGVsNELPGNXrhwm6F/cfAi4tjvY3eK14JcVGV4YTS1sV1sSOnL4cW2cK8LmOe1Lfd8OJx", + "vvWXkev20Nt6IQeQHKRdSU3DC9hJMNjSdIn/bJfIT3Qpfzf/lGVueutyGSOt4WN3JKP5wJkVTsoyZyk1", + "RHzrPpuvRgiAvUjQpsUhHqjHHwMUSylKkJpZoLQsk1ykNE+Uphoh/buE5ex49m+Hjf3l0HZXh8Hgr0yv", + "M+xkVFarBiW0LK8B441RfdSIsDACGj+hmLBiD5Umxu0iGlZiRgTnsKFcHzRXlpY8qDfwOzdSQ2+r7Vh6", + "d65ggwQntuEClNWAbcN7igSkJ0hWgmRFhXSVi0X9w/2TsmwoiN9PytLSA7VHYKiYwZYprR7g9Gmzk8Jx", + "Tl8ekO9C2KiKC57vzOFgVQ1zNizdqeVOsdq25ObQQLynCC6nkAdmaTwZjJp/FxyH14q1yI3Ws5dXTOO/", + "u7Yhm5nfJ3X+a7BYSNth5sKLlqOcvePgL8Hl5n6Hc/qM48w9B+Sk2/dmbGOgxBnmRrwyup4W7ggdaxJe", + "SlpaBN0Xe5Yyjpc028jiektpOlHQRXEO9nDAa4jVjffa3v0QxQRZoYPD17lIL/5O1foO9vzCw+pvPxyG", + "rIFmIMmaqvXBLKZlhNurgTZli5mGeMEni2Cog3qKdzW9PVPLqKbB1By+cbXEkh77odADGbm7/Ij/oTkx", + "n83eNqLfgj0g5yjAlN3OzsmQmdu+vSDYkUwDtEIIUtgLPjG37mth+aIZPL5Ok9boG2tTcCvkJoErJLZ3", + "vg2+FtsYDl+LbW8LiC2ou+APAwfVSA2FmoDfS4eZwPV35KNS0l2fyAh7CpHNBI3qqnA38PDEN6M0xtmT", + "hZA3kz4dscJJY3Im1EANhO+8QyRsWpWJY8WI2co26ABqvHzjQqMLPkaxFhXONP0DqKAM1LugQhvQXVNB", + "FCXL4Q5Yfx0V+guq4Mljcvb3k2ePHv/y+NkXhiVLKVaSFmSx06DIfXc3I0rvcnjQnxnejqpcx6F/8dQb", + "KttwY3CUqGQKBS37oKwB1KpAthkx7fpUa5MZZ10jOGVznoOR5JbsxNr2DWovmTIaVrG4k8UYIljWjJIR", + "h0kGe5nputNrhtmFU5Q7Wd3FVRakFDJiX8MtpkUq8mQDUjER8aa8cS2Ia+HV27L7u8WWXFJFzNho+q04", + "KhQRztJbPl3uW9DnW97QZlTy2/lGZufGnbIubeJ7S6IiJchEbznJYFGtWjehpRQFoSTDjnhGfwf6bMdT", + "tKrdBZMOX9MKxtHEr3Y8De5sZqFyyFatRbj93axLFW+fs0PdUxF0DDle4We81r+EXNM711+6A8Rwf+EX", + "0iJLMtMQb8Gv2GqtAwXzjRRiefc4xkaJIYofrHqemz59Jf21yMBMtlJ3cBg3wBpeN2sacjhdiEoTSrjI", + "AC0qlYof0wOee3QZoqdThye/XluNewGGkVJamdlWJUE/Xk9yNB0TmlruTZA0asCLUbufbCs7nPUK5xJo", + "Zm71wIlYOFeBc2LgJCk6IbU/6JySENlLLbxKKVJQCrLEmSj2oubbWSGiR+iEiCPC9ShECbKk8tbIXmz2", + "4nkBuwRd5orc//5n9eAz4KuFpvkewmKbGHnrC5/zB/Wxnjb8GMN1Bw/ZjkogXuaa26UREDloGCLhtWgy", + "uH5djHqreHuybECiZ+YP5Xg/yO0YqEb1D+b322JblQOBYO6ic84KtNtxyoWCVPBMRYHlVOlkn1g2jVq3", + "MTODQBLGJDECHlBKXlGlrTeR8QyNIPY4wXGsgmKGGEZ4UCE1kH/2umgfdmrOQa4qVSumqipLITVksTlw", + "2I6M9Rq29VhiGcCutV8tSKVgH+QhKgXwHbHsTCyBqK6N7s7d3p8cmqbNOb+LkrKFREOIMUTOfKuAumEw", + "zAAiTDWEtozDVIdz6gic+UxpUZZGWuik4nW/ITKd2dYn+qembZ+5qG7O7UyAwhgc195hfmkpa8Og1tRc", + "oREyKeiF0T3wQmzdnn2czWZMFOMpJGOcb7blmWkVboG9m7QqV5JmkGSQ010f6E/2M7GfxwDgijcXH6Eh", + "sfEs8UVvONmHD4yAFghPxZRHgl9IaraguXk0DOJ674GcAcKOCSfHR/dqUDhWdIk8PJy2XeoIRDwNN0Kb", + "FbfsgBg7gT4F3wEy1JBvTgnsnDTXsu4Q/wXKDVCrEdcfZAdqaAoN/GtNYMCY5iKFg+3Ske4dARyVmoNS", + "bI8YGdqxA5a9N1RqlrISrzrfw+7Ob37dAaL+JpKBpiyHjAQf7C2wDPsTG4jRhXmzm+AkI0wf/Z4VJjKd", + "nCnUeNrIX8AOr9xvbITfeRAXeAdX2QhUczxRThBRHzdkNPCwCWxpqvOd0dP0GnbkEiQQVS0KprWN3G3f", + "dLUokxBA1MA9MqLz5tjoOL8CU9xLZwgqmF5/KeYzeyUYx++8cy9okcNdBUoh8gnGox4xohhMcvyTUphV", + "Zy6I2IeRek5qIemENrry6tP/nmqRGWdA/ktUJKUcb1yVhlqlERL1BNQfzQhGA6vHdC7+hkKQQwH2Iolf", + 
"Hj7sTvzhQ7fmTJElXPrIe9OwS46HD9GM80Yo3dpcd2AqNNvtNHJ8oOUfzz0XvNCRKftdzA7ylJV80wFe", + "uwvMnlLKMa6Z/q0FQGdnbqfMPeSRae51hDvJqB+Ajs0b1/2MFVVO9V24L0b10fo+wYoCMkY15DtSSkjB", + "RlcbBUtZXAxqxMZdpWvKV6hXS1GtXOCPhYOCsVLWgiEr3gMRVT70licrKaoyJihdsKcPsDdqB1Bz8wkI", + "iZ2tnn9J6/FcTsWUE8wTPFid7wzMIa/CfDZ4MTRE3TQXQ0ucdpZAnAqY9pCoKk0BoiHAsStXPdVONmST", + "3+IAGrWhkjYGitBUVzQPuY6cLgnlu3aaJGW5MlKQKYLtTOcmrnZu5+ZzWJY0t77ZSFJFuFNaGl+w8g1J", + "u6SY6HdAJjHaUJ8zQgY028uw8R9jw29Ax7DsDxwEXTUfh+KuzP07392BGmQBEQmlBIWHVmi3UvarWIa5", + "T+5UUzuloeib9m3XXwYEzdvBC6TgOeOQFILDLpruyzj8gB+jggMPzoHOqMIM9e3eSlr4d9BqjzOFG29L", + "X1ztQBa9qQMO72Dxu3A7Xp0w6wutlpCXhJI0Z2jTFFxpWaX6PadoNQk2WyQww98Ph+1oL3yTuOEuYldz", + "oN5zikE5tS0l6kxeQsRw8C2AN6eparUC1ZGfZAnwnrtWjJOKM41jFWa9ErtgJUiMjjiwLQu6MyIQzX6/", + "gxRkUem2TMbME6WNuLQuJjMMEcv3nGqSg7lT/8D4+RbBeRet5xkO+lLIi5oK8SNkBRwUU0k8gOQ7+xVj", + "+9z01y7ODzOF7WfrlDDwm/SUHRpVmuzX/3v/P47fnST/TZPfj5Ln/+vww8enVw8e9n58fPXVV/+v/dOT", + "q68e/Me/x1bK4x7Li3CYn750l7XTl6iRN16JHu6fzCJdMJ5EmSz0vXd4i9zHHEDHQA/a9hq9hvdcb7lh", + "pA3NWWZUrpuwQ1fE9fai3R0drmktRMc+4+d6TT33FlKGRIRMRzTe+Bjvx1zFM5DQTeaSinC/LCtul9Ir", + "ujbA3se+iOW8zjKzBSiOCaYgrakP3HJ/Pn72xWzepA7V32fzmfv6IcLJLNtGtUPYxq4vboPgxrinSEl3", + "CgYUUMQ9GuZjow1CsAWYe69as/LTSwql2SIu4XzYsjODbPkpt/HEZv+g023nbPli+enx1tLo4aVexxLT", + "W5oCtmpWE6ATCFFKsQE+J+wADrpmiMxczVzAUQ50iQnSeNETU9Iw6n1gGc1zRUD1cCKT7vox/kHl1knr", + "q/nMHf7qzvVxBziGV3fM2sPm/9aC3Pvum3Ny6ASmumdzFS3oILsscmt1CRStEBkjzWw5Dpus+Z6/5y9h", + "yTgz34/f84xqerigiqXqsFIgv6Y55SkcrAQ59jkZL6mm73lP0xqsmBNkw5CyWuQsJRehRtywp62C0Ifw", + "/v07mq/E+/cfetECff3VDRWVL3aA5JLptah04nK4EwmXVMa8MarO4UXItkjD2Khz4mBbUexyxB38uMyj", + "Zam6uXz96ZdlbqYfsKFymWpmyYjSQnpdxCgoFhtc39fCHQySXnoTRqVAkV8LWr5jXH8gyfvq6OgJkFZy", + "26/uyDc8uSthsiFjMNewa7/Aidt7DWy1pElJVzGvz/v37zTQElcf9eUCL9l5TrBbK6nOBw0jqGYCnh7D", + "C2DxuHaCEE7uzPby9XriU8BPuITYxqgbjSv6pusVpNndeLk6qXq9Var0OjF7OzorZVjcr0xdxmNllCwf", + "H6DYCmMwXcWTBZB0DemFK0UBRal381Z3H4LiFE0vOpiyRUpskgymyaPNfAGkKjPqVPGuBWmxIwq09kGg", + "b+ECdueiybK/ToJyO19WDW1U5NRAuzTMGm5bB6O7+C7OCU1cZenTTjH/yLPFcc0Xvs/wRrYq7x1s4hhT", + "tPI5hwhBZYQQlvkHSHCDiRp4t2L92PTMLWNhT75IwRIv+4lr0lyeXEhSOBs0cNvvBWDFI3GpyIIavV24", + "Yj02JzSQYpWiKxjQkEO3xcTMy5arA4HsO/eiJ51Ydg+03nkTRdk2Tsyco5wC5othFbzMdALR/EjWM+ac", + "AFiDzxFskaOaVEfsWaFDZct9ZIuKDaEWZ2CQvFE4PBptioSazZoqX0cIyy35vTxJB/gDc5zHKluEBv2g", + "plJtX/cyt7tPe7dLV9/CF7XwlSzCq+WEqhRGw8ew7dhyCI4KUAY5rOzEbWPPKE2+dbNABo8fl8uccSBJ", + "LByLKiVSZgtBNceMGwOMfvyQEGsCJpMhxNg4QBs9vgiYvBbh3uSr6yDJXb449bDRVxz8DfHUFhugbFQe", + "URoRzgYcSKmXANTF8NXnVyeSFMEQxufEiLkNzY2Ycze+BkivwAKqrZ1yCi7m4MGQOjtigbcHy7XmZI+i", + "m8wm1Jk80nGFbgTjhdgmNrctqvEutgvD79GYbcy0i21MW8riniILscU4FjxabIzwHlyG8fBoBDf8LVPI", + "r9hv6DS3yIwNO65NxbhQIcs4c17NLkPqxJShBzSYIXa5H1SnuBECHWNHU+rVXX73XlLb6kn/MG9OtXlT", + "dcmnw8S2/9AWiq7SAP36Vpi6nsSbrsYStVO0wzHapTQCFTLG9EZM9J00fVeQghzwUpC0lKjkIua6M3cb", + "wBPnzHcLjBdYsIPy3YMgxkfCiikNjRHdhyR8DvMkxTphQiyHZ6dLuTTzeytEfUzZQjTYsTXNTz4DjJFd", + "Mql0gh6I6BRMo28VXqq/NU3julI7ishW1WRZXDbgsBewSzKWV3F+deN+/9IM+7oWiapaoLxl3MaGLLAK", + "bDS2cGRoG346OuFXdsKv6J3Nd9puME3NwNKwS3uMv8i+6EjeMXEQYcAYc/RXbZCkIwIySAntS8dAb7Kb", + "E1NCD8asr73NlHnYe8NGfGLq0BllIUXnEhgMRmfB0E1k1BKmgyKq/VzNgT1Ay5Jl244t1EIdvDHTaxk8", + "fOmpDhVwdR2wPRQI7J6xdBEJql1lrFHwbTncVpGPg0mUOW/XAgsFQjgUU76Ye59QdTrZPlqdA82/h93P", + "pi1OZ3Y1n93OdBqjtYO4h9Zv6uWN0hld89aU1vKEXJPktCyl2NA8cQbmIdaUYuNYE5t7e/QnFnVxM+b5", + "Nyev3jj0r+azNAcqk1pVGJwVtiv/MrOyBc0GNogvFm3ufF5nt6pksPh1FabQKH25Bld1N9BGe+UBG4dD", + "sBWdkXoZjxDaa3J2vhE7xREfCZS1i6Qx31kPSdsrQjeU5d5u5rEdiObByU2rMRmVCiGAW3tXAidZcqfi", + 
"pre747uj4a49Mikca6QucGFLXysieNeFjuHFu9J53QuKxf2sVaQvnHhVoCUhUTlL4zZWvlCGObj1nZnG", + "BBsPKKMGYsUGXLG8YgEs00xNuOh2kAzGiBLTF4ocot1CuGdNKs5+q4CwDLg2nyTuys5GxWqKztreP06N", + "7tAfywG2FvoG/G10jLCwZffEQyTGFYzQU9dD92V9ZfYTrS1SGG7duCSu4fAPR+wdiSPOescfjptt8OK6", + "7XELXyHpyz/DGLYc9f4nUPzl1VXYHBgj+qQJU8lSit8hfs/D63EkFceX8mQY5fI78Akx5411p3mZpRl9", + "cLmHtJvQCtUOUhjgelz5wC2HNQW9hZpyu9T2hYFWrFucYcKo0kMLv2EYh3MvEjenlwsaK7holAyD00nj", + "AG7Z0rUgvrOnvaoTG+zoJPAl122ZzbIuQTZZcv2KLTdUGOywk1WFRjNArg11grn1/+VKRMBU/JJy+1CF", + "6We3kuutwBq/TK9LIbFGgoqb/TNIWUHzuOaQpX0Tb8ZWzL7BUCkIivw7QPZ9G8tF7qGEOl3HkeZ0SY7m", + "wUsjbjUytmGKLXLAFo9siwVVKMlrQ1TdxUwPuF4rbP54QvN1xTMJmV4rS1glSK3U4fWmdl4tQF8CcHKE", + "7R49J/fRbafYBh4YKrrzeXb86DkaXe0fR7EDwL2hMSZNMhQn/3DiJM7H6Le0MIzgdlAPounk9hGtYcE1", + "spts1yl7CVs6Wbd/LxWU0xXEI0WKPTjZvriaaEjr0IVn9gUYpaXYEabj44OmRj4NRJ8b8WfRIKkoCqYL", + "59xRojD81FTwt4N6cPY5GVd81ePlP6KPtPQuos4l8tMaTe35Fps1erJf0wLaZJ0Tagtj5KyJXvAlocmp", + "r7uD1WjrIrSWNmYsM3VUczCYYUlKybjGi0Wll8mXJF1TSVMj/g6G0E0WXzyNVOBtV4Lk10P8k9NdggK5", + "iZNeDrC91yFcX3KfC54URqJkD5psj2BXDjpz4267Id/hOOipSpmBkgyyW9ViNxpI6lsxHh8BeEtWrOdz", + "LX689sw+OWdWMs4etDIr9NPbV07LKISMFdNrtrvTOCRoyWCDsXvxRTIwb7kWMp+0CrfB/vN6HrzKGahl", + "fi/HLgJfi8jt1FeFri3pLlY9Yh0Y2qbmg2GDhQM1J+0KvJ/e6eeNz33nk/niccU/ush+5iVFIvsZDCxi", + "UB08upxZ/T3wf1PytdhOXdTODvEL+ycgTZQkFcuzn5uszE7xdUl5uo76sxam4y/NM1H15Oz5FK1Zt6ac", + "Qx4FZ3XBX7zOGNFq/ymmjlMwPrFttx68nW5ncg3ibTQ9Un5AQ16mczNASNV2wlsdUJ2vREZwnKZAWiM9", + "++8IBNWef6tA6VjyEH6wQV1otzT3XVtsmADP8LZ4QL6zL8GugbTK3+Atra4i4ErfWoN6VeaCZnMs5HD+", + "zckrYke1fexjJ7bY8QovKe1ZdOxVQe3HaeHB/t2SeOrCdDjjsdRm1kpjNSqlaVHGkkNNi3PfADNQQxs+", + "Xl9C6hyQl8GbjjaP1IAw/LBksjA3rhqa1V2QJ8x/tKbpGq9kLZE6zPLTq3R7rlTBy3j1Czd1QUTcdwZv", + "V6jb1umeE2HuzZdM2QdAYQPtfNQ6OduZBHx+ant6suLcckpU9xgrHnATsnvkbKCGN/NHMesQ/poKuS1y", + "f92i5WfYK1qgqVsBvfckns1urF8u8Q87p5QLzlIsjxQ7mt1LoVN8YBMqSXWNrH6Lux0a2VzRuut1mJyj", + "4mAldi8IHeH6Rvjgq1lUyx32T41PUq6pJivQykk2yOb++QBnB2RcgStwie/KBnJSyJZfESVk1FWd1C6N", + "a7IRpsUMXOy+Nd9eu2s/xotfMI4KviObC023ljp8yFCbWwHTZCVAufm0c4PVO9PnANNkM9h+OPAPH9pq", + "MOiWM9O2Pug+qBPvkXYeYNP2hWnr6gTVP7cikO2gJ2XpBh1+XCKqD+gtHyRwxLOYeNdOQNwafghthN1G", + "Q0nwPDWMBht0REOJ53CPMeqHFjqP+Bil1XIUtiA2hCtawYDxCBqvGIfmWc7IAZFGjwRcGNyvA/1UKqm2", + "KuAkmXYONEfvc0ygKe1cD7cF1a0lZEiCc/RjDC9j80bEgOCoGzSKG+W7+jVQw92BMvECnyF2hOy/+IBa", + "lVOiMswo6LwBERMcRnD7V2baB0B/G/R1IttdS2p3znVOoqEk0UWVrUAnNMtiFam+xq8Ev/riUrCFtKoL", + "U5YlSbEmSrtITJ/b3ECp4KoqRsbyDW45XPCoSoQbwodd/ApjEspih//GqjIOr4wLwrh2GKCPuHCvUFxT", + "b25D6mm9hqcTxVbJdErgmXJ7cjRD34zRm/53yum5WLUR+cSlIcakXLhGMfn2jTk4wsoJvVKj9mipCxtg", + "0J3wT+HhtbFOyW1LJTzKerVH0dlTP7U1boAYfjRrjoffQOhtUBCD2vPVeg+HAnDTwXhxql3mmqZkVAQN", + "ZgPZ6B2b94NYxC2nQxE7NmDHfO71nqYZ9vRshD1KUB8K1kfoex9nSkrKnGu8ERZ9yrqI9GFz4dimaxa4", + "OwkX5z1osft+MxSTTRTjqxwIfu8+M3QBLp29fmfeztVHJfkrof3VPfNq4dVR8dH596MTcKjPawYdNNqe", + "u5L2dpruTv79zzaGjQDXcvcnMOH2Fr33SFNf27XmqaYJqcshTyqP3DoV4+8tDdc/amoeIT+VQrGmBHfs", + "IaaJsW7n+JZSUL+pD8sHmmwg1Vh3vXGgS4DrVHMygwWP/P2rDtLA3bEOCXTlj8ZqHvWLre850HppSUFq", + "nS1UfTC9ws9JHSaFQgkr4K6Au3f22gkHk8Oel0tINdvsSQP7xxp4kGI090YI+15ukBXG6jBarCJyfRNb", + "g9BYltYoPkE1v1ujM5QEcgG7e4q0uCFaOXvuz5WbFJBACqB0SAyLCBULQ7BWU+cZZqrmDKSCD/ux3aEp", + "xTX45k6Q1HjDsTxLmhO3SXQcGTL+6MeksUzXa6X/YkToUKZY/9GAYWX7Jb7RoOr38HwBivBKSk77Zfou", + "XQELTNqrHQW+lAUo/5vP0LWj5OwCwleB0C1zSWXmW0TtDN6EkYycR730Ll/wvov0sh6ZNUGa/YSeSOEn", + "DMVNc2H0r2QonrkdFxk+no/RH7bkN0Z8GryWIN3raajs5UJBooUP6hzDY4wU7qH3mxBBDRZbtMgNlkB5", + "29R4waKzFEueUBfZEk6QSCiowU4GlViGxxwj9gv73Wew+KKje80pNb/uLzTvw3OZ6hEx5Polcafl/syY", + 
"m1hWGOf2rVYVK8vCDSlD038pRVal9oAON0ZtfZpc9GhElESNEml/lr37ZY4lwF4FeYYXsDu0qr8v1e+X", + "MsTeqlB2DkFef2e179ToFL9f5ys7gdWd4Pk5DTfzWSlEngzY+k/71WW6e+CCpReQEXN2+MC2gWdLyH00", + "MdfO3Mv1zldTKUvgkD04IOSE21Bi79dtlzfuDM7v6bHxtzhqVtmCT86mdPCex2MysRSTvKV882DGpZoC", + "I/xuOZQFsqd2yXagso2kl5FHfA6mXkr7ntbuwyoNU1ksYlrKnicsIl5k/yaCf2HDZ6xoUbC0/4pCT5VY", + "4mtUCY0AP60F+Lz1ViDrPNzhawzZZxpSahU4c3mgLK8kuMwB+2xOp5x+SfXaL59p3lezzJENCsP6bUl2", + "quylwF9O3Js93X0hyiSHDbQcCS6doUpTUIptIHzvx3YmGUCJV/XuARKzkId81ZEhbu5JYGOdQt2oULGE", + "tStF9kiMgcfYE8seaioLGYw2LKtoi37qFk+xTHzbPcR14g659uaIT663NdxzKUldzC1myHTpJH4JDf82", + "T7t0FKTgCZYa5sBblDUVbqOMDJI2TtmbFdGYxA99m3ZkywTProxbXsIaO03wrrSuEbyp+V3XXdIfmt04", + "7QEY32EPeqFBLngCxmtCDp3PHGH7Q02UYCqDnNCa/j4bn5tgI76CJbKy20zTVjyz0VntdQkMuOpFbRcd", + "epepaz7FgjqCY5GxvtlVoasMa5WHjGNkt9zQ/NObTrHS0gnSw71zG59oaHsLiWxJqW4W5vaKTho7sLPd", + "3dD8DZp6/wFmjaI+TgfK+TxqXcF7hlBk0pzkonnhDkGSS4RpnaKPviALl6JTSkiZYp3sxUtfRrk2NeGr", + "As3zx+O2rX3z/FnoW7Dx0qsv5HVTklULPDEaDJst+pmFysDOjXJ5jPt6bBGhX0xGhbUy9hwXFy1vqS1x", + "3QkDFBLu2GsaxD9d02varwIydXrWM2gOnUpBf56TT+sWbSMHdTO3qS7/PnHH6nZO8dTHy/Ga7hgqYAmC", + "tawJokp+ffQrkbDEx2oEefgQB3j4cO6a/vq4/dls54cP488sf6ogAUsjB8ONG+OYn4fCxm1o9ECGQmc9", + "KpZn+xijlW/SPPeEGRW/uIyzz/Lg1C/Wl9Pfqu7Rj+uEJ3UXAQkTmWtr8GCoIJNkQhKJ6xZJGUGrSFpJ", + "pndYCMeb/tkv0XCG72pvofM216UT3NmnxQXUpZQa32Kl/On6naA5nkdGp8bgMI1P636zpUWZg9soX91b", + "/A2efPk0O3ry6G+LL4+eHaXw9NnzoyP6/Cl99PzJI3j85bOnR/Bo+cXzxePs8dPHi6ePn37x7Hn65Omj", + "xdMvnv/tnpFDBmWL6MynXc/+D77Klpy8OU3ODbINTWjJ6he1DRv7p2VoijsRCsry2bH/6X/7HXaQiqIB", + "73+duazO2VrrUh0fHl5eXh6EXQ5X6ExItKjS9aEfp/+S8ZvTOjPHXi1xRW3ShTcZeFY4wW9vvzk7Jydv", + "Tg+ClzKPZ0cHRweP8CHFEjgt2ex49gR/wt2zxnU/dMw2O/54NZ8droHm6Hs3fxSgJUv9J3VJVyuQB+6N", + "HfPT5vGhVyUOPzpHytXYt8OwXPXhx5a/KdvTE8vZHn70VVrGW7fKoDg/W9BhIhZjzQ4XmPw5tSmooPHw", + "VPCCoQ4/ooo8+Puhy4iLf8Srit0Dh94pG2/ZotJHvTW4dnq4J/kPP+J/kCcDtGz8aYDubBUrDvUdaJ88", + "Znu46Ko6rKrm7dPMNu9F+7j6S7Yg5fG7afX4wQ9nbooZKOaKdKGUMFug2cQ+0aQR0VpWEBZPHCszcjWP", + "PF25ZKtKdp7k7Tz2S5gi/3n242siJHF34jc0vajjrsjp0hb3kGLDMCUlC/KYTM96Or9VIHfNfNxxGU7A", + "F/x3iT6FWpXtqPhaFf+AlRMQURQSj4+O7uzRrd7KXtkI9Rqcx+s2EHsy9YU/KVtsaGTj06NHdza3drDt", + "rSfWBdeb1SnHWBcj+Yk92XBCT/+yE3qBN2jD8UvGM/tigqa4p+0Gxfl9+Zedn2aF96BxfHwGFCoAz+5w", + "g316JjR6OM0JtrSzefKXnc0ZyA1LgZxDUQpJJct35CdeZ5cGFaX6x9xP/IKLS+4JYRTtqiio3LkjkJKu", + "qAofrm+9/2c0PLpS6ODDGt6zuY23/3Dljl4ryA7te9HNiex/3nGX25VDLFLqJ67AWlR9UveOp0PnMTY+", + "2/H0bX1I9g4H3Jh/HBP316nGF8UFhtL8wdJ8mvh99imp8Gn33ifbLG+hEBtQ9RPJDXMaBcpcSuxryVIU", + "AQ8fjGya+aBi6hwN/ZG8k6UB3tNS9+yJmz43PBIoNQnPPZGNFvyU11nr1087mRR2qHuxBZr9SxD8SxDc", + "oSDQleSDWzQ4vzDaF0obyERSmq7hYPohuuNpeIktRayUytmIsHAFJIZkxVlbVvypr7If/hTn+wvK/X5u", + "rbgNL6MyZyBrLqC8X9PjX1Lgf4wUsMWJnLloTjTkuQr3vha4922UgEvi4DZ6Y6Ic6L7dGfv58GP77ZiW", + "3U6tK52Jy6Av+nptoELfnFe/ptj6+/CSMp0shXQJHFjbt99ZA80PXWmSzq9NNnDvC6Y4Bz+G0VfRXw/r", + "0unRj12bauyrsykONPKFpfznxqcS+ihQQtbeiXcfjHzCwpxOeDYm9+PDQwyKXgulD2dX848dc3z48UPN", + "Er5i26yUbIMJ4B+u/n8AAAD//y/FazkqxQAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index 40f6cae52b..eb147bd397 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -8,11 +8,13 @@ import ( "compress/gzip" "encoding/base64" "fmt" + "net/http" "net/url" "path" "strings" . 
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" + "github.com/algorand/oapi-codegen/pkg/runtime" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" ) @@ -21,7 +23,7 @@ import ( type ServerInterface interface { // Simulates a raw transaction or transaction group as it would be evaluated on the network. WARNING: This endpoint is experimental and under active development. There are no guarantees in terms of functionality or future support. // (POST /v2/transactions/simulate) - SimulateTransaction(ctx echo.Context) error + SimulateTransaction(ctx echo.Context, params SimulateTransactionParams) error } // ServerInterfaceWrapper converts echo contexts to parameters. @@ -35,8 +37,17 @@ func (w *ServerInterfaceWrapper) SimulateTransaction(ctx echo.Context) error { ctx.Set(Api_keyScopes, []string{""}) + // Parameter object where we will unmarshal all parameters from the context + var params SimulateTransactionParams + // ------------- Optional query parameter "format" ------------- + + err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), ¶ms.Format) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) + } + // Invoke the callback with all the unmarshalled arguments - err = w.Handler.SimulateTransaction(ctx) + err = w.Handler.SimulateTransaction(ctx, params) return err } @@ -75,164 +86,172 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a5PcNpLgX0HUboQeV6xuPewZdYRjryXZnj5LGoW67bldSWejyKwqTJMABwC7qqzT", - "f79AAiBBEmSxH5ZmLvaT1EU8EolEIt/4NEtFUQoOXKvZyadZSSUtQIPEv2iaiorrhGXmrwxUKlmpmeCz", - "E/+NKC0ZX8/mM2Z+LanezOYzTgto2pj+85mEf1RMQjY70bKC+UylGyioGVjvS9O6HmmXrEXihji1Q5y9", - "nH0e+UCzTIJSfSj/yvM9YTzNqwyIlpQrmppPimyZ3hC9YYq4zoRxIjgQsSJ602pMVgzyTC38Iv9RgdwH", - "q3STDy/pcwNiIkUOfThfiGLJOHiooAaq3hCiBclghY02VBMzg4HVN9SCKKAy3ZCVkAdAtUCE8AKvitnJ", - "+5kCnoHE3UqBXeF/VxLgd0g0lWvQs4/z2OJWGmSiWRFZ2pnDvgRV5VoRbItrXLMr4MT0WpDXldJkCYRy", - "8u6HF+TJkyfPzEIKqjVkjsgGV9XMHq7Jdp+dzDKqwX/u0xrN10JSniV1+3c/vMD5z90Cp7aiSkH8sJya", - "L+Ts5dACfMcICTGuYY370KJ+0yNyKJqfl7ASEibuiW18p5sSzv9VdyWlOt2UgnEd2ReCX4n9HOVhQfcx", - "HlYD0GpfGkxJM+j74+TZx0+P5o+OP//b+9Pkv9yf3zz5PHH5L+pxD2Ag2jCtpASe7pO1BIqnZUN5Hx/v", - "HD2ojajyjGzoFW4+LZDVu77E9LWs84rmlaETlkpxmq+FItSRUQYrWuWa+IlJxXPDpsxojtoJU6SU4opl", - "kM0N991uWLohKVV2CGxHtizPDQ1WCrIhWouvbuQwfQ5RYuC6ET5wQf+8yGjWdQATsENukKS5UJBoceB6", - "8jcO5RkJL5TmrlLXu6zIxQYITm4+2MsWcccNTef5nmjc14xQRSjxV9OcsBXZi4pscXNydon93WoM1gpi", - "kIab07pHzeEdQl8PGRHkLYXIgXJEnj93fZTxFVtXEhTZbkBv3J0nQZWCKyBi+XdItdn2/3X+1zdESPIa", - "lKJreEvTSwI8FdnwHrtJYzf435UwG16odUnTy/h1nbOCRUB+TXesqArCq2IJ0uyXvx+0IBJ0JfkQQHbE", - "A3RW0F1/0gtZ8RQ3t5m2JagZUmKqzOl+Qc5WpKC7747nDhxFaJ6TEnjG+JroHR8U0szch8FLpKh4NkGG", - "0WbDgltTlZCyFYOM1KOMQOKmOQQP49eDp5GsAnD8IIPg1LMcAIfDLkIz5uiaL6SkawhIZkF+dpwLv2px", - "CbxmcGS5x0+lhCsmKlV3GoARpx4Xr7nQkJQSVixCY+cOHYZ72DaOvRZOwEkF15RxyAznRaCFBsuJBmEK", - "JhxXZvpX9JIq+Pbp0AXefJ24+yvR3fXRHZ+029gosUcyci+ar+7AxsWmVv8Jyl84t2LrxP7c20i2vjBX", - "yYrleM383eyfR0OlkAm0EOEvHsXWnOpKwskH/tD8RRJyrinPqMzML4X96XWVa3bO1uan3P70SqxZes7W", - "A8isYY1qU9itsP+Y8eLsWO+iSsMrIS6rMlxQ2tJKl3ty9nJok+2Y1yXM01qVDbWKi53XNK7bQ+/qjRwA", - "chB3JTUNL2EvwUBL0xX+s1shPdGV/N38U5a56a3LVQy1ho7dfYu2AWczOC3LnKXUIPGd+2y+GiYAVkug", - "TYsjvFBPPgUgllKUIDWzg9KyTHKR0jxRmmoc6d8lrGYns387aowrR7a7Ogomf2V6nWMnI49aGSehZXmN", - 
"Md4auUaNMAvDoPETsgnL9lAiYtxuoiElZlhwDleU60Wjj7T4QX2A37uZGnxbUcbiu6NfDSKc2IZLUFa8", - "tQ3vKRKgniBaCaIVpc11Lpb1D/dPy7LBIH4/LUuLDxQNgaHUBTumtHqAy6fNSQrnOXu5ID+GY6OcLXi+", - "N5eDFTXM3bByt5a7xWrDkVtDM+I9RXA7hVyYrfFoMDL8XVAc6gwbkRup5yCtmMZ/cW1DMjO/T+r8r0Fi", - "IW6HiQu1KIc5q8DgL4Hmcr9DOX3CcbacBTnt9r0Z2ZhR4gRzI1oZ3U877ggeaxRuJS0tgO6LvUsZRw3M", - "NrKw3pKbTmR0UZiDMxzQGkJ147N28DxEIUFS6MDwPBfp5V+o2tzBmV/6sfrHD6chG6AZSLKharOYxaSM", - "8Hg1o005YqYhau9kGUy1qJd4V8s7sLSMahoszcEbF0ss6rEfMj2QEd3lr/gfmhPz2Zxtw/rtsAtygQxM", - "2ePsPAiZUeWtgmBnMg3QxCBIYbV3YrTua0H5opk8vk+T9uh7azBwO+QWgTskdnd+DJ6LXQyG52LXOwJi", - "B+ou6MOMg2KkhkJNgO+lg0zg/jv0USnpvo9kHHsKks0Cjeiq8DTw8MY3szSW19OlkDfjPh22wkljTybU", - "jBow33kHSdi0KhNHihGblG3QGahx4Y0zje7wMYy1sHCu6R+ABWVGvQsstAe6ayyIomQ53AHpb6JMf0kV", - "PHlMzv9y+s2jx78+/uZbQ5KlFGtJC7Lca1DkvtPNiNL7HB70V4baUZXr+OjfPvVWyPa4sXGUqGQKBS37", - "Q1nrphWBbDNi2vWx1kYzrroGcMrhvADDyS3aiTXcG9BeMmUkrGJ5J5sxhLCsmSUjDpIMDhLTdZfXTLMP", - "lyj3sroLVRakFDJiX8MjpkUq8uQKpGIi4ip561oQ18KLt2X3dwst2VJFzNxo+q04ChQRytI7Pp3v26Ev", - "drzBzSjnt+uNrM7NO2Vf2sj3lkRFSpCJ3nGSwbJatzShlRQFoSTDjnhH/wj6fM9TtKrdBZEOq2kF42ji", - "V3ueBjqb2agcsnVrE26vm3Wx4u1zdqp7KgKOQccr/Ixq/UvINb1z+aU7QQz2F34jLbAkMw1RC37F1hsd", - "CJhvpRCru4cxNksMUPxgxfPc9OkL6W9EBmaxlbqDy7gZrKF1s6chhdOlqDShhIsM0KJSqfg1PeCWR38g", - "ujF1ePPrjZW4l2AIKaWVWW1VEnTS9ThH0zGhqaXeBFGjBrwYtfvJtrLTWZdvLoFmRqsHTsTSuQqcEwMX", - "SdHDqP1F54SEyFlqwVVKkYJSkCXORHEQNN/OMhE9gicEHAGuZyFKkBWVtwb28uognJewT9Afrsj9n35R", - "D74CvFpomh9ALLaJobdW+Jw/qA/1tOnHCK47eUh2VALxPNdol4ZB5KBhCIXXwsng/nUh6u3i7dFyBRI9", - "M38oxftJbkdANah/ML3fFtqqHIjycorOBSvQbscpFwpSwTMVHSynSieH2LJp1NLGzAoCThjjxDjwgFDy", - "iiptvYmMZ2gEsdcJzmMFFDPFMMCDAqkZ+Rcvi/bHTs09yFWlasFUVWUppIYstgYOu5G53sCunkusgrFr", - "6VcLUik4NPIQloLxHbLsSiyCqK6N7s7d3l8cmqbNPb+PorIFRIOIMUDOfasAu2GkywAgTDWItoTDVIdy", - "6vCa+UxpUZaGW+ik4nW/ITSd29an+uembZ+4qG7u7UyAmV17mBzkW4tZG+O0oUaFxpFJQS+N7IEKsXV7", - "9mE2hzFRjKeQjFG+OZbnplV4BA4e0qpcS5pBkkFO9/1Bf7afif08NgDueKP4CA2JjWeJb3pDyT58YGRo", - "geOpmPBI8AtJzRE0mkdDIK73gZEzwLFjzMnR0b16KJwrukV+PFy23erIiHgbXgltdtySA0LsGPoUeAfQ", - "UI98c0xg56RRy7pT/CcoN0EtRlx/kj2ooSU0419rAQPGNBcGHByXDnfvMOAo1xzkYgfYyNCJHbDsvaVS", - "s5SVqOr8BPs71/y6E0T9TSQDTVkOGQk+WC2wDPsTG4jRHfNmmuAkI0wf/J4VJrKcnCmUeNrAX8IeVe63", - "NsLvIogLvANVNjKquZ4oJwiojxsyEnjYBHY01fneyGl6A3uyBQlEVcuCaW1DNtuarhZlEg4QNXCPzOi8", - "OTY6zu/AFPfSOQ4VLK+/FfOZVQnG4bvo6AUtdDhVoBQin2A86iEjCsEkxz8phdl15iKEfRipp6QWkI5p", - "oyuvvv3vqRaacQXkP0VFUspR46o01CKNkCgnoPxoZjASWD2nc/E3GIIcCrCKJH55+LC78IcP3Z4zRVaw", - "9WH1pmEXHQ8fohnnrVC6dbjuwFRojttZ5PpAyz/eey54ocNTDruY3chTdvJtZ/DaXWDOlFKOcM3yb80A", - "OidzN2XtIY1Mc6/juJOM+sHQsXXjvp+zosrvasNXlOWVhGHv2IcP71fFhw8fyQ+2pXdszz2Rh+jYNmkR", - "K3cbVRJDa0jOjH4rBc2MgBC17eMi+TqpgzNVFJxCGXD+5s4h5ftOIt9UGMgSUlrZqGTHtR0ETXioWkTk", - "xc7udlEYXchE83iVa3tph1hdS1GVRNXbbqlAUw1/jKm5GToGZX/iIDao+TgUHmTUxHx/B7e1HYhIKCUo", - "5K2heUXZr2IV5t845qv2SkPRt0Dbrr8O6GfvBvUcwXPGISkEh3005ZRxeI0fY70tfx/ojDftUN+u8NyC", - "vwNWe54p1Hhb/OJuBwztbR0Xdweb3x2343wIM4/QuAZ5SShJc4amN8GVllWqP3CKyn1w2CLxA16NGTb3", - "vPBN4valiPnHDfWBU4wdqVX+KF9cQYQv/wDgrT6qWq9B6Y6UuAL4wF0rxknFmca5CrNfid2wEiQ68Re2", - "ZUH3ZEVztE79DlKQZaXbzBUTJJRmee48IWYaIlYfONUkB8NVXzN+scPhvCfR0wwHvRXyssbCInoe1sBB", - "MZXE4xx+tF8xBM0tf+PC0TBb1X62tnMzfpNFsUfdv8nA/D/3/+Pk/WnyXzT5/Th59j+OPn56+vnBw96P", - "jz9/993/bf/05PN3D/7j32M75WGPhe87yM9eOp3i7CUKjo3xvAf7FzOcFownUSILXcQd2iL3jfjrCehB", - "26ygN/CB6x03hHRFc5ZRfTNy6LK43lm0p6NDNa2N6JgR/FqvKY7dgsuQCJPpsMYbX+P90KB4ogx6c1zu", - "C56XVcXtVlbKeZQwDtyHaIjVvE6GskUQTghmymyojy9yfz7+5tvZvMlwqb/P5jP39WOEklm2i+UxZbCL", - 
"SdnugODBuKdISfcKdJx7IOzRaBTrFA+HLcCoZ2rDyi/PKZRmyziH89G1Tlvf8TNuw17N+UHf0N6ZnMXq", - "y8OtJUAGpd7EkqNbkgK2anYToOOvL6W4Aj4nbAGLrracrUH5uJgc6AqTdNG/IaZkC9TnwBKap4oA6+FC", - "JqmkMfpB4dZx68/zmbv81Z3L427gGFzdOWtHkP9bC3Lvx+8vyJFjmOqeTamzQwdJUBErlIvzb0VyGG5m", - "S0LYnMIP/AN/CSvGmfl+8oFnVNOjJVUsVUeVAvmc5pSnsFgLcuJTB15STT/wnqQ1WLUlSNogZbXMWUou", - "Q4m4IU+biR9VG2m+FkZx7Dq1+/KrmyrKX+wEyZbpjah04lKNEwlbKmNOA1WnmuLItlDA2Kxz4sa2rNil", - "Mrvx4zyPlqXqppz1l1+WuVl+QIbKJVSZLSNKC+llESOgWGhwf98IdzFIuvV56pUCRX4raPmecf2RJB+q", - "4+MnQFo5WL+5K9/Q5L6Elr3yRilxXVslLtzqNbDTkiYlXQ8YDTTQEncf5eUClew8J9itlfvlY1txqGYB", - "Hh/DG2DhuHYeCy7u3PbyNWPiS8BPuIXYxogbjcf0pvsVZIPdeLs6GWW9Xar0JjFnO7oqZUjc70xdSmJt", - "hCzvxlZsjaGCrurGEki6gfQSMiwAAEWp9/NWdx8p4QRNzzqYsoUybC4HZnOjaXcJpCoz6kTxjkHJYFiB", - "1j5W8R1cwv5CNMng18mjbad1qqGDipQaSJeGWMNj68bobr4Lx0FbV1n67EhMk/FkcVLThe8zfJCtyHsH", - "hzhGFK20wyFEUBlBhCX+ARTcYKFmvFuRfmx5RstY2psvUlfD837imjTKk4ucCVeD2ZT2ewFYdUdsFVlS", - "I7cLVzDGpi4GXKxSdA0DEnJoXZ+YINiyyOMgh+696E0nVt0LrXffREG2jROz5iilgPliSAWVmU68lJ/J", - "OnCsAZVgHTiHsGWOYlIdWGaZDpUtL4ctbDUEWpyAQfJG4PBgtDESSjYbqnwtGyz548/yJBngD0zFHSvA", - "cBaE+gR1fWrDt+e53XPa0y5dGQZfe8EXXAhVywnFE4yEj9HFse0QHAWgDHJY24Xbxp5QmrTgZoMMHH9d", - "rXLGgSSxqCGqlEiZLUbUXDNuDjDy8UNCrAmYTB4hRsYB2OiYxIHJGxGeTb6+DpDcpTVTPza6NIO/IZ6B", - "YeNojcgjSsPCGR+I2PYcgLpQs/r+6gQ84jCE8TkxbO6K5obNOY2vGaRXBwDF1k7Wv3ONPxgSZ0cs8PZi", - "udaa7FV0k9WEMpMHOi7QjUC8FLvEpmBFJd7lbmnoPRpajAlhsYNpKy7cU2QpdhhugVeLDWU9AMswHB6M", - "QMPfMYX0iv2GbnMLzNi049JUjAoVkowz59XkMiROTJl6QIIZIpf7QRGFGwHQMXY05Uad8ntQSW2LJ/3L", - "vLnV5k1xIJ+1ETv+Q0couksD+OtbYeqyB2+7EkvUTtGOGmhXfAhEyBjRGzbRd9L0XUEKckClIGkJUcll", - "zHVndBvAG+fcdwuMF1hXgvL9gyAURcKaKQ2NEd1czN4r9KXNkxTLWQmxGl6dLuXKrO+dEPU1ZeulYMfW", - "Mr/4CjCUc8Wk0gl6IKJLMI1+UKhU/2CaxmWldrCLrezIsjhvwGkvYZ9kLK/i9Orm/emlmfZNzRJVtUR+", - "yzgBmm7IEiuRRkPgRqa2UZKjC35lF/yK3tl6p50G09RMLA25tOf4FzkXHc47xg4iBBgjjv6uDaJ0hEEG", - "mYt97hjITfZwYubiYsz62jtMmR/7YNiIz58cuqPsSNG1BAaD0VUwdBMZsYTpoJBnP6Vw4AzQsmTZrmML", - "taMOasz0WgYPXyGpgwXcXTfYAQwEds9YVoME1S6G1Qj4tiRrqxbFYhJmLtolq0KGEE7FlC8o3kdUnfV0", - "CFcXQPOfYP+LaYvLmX2ez25nOo3h2o14ANdv6+2N4hld89aU1vKEXBPltCyluKJ54gzMQ6QpxZUjTWzu", - "7dFfmNXFzZgX35++euvA/zyfpTlQmdSiwuCqsF35L7MqW3dr4ID4gsVG5/MyuxUlg82viwWFRuntBlxx", - "2EAa7VWxaxwOwVF0RupVPELooMnZ+UbsEkd8JFDWLpLGfGc9JG2vCL2iLPd2Mw/tQDQPLm5aKcQoVwgH", - "uLV3JXCSJXfKbnqnO346Guo6wJPCuUbK1xa2QrMigndd6EaERHMckmpBsQadtYr0mROvCrQkJCpnadzG", - "ypcYdsut78w0Jth4QBg1I1ZswBXLKxaMZZqpCYpuB8hgjigyfT3DIdwthXtao+LsHxUQlgHX5pPEU9k5", - "qFj0z1nb+9epkR36c7mBrYW+Gf42MkZYf7F74yEQ4wJG6KnrgfuyVpn9QmuLlPkhcElcw+Efzti7Ekec", - "9Y4+HDXb4MVN2+MWvoTR53+GMGzV5MPPcHjl1RWCHJgj+qwGU8lKit8hruehehzJGPEVJxlGufwOfEKY", - "eWPdaV4HaWYf3O4h6Sa0QrWDFAaoHnc+cMth6TtvoabcbrWtct+KdYsTTBhVemTHbwjGwdyLxM3pdklj", - "dQGNkGFgOm0cwC1buhbEd/a4d2Z/5oqALkjgS67bMpsMXIJskrn6hUVuKDDYaSeLCo1kgFQbygRz6//L", - "lYgMU/Et5faxBNPPHiXXW4E1fpleWyExlV/Fzf4ZpKygeVxyyNK+iTdja2afCqgUBLXo3UD2jRVLRa6e", - "v3WxN6g5W5HjefDahduNjF0xxZY5YItHtsWSKuTktSGq7mKWB1xvFDZ/PKH5puKZhExvlEWsEqQW6lC9", - "qZ1XS9BbAE6Osd2jZ+Q+uu0Uu4IHBovufp6dPHqGRlf7x3HsAnBvgoxxk2wVJr7E6Rj9lnYMw7jdqIto", - "1rN9yGmYcY2cJtt1ylnClo7XHT5LBeV0DfFIkeIATLYv7iYa0jp44Zl9hURpKfaEDaQggaaGPw1Enxv2", - "Z8EgqSgKpgvn3FGiMPTUFJq3k/rh7JMmrkaoh8t/RB9p6V1EHSXyyxpN7f0WWzV6st/QAtponRNq6zfk", - "rIle8JWLyZkvD4NFU+taqRY3Zi6zdBRzMJhhRUrJuEbFotKr5M8k3VBJU8P+FkPgJstvn0YKxbYLFvLr", - "Af7F8S5BgbyKo14OkL2XIVxfcp8LnhSGo2QPmmyP4FQOOnPjbrsh3+H40FOFMjNKMkhuVYvcaMCpb0V4", - "fGTAW5JivZ5r0eO1V/bFKbOScfKgldmhn9+9clJGIWSs5ltz3J3EIUFLBlcYuxffJDPmLfdC5pN24TbQ", - 
"f13Pgxc5A7HMn+WYIvBcRLRTX7y4tqS7WPWIdWDomJoPhgyWbqg5aReK/fJOP2987jufzBcPK/7RBfYr", - "byki2a9gYBODItbR7czq74H/m5LnYjd1UzsnxG/sPwFqoiipWJ790mRldmqES8rTTdSftTQdf21eM6oX", - "Z++naGm1DeUc8uhwVhb81cuMEan272LqPAXjE9t2y5bb5XYW1wDeBtMD5Sc06GU6NxOEWG0nvNUB1fla", - "ZATnaep4NdyzX+4+KEr8jwqUjiUP4Qcb1IV2S6Pv2pq4BHiG2uKC/GhfI90AaVVpQS3N5sdD5iu0WoN6", - "VeaCZnNixrn4/vQVsbPaPvZNDluTd41KSnsVHXtVUKJwWniwf14jnrowfZzxWGqzaqWxaJLStChjyaGm", - "xYVvgBmooQ0f1ZcQOwvy0mqOyusldhJDDysmC6Nx1aNZ2QVpwvxHa5puUCVrsdRhkp9eTNpTpQoecKsf", - "Yqnr9uG5M3C7etK2nPScCKM3b5myj1DCFbTzUevkbGcS8Pmp7eXJinNLKVHZY6x4wE3Q7oGzgRrezB+F", - "rIP4awrkthb7dWtrn2OvaB2hbqHu3sttNruxfmDDPy6cUi44S7GKT+xqdg9aTvGBTSh41DWy+iPuTmjk", - "cEXLg9dhcg6LgwXDPSN0iOsb4YOvZlMtddg/Nb6cuKGarEErx9kgm/sq984OyLgCV4cR3zYN+KSQLb8i", - "csioqzqpXRrXJCNMixlQ7H4w3944tR/jxS8ZRwHfoc2FpltLHb63p41WwDRZC1BuPe3cYPXe9FlgmmwG", - "u48L/z4fjmHdcmbZ1gfdH+rUe6SdB9i0fWHa2lImzc+tCGQ76WlZukmH30CIygN6xwcRHPEsJt61EyC3", - "Hj8cbYTcRkNJ8D41hAZX6IiGEu/hHmHU7wF03poxQqulKGxBbAhXtIIB4xEwXjEOzeuRkQsijV4JuDF4", - "Xgf6qVRSbUXASTztAmiO3ucYQ1PauR5uO1RngxEluEY/x/A2Nk8ZDDCOukEjuFG+rx+tNNQdCBMv8LVc", - "h8j+wwQoVTkhKsOMgs5TBTHGYRi3L4XUvgD6x6AvE9nuWlJ7cq5zEw0liS6rbA06oVkWq4v5HL8S/OoL", - "RcEO0qqun1iWJMWaKO0iMX1qcxOlgquqGJnLN7jldMHbHxFqCN8f8TuMSSjLPf4bKx44vDMuCOPaYYA+", - "4sI9lnBNubk9Uk/qNTSdKLZOpmMC75Tbo6OZ+maE3vS/U0rPxboNyBcuDTHG5cI9ivG3783FEVZO6FXE", - "tFdLXdgAg+6Ef7EN1cY6JbfNlfAq65XIRGdPXfNu3AAx/LbTHC+/gdDboCAGtfer9R4OBeCmg/HiVLvM", - "NU3JKAsazAay0Ts27wehiFtOhyJ2bMCO+dzrPU0y7MnZOPYoQn0oWB+gn3ycKSkpc67xhln0Mesi0ofN", - "hWOHrtng7iJcnPegxe6nq6GYbKIYX+dA8Hv3NZxLcOns9XPodq0+KsmrhPZX9xqpHa+Oio+uvx+dgFN9", - "XTPooNH2wlVet8t0OvlPv9gYNgJcy/0/gQm3t+m9t4T60q41TzVNSF21d1IV39atGH8WaLj+UVPzCOmp", - "FIo1laJj7wVNjHW7wCd/gvpN/bF8oMkVpBrLgzcOdAlwnWpOZrLgLbr/roM0oDvWIYGu/NFYzaN+TfAD", - "F1ovLSlIrbP1lBfTK/yc1mFSyJTwNbg1cPccXDvhYHLY82oFqWZXB9LA/rYBHqQYzb0Rwj7rGmSFsTqM", - "FquIXN/E1gA0lqU1Ck9Qze/W4AwlgVzC/p4iLWqIFnie+3vlJgUkEAPIHRJDIkLFwhCs1dR5hpmqKQOx", - "4MN+bHdoSnENPg0TJDXecC5PkubGbRIdR6aMv00xaS7T9VrpvxgROpQp1q9tPyxsv8SnBFT9bJsvQBGq", - "pOQsUv3ZFbDApL3aUeBLWYDyv/kMXTtLzi4hfLwG3TJbKjPfImpn8CaMZOQ+6qV3RUtWU2WDKJ0fvA7S", - "7Cf0RAo/YShumgss9zwUz9yOiwzfeMfoD7wOsPw0wrUC6R75QmEvFwoSLXxQ5xgcY6hw75HfBAlqsNii", - "BW6wBMq7psYLFp2lWPKEusiWcIFEQkENdDKoxDI85xiyX9jvPoPFFx09aE6p6TU5WErFh+cy1UNiSPUr", - "4m7Lw5kxN7GsMM7tk6IqVpaFG1SGpv9SiqxK7QUdHoza+jS56NEIK4kaJdL+Knv6ZY4lwF4FeYaXsD+y", - "on+6obypxdY+1laEsmsI8vo7u32nRqe4fp2v7QLWdwLn1zTczGelEHkyYOs/61eX6Z6BS5ZeQkbM3eED", - "2wZe1yD30cRcO3O3m72vplKWwCF7sCDklNtQYu/XbZc37kzO7+mx+Xc4a1bZgk/OprT4wOMxmViKSd6S", - "v/lhxrmaAsP8bjmVHeRA7ZLdQGUbSbeRt2YWU5XSvqe1+/5HQ1QWipiUcsNE9knnu29XipB+8PTBuPYT", - "1rloAuikNU+itNQ8B9EWXl43VsdpjzD4DgfAC5Xi4BkGz40cOF85yu11jZRgKYOU0Fr+IT3bLbDhS8EW", - "KUyLMMu0VYdshER7XwIjinpR2ybieO6bMLCoheBY6Kdv+lBorsZ6wSHhmHMpr2j+5c0XWO3kFPHhnkSM", - "LzTUf0MkW1Sqm4WavKKT5g503bubmr9Fc8vfwOxR1M/ghnJ2x/r5C2+dxbp2NCe5aB5DwiHJFse0jolH", - "35KlC5MvJaRMsU4G0daXMq3VPazs3byUOa5fHlrnL0LfgoydgiBK8qYpi6gF3g8NhM0R/cpMZeDkRqk8", - "Rn09sojgL8ajwnz1A9fFZctjYcvMdkJxhIQ79lwEMQjX9Fz0M/GnLs9a582lUynor3Pybd3CbeSibtY2", - "1e3WR+5Y7bwp3rJ4SUzTHd11FiFYT5YgqOS3R78RCSt8MEKQhw9xgocP567pb4/bn81xfvgw/iLnl3LU", - "WRy5Mdy8MYr5ZSh004YnDkQJd/ajYnl2iDBaMd/NkysY1fyry/r4Ko++/Grtqf2j6grvXydEoLsJiJjI", - "WluTB1MF0dwTArldt0jYNmomaSWZ3mMxCm9+Y79GXYo/1hZ75/Gp05fd3afFJdTlTBr7fqX87fqjoDne", - "R0amxgANja8wfr+jRZmDOyjf3Vv+CZ78+Wl2/OTRn5Z/Pv7mOIWn3zw7PqbPntJHz548gsd//ubpMTxa", - "ffts+Th7/PTx8unjp99+8yx98vTR8um3z/50z/AhA7IFdOZTH2f/G19GSk7fniUXBtgGJ7Rk9eOrhoz9", - 
"8w40xZMIBWX57MT/9D/9CVukomiG97/OXGbVbKN1qU6Ojrbb7SLscrRGg16iRZVujvw8/Ucv357V0fHW", - "FYw7agOfDSngpjpSOMVv774/vyCnb88WDcHMTmbHi+PFI3zMrAROSzY7mT3Bn/D0bHDfjxyxzU4+fZ7P", - "jjZAc/R/mT8K0JKl/pPa0vUa5MK9c2F+unp85EWJo0/OmPl57NtRWDL26FPL5psd6IklJY8++UoJ461b", - "pQicrTvoMBGKsWZHS0zAmtoUVNB4eCmoYKijTygiD/5+5LJS4h9RVbFn4Mg7RuItW1j6pHcG1k4P93rz", - "0Sf8D9JkAJaNAeuDa7M1juyDcv2f9zyN/tgfqFvYPPbz0ad2Yb0WQtWm0pnYBn1RCLcaZH++utR06++j", - "LWXaXKvOu4WFD/qdNdD8yMVtd35tQqV6XzD+K/ix/Upv5Nejuq5M9GOX2GNf3WYPNPJZN5h+JWxmT819", - "zjI0DdkWoXHIXm+g9HOR7UfeydklS8ap3Lffymmud/uxrx/0X/LagK1Z5C0kod0QNRu3jPDi1bICm5yN", - "1m3kgY+Pj0fgLdS6dFHC//3g6v9nD64+vf7Oj5ojWwGPEeCe04z4rLiEvKa5IXvIyKmTEkKILXyPvih8", - "ZxwDGIwoQayo9Hk+++YLI+mMG8Ge5gRbWgiefFEIzkFesRTIBRSlkFSyfE9+5nWaV1DapX+2fuaXXGy5", - "B95I21VRIL+r2aZRYiXdth+AlxFypYow3diVwCZlQDdpbEH+dvruzdmbH0+sSF5Lj+b/uxIkK4BrmqNV", - "u3IOBW3OcQZXkIvSfMZ6JhLQqsoFWVdUUq4BXLUdWaDly78xSHOm9wboVYVvQpirXkjLkuhaoXcACwDP", - "5rMQBHOGd4nh12vgibsxkqXI9r4Ql6RbvbMM4ijQs0K9ZXbyPtBY3n/8/NF8k6Y1fmrE8JOjI3RWboTS", - "R7PP808dET38+LEG3WdSz0rJrjAw++Pn/xcAAP//LCP3WkazAAA=", + "H4sIAAAAAAAC/+x9aXPctrLoX0HNvVVe3lCS13OsqtR9sp346MZ2XJaS8+6x/BIM2TODiAQYAJwlfv7v", + "r9AASJAEZ6gl9klVPtkaYmk0Go3e8WmSiqIUHLhWk+NPk5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPBJ8f+", + "G1FaMr6YTCfM/FpSvZxMJ5wW0LQx/acTCb9VTEI2OdaygulEpUsoqBlYb0vTuh5pkyxE4oY4sUOcvpx8", + "3vGBZpkEpfpQ/sDzLWE8zasMiJaUK5qaT4qsmV4SvWSKuM6EcSI4EDEnetlqTOYM8kwd+EX+VoHcBqt0", + "kw8v6XMDYiJFDn04X4hixjh4qKAGqt4QogXJYI6NllQTM4OB1TfUgiigMl2SuZB7QLVAhPACr4rJ8YeJ", + "Ap6BxN1Kga3wv3MJ8DskmsoF6MnHaWxxcw0y0ayILO3UYV+CqnKtCLbFNS7YCjgxvQ7Im0ppMgNCOXn/", + "3Qvy6NGjZ2YhBdUaMkdkg6tqZg/XZLtPjicZ1eA/92mN5gshKc+Suv37717g/GdugWNbUaUgflhOzBdy", + "+nJoAb5jhIQY17DAfWhRv+kRORTNzzOYCwkj98Q2vtVNCef/qruSUp0uS8G4juwLwa/Efo7ysKD7Lh5W", + "A9BqXxpMSTPoh6Pk2cdPD6YPjj7/x4eT5F/uzyePPo9c/ot63D0YiDZMKymBp9tkIYHiaVlS3sfHe0cP", + "aimqPCNLusLNpwWyeteXmL6Wda5oXhk6YakUJ/lCKEIdGWUwp1WuiZ+YVDw3bMqM5qidMEVKKVYsg2xq", + "uO96ydIlSamyQ2A7smZ5bmiwUpAN0Vp8dTsO0+cQJQaua+EDF/Tvi4xmXXswARvkBkmaCwWJFnuuJ3/j", + "UJ6R8EJp7ip1tcuKnC+B4OTmg71sEXfc0HSeb4nGfc0IVYQSfzVNCZuTrajIGjcnZ5fY363GYK0gBmm4", + "Oa171BzeIfT1kBFB3kyIHChH5Plz10cZn7NFJUGR9RL00t15ElQpuAIiZr9Cqs22//fZD2+JkOQNKEUX", + "8I6mlwR4KjLIDsjpnHChA9JwtIQ4ND2H1uHgil3yvyphaKJQi5Kml/EbPWcFi6zqDd2woioIr4oZSLOl", + "/grRgkjQleRDANkR95BiQTf9Sc9lxVPc/2balixnqI2pMqdbRFhBN98cTR04itA8JyXwjPEF0Rs+KMeZ", + "ufeDl0hR8WyEmKPNngYXqyohZXMGGalH2QGJm2YfPIxfDZ5G+ArA8YMMglPPsgccDpsIzZjTbb6Qki4g", + "IJkD8qNjbvhVi0vgNaGT2RY/lRJWTFSq7jQAI069WwLnQkNSSpizCI2dOXQYBmPbOA5cOBkoFVxTxiEz", + "zBmBFhossxqEKZhwt77Tv8VnVMHTx0N3fPN15O7PRXfXd+74qN3GRok9kpGr03x1BzYuWbX6j9APw7kV", + "WyT2595GssW5uW3mLMeb6Fezfx4NlUIm0EKEv5sUW3CqKwnHF/y++Ysk5ExTnlGZmV8K+9ObKtfsjC3M", + "T7n96bVYsPSMLQaQWcMaVbiwW2H/MePF2bHeRPWK10JcVmW4oLSluM625PTl0CbbMa9KmCe1thsqHucb", + "r4xctYfe1Bs5AOQg7kpqGl7CVoKBlqZz/GczR3qic/m7+acsc9Nbl/MYag0duysZzQfOrHBSljlLqUHi", + "e/fZfDVMAKwiQZsWh3ihHn8KQCylKEFqZgelZZnkIqV5ojTVONJ/SphPjif/cdjYXw5td3UYTP7a9DrD", + "TkZktWJQQsvyCmO8M6KP2sEsDIPGT8gmLNtDoYlxu4mGlJhhwTmsKNcHjcrS4gf1Af7gZmrwbaUdi++O", + "CjaIcGIbzkBZCdg2vKNIgHqCaCWIVhRIF7mY1T/cPSnLBoP4/aQsLT5QegSGghlsmNLqHi6fNicpnOf0", + "5QF5FY6Norjg+dZcDlbUMHfD3N1a7harbUtuDc2IdxTB7RTywGyNR4MR82+D4lCtWIrcSD17acU0/odr", + "G5KZ+X1U5z8HiYW4HSYuVLQc5qyOg78Eys3dDuX0CceZew7ISbfv9cjGjBInmGvRys79tOPuwGONwrWk", + "pQXQfbF3KeOopNlGFtYbctORjC4Kc3CGA1pDqK591vaehygkSAodGJ7nIr38B1XLWzjzMz9W//jhNGQJ", + "NANJllQtDyYxKSM8Xs1oY46YaYgKPpkFUx3US7yt5e1ZWkY1DZbm4I2LJRb12A+ZHsiI7vID/ofmxHw2", + 
"Z9uwfjvsATlHBqbscXZOhsxo+1ZBsDOZBmiFEKSwCj4xWveVoHzRTB7fp1F79K21KbgdcovAHRKbWz8G", + "z8UmBsNzsekdAbEBdRv0YcZBMVJDoUbA99JBJnD/HfqolHTbRzKOPQbJZoFGdFV4Gnh445tZGuPsyUzI", + "63GfDlvhpDE5E2pGDZjvtIMkbFqViSPFiNnKNugM1Hj5djON7vAxjLWwcKbpH4AFZUa9DSy0B7ptLIii", + "ZDncAukvo0x/RhU8ekjO/nHy5MHDnx8+eWpIspRiIWlBZlsNitx1uhlRepvDvf7KUDuqch0f/eljb6hs", + "jxsbR4lKplDQsj+UNYBaEcg2I6ZdH2ttNOOqawDHHM5zMJzcop1Y274B7SVTRsIqZreyGUMIy5pZMuIg", + "yWAvMV11ec0023CJciur21BlQUohI/Y1PGJapCJPViAVExFvyjvXgrgWXrwtu79baMmaKmLmRtNvxVGg", + "iFCW3vDxfN8Ofb7hDW52cn673sjq3Lxj9qWNfG9JVKQEmegNJxnMqkVLE5pLURBKMuyId/Qr0GdbnqJV", + "7TaIdFhNKxhHE7/a8jTQ2cxG5ZAtWptwc92sixVvn7NT3VERcAw6XuNnVOtfQq7prcsv3QlisL/wG2mB", + "JZlpiFrwa7ZY6kDAfCeFmN8+jLFZYoDiByue56ZPX0h/KzIwi63ULVzGzWANrZs9DSmczkSlCSVcZIAW", + "lUrFr+kBzz26DNHTqcObXy+txD0DQ0gprcxqq5KgH6/HOZqOCU0t9SaIGjXgxajdT7aVnc56hXMJNDNa", + "PXAiZs5V4JwYuEiKTkjtLzonJETOUguuUooUlIIscSaKvaD5dpaJ6B14QsAR4HoWogSZU3ljYC9Xe+G8", + "hG2CLnNF7n7/k7r3FeDVQtN8D2KxTQy9tcLn/EF9qMdNv4vgupOHZEclEM9zjXZpGEQOGoZQeCWcDO5f", + "F6LeLt4cLSuQ6Jn5QyneT3IzAqpB/YPp/abQVuVAIJhTdM5ZgXY7TrlQkAqeqehgOVU62ceWTaOWNmZW", + "EHDCGCfGgQeEktdUaetNZDxDI4i9TnAeK6CYKYYBHhRIzcg/eVm0P3Zq7kGuKlULpqoqSyE1ZLE1cNjs", + "mOstbOq5xDwYu5Z+tSCVgn0jD2EpGN8hy67EIojq2uju3O39xaFp2tzz2ygqW0A0iNgFyJlvFWA3DIYZ", + "AISpBtGWcJjqUE4dgTOdKC3K0nALnVS87jeEpjPb+kT/2LTtExfVzb2dCVAYg+PaO8jXFrM2DGpJjQqN", + "I5OCXhrZAxVi6/bsw2wOY6IYTyHZRfnmWJ6ZVuER2HtIq3IhaQZJBjnd9gf90X4m9vOuAXDHG8VHaEhs", + "PEt80xtK9uEDO4YWOJ6KCY8Ev5DUHEGjeTQE4nrvGTkDHDvGnBwd3amHwrmiW+THw2XbrY6MiLfhSmiz", + "45YcEGLH0MfAO4CGeuTrYwI7J41a1p3if0C5CWox4uqTbEENLaEZ/0oLGDCmuUjh4Lh0uHuHAUe55iAX", + "28NGhk7sgGXvHZWapaxEVed72N665tedIOpvIhloynLISPDBaoFl2J/YQIzumNfTBEcZYfrg96wwkeXk", + "TKHE0wb+Eraocr+zEX7nQVzgLaiykVHN9UQ5QUB93JCRwMMmsKGpzrdGTtNL2JI1SCCqmhVMaxu529Z0", + "tSiTcICogXvHjM6bY6Pj/A6McS+d4VDB8vpbMZ1YlWA3fOcdvaCFDqcKlELkI4xHPWREIRjl+CelMLvO", + "XBCxDyP1lNQC0jFtdOXVt/8d1UIzroD8j6hISjlqXJWGWqQREuUElB/NDEYCq+d0Lv4GQ5BDAVaRxC/3", + "73cXfv++23OmyBzWPvLeNOyi4/59NOO8E0q3DtctmArNcTuNXB9o+cd7zwUvdHjKfhezG3nMTr7rDF67", + "C8yZUsoRrln+jRlA52Ruxqw9pJFx7nUcd5RRPxg6tm7c9zNWVDnVt+G+2CmP1voEKwrIGNWQb0kpIQUb", + "XW0ELGVhMaARG3eVLilfoFwtRbVwgT92HGSMlbIWDFnx3hBR4UNveLKQoipjjNIFe/oAeyN2ADWaT4BI", + "7Gzl/DWt53M5FWNuMI/wYHdemTGHvArTyaBiaJC6ahRDi5x2lkAcC5j2kKgqTQGiIcAxlateaicbsslv", + "cQMasaGSNgaK0FRXNA+pjpzOCeXbdpokZbkyXJApgu1M5yaudmrX5nNY5jS3vtlIUkV4UloSX7DzDUq7", + "qBjpd0AiMdJQnzJCAjTHy5DxH2PDb4aOQdmfOAi6aj4OxV0Z/Tvf3oIYZAciEkoJCi+t0G6l7FcxD3Of", + "3K2mtkpD0Tft264/DzCa94MKpOA545AUgsM2mu7LOLzBj1HGgRfnQGcUYYb6drWSFvwdsNrzjKHGm+IX", + "dzvgRe/qgMNb2PzuuB2vTpj1hVZLyEtCSZoztGkKrrSsUn3BKVpNgsMWCczw+uGwHe2FbxI33EXsam6o", + "C04xKKe2pUSdyXOIGA6+A/DmNFUtFqA6/JPMAS64a8U4qTjTOFdh9iuxG1aCxOiIA9uyoFvDAtHs9ztI", + "QWaVbvNkzDxR2rBL62Iy0xAxv+BUkxyMTv2G8fMNDuddtJ5mOOi1kJc1FuJXyAI4KKaSeADJK/sVY/vc", + "8pcuzg8zhe1n65Qw4zfpKVs0qjTZr//37n8dfzhJ/kWT34+SZ//r8OOnx5/v3e/9+PDzN9/8v/ZPjz5/", + "c++//jO2Ux72WF6Eg/z0pVPWTl+iRN54JXqwfzGLdMF4EiWy0PfeoS1yF3MAHQHda9tr9BIuuN5wQ0gr", + "mrPMiFzXIYcui+udRXs6OlTT2oiOfcav9Ypy7g24DIkwmQ5rvPY13o+5imcgoZvMJRXheZlX3G6lF3Rt", + "gL2PfRHzaZ1lZgtQHBNMQVpSH7jl/nz45Olk2qQO1d8n04n7+jFCySzbRKVD2MTUF3dA8GDcUaSkWwUD", + "AijCHg3zsdEG4bAFGL1XLVn55TmF0mwW53A+bNmZQTb8lNt4YnN+0Om2dbZ8Mf/ycGtp5PBSL2OJ6S1J", + "AVs1uwnQCYQopVgBnxJ2AAddM0RmVDMXcJQDnWOCNCp6YkwaRn0OLKF5qgiwHi5klK4fox8Ubh23/jyd", + "uMtf3bo87gaOwdWds/aw+b+1IHdefXtODh3DVHdsrqIdOsgui2itLoGiFSJjuJktx2GTNS/4BX8Jc8aZ", + "+X58wTOq6eGMKpaqw0qBfE5zylM4WAhy7HMyXlJNL3hP0hqsmBNkw5CymuUsJZehRNyQp62C0B/h4uID", + 
"zRfi4uJjL1qgL7+6qaL8xU6QrJleikonLoc7kbCmMuaNUXUOL45sizTsmnVK3NiWFbsccTd+nOfRslTd", + "XL7+8ssyN8sPyFC5TDWzZURpIb0sYgQUCw3u71vhLgZJ196EUSlQ5JeClh8Y1x9JclEdHT0C0kpu+8Vd", + "+YYmtyWMNmQM5hp27Re4cKvXwEZLmpR0EfP6XFx80EBL3H2UlwtUsvOcYLdWUp0PGsahmgV4fAxvgIXj", + "yglCuLgz28vX64kvAT/hFmIbI240rujr7leQZnft7eqk6vV2qdLLxJzt6KqUIXG/M3UZj4URsnx8gGIL", + "jMF0FU9mQNIlpJeuFAUUpd5OW919CIoTND3rYMoWKbFJMpgmjzbzGZCqzKgTxbsWpNmWKNDaB4G+h0vY", + "nosmy/4qCcrtfFk1dFCRUgPp0hBreGzdGN3Nd3FOaOIqS592ivlHniyOa7rwfYYPshV5b+EQx4iilc85", + "hAgqI4iwxD+Agmss1Ix3I9KPLc9oGTN780UKlnjeT1yTRnlyIUnhatDAbb8XgBWPxFqRGTVyu3DFemxO", + "aMDFKkUXMCAhh26LkZmXLVcHDrLv3ovedGLevdB6900UZNs4MWuOUgqYL4ZUUJnpBKL5maxnzDkBsAaf", + "Q9gsRzGpjtizTIfKlvvIFhUbAi1OwCB5I3B4MNoYCSWbJVW+jhCWW/JneZQM8AfmOO+qbBEa9IOaSrV9", + "3fPc7jntaZeuvoUvauErWYSq5YiqFEbCx7Dt2HYIjgJQBjks7MJtY08oTb51s0EGjh/m85xxIEksHIsq", + "JVJmC0E114ybA4x8fJ8QawImo0eIkXEANnp8cWDyVoRnky+uAiR3+eLUj42+4uBviKe22ABlI/KI0rBw", + "NuBASj0HoC6Gr76/OpGkOAxhfEoMm1vR3LA5p/E1g/QKLKDY2imn4GIO7g2Jszss8PZiudKa7FV0ndWE", + "MpMHOi7Q7YB4JjaJzW2LSryzzczQezRmGzPtYgfTlrK4o8hMbDCOBa8WGyO8B5ZhODwYgYa/YQrpFfsN", + "3eYWmF3T7pamYlSokGScOa8mlyFxYszUAxLMELncDapTXAuAjrGjKfXqlN+9SmpbPOlf5s2tNm2qLvl0", + "mNjxHzpC0V0awF/fClPXk3jXlViidop2OEa7lEYgQsaI3rCJvpOm7wpSkAMqBUlLiEouY647o9sA3jhn", + "vltgvMCCHZRv7wUxPhIWTGlojOg+JOFrmCcp1gkTYj68Ol3KuVnfeyHqa8oWosGOrWV+8RVgjOycSaUT", + "9EBEl2AafadQqf7ONI3LSu0oIltVk2Vx3oDTXsI2yVhexenVzfv9SzPt25olqmqG/JZxGxsywyqw0djC", + "HVPb8NOdC35tF/ya3tp6x50G09RMLA25tOf4k5yLDufdxQ4iBBgjjv6uDaJ0B4MMUkL73DGQm+zhxJTQ", + "g13W195hyvzYe8NGfGLq0B1lR4quJTAY7FwFQzeREUuYDoqo9nM1B84ALUuWbTq2UDvqoMZMr2Tw8KWn", + "OljA3XWD7cFAYPeMpYtIUO0qY42Ab8vhtop8HIzCzHm7FljIEMKpmPLF3PuIqtPJ9uHqHGj+PWx/Mm1x", + "OZPP08nNTKcxXLsR9+D6Xb29UTyja96a0lqekCuinJalFCuaJ87APESaUqwcaWJzb4/+wqwubsY8//bk", + "9TsH/ufpJM2ByqQWFQZXhe3KP82qbEGzgQPii0Ubnc/L7FaUDDa/rsIUGqXXS3BVdwNptFcesHE4BEfR", + "Gann8QihvSZn5xuxS9zhI4GydpE05jvrIWl7ReiKstzbzTy0A9E8uLhxNSajXCEc4MbelcBJltwqu+md", + "7vjpaKhrD08K59pRF7iwpa8VEbzrQsfw4m3pvO4FxeJ+1irSZ068KtCSkKicpXEbK58pQxzc+s5MY4KN", + "B4RRM2LFBlyxvGLBWKaZGqHodoAM5ogi0xeKHMLdTLhnTSrOfquAsAy4Np8knsrOQcVqis7a3r9OjezQ", + "n8sNbC30zfA3kTHCwpbdGw+B2C1ghJ66Hrgva5XZL7S2SGG4deOSuILDP5yxdyXucNY7+nDUbIMXl22P", + "W/gKSZ//GcKw5aj3P4HilVdXYXNgjuiTJkwlcyl+h7ieh+pxJBXHl/JkGOXyO/ARMeeNdad5maWZfXC7", + "h6Sb0ArVDlIYoHrc+cAthzUFvYWacrvV9oWBVqxbnGDCqNJDO35DMA7mXiRuTtczGiu4aIQMA9NJ4wBu", + "2dK1IL6zx72qExvs7CTwJddtmc2yLkE2WXL9ii3XFBjstKNFhUYyQKoNZYKp9f/lSkSGqfiacvtQheln", + "j5LrrcAav0yvtZBYI0HFzf4ZpKygeVxyyNK+iTdjC2bfYKgUBEX+3UD2fRtLRe6hhDpdx6HmdE6OpsFL", + "I243MrZiis1ywBYPbIsZVcjJa0NU3cUsD7heKmz+cETzZcUzCZleKotYJUgt1KF6UzuvZqDXAJwcYbsH", + "z8hddNsptoJ7Bovufp4cP3iGRlf7x1HsAnBvaOziJhmyk386dhKnY/Rb2jEM43ajHkTTye0jWsOMa8dp", + "sl3HnCVs6Xjd/rNUUE4XEI8UKfbAZPvibqIhrYMXntkXYJSWYkuYjs8Pmhr+NBB9btifBYOkoiiYLpxz", + "R4nC0FNTwd9O6oezz8m44qseLv8RfaSldxF1lMgvazS191ts1ejJfksLaKN1SqgtjJGzJnrBl4Qmp77u", + "DlajrYvQWtyYuczSUczBYIY5KSXjGhWLSs+Tv5N0SSVNDfs7GAI3mT19HKnA264Eya8G+BfHuwQFchVH", + "vRwgey9DuL7kLhc8KQxHye412R7BqRx05sbddkO+w91DjxXKzCjJILlVLXKjAae+EeHxHQPekBTr9VyJ", + "Hq+8si9OmZWMkwetzA79+P61kzIKIWPF9Jrj7iQOCVoyWGHsXnyTzJg33AuZj9qFm0D/dT0PXuQMxDJ/", + "lmOKwHMR0U59Vejaku5i1SPWgaFjaj4YMpi5oaakXYH3yzv9vPG573wyXzys+EcX2K+8pYhkv4KBTQyq", + "g0e3M6u/B/5vSp6LzdhN7ZwQv7H/BqiJoqRiefZTk5XZKb4uKU+XUX/WzHT8uXkmql6cvZ+iNeuWlHPI", + "o8NZWfBnLzNGpNpfxdh5CsZHtu3Wg7fL7SyuAbwNpgfKT2jQy3RuJgix2k54qwOq84XICM7TFEhruGf/", + "HYGg2vNvFSgdSx7CDzaoC+2WRt+1xYYJ8Ay1xQPyyr4EuwTSKn+DWlpdRcCVvrUG9arMBc2mWMjh/NuT", + 
"18TOavvYx05sseMFKintVXTsVUHtx3Hhwf7dknjqwvhxdsdSm1UrjdWolKZFGUsONS3OfQPMQA1t+Ki+", + "hNg5IC+DNx1tHqkZwtDDnMnCaFz1aFZ2QZow/9GapktUyVosdZjkx1fp9lSpgpfx6hdu6oKIeO4M3K5Q", + "t63TPSXC6M1rpuwDoLCCdj5qnZztTAI+P7W9PFlxbiklKnvsKh5wHbR74GyghjfzRyHrIP6KArktcn/V", + "ouVn2CtaoKlbAb33JJ7NbqxfLvEPO6eUC85SLI8Uu5rdS6FjfGAjKkl1jaz+iLsTGjlc0brrdZicw+Jg", + "JXbPCB3i+kb44KvZVEsd9k+NT1IuqSYL0MpxNsim/vkAZwdkXIErcInvygZ8UsiWXxE5ZNRVndQujSuS", + "EabFDCh235lvb53aj/Hil4yjgO/Q5kLTraUOHzLURitgmiwEKLeedm6w+mD6HGCabAabjwf+4UNbDQbd", + "cmbZ1gfdH+rEe6SdB9i0fWHaujpB9c+tCGQ76UlZukmHH5eIygN6wwcRHPEsJt61EyC3Hj8cbQe57Qwl", + "wfvUEBqs0BENJd7DPcKoH1roPOJjhFZLUdiC2BCuaAUDxiNgvGYcmmc5IxdEGr0ScGPwvA70U6mk2oqA", + "o3jaOdAcvc8xhqa0cz3cdKhuLSGDElyjn2N4G5s3IgYYR92gEdwo39avgRrqDoSJF/gMsUNk/8UHlKqc", + "EJVhRkHnDYgY4zCM278y074A+segLxPZ7lpSe3KuchMNJYnOqmwBOqFZFqtI9Ry/Evzqi0vBBtKqLkxZ", + "liTFmijtIjF9anMTpYKrqtgxl29ww+mCR1Ui1BA+7OJ3GJNQZlv8N1aVcXhnXBDGlcMAfcSFe4XiinJz", + "e6Se1GtoOlFskYzHBN4pN0dHM/X1CL3pf6uUnotFG5AvXBpiF5cL9yjG3741F0dYOaFXatReLXVhAwy6", + "E/4pPFQb65TcNlfCq6xXexSdPfVTW7sNEMOPZk3x8hsIvQ0KYlB7v1rv4VAAbjoYL061y1zTlOxkQYPZ", + "QDZ6x+b9IBRxy+lQxI4N2DGfe73HSYY9ORvH3olQHwrWB+h7H2dKSsqca7xhFn3Muoj0YXPhrkPXbHB3", + "ES7Oe9Bi9/1qKCabKMYXORD83n1m6BJcOnv9zrxdq49K8iqh/dU982rHq6Pio+vvRyfgVF/XDDpotD13", + "Je3tMp1O/v1PNoaNANdy+29gwu1teu+Rpr60a81TTRNSl0MeVR65dSvG31sarn/U1DxCeiqFYk0J7thD", + "TCNj3c7xLaWgflN/LB9osoJUY931xoEuAa5SzclMFjzy91cdpAHdsQ4JdOWPdtU86hdb33Oh9dKSgtQ6", + "W6j6YHyFn5M6TAqZElbAXQB37+y1Ew5Ghz3P55BqttqTBvbPJfAgxWjqjRD2vdwgK4zVYbRYReTqJrYG", + "oF1ZWjvhCar53RicoSSQS9jeUaRFDdHK2VN/r1yngARiALlDYkhEqFgYgrWaOs8wUzVlIBZ82I/tDk0p", + "rsE3d4KkxmvO5UnS3LhNouOOKeOPfoyay3S9UvovRoQOZYr1Hw0YFrZf4hsNqn4PzxegCFVSctov07d2", + "BSwwaa92FPhSFqD8bz5D186Ss0sIXwVCt8yaysy3iNoZvAkj2XEf9dK7fMH7LtDzembWBGn2E3oihZ8w", + "FDfNhZG/kqF45nZcZPh4PkZ/2JLfGPFp4JqDdK+nobCXCwWJFj6ocxccu1DhHnq/DhLUYLFFC9xgCZT3", + "TY0XLDpLseQJdZEt4QKJhIIa6GRQiWV4zl3IfmG/+wwWX3R0rzmlptf9heZ9eC5TPSSGVD8n7rbcnxlz", + "HcsK49y+1apiZVm4QWVo+i+lyKrUXtDhwaitT6OLHu1gJVGjRNpfZU+/zLEE2Osgz/AStodW9Pel+v1W", + "htBbEcquIcjr7+z2rRqd4vp1vrALWNwKnF/TcDOdlELkyYCt/7RfXaZ7Bi5ZegkZMXeHD2wbeLaE3EUT", + "c+3MXS+3vppKWQKH7N4BISfchhJ7v267vHFncn5H75p/g7NmlS345GxKBxc8HpOJpZjkDfmbH2Y3V1Ng", + "mN8Np7KD7KldshmobCPpOvKIz8FYpbTvae0+rNIQlYUiJqXsecIi4kX2byL4FzZ8xooWBUv7ryj0RIk5", + "vkaV0MjgpzUDn7beCmSdhzt8jSH7TENKrQBnlAfK8kqCyxywz+Z0yumXVC/99pnmfTHLXNmgMKzflmSn", + "yioFXjlxb/Z0z4UokxxW0HIkuHSGKk1BKbaC8L0f25lkACWq6t0LJGYhD+mqw0Pc2pPAxjoGu1GmYhFr", + "d4rs4RgDj7EnljzUWBIyEK1YVtEW/tQNnmIZ+bZ7COvIE3LlwxFfXO9ouOdSkrqYW8yQ6dJJ/BYa+m2e", + "dukISMETLPWYA29R1li4iTAyiNo4Zq9XRGMUPfRt2pEjEzy7stvyEtbYaYJ3pXWNoKbmT113S980p3Hc", + "AzC+wx7wQoNc8ASMl4QcOF85wvZNjZRgKYOU0Fr+PhufW2DDvoItsrzbLNNWPLPRWe19CQy46kVtFx16", + "l6lrPsWCOoJjkbG+2VWhqwxrlYeEY3i3XNH8y5tOsdLSCeLDvXMbX2hoewuRbFGprhfm9pqOmjuws93e", + "1Pwdmnr/CWaPoj5ON5TzedSygvcMIcukOclF88IdDknWOKZ1ij54SmYuRaeUkDLFOtmLa19GuTY14asC", + "zfPHu21b+9b5k9A3IOO5F1/I26YkqxZ4YzQQNkf0KzOVgZMbpfIY9fXIIoK/GI8Ka2XsuS4uW95SW+K6", + "EwYoJNyy1zSIf7qi17RfBWTs8qxn0Fw6lYL+Okff1i3cRi7qZm1jXf595O6q2znGUx8vx2u6Y6iARQjW", + "siYIKvnlwS9EwhwfqxHk/n2c4P79qWv6y8P2Z3Oc79+PP7P8pYIELI7cGG7eGMX8NBQ2bkOjBzIUOvtR", + "sTzbRxitfJPmuSfMqPjZZZx9lQenfra+nP5RdY9+XCU8qbsJiJjIWluTB1MFmSQjkkhct0jKCFpF0koy", + "vcVCON70z36OhjO8qr2Fzttcl05wd58Wl1CXUmp8i5Xyt+srQXO8j4xMjcFhGp/W/XZDizIHd1C+uTP7", + "Gzz6++Ps6NGDv83+fvTkKIXHT54dHdFnj+mDZ48ewMO/P3l8BA/mT5/NHmYPHz+cPX74+OmTZ+mjxw9m", + "j58++9sdw4cMyBbQiU+7nvwffJUtOXl3mpwbYBuc0JLVL2obMvZPy9AUTyIUlOWTY//T//Yn7CAVRTO8", + 
"/3XisjonS61LdXx4uF6vD8Iuhwt0JiRaVOny0M/Tf8n43WmdmWNVS9xRm3ThTQaeFE7w2/tvz87JybvT", + "g+ClzOPJ0cHRwQN8SLEETks2OZ48wp/w9Cxx3w8dsU2OP32eTg6XQHP0vZs/CtCSpf6TWtPFAuSBe2PH", + "/LR6eOhFicNPzpHyede3w7Bc9eGnlr8p29MTy9kefvJVWna3bpVBcX62oMNIKHY1O5xh8ufYpqCCxsNL", + "QQVDHX5CEXnw90OXERf/iKqKPQOH3ikbb9nC0ie9MbB2ergn+Q8/4X+QJgOwbPxpH1ybKXZoH7Ps/7zl", + "afTH/kDdRxViPx9+ahf1bCFULSudiXXQF4Vwq0H256vL3Lf+PlxTps216jzrWHSl31kDzQ9dzkjn1yZM", + "s/cFY0+DH9tPr0d+PaxrWkU/dok99tVt9kAjn/GHqZ/CZhXW3Oc0a0xgobXMV8iyJUOPP0Te8pyzRSU7", + "bxR3Xj8mTJH/PvvhLRGSOCPBO5pe1oFo5HRuq51IsWKYo5MFiV2m54G/BX6rQG4bLu3kh7Acpn8BwWU+", + "FWpRttMEat3ko727QennItvueIBsk8wYp3LbfoSskV3sx/4E/ScSl2CLwXnzT2jwQ7XN7VEoVWhZga16", + "gThFBv/w6OivF8L/eiH8rxfCWxJweAb8uf/rGPx1DP56KH/0Q/mPr3ix7PTjtLJURh3QqwzXW+hzmhFf", + "GSEhb2hubmjIyInT1lr+O1zrgz/tWk85BsQa9ZBY9ffzdPLkT7x5p1yD5DQn2NKu5tGfdjVnIFcsBXIO", + "RSkklSzfkh95Xc4gKGHY52Y/8ksu1twj4vN0oqqiQPGzFtEVoRiDEp5nISPHmyrCdOPDAJt8DN3iCAfk", + "nyfv356+fXVszT+1pcL8f1OCZAVwTXP0oFYucEazFZAMVpCL0nzGun0S0IPHBVlUVFKuAVxVSVmgl8W/", + "pU1zprcG6HmFb58ZtVJIewHQhcIoGHzoYjKdhCAYnrdJjPi8AJ44AT6ZiWzrC85KutYby10PA5teaCND", + "Vaa2jn34aHQBLAzntJzG5HN8eIhBeUuh9OHk8/RTxxwUfvxYg+4rBk1KyVaYgPjx8/8PAAD//xT50lyq", + "vwAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go index 273bff101c..5837057832 100644 --- a/daemon/algod/api/server/v2/generated/model/types.go +++ b/daemon/algod/api/server/v2/generated/model/types.go @@ -124,6 +124,12 @@ const ( PendingTransactionInformationParamsFormatMsgpack PendingTransactionInformationParamsFormat = "msgpack" ) +// Defines values for SimulateTransactionParamsFormat. +const ( + SimulateTransactionParamsFormatJson SimulateTransactionParamsFormat = "json" + SimulateTransactionParamsFormatMsgpack SimulateTransactionParamsFormat = "msgpack" +) + // Account Account information at a given round. // // Definition: @@ -602,6 +608,27 @@ type PendingTransactionResponse struct { Txn map[string]interface{} `json:"txn"` } +// SimulateTransactionGroupResult Simulation result for an atomic transaction group +type SimulateTransactionGroupResult struct { + // FailedAt If present, indicates which transaction in this group caused the failure. This array represents the path to the failing transaction. Indexes are zero based, the first element indicates the top-level transaction, and successive elements indicate deeper inner transactions. + FailedAt *[]uint64 `json:"failed-at,omitempty"` + + // FailureMessage If present, indicates that the transaction group failed and specifies why that happened + FailureMessage *string `json:"failure-message,omitempty"` + + // TxnResults Simulation result for individual transactions + TxnResults []SimulateTransactionResult `json:"txn-results"` +} + +// SimulateTransactionResult Simulation result for an individual transaction +type SimulateTransactionResult struct { + // MissingSignature A boolean indicating whether this transaction is missing signatures + MissingSignature *bool `json:"missing-signature,omitempty"` + + // TxnResult Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details. + TxnResult PendingTransactionResponse `json:"txn-result"` +} + // StateDelta Application state delta. 
type StateDelta = []EvalDeltaKeyValue @@ -955,13 +982,19 @@ type PostTransactionsResponse struct { TxId string `json:"txId"` } -// SimulationResponse defines model for SimulationResponse. -type SimulationResponse struct { - // FailureMessage \[fm\] Failure message, if the transaction would have failed during a live broadcast. - FailureMessage string `json:"failure-message"` +// SimulateResponse defines model for SimulateResponse. +type SimulateResponse struct { + // LastRound The round immediately preceding this simulation. State changes through this round were used to run this simulation. + LastRound uint64 `json:"last-round"` + + // TxnGroups A result object for each transaction group that was simulated. + TxnGroups []SimulateTransactionGroupResult `json:"txn-groups"` - // MissingSignatures \[ms\] Whether any transactions would have failed during a live broadcast because they were missing signatures. - MissingSignatures bool `json:"missing-signatures"` + // Version The version of this response object. + Version uint64 `json:"version"` + + // WouldSucceed Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false. + WouldSucceed bool `json:"would-succeed"` } // StateProofResponse Represents a state proof and its corresponding message @@ -1036,7 +1069,7 @@ type VersionsResponse = Version // AccountInformationParams defines parameters for AccountInformation. type AccountInformationParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *AccountInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"` // Exclude When set to `all` will exclude asset holdings, application local state, created asset parameters, any created application parameters. Defaults to `none`. @@ -1051,7 +1084,7 @@ type AccountInformationParamsExclude string // AccountApplicationInformationParams defines parameters for AccountApplicationInformation. type AccountApplicationInformationParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *AccountApplicationInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1060,7 +1093,7 @@ type AccountApplicationInformationParamsFormat string // AccountAssetInformationParams defines parameters for AccountAssetInformation. type AccountAssetInformationParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *AccountAssetInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1072,7 +1105,7 @@ type GetPendingTransactionsByAddressParams struct { // Max Truncated number of transactions to display. If max=0, returns all pending txns. Max *uint64 `form:"max,omitempty" json:"max,omitempty"` - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. 
Format *GetPendingTransactionsByAddressParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1093,7 +1126,7 @@ type GetApplicationBoxesParams struct { // GetBlockParams defines parameters for GetBlock. type GetBlockParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *GetBlockParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1107,7 +1140,7 @@ type GetTransactionProofParams struct { // * sha256 Hashtype *GetTransactionProofParamsHashtype `form:"hashtype,omitempty" json:"hashtype,omitempty"` - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *GetTransactionProofParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1119,7 +1152,7 @@ type GetTransactionProofParamsFormat string // GetLedgerStateDeltaParams defines parameters for GetLedgerStateDelta. type GetLedgerStateDeltaParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *GetLedgerStateDeltaParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1145,7 +1178,7 @@ type GetPendingTransactionsParams struct { // Max Truncated number of transactions to display. If max=0, returns all pending txns. Max *uint64 `form:"max,omitempty" json:"max,omitempty"` - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *GetPendingTransactionsParamsFormat `form:"format,omitempty" json:"format,omitempty"` } @@ -1154,13 +1187,22 @@ type GetPendingTransactionsParamsFormat string // PendingTransactionInformationParams defines parameters for PendingTransactionInformation. type PendingTransactionInformationParams struct { - // Format Configures whether the response object is JSON or MessagePack encoded. + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. Format *PendingTransactionInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"` } // PendingTransactionInformationParamsFormat defines parameters for PendingTransactionInformation. type PendingTransactionInformationParamsFormat string +// SimulateTransactionParams defines parameters for SimulateTransaction. +type SimulateTransactionParams struct { + // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON. + Format *SimulateTransactionParamsFormat `form:"format,omitempty" json:"format,omitempty"` +} + +// SimulateTransactionParamsFormat defines parameters for SimulateTransaction. +type SimulateTransactionParamsFormat string + // TealCompileTextRequestBody defines body for TealCompile for text/plain ContentType. 
type TealCompileTextRequestBody = TealCompileTextBody diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go index 1b790e2ba0..b339716955 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go @@ -130,166 +130,171 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3PcNrLoX0HNnio/7nAkP3etqtS5ip1kdeM4LkvJ3nNs3wRD9sxgRQIMAEoz8dV/", - "P4UGQIIkyKEesTdV+8nWEGg0Go1Go1/4NEtFUQoOXKvZ0adZSSUtQIPEv2iaiorrhGXmrwxUKlmpmeCz", - "I/+NKC0ZX8/mM2Z+LanezOYzTgto2pj+85mE3yomIZsdaVnBfKbSDRTUANa70rSuIW2TtUgciGML4uTV", - "7GrkA80yCUr1sfyR5zvCeJpXGRAtKVc0NZ8UuWR6Q/SGKeI6E8aJ4EDEiuhNqzFZMcgztfCT/K0CuQtm", - "6QYfntJVg2IiRQ59PF+KYsk4eKygRqpeEKIFyWCFjTZUEzOCwdU31IIooDLdkJWQe1C1SIT4Aq+K2dH7", - "mQKegcTVSoFd4H9XEuB3SDSVa9Czj/PY5FYaZKJZEZnaiaO+BFXlWhFsi3NcswvgxPRakB8qpckSCOXk", - "3bcvyZMnT16YiRRUa8gckw3Oqhk9nJPtPjuaZVSD/9znNZqvhaQ8S+r27759ieOfuglObUWVgvhmOTZf", - "yMmroQn4jhEWYlzDGtehxf2mR2RTND8vYSUkTFwT2/hOFyUc/4uuSkp1uikF4zqyLgS/Evs5KsOC7mMy", - "rEag1b40lJIG6PvD5MXHT4/mjw6v/vL+OPlv9+ezJ1cTp/+yhruHAtGGaSUl8HSXrCVQ3C0byvv0eOf4", - "QW1ElWdkQy9w8WmBot71JaavFZ0XNK8Mn7BUiuN8LRShjo0yWNEq18QPTCqeGzFloDluJ0yRUooLlkE2", - "N9L3csPSDUmpsiCwHblkeW54sFKQDfFafHYjm+kqJInB60b0wAn96xKjmdceSsAWpUGS5kJBosWe48mf", - "OJRnJDxQmrNKXe+wImcbIDi4+WAPW6QdNzyd5zuicV0zQhWhxB9Nc8JWZCcqcomLk7Nz7O9mY6hWEEM0", - "XJzWOWo27xD5esSIEG8pRA6UI/H8vuuTjK/YupKgyOUG9MadeRJUKbgCIpb/hFSbZf8/pz++IUKSH0Ap", - "uoa3ND0nwFORDa+xGzR2gv9TCbPghVqXND2PH9c5K1gE5R/olhVVQXhVLEGa9fLngxZEgq4kH0LIQtzD", - "ZwXd9gc9kxVPcXGbYVuKmmElpsqc7hbkZEUKuv3qcO7QUYTmOSmBZ4yvid7yQSXNjL0fvUSKimcTdBht", - "Fiw4NVUJKVsxyEgNZQQTN8w+fBi/Hj6NZhWg44EMolOPsgcdDtsIz5ita76Qkq4hYJkF+clJLvyqxTnw", - "WsCR5Q4/lRIumKhU3WkARxx6XL3mQkNSSlixCI+dOnIY6WHbOPFaOAUnFVxTxiEzkheRFhqsJBrEKRhw", - "/DLTP6KXVMHzp0MHePN14uqvRHfVR1d80mpjo8Ruyci5aL66DRtXm1r9J1z+wrEVWyf2595CsvWZOUpW", - "LMdj5p9m/TwZKoVCoEUIf/AotuZUVxKOPvCH5i+SkFNNeUZlZn4p7E8/VLlmp2xtfsrtT6/FmqWnbD1A", - "zBrX6G0KuxX2HwMvLo71NnppeC3EeVWGE0pbt9Lljpy8GlpkC/O6jHlcX2XDW8XZ1t80rttDb+uFHEBy", - "kHYlNQ3PYSfBYEvTFf6zXSE/0ZX83fxTlrnprctVjLSGj915i7YBZzM4LsucpdQQ8Z37bL4aIQD2lkCb", - "Fgd4oB59ClAspShBamaB0rJMcpHSPFGaaoT0HxJWs6PZXw4a48qB7a4OgsFfm16n2Mnoo1bHSWhZXgPG", - "W6PXqBFhYQQ0fkIxYcUeakSM20U0rMSMCM7hgnK9aO4jLXlQb+D3bqSG3laVsfTu3K8GCU5swyUoq97a", - "hvcUCUhPkKwEyYra5joXy/qH+8dl2VAQvx+XpaUHqobAUOuCLVNaPcDp02YnheOcvFqQ70LYqGcLnu/M", - "4WBVDXM2rNyp5U6x2nDk5tBAvKcILqeQC7M0ngxGh78LjsM7w0bkRuvZyyum8d9d25DNzO+TOv85WCyk", - "7TBz4S3KUc5eYPCX4OZyv8M5fcZxtpwFOe72vRnbGChxhrkRr4yup4U7QseahJeSlhZB98WepYzjDcw2", - "srjeUppOFHRRnIM9HPAaYnXjvbZ3P0QxQVbo4PB1LtLzv1O1uYM9v/Sw+tsPhyEboBlIsqFqs5jFtIxw", - "ezXQpmwx0xBv72QZDLWop3hX09sztYxqGkzN4RtXSyzpsR8KPZCRu8uP+B+aE/PZ7G0j+i3YBTlDAabs", - "dnYehMxc5e0FwY5kGqCJQZDC3t6JuXVfC8uXzeDxdZq0Rt9Yg4FbITcJXCGxvfNt8LXYxnD4Wmx7W0Bs", - "Qd0Ffxg4qEZqKNQE/F45zASuvyMflZLu+kRG2FOIbCZoVFeFu4GHJ74ZpbG8Hi+FvJn06YgVThp7MqEG", - "aiB85x0iYdOqTBwrRmxStkEHUOPCGxcaXfAxirWocKrpH0AFZaDeBRXagO6aCqIoWQ53wPqbqNBfUgVP", - "HpPTvx8/e/T4l8fPnhuWLKVYS1qQ5U6DIvfd3YwovcvhQX9meDuqch2H/vypt0K24cbgKFHJFApa9kFZ", - "66ZVgWwzYtr1qdYmM866RnDK5jwDI8kt2Yk13BvUXjFlNKxieSeLMUSwrBklIw6TDPYy03Wn1wyzC6co", - "d7K6i6ssSClkxL6GW0yLVOTJBUjFRMRV8ta1IK6FV2/L7u8WW3JJFTFjo+m34qhQRDhLb/l0uW9Bn215", - 
"Q5tRyW/nG5mdG3fKurSJ7y2JipQgE73lJINltW7dhFZSFISSDDviGf0d6NMdT9GqdhdMOnxNKxhHE7/a", - "8TS4s5mFyiFbtxbh9nezLlW8fc4OdU9F0DHkeI2f8Vr/CnJN71x/6Q4Qw/2lX0iLLMlMQ7wFv2brjQ4U", - "zLdSiNXd4xgbJYYofrDqeW769JX0NyIDM9lK3cFh3ABreN2sacjhdCkqTSjhIgO0qFQqfkwPuOXRH4hu", - "TB2e/HpjNe4lGEZKaWVmW5UEnXQ9ydF0TGhquTdB0qgBL0btfrKt7HDW5ZtLoJm51QMnYulcBc6JgZOk", - "6GHU/qBzSkJkL7XwKqVIQSnIEmei2Iuab2eFiB6hEyKOCNejECXIispbI3t+sRfPc9gl6A9X5P73P6sH", - "XwBfLTTN9xAW28TIW1/4nD+oj/W04ccYrjt4yHZUAvEy19wujYDIQcMQCa9Fk8H162LUW8Xbk+UCJHpm", - "/lCO94PcjoFqVP9gfr8ttlU5EOXlLjpnrEC7HadcKEgFz1QUWE6VTvaJZdOodRszMwgkYUwSI+ABpeQ1", - "Vdp6ExnP0AhijxMcxyooZohhhAcVUgP5Z6+L9mGn5hzkqlK1YqqqshRSQxabA4ftyFhvYFuPJVYB7Fr7", - "1YJUCvZBHqJSAN8Ry87EEojq2uju3O39yaFp2pzzuygpW0g0hBhD5NS3CqgbRroMIMJUQ2jLOEx1OKcO", - "r5nPlBZlaaSFTipe9xsi06ltfax/atr2mYvq5tzOBJjRtcfJYX5pKWtjnDbUXKERMinoudE98EJs3Z59", - "nM1mTBTjKSRjnG+25alpFW6BvZu0KteSZpBkkNNdH+hP9jOxn8cA4Io3Fx+hIbHxLPFFbzjZhw+MgBYI", - "T8WUR4JfSGq2oLl5NAzieu+BnAHCjgknx0f3alA4VnSJPDyctl3qCEQ8DS+ENitu2QExdgJ9Cr4DZKgh", - "35wS2DlprmXdIf4LlBugViOuP8gO1NAUGvjXmsCAMc2FAQfbpSPdOwI4KjUHpdgeMTK0Ywcse2+p1Cxl", - "JV51vofdnd/8ugNE/U0kA01ZDhkJPthbYBn2JzYQowvzZjfBSUaYPvo9K0xkOjlTqPG0kT+HHV6539oI", - "v7MgLvAOrrIRqOZ4opwgoj5uyGjgYRPY0lTnO6On6Q3syCVIIKpaFkxrG7LZvulqUSYhgKiBe2RE582x", - "0XF+Baa4l04RVDC9/lLMZ/ZKMI7fWede0CKHuwqUQuQTjEc9YkQxmOT4J6Uwq85chLAPI/Wc1ELSCW10", - "5dWn/z3VIjPOgPyXqEhKOd64Kg21SiMk6gmoP5oRjAZWj+lc/A2FIIcC7EUSvzx82J34w4duzZkiK7j0", - "YfWmYZccDx+iGeetULq1ue7AVGi220nk+EDLP557LnihI1P2u5gd5Ckr+bYDvHYXmD2llGNcM/1bC4DO", - "ztxOmXvII9Pc6wh3klE/AB2bN677KSuq/K4WfEVZXkkY9o59+PB+VXz48JF8a1t6x/bcM3lIjssmLWLl", - "TqNKYmgNyZm530pBM6MgRG37OEm+TurgTBVFp1AGnX+4fUj5rpPINxUHsoSUVjYq2Ulth0ETHqoWEX2x", - "s7pdEkYnMtE8XuXaHtohVddSVCVR9bJbLtBUwx9jam5Ax7DsDxzEBjUfh8KDzDUx393BaW0BEQmlBIWy", - "NTSvKPtVrML8Gyd81U5pKPoWaNv1l4H72bvBe47gOeOQFILDLppyyjj8gB9jva18H+iMJ+1Q367y3MK/", - "g1Z7nCnceFv64moHAu1tHRd3B4vfhdtxPoSZR2hcg7wklKQ5Q9Ob4ErLKtUfOMXLfbDZIvED/hozbO55", - "6ZvE7UsR848D9YFTjB2pr/xRubiCiFz+FsBbfVS1XoPSHS1xBfCBu1aMk4ozjWMVZr0Su2AlSHTiL2zL", - "gu7IiuZonfodpCDLSreFKyZIKM3y3HlCzDBErD5wqkkORqr+wPjZFsF5T6LnGQ76UsjzmgqL6H5YAwfF", - "VBKPc/jOfsUQNDf9jQtHw2xV+9nazg38Jotih3f/JgPz/93/z6P3x8l/0+T3w+TF/zr4+Onp1YOHvR8f", - "X3311f9v//Tk6qsH//kfsZXyuMfC9x3mJ6/cneLkFSqOjfG8h/tnM5wWjCdRJgtdxB3eIveN+usZ6EHb", - "rKA38IHrLTeMdEFzllF9M3boirjeXrS7o8M1rYXomBH8XK+pjt1CypCIkOmIxhsf4/3QoHiiDHpzXO4L", - "7pdVxe1SVsp5lDAO3IdoiNW8ToayRRCOCGbKbKiPL3J/Pn72fDZvMlzq77P5zH39GOFklm1jeUwZbGNa", - "ttsguDHuKVLSnQIdlx6IezQaxTrFQ7AFmOuZ2rDy80sKpdkyLuF8dK27rW/5Cbdhr2b/oG9o50zOYvX5", - "8dYSIINSb2LJ0S1NAVs1qwnQ8deXUlwAnxO2gEX3tpytQfm4mBzoCpN00b8hpmQL1PvAMprnioDq4UQm", - "XUlj/IPKrZPWV/OZO/zVnevjDnAMr+6YtSPI/60FuffdN2fkwAlMdc+m1FnQQRJUxArl4vxbkRxGmtmS", - "EDan8AP/wF/BinFmvh994BnV9GBJFUvVQaVAfk1zylNYrAU58qkDr6imH3hP0xqs2hIkbZCyWuYsJeeh", - "Rtywp83Ej14bab4W5uLYdWr39Vc3VFS+2AGSS6Y3otKJSzVOJFxSGXMaqDrVFCHbQgFjo86Jg21FsUtl", - "dvDjMo+WpeqmnPWnX5a5mX7AhsolVJklI0oL6XURo6BYbHB93wh3MEh66fPUKwWK/FrQ8j3j+iNJPlSH", - "h0+AtHKwfnVHvuHJXQkte+WNUuK6tkqcuL3XwFZLmpR0PWA00EBLXH3Ulwu8ZOc5wW6t3C8f24qgmgl4", - "egwvgMXj2nksOLlT28vXjIlPAT/hEmIbo240HtObrleQDXbj5epklPVWqdKbxOzt6KyUYXG/MnUpibVR", - "srwbW7E1hgq6qhtLIOkG0nPIsAAAFKXezVvdfaSEUzS96GDKFsqwuRyYzY2m3SWQqsyoU8U7BiVDYQVa", - "+1jFd3AOuzPRJINfJ4+2ndaphjYqcmqgXRpmDbetg9FdfBeOg7ausvTZkZgm49niqOYL32d4I1uV9w42", - "cYwpWmmHQ4SgMkIIy/wDJLjBRA28W7F+bHrmlrG0J1+kroaX/cQ1aS5PLnImnA1mU9rvBWDVHXGpyJIa", - "vV24gjE2dTGQYpWiaxjQkEPr+sQEwZZFHoHsO/eiJ51YdQ+03nkTRdk2Tsyco5wC5othFbzMdOKl/EjW", - 
"gWMNqATrwDmCLXNUk+rAMit0qGx5OWxhqyHU4gwMkjcKh0ejTZFQs9lQ5WvZYMkfv5cn6QB/YCruWAGG", - "kyDUJ6jrUxu+vczt7tPe7dKVYfC1F3zBhfBqOaF4gtHwMbo4thyCowKUQQ5rO3Hb2DNKkxbcLJDB48fV", - "KmccSBKLGqJKiZTZYkTNMePGAKMfPyTEmoDJZAgxNg7QRsckAiZvRLg3+fo6SHKX1kw9bHRpBn9DPAPD", - "xtEalUeURoQzPhCx7SUAdaFm9fnVCXhEMITxOTFi7oLmRsy5G18DpFcHANXWTta/c40/GFJnRyzw9mC5", - "1pzsUXST2YQ6k0c6rtCNYLwU28SmYEU13uV2afg9GlqMCWGxjWkrLtxTZCm2GG6BR4sNZd2DyzAeHo3g", - "hr9lCvkV+w2d5haZsWHHtakYFypkGWfOq9llSJ2YMvSABjPELveDIgo3QqBj7GjKjbrL795Lals96R/m", - "zak2b4oD+ayN2PYf2kLRVRqgX98KU5c9eNvVWKJ2inbUQLviQ6BCxpjeiIm+k6bvClKQA14KkpYSlZzH", - "XHfmbgN44pz6boHxAutKUL57EISiSFgzpaExopuD2XuFPrd5kmI5KyFWw7PTpVyZ+b0Toj6mbL0U7Nia", - "5mefAYZyrphUOkEPRHQKptG3Ci/V35qmcV2pHexiKzuyLC4bcNhz2CUZy6s4v7pxv39lhn1Ti0RVLVHe", - "Mk6AphuyxEqk0RC4kaFtlOTohF/bCb+mdzbfabvBNDUDS8Mu7TH+JPuiI3nHxEGEAWPM0V+1QZKOCMgg", - "c7EvHQO9yW5OzFxcjFlfe5sp87D3ho34/MmhM8pCis4lMBiMzoKhm8ioJUwHhTz7KYUDe4CWJcu2HVuo", - "hTp4Y6bXMnj4CkkdKuDqOmB7KBDYPWNZDRJUuxhWo+DbkqytWhSLSZQ5a5esCgVCOBRTvqB4n1B11tM+", - "Wp0Bzb+H3c+mLU5ndjWf3c50GqO1g7iH1m/r5Y3SGV3z1pTW8oRck+S0LKW4oHniDMxDrCnFhWNNbO7t", - "0Z9Z1MXNmGffHL9+69C/ms/SHKhMalVhcFbYrvzTzMrW3RrYIL5gsbnzeZ3dqpLB4tfFgkKj9OUGXHHY", - "QBvtVbFrHA7BVnRG6lU8Qmivydn5RuwUR3wkUNYuksZ8Zz0kba8IvaAs93Yzj+1ANA9OblopxKhUCAHc", - "2rsSOMmSOxU3vd0d3x0Nd+2RSeFYI+VrC1uhWRHBuy50o0KiOQ5ZtaBYg85aRfrCiVcFWhISlbM0bmPl", - "Swy75dZ3ZhoTbDygjBqIFRtwxfKKBbBMMzXhottBMhgjSkxfz3CIdkvhntaoOPutAsIy4Np8krgrOxsV", - "i/45a3v/ODW6Q38sB9ha6Bvwt9ExwvqL3RMPkRhXMEJPXQ/dV/WV2U+0tkiZHwKXxDUc/uGIvSNxxFnv", - "+MNxsw1e3LQ9buFLGH35ZxjDVk3e/wyHv7y6QpADY0Sf1WAqWUnxO8TveXg9jmSM+IqTDKNcfgc+Icy8", - "se40r4M0ow8u95B2E1qh2kEKA1yPKx+45bD0nbdQU26X2la5b8W6xRkmjCo9sPAbhnE49yJxc3q5pLG6", - "gEbJMDgdNw7gli1dC+I7e9o7sz9zRUAXJPAl122ZTQYuQTbJXP3CIjdUGOywk1WFRjNArg11grn1/+VK", - "RMBU/JJy+1iC6We3kuutwBq/TK9LITGVX8XN/hmkrKB5XHPI0r6JN2NrZp8KqBQEtegdIPvGiuUiV8/f", - "utgb0pysyOE8eO3CrUbGLphiyxywxSPbYkkVSvLaEFV3MdMDrjcKmz+e0HxT8UxCpjfKElYJUit1eL2p", - "nVdL0JcAnBxiu0cvyH102yl2AQ8MFd35PDt69AKNrvaPw9gB4N4EGZMm2SpMfInzMfotLQwjuB3URTTr", - "2T7kNCy4RnaT7TplL2FLJ+v276WCcrqGeKRIsQcn2xdXEw1pHbrwzL5CorQUO8IGUpBAUyOfBqLPjfiz", - "aJBUFAXThXPuKFEYfmoKzdtBPTj7pImrEerx8h/RR1p6F1HnEvl5jab2fIvNGj3Zb2gBbbLOCbX1G3LW", - "RC/4ysXkxJeHwaKpda1USxszlpk6qjkYzLAipWRc48Wi0qvkbyTdUElTI/4WQ+gmy+dPI4Vi2wUL+fUQ", - "/+x0l6BAXsRJLwfY3usQri+5zwVPCiNRsgdNtkewKweduXG33ZDvcBz0VKXMQEkG2a1qsRsNJPWtGI+P", - "ALwlK9bzuRY/Xntmn50zKxlnD1qZFfrp3WunZRRCxmq+NdvdaRwStGRwgbF78UUyMG+5FjKftAq3wf7L", - "eh68yhmoZX4vxy4CX4vI7dQXL64t6S5WPWIdGNqm5oNhg6UDNSftQrGf3+nnjc9955P54nHFP7rIfuEl", - "RSL7GQwsYlDEOrqcWf098H9T8rXYTl3Uzg7xC/svQJooSSqWZz83WZmdGuGS8nQT9WctTcdfmteM6snZ", - "8ylaWm1DOYc8Cs7qgr94nTGi1f5TTB2nYHxi227ZcjvdzuQaxNtoeqT8gIa8TOdmgJCq7YS3OqA6X4uM", - "4DhNHa9GevbL3QdFiX+rQOlY8hB+sEFdaLc0911bE5cAz/C2uCDf2ddIN0BaVVrwlmbz4yHzFVqtQb0q", - "c0GzOTFwzr45fk3sqLaPfZPD1uRd4yWlPYuOvSooUTgtPNg/rxFPXZgOZzyW2sxaaSyapDQtylhyqGlx", - "5htgBmpow8frS0idBXllb47K30vsIIYfVkwW5sZVQ7O6C/KE+Y/WNN3glawlUodZfnoxac+VKnjArX6I", - "pa7bh/vO4O3qSdty0nMizL35kin7CCVcQDsftU7OdiYBn5/anp6sOLecEtU9xooH3ITsHjkbqOHN/FHM", - "OoS/pkJua7Fft7b2KfaK1hHqFuruvdxmsxvrBzb848Ip5YKzFKv4xI5m96DlFB/YhIJHXSOr3+Juh0Y2", - "V7Q8eB0m56g4WDDcC0JHuL4RPvhqFtVyh/1T48uJG6rJGrRykg2yua9y7+yAjCtwdRjxbdNATgrZ8iui", - "hIy6qpPapXFNNsK0mIGL3bfm2xt37cd48XPGUcF3ZHOh6dZSh+/taXMrYJqsBSg3n3ZusHpv+iwwTTaD", - "7ceFf58PYVi3nJm29UH3QR17j7TzAJu2L01bW8qk+bkVgWwHPS5LN+jwGwhRfUBv+SCBI57FxLt2AuLW", - "8ENoI+w2GkqC56lhNLhARzSUeA73GKN+D6Dz1oxRWi1HYQtiQ7iiFQwYj6DxmnFoXo+MHBBp9EjAhcH9", - 
"OtBPpZJqqwJOkmlnQHP0PscEmtLO9XBbUJ0FRpLgHP0Yw8vYPGUwIDjqBo3iRvmufrTScHegTLzE13Id", - "IfsPE6BW5ZSoDDMKOk8VxASHEdy+FFL7AOhvg75OZLtrSe3Ouc5JNJQkuqyyNeiEZlmsLubX+JXgV18o", - "CraQVnX9xLIkKdZEaReJ6XObGygVXFXFyFi+wS2HC97+iHBD+P6IX2FMQlnu8N9Y8cDhlXFBGNcOA/QR", - "F+6xhGvqzW1IPa3X8HSi2DqZTgk8U25PjmbomzF60/9OOT0X6zYin7k0xJiUC9coJt++MQdHWDmhVxHT", - "Hi11YQMMuhP+xTa8NtYpuW2phEdZr0QmOnvqmnfjBojht53mePgNhN4GBTGoPV+t93AoADcdjBen2mWu", - "aUpGRdBgNpCN3rF5P4hF3HI6FLFjA3bM517vaZphT89G2KME9aFgfYS+93GmpKTMucYbYdGnrItIHzYX", - "jm26ZoG7k3Bx3oMWu+8vhmKyiWJ8nQPB793XcM7BpbPXz6HbufqoJH8ltL+610gtvDoqPjr/fnQCDvVl", - "zaCDRtszV3ndTtPdyb//2cawEeBa7v4FTLi9Re+9JdTXdq15qmlC6qq9k6r4tk7F+LNAw/WPmppHyE+l", - "UKypFB17L2hirNsZPvkT1G/qw/KBJheQaiwP3jjQJcB1qjmZwYK36P5dB2ng7liHBLryR2M1j/o1wfcc", - "aL20pCC1ztZTXkyv8HNch0mhUMLX4NbA3XNw7YSDyWHPqxWkml3sSQP7xwZ4kGI090YI+6xrkBXG6jBa", - "rCJyfRNbg9BYltYoPkE1v1ujM5QEcg67e4q0uCFa4Hnuz5WbFJBACqB0SAyLCBULQ7BWU+cZZqrmDKSC", - "D/ux3aEpxTX4NEyQ1HjDsTxLmhO3SXQcGTL+NsWksUzXa6X/YkToUKZYv7b9sLL9Cp8SUPWzbb4ARXgl", - "JSeR6s+ugAUm7dWOAl/KApT/zWfo2lFydg7h4zXolrmkMvMtonYGb8JIRs6jXnpXtGQ1VTaI0vnB6yDN", - "fkJPpPAThuKmucByz0PxzO24yPCNd4z+wOMAy08jXiuQ7pEvVPZyoSDRwgd1juExRgr3HvlNiKAGiy1a", - "5AZLoLxrarxg0VmKJU+oi2wJJ0gkFNRgJ4NKLMNjjhH7pf3uM1h80dG95pSaX5O9pVR8eC5TPSKGXL8i", - "7rTcnxlzE8sK49w+KapiZVm4IWVo+i+lyKrUHtDhxqitT5OLHo2IkqhRIu3Psne/zLEE2Osgz/AcdgdW", - "9U83lDe12Nrb2qpQdg5BXn9nte/U6BS/X+drO4H1neD5JQ0381kpRJ4M2PpP+tVlunvgnKXnkBFzdvjA", - "toHXNch9NDHXztzLzc5XUylL4JA9WBByzG0osffrtssbdwbn9/TY+FscNatswSdnU1p84PGYTCzFJG8p", - "3zyYcammwAi/Ww5lgeypXbIdqGwj6WXkrZnF1Etp39Paff+jYSqLRUxLuWEi+6T93bcrRVg/ePpg/PYT", - "1rloAuikNU+ittQ8B9FWXn5orI7THmHwHfagF16Kg2cYvDRy6HzhKLcfaqIEUxnkhNb0992z3QQbuRQs", - "kcK0CDNNW3XIRki01yUwoqiXtW0iTue+CQOLWgiOhX76pg+F5mqsFxwyjtmX8oLmn998gdVOjpEe7knE", - "+ETD+29IZEtKdbNQk9d00tjBXffuhuZv0dzyDzBrFPUzOFDO7lg/f+Gts1jXjuYkF81jSAiSXCJM65h4", - "9JwsXZh8KSFlinUyiC59KdP6uoeVvZuXMsfvl/vm+bPQt2Bjd0EQJXnTlEXUAs+HBsNmi35hoTKwc6Nc", - "HuO+HltE6BeTUWG++p7j4rzlsbBlZjuhOELCHXsughiEa3ou+pn4U6dnrfPm0KkU9Oc5+bRu0TZyUDdz", - "m+p26xN3rHbeFG9ZvCSm6Y7uOksQrCdLEFXy66NfiYQVPhghyMOHOMDDh3PX9NfH7c9mOz98GH+R83M5", - "6iyNHAw3boxjfh4K3bThiQNRwp31qFie7WOMVsx38+QKRjX/4rI+vsijL79Ye2p/q7rC+9cJEeguAhIm", - "MtfW4MFQQTT3hEBu1y0Sto03k7SSTO+wGIU3v7Ffoi7F72qLvfP41OnL7uzT4hzqciaNfb9S/nT9TtAc", - "zyOjU2OAhsZXGL/Z0qLMwW2Ur+4t/wpP/vY0O3zy6K/Lvx0+O0zh6bMXh4f0xVP66MWTR/D4b8+eHsKj", - "1fMXy8fZ46ePl08fP33+7EX65Omj5dPnL/56z8ghg7JFdOZTH2f/F19GSo7fniRnBtmGJrRk9eOrho39", - "8w40xZ0IBWX57Mj/9L/9DlukomjA+19nLrNqttG6VEcHB5eXl4uwy8EaDXqJFlW6OfDj9B+9fHtSR8db", - "VzCuqA18NqyAi+pY4Ri/vfvm9Iwcvz1ZNAwzO5odLg4Xj/AxsxI4LdnsaPYEf8Lds8F1P3DMNjv6dDWf", - "HWyA5uj/Mn8UoCVL/Sd1SddrkAv3zoX56eLxgVclDj45Y+bV2LeDsGTswaeWzTfb0xNLSh588pUSxlu3", - "ShE4W3fQYSIWY80OlpiANbUpqKDx8FTwgqEOPqGKPPj7gctKiX/Eq4rdAwfeMRJv2aLSJ701uHZ6uNeb", - "Dz7hf5Anr6yQyCHmBrHJHJQ0zeeEaUKXQmKJAp1ujFzwudFMBS1nyKmWyU8yw9ym10uLga+CYsvCHb3v", - "x18gIOIhoSQwbN5s1NZIjSzWsoKwUll90rTaN+fN+8PkxcdPj+aPDq/+Ys4T9+ezJ1cT/Zkva7jktD4s", - "Jjb8iInFaJnF/fv48PAWT+4d84D8dpGClx17tTrcS96D74q6peoAIjUx9iRAdsAPvM399JozHrUftSLU", - "Im/xfE0z4vObcOxHn2/sE47eZCPXiT23ruazZ59z9ifcsDzNCbYMKlr0l/4nfs7FJfctjZJRFQWVO7+N", - "VUsoELfYeJTRtUJromQXFHU7LnirTP/sI1qwYzlmA/JGaXoDeXNqev1b3nwueYOLdBfypg3ojuXN42vu", - "+T//jP8tYf9sEvbUirtbSVin8Nmw/r4GahNwD+wbwf2fdzyN/tgH1H2rJvbzwad2reSWjqw2lc7Epc0O", - "jx4KWBCQ5q56EBpB6wuVFsQDaILayI8u6DzfoeWXZUAoZsOKSjc3XtPZuyobn4SB0LxjtWYcB0DjMo5i", - "y2TRIFxEQSq4ffWlcwA5zN6IDPoHEB4xv1Ugd80Z43CczVsSyLFQpCjVrQV6X2BcXY/B0AhuPTh95qif", - 
"emn9fXBJmTbHlIsuQ4r2O2ug+YHLm+z82qQq9L5g/kXwY3Aniv96UNd1jH7sXjZjX91la6CRz3r3nxtj", - "U2i8QZaozTbvP5qVxapBjlsaW8TRwQFGbGyE0gezq/mnjp0i/PixXkxfTqJe1KuPV/8TAAD//xceSpBL", - "uAAA", + "H4sIAAAAAAAC/+x9/XPcNrLgv4KafVX+uKEkf+5aVal3ip1kdXEcl6Vk7z3bl2DInhmsOABDgNJMfPrf", + "X6EbIEESnOFIir2p2p9sDYFGo9FoNPoLnyapWhVKgjR6cvxpUvCSr8BAiX/xNFWVNInI7F8Z6LQUhRFK", + "To79N6ZNKeRiMp0I+2vBzXIynUi+gqaN7T+dlPBbJUrIJsemrGA60ekSVtwCNpvCtq4hrZOFShyIEwJx", + "+mpyveUDz7IStO5j+aPMN0zINK8yYKbkUvPUftLsSpglM0uhmevMhGRKAlNzZpatxmwuIM/0gZ/kbxWU", + "m2CWbvDhKV03KCalyqGP50u1mgkJHiuokaoXhBnFMphjoyU3zI5gcfUNjWIaeJku2VyVO1AlJEJ8QVar", + "yfH7iQaZQYmrlYK4xP/OS4DfITG8XICZfJzGJjc3UCZGrCJTO3XUL0FXudEM2+IcF+ISJLO9DtgPlTZs", + "BoxL9u7bl+zJkycv7ERW3BjIHJMNzqoZPZwTdZ8cTzJuwH/u8xrPF6rkMkvq9u++fYnjn7kJjm3FtYb4", + "ZjmxX9jpq6EJ+I4RFhLSwALXocX9tkdkUzQ/z2CuShi5JtT4ThclHP+LrkrKTboslJAmsi4MvzL6HJVh", + "QfdtMqxGoNW+sJQqLdD3R8mLj58eTR8dXf/l/Uny3+7PZ0+uR07/ZQ13BwWiDdOqLEGmm2RRAsfdsuSy", + "T493jh/0UlV5xpb8Ehefr1DUu77M9iXRecnzyvKJSEt1ki+UZtyxUQZzXuWG+YFZJXMrpiw0x+1MaFaU", + "6lJkkE2t9L1ainTJUq4JBLZjVyLPLQ9WGrIhXovPbstmug5JYvG6ET1wQv+6xGjmtYMSsEZpkKS50pAY", + "teN48icOlxkLD5TmrNL7HVbsfAkMB7cf6LBF2knL03m+YQbXNWNcM8780TRlYs42qmJXuDi5uMD+bjaW", + "aitmiYaL0zpH7eYdIl+PGBHizZTKgUsknt93fZLJuVhUJWh2tQSzdGdeCbpQUgNTs39Cauyy/5+zH98w", + "VbIfQGu+gLc8vWAgU5VBdsBO50wqE7CG4yWkoe05NA+HV+yQ/6dWlidWelHw9CJ+oudiJSKz+oGvxapa", + "MVmtZlDaJfVHiFGsBFOVcgghgriDFVd83R/0vKxkiuvfDNvS5Sy3CV3kfIMEW/H1V0dTh45mPM9ZATIT", + "csHMWg7qcXbs3eglpapkNkLNMXZNg4NVF5CKuYCM1VC2YOKG2YWPkPvh0yhfAToeyCA69Sg70JGwjvCM", + "3d32Cyv4AgKWOWA/OeGGX426AFkzOptt8FNRwqVQla47DeCIQ2/XwKUykBQlzEWEx84cOayAoTZOAq+c", + "DpQqabiQkFnhjEgrAySsBnEKBtx+3+mf4jOu4fnToTO++Tpy9eequ+pbV3zUamOjhLZk5Oi0X92GjWtW", + "rf4j7ofh2FosEvq5t5BicW5Pm7nI8ST6p10/T4ZKoxBoEcKfTVosJDdVCccf5EP7F0vYmeEy42Vmf1nR", + "Tz9UuRFnYmF/yumn12oh0jOxGCBmjWv0woXdVvSPhRcXx2YdvVe8VuqiKsIJpa2L62zDTl8NLTLB3Jcx", + "T+rbbnjxOF/7y8i+Pcy6XsgBJAdpV3Db8AI2JVhseTrHf9Zz5Cc+L3+3/xRFbnubYh4jreVjdySj+cCZ", + "FU6KIhcpt0R85z7br1YIAF0keNPiEA/U408BikWpCiiNIKC8KJJcpTxPtOEGIf1HCfPJ8eQvh4395ZC6", + "68Ng8Ne21xl2siorqUEJL4o9YLy1qo/eIiysgMZPKCZI7KHSJCQtomUlYUVwDpdcmoPmytKSB/UGfu9G", + "auhN2g7Ru3MFGyQ4o4Yz0KQBU8N7mgWkZ0hWhmRFhXSRq1n9w/2TomgoiN9PioLogdojCFTMYC200Q9w", + "+rzZSeE4p68O2HchbFTFlcw39nAgVcOeDXN3arlTrLYtuTk0EO9phsupygO7NJ4MVs2/C47Da8VS5Vbr", + "2ckrtvHfXduQzezvozr/OVgspO0wc+FFy1GO7jj4S3C5ud/hnD7jOHPPATvp9r0Z21gocYa5Ea9sXU+C", + "u4WONQmvSl4Qgu4LnaVC4iWNGhGut5SmIwVdFOdgDwe8hljdeK/t3A9RTJAVOjh8nav04u9cL+9gz888", + "rP72w2HYEngGJVtyvTyYxLSMcHs10MZsMdsQL/hsFgx1UE/xrqa3Y2oZNzyYmsM3rpYQ6bEfCj0oI3eX", + "H/E/PGf2s93bVvQT2AN2jgJM03Z2TobM3vbpgkAj2QZohVBsRRd8Zm/de2H5shk8vk6j1ugbsim4FXKT", + "wBVS6zvfBl+rdQyHr9W6twXUGvRd8IeFg2qkgZUegd8rh5nC9Xfk42XJN30iI+wxRLYTtKqrxt0gwxPf", + "jtIYZ09mqryZ9OmIFckakzPjFmogfKcdImHTqkgcK0bMVtSgA6jx8m0XGl3wMYq1qHBm+B9ABW2h3gUV", + "2oDumgpqVYgc7oD1l1GhP+ManjxmZ38/efbo8S+Pnz23LFmUalHyFZttDGh2393NmDabHB70Z4a3oyo3", + "cejPn3pDZRtuDI5WVZnCihd9UGQAJRWImjHbrk+1Nplx1jWCYzbnOVhJTmRnZNu3qL0S2mpYq9mdLMYQ", + "wbJmlIw5TDLYyUz7Tq8ZZhNOsdyU1V1cZaEsVRmxr+EWMypVeXIJpRYq4k1561ow18Krt0X3d8KWXXHN", + "7Nho+q0kKhQRzjJrOV7uE+jztWxos1Xy03wjs3PjjlmXNvG9JVGzAsrErCXLYFYtWjehealWjLMMO+IZ", + "/R2Ys41M0ap2F0w6fE1bCYkmfr2RaXBnswuVQ7ZoLcLt72Zdqnj7HA11T0fQseR4jZ/xWv8KcsPvXH/p", + "DhDD/aVfSEKWZbYh3oJfi8XSBArm21Kp+d3jGBslhih+IPU8t336SvoblYGdbKXv4DBugDW8btc05HA+", + "U5VhnEmVAVpUKh0/pgc89+gyRE+nCU9+sySNewaWkVJe2dlWBUM/Xk9yNB0TnhL3JkgaPeDFqN1P1IqG", + "I69wXgLP7K0eJFMz5ypwTgycJEcnpPEHnVMSInuphVdRqhS0hixxJoqdqPl2JETMFjoh4ohwPQrTis15", + 
"eWtkLy534nkBmwRd5prd//5n/eAL4GuU4fkOwmKbGHnrC5/zB/WxHjf8NobrDh6yHS+BeZlrb5dWQORg", + "YIiEe9FkcP26GPVW8fZkuYQSPTN/KMf7QW7HQDWqfzC/3xbbqhgIBHMXnXOxQrud5FJpSJXMdBRYzrVJ", + "doll26h1G7MzCCRhTBIj4AGl5DXXhryJQmZoBKHjBMchBcUOMYzwoEJqIf/sddE+7NSeg1JXulZMdVUU", + "qjSQxeYgYb1lrDewrsdS8wB2rf0axSoNuyAPUSmA74hFMyECcVMb3Z27vT85NE3bc34TJWULiYYQ2xA5", + "860C6obBMAOICN0QmhhH6A7n1BE404k2qiistDBJJet+Q2Q6o9Yn5qembZ+5uGnO7UyBxhgc195hfkWU", + "pTCoJbdXaITMVvzC6h54ISa3Zx9nuxkTLWQKyTbOt9vyzLYKt8DOTVoVi5JnkGSQ800f6E/0mdHnbQBw", + "xZuLjzKQUDxLfNEbTvbhA1tAK4SnY8ojwy8stVvQ3jwaBnG9d0DOAGHHhJPjo3s1KBwrukQeHk6bljoC", + "EU/DS2XsihM7IMZOoI/Bd4AMNeSbUwI7J821rDvEf4F2A9RqxP6DbEAPTaGBv9cEBoxpLlI42C4d6d4R", + "wFGpOSjFdoiRoR07YNl7y0sjUlHgVed72Nz5za87QNTfxDIwXOSQseAD3QKLsD+jQIwuzJvdBEcZYfro", + "96wwkenkQqPG00b+AjZ45X5LEX7nQVzgHVxlI1Dt8cQlQ0R93JDVwMMmsOapyTdWTzNL2LArKIHparYS", + "xlDkbvuma1SRhACiBu4tIzpvDkXH+RUY4146Q1DB9PpLMZ3QlWA7fuede0GLHO4qUCiVjzAe9YgRxWCU", + "458Vyq66cEHEPozUc1ILSSe00ZVXn/73dIvMOAP2X6piKZd446oM1CqNKlFPQP3RjmA1sHpM5+JvKAQ5", + "rIAukvjl4cPuxB8+dGsuNJvDlY+8tw275Hj4EM04b5U2rc11B6ZCu91OI8cHWv7x3HPBCx2ZstvF7CCP", + "Wcm3HeC1u8DuKa0d49rp31oAdHbmeszcQx4Z515HuKOM+gHo2Lxx3c/Eqsq5uQv3xVZ9tL5PiNUKMsEN", + "5BtWlJACRVdbBUsTLhY1RnFX6ZLLBerVpaoWLvCH4KBgrDRZMMpK9kBElQ+zlsmiVFURE5Qu2NMH2Fu1", + "A7i9+QSExM6k51/xejyXUzHmBPMED1bnOwtzyKswnQxeDC1RL5uLIRGnnSUQpwKmPSS6SlOAaAhw7MpV", + "T7WTDdnktziAVm2oSoqBYjw1Fc9DrmOnc8blpp0myUWurRQUmmE727mJq53S3HwOy5zn5JuNJFWEO6Wl", + "8QUr35C0S4qRfgdkEqsN9TkjZEC7vSwb/zE2/AZ0DMv+wEHQVfNxKO7K3r/zzR2oQQSIlVCUoPHQCu1W", + "mr6qeZj75E41vdEGVn3TPnX9ZUDQvBu8QCqZCwnJSknYRNN9hYQf8GNUcODBOdAZVZihvt1bSQv/Dlrt", + "ccZw423pi6sdyKK3dcDhHSx+F27HqxNmfaHVEvKCcZbmAm2aSmpTVqn5IDlaTYLNFgnM8PfDYTvaS98k", + "briL2NUcqA+SY1BObUuJOpPnEDEcfAvgzWm6WixAd+QnmwN8kK6VkKySwuBYK7teCS1YASVGRxxQyxXf", + "WBGIZr/foVRsVpm2TMbME22suCQXkx2GqfkHyQ3Lwd6pfxDyfI3gvIvW84wEc6XKi5oK8SNkARK00Ek8", + "gOQ7+oqxfW76Sxfnh5nC9JmcEhZ+k56yQaNKk/36/+7/5/H7k+S/efL7UfLifx1+/PT0+sHD3o+Pr7/6", + "6v+3f3py/dWD//yP2Ep53GN5EQ7z01fusnb6CjXyxivRw/2zWaRXQiZRJgt97x3eYvcxB9Ax0IO2vcYs", + "4YM0a2kZ6ZLnIrMq103YoSvienuRdkeHa1oL0bHP+LnuqefeQsqwiJDpiMYbH+P9mKt4BhK6yVxSEe6X", + "eSVpKb2iSwH2PvZFzad1lhkVoDhmmIK05D5wy/35+NnzybRJHaq/T6YT9/VjhJNFto5qh7COXV/cBsGN", + "cU+zgm80DCigiHs0zIeiDUKwK7D3Xr0UxeeXFNqIWVzC+bBlZwZZy1NJ8cR2/6DTbeNs+Wr++fE2pdXD", + "C7OMJaa3NAVs1awmQCcQoijVJcgpEwdw0DVDZPZq5gKOcuBzTJDGi54ak4ZR7wNiNM8VAdXDiYy668f4", + "B5VbJ62vpxN3+Os718cd4Bhe3TFrD5v/2yh277tvztmhE5j6HuUqEugguyxya3UJFK0QGSvNqBwHJWt+", + "kB/kK5gLKez34w8y44YfzrgWqT6sNJRf85zLFA4Wih37nIxX3PAPsqdpDVbMCbJhWFHNcpGyi1AjbtiT", + "qiD0IXz48J7nC/Xhw8detEBff3VDReULDZBcCbNUlUlcDndSwhUvY94YXefwImQq0rBt1ClzsEkUuxxx", + "Bz8u83hR6G4uX3/6RZHb6QdsqF2mml0ypo0qvS5iFRTCBtf3jXIHQ8mvvAmj0qDZrytevBfSfGTJh+ro", + "6AmwVnLbr+7Itzy5KWC0IWMw17Brv8CJ070G1qbkScEXMa/Phw/vDfACVx/15RVesvOcYbdWUp0PGkZQ", + "zQQ8PYYXgPDYO0EIJ3dGvXy9nvgU8BMuIbax6kbjir7pegVpdjderk6qXm+VKrNM7N6OzkpbFvcrU5fx", + "WFgly8cHaLHAGExX8WQGLF1CeuFKUcCqMJtpq7sPQXGKphcdQlOREkqSwTR5tJnPgFVFxp0q3rUgzTZM", + "gzE+CPQdXMDmXDVZ9vskKLfzZfXQRkVODbRLy6zhtnUwuovv4pzQxFUUPu0U8488WxzXfOH7DG9kUnnv", + "YBPHmKKVzzlECF5GCEHMP0CCG0zUwrsV68emZ28ZMzr5IgVLvOxnrklzeXIhSeFs0MBN31eAFY/UlWYz", + "bvV25Yr1UE5oIMUqzRcwoCGHbouRmZctVwcC2XXuRU86Ne8eaL3zJooyNU7snKOcAvaLZRW8zHQC0fxI", + "5BlzTgCswecINstRTaoj9kjo8LLlPqKiYkOoxRkYStkoHB6NNkVCzWbJta8jhOWW/F4epQP8gTnO2ypb", + "hAb9oKZSbV/3Mre7T3u3S1ffwhe18JUswqvliKoUVsPHsO3YciiJClAGOSxo4tTYM0qTb90skMXjx/k8", + "FxJYEgvH4lqrVFAhqOaYcWOA1Y8fMkYmYDYaQoyNA7TR44uA2RsV7k252AdJ6fLFuYeNvuLgb4intlCA", + 
"slV5VGFFuBhwIKVeAnAXw1efX51IUgTDhJwyK+YueW7FnLvxNUB6BRZQbe2UU3AxBw+G1NktFng6WPaa", + "Ex1FN5lNqDN5pOMK3RaMZ2qdUG5bVOOdrWeW36Mx25hpF9uYVMrinmYztcY4FjxaKEZ4By7DeHg0ghv+", + "WmjkV+w3dJoTMtuG3a5NxbhQI8s4c17NLkPqxJihBzSYIXa5H1SnuBECHWNHU+rVXX53XlLb6kn/MG9O", + "tWlTdcmnw8S2/9AWiq7SAP36Vpi6nsTbrsYStVO0wzHapTQCFTLG9FZM9J00fVeQhhzwUpC0lKjkIua6", + "s3cbwBPnzHcLjBdYsIPLzYMgxqeEhdAGGiO6D0n4EuZJjnXClJoPz84U5dzO751S9TFFhWiwY2uan30G", + "GCM7F6U2CXogolOwjb7VeKn+1jaN60rtKCKqqimyuGzAYS9gk2Qir+L86sb9/pUd9k0tEnU1Q3krJMWG", + "zLAKbDS2cMvQFH66dcKvacKv+Z3Nd9xusE3twKVll/YYf5J90ZG828RBhAFjzNFftUGSbhGQQUpoXzoG", + "ehNtTkwJPdhmfe1tpszD3hk24hNTh84oghSdS2Aw2DoLgW4iq5YIExRR7edqDuwBXhQiW3dsoQR18MbM", + "9zJ4+NJTHSrg6jpgOygQ2D1j6SIl6HaVsUbBp3K4rSIfB6Moc96uBRYKhHAooX0x9z6h6nSyXbQ6B55/", + "D5ufbVuczuR6Ormd6TRGawdxB63f1ssbpTO65smU1vKE7ElyXhSluuR54gzMQ6xZqkvHmtjc26M/s6iL", + "mzHPvzl5/dahfz2dpDnwMqlVhcFZYbviTzMrKmg2sEF8sWh75/M6O6mSweLXVZhCo/TVElzV3UAb7ZUH", + "bBwOwVZ0Rup5PEJop8nZ+UZoilt8JFDULpLGfEcekrZXhF9ykXu7mcd2IJoHJzeuxmRUKoQAbu1dCZxk", + "yZ2Km97uju+Ohrt2yKRwrC11gVdU+lozJbsudAwv3hTO677iWNyPrCJ94SSrFVoSEp2LNG5jlTNtmUOS", + "78w2Zth4QBm1ECsx4IqVlQhg2WZ6xEW3g2QwRpSYvlDkEO1myj1rUknxWwVMZCCN/VTiruxsVKym6Kzt", + "/ePU6g79sRxgstA34G+jY4SFLbsnHiKxXcEIPXU9dF/VV2Y/0doiheHWjUtiD4d/OGLvSNzirHf84biZ", + "gheXbY9b+ApJX/5ZxqBy1LufQPGXV1dhc2CM6JMmQifzUv0O8XseXo8jqTi+lKfAKJffQY6IOW+sO83L", + "LM3og8s9pN2EVqh2kMIA1+PKB245rCnoLdRc0lLTCwOtWLc4w4RRpYcEv2EYh3MvEjfnVzMeK7holQyL", + "00njAG7Z0o1ivrOnva4TG2h0FviS67aCsqwLKJssuX7FlhsqDDTsaFWh0QyQa0OdYEr+v1yrCJhKXnFJ", + "D1XYfrSVXG8NZPyyva5UiTUSdNzsn0EqVjyPaw5Z2jfxZmIh6A2GSkNQ5N8BovdtiIvcQwl1uo4jzemc", + "HU2Dl0bcamTiUmgxywFbPKIWM65RkteGqLqLnR5Is9TY/PGI5stKZiVkZqmJsFqxWqnD603tvJqBuQKQ", + "7AjbPXrB7qPbTotLeGCp6M7nyfGjF2h0pT+OYgeAe0NjmzTJUJz8w4mTOB+j35JgWMHtoB5E08npEa1h", + "wbVlN1HXMXsJWzpZt3svrbjkC4hHiqx24ER9cTXRkNahi8zoBRhtSrVhwsTHB8OtfBqIPrfij9BgqVqt", + "hFk5545WK8tPTQV/GtSDo+dkXPFVj5f/iD7SwruIOpfIz2s0pfMtNmv0ZL/hK2iTdco4FcbIRRO94EtC", + "s1Nfdwer0dZFaIk2diw7dVRzMJhhzopSSIMXi8rMk7+xdMlLnlrxdzCEbjJ7/jRSgbddCVLuh/hnp3sJ", + "GsrLOOnLAbb3OoTry+5LJZOVlSjZgybbI9iVg87cuNtuyHe4HfRYpcxCSQbZrWqxGw8k9a0YT24BeEtW", + "rOezFz/uPbPPzplVGWcPXtkV+unda6dlrFQZK6bXbHencZRgSgGXGLsXXyQL85ZrUeajVuE22H9Zz4NX", + "OQO1zO/l2EXgaxW5nfqq0LUl3cWqR6wDQ9vUfrBsMHOgpqxdgffzO/288bnvfLJfPK74RxfZL7ykSGQ/", + "g4FFDKqDR5czq78H/m/OvlbrsYva2SF+Yf8FSBMlSSXy7OcmK7NTfL3kMl1G/Vkz2/GX5pmoenJ0PkVr", + "1i25lJBHwZEu+IvXGSNa7T/V2HFWQo5s260HT9PtTK5BvI2mR8oPaMkrTG4HCKnaTnirA6rzhcoYjtMU", + "SGukZ/8dgaDa828VaBNLHsIPFNSFdkt736ViwwxkhrfFA/YdvQS7BNYqf4O3tLqKgCt9Swb1qsgVz6ZY", + "yOH8m5PXjEalPvTYCRU7XuAlpT2Ljr0qqP04LjzYv1sST10YD2d7LLWdtTZYjUobvipiyaG2xblvgBmo", + "oQ0fry8hdQ7Yq+BNR8ojtSAsP8xFubI3rhoa6S7IE/Y/xvB0iVeylkgdZvnxVbo9V+rgZbz6hZu6ICLu", + "O4u3K9RNdbqnTNl785XQ9AAoXEI7H7VOznYmAZ+f2p5eWUlJnBLVPbYVD7gJ2T1yFKjhzfxRzDqE31Mh", + "pyL3+xYtP8Ne0QJN3QrovSfxKLuxfrnEP+yccqmkSLE8Uuxodi+FjvGBjagk1TWy+i3udmhkc0Xrrtdh", + "co6Kg5XYvSB0hOsb4YOvdlGJO+hPg09SLrlhCzDaSTbIpv75AGcHFFKDK3CJ78oGclKVLb8iSsioqzqp", + "XRp7shGmxQxc7L613964az/Gi18IiQq+I5sLTSdLHT5kaOytQBi2UKDdfNq5wfq97XOAabIZrD8e+IcP", + "qRoMuuXstMkH3Qd14j3SzgNs2760bV2doPrnVgQyDXpSFG7Q4cclovqAWctBAkc8i4l37QTEreGH0Law", + "29ZQEjxPLaPBJTqiocBzuMcY9UMLnUd8rNJKHIUtGIVwRSsYCBlB47WQ0DzLGTkg0uiRgAuD+3Wgn05L", + "bkgFHCXTzoHn6H2OCTRtnOvhtqC6tYQsSXCOfozhZWzeiBgQHHWDRnHjclO/Bmq5O1AmXuIzxI6Q/Rcf", + "UKtySlSGGQWdNyBigsMKbv/KTPsA6G+Dvk5E3U3JaefscxINJYnOqmwBJuFZFqtI9TV+ZfjVF5eCNaRV", + "XZiyKFiKNVHaRWL63OYGSpXU1WrLWL7BLYcLHlWJcEP4sItfYUxCmW3w31hVxuGVcUEYe4cB+ogL9wrF", + 
"nnpzG1JP67U8nWixSMZTAs+U25OjGfpmjN70v1NOz9WijchnLg2xTcqFaxSTb9/YgyOsnNArNUpHS13Y", + "AIPulH8KD6+NdUpuWyrhUdarPYrOnvqpre0GiOFHs6Z4+A2E3gYFMTidr+Q9HArATQfjxblxmWuGs60i", + "aDAbiKJ3KO8HsYhbTocidihgx37u9R6nGfb0bIS9laA+FKyP0Pc+zpQVXDjXeCMs+pR1EenD5sJtm65Z", + "4O4kXJz3oMXu+8uhmGymhVzkwPB795mhC3Dp7PU78zRXH5Xkr4T0q3vmleDVUfHR+fejE3CoL2sGHTTa", + "nruS9jRNdyf//meKYWMgTbn5FzDh9ha990hTX9sl81TThNXlkEeVR26divH3lobrHzU1j5CfCqVFU4I7", + "9hDTyFi3c3xLKajf1IflA00uITVYd71xoJcA+1RzsoMFj/z9uw7SwN2xDgl05Y+21TzqF1vfcaD10pKC", + "1DoqVH0wvsLPSR0mhUIJK+AuQLp39toJB6PDnudzSI243JEG9o8lyCDFaOqNEPRebpAVJuowWqwisr+J", + "rUFoW5bWVnyCan63RmcoCeQCNvc0a3FDtHL21J8rNykggRRA6ZBYFlE6FoZAVlPnGRa65gykgg/7oe7Q", + "lOIafHMnSGq84VieJe2J2yQ6bhky/ujHqLFs173SfzEidChTrP9owLCy/QrfaND1e3i+AEV4JWWn/TJ9", + "V66ABSbt1Y4CX8oCtP/NZ+jSKLm4gPBVIHTLXPEy8y2idgZvwki2nEe99C5f8L6L9LweWTRBmv2Enkjh", + "JwzFTXNl9a9kKJ65HRcZPp6P0R9U8hsjPi1ecyjd62mo7OVKQ2KUD+rchsc2UriH3m9CBD1YbJGQGyyB", + "8q6p8YJFZzmWPOEusiWcICthxS12ZVCJZXjMbcR+Sd99BosvOrrTnFLz6+5C8z48V+geEUOunzN3Wu7O", + "jLmJZUVISW+16lhZFmlJGZr+i1JlVUoHdLgxauvT6KJHW0RJ1CiR9mfZu1/mWALsdZBneAGbQ1L9fal+", + "v5Qh9qRC0RyCvP7Oat+p0Sl+v84XNIHFneD5JQ0300mhVJ4M2PpP+9VlunvgQqQXkDF7dvjAtoFnS9h9", + "NDHXztyr5cZXUykKkJA9OGDsRFIosffrtssbdwaX98y28dc4alZRwSdnUzr4IOMxmViKqbylfPNgtks1", + "DVb43XIoArKjdsl6oLJNya8ij/gcjL2U9j2t3YdVGqYiLGJayo4nLCJeZP8mgn9hw2esGLUSaf8VhZ4q", + "McfXqBIeAX5aC/Bp661A0Xm4w9cYomcaUk4KnL08cJFXJbjMAXo2p1NOv+Bm6ZfPNu+rWfbIBo1h/VSS", + "nWu6FPjLiXuzp7svVJHkcAktR4JLZ6jSFLQWlxC+90OdWQZQ4FW9e4DELOQhX3VkiJt7EthYx1A3KlSI", + "sLRSbIfEGHiMPSH20GNZyGJ0KbKKt+inb/EUy8i33UNcR+6QvTdHfHK9reGeS0nqYm4xQ6ZLJ/FLaPm3", + "edqloyAFT7DUMAfeoqypcBtlZJC0ccrerIjGKH7o27QjWyZ4dmW75SWssdME75bkGsGbmt913SX9odmN", + "4x6A8R12oBca5IInYLwm5ND5whG2P9RECaYyyAmt6e+y8bkJNuIrWCKS3XaaVPGMorPa6xIYcPXL2i46", + "9C5T13yKBXWUxCJjfbOrRlcZ1ioPGcfK7vKS55/fdIqVlk6QHu6d2/hEQ9tbSGQipb5ZmNtrPmrswM52", + "d0PLt2jq/QfYNYr6OB0o5/OodQXvGUKRyXOWq+aFOwTJrhAmOUUfPWczl6JTlJAKLTrZi1e+jHJtasJX", + "BZrnj7fbtnbN82dlbsHGc6++sDdNSVaj8MRoMGy26BcWKgM7N8rlMe7rsUWEfjEZFdbK2HFcXLS8pVTi", + "uhMGqEq4Y69pEP+0p9e0XwVk7PTIM2gPnUpDf56jT+sWbSMHdTO3sS7/PnG31e0c46mPl+O13TFUgAiC", + "tawZosp+ffQrK2GOj9Uo9vAhDvDw4dQ1/fVx+7Pdzg8fxp9Z/lxBAkQjB8ONG+OYn4fCxik0eiBDobMe", + "lcizXYzRyjdpnnvCjIpfXMbZF3lw6hfy5fS3qnv0Y5/wpO4iIGEic20NHgwVZJKMSCJx3SIpI2gVSatS", + "mA0WwvGmf/FLNJzhu9pb6LzNdekEd/YZdQF1KaXGt1hpf7p+p3iO55HVqTE4zODTut+s+arIwW2Ur+7N", + "/gpP/vY0O3ry6K+zvx09O0rh6bMXR0f8xVP+6MWTR/D4b8+eHsGj+fMXs8fZ46ePZ08fP33+7EX65Omj", + "2dPnL/56z8ohizIhOvFp15P/i6+yJSdvT5Nzi2xDE16I+kVty8b+aRme4k6EFRf55Nj/9L/9DjtI1aoB", + "73+duKzOydKYQh8fHl5dXR2EXQ4X6ExIjKrS5aEfp/+S8dvTOjOHrpa4opR04U0GnhVO8Nu7b87O2cnb", + "04PgpczjydHB0cEjfEixAMkLMTmePMGfcPcscd0PHbNNjj9dTyeHS+A5+t7tHyswpUj9J33FFwsoD9wb", + "O/any8eHXpU4/OQcKdfbvh2G5aoPP7X8TdmOnljO9vCTr9KyvXWrDIrzswUdRmKxrdnhDJM/xzYFHTQe", + "ngpeMPThJ1SRB38/dBlx8Y94VaE9cOidsvGWLSp9MmuLa6eHe5L/8BP+B3nymoREDjEXLCWScdY0nzJh", + "GJ+pEsujmHRp5YKvyyB00HKCnEpMfppZ5ra9XhIGvgITlaQ8ft83mSAg5iGhJLBs3mzU1kiNLDZlBWGV", + "xPqkabVvzpv3R8mLj58eTR8dXf/Fnifuz2dPrkfGUrys4bKz+rAY2fAjFjVAQwzu38dHR7d47vNEBuSn", + "RQpele3VCaKVGLY9uqXqAGI1MXYkX3fAx94Pu55Onu454632o1Z0bOQdsK95xnxuJY796PONfSoxksXK", + "dUbn1vV08uxzzv5UWpbnOcOWQTWd/tL/JC+kupK+pVUyqtWKlxu/jXVLKDC32HiU8YVGT0YpLjnqdlLJ", + "1hMhk4/oPYvltw7IG234DeTNme31b3nzueQNLtJdyJs2oDuWN4/33PN//hn/W8L+2STsGYm7W0lYp/BR", + "SlFfA6Xk/0N6n7z/80am0R/7gLrvZMV+PvzUrtPe0pH1sjKZuqLKFNFDAYuR8txVLkMjaH2hMop5AE1A", + "LfvRJbzkG7T8igwYx0x8VZnmxms71x7d2idhITRv6C2ExAHQuIyjUIk+HoSqaUiVpBenOgeQw+yNyqB/", + 
"AOER81sF5aY5YxyOk2lLAjkWihTEu7VA7wuM6/0YDI3g5MHpM0f9zFTr78MrLow9plxkK1K039kAzw9d", + "znbn1yZNqvcFc7+CH0O3dPTXw7qmbPRj97IZ++ouWwONfMUN/7kxNoXGG2SJ2mzz/qNdWaxY5rilsUUc", + "Hx5itNhSaXM4uZ5+6tgpwo8f68X0pWzqRb3+eP0/AQAA//9NWINiQ74AAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index 4df6570b5c..89c8e57209 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -548,222 +548,228 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3PbuJLoX0FptyqPFWXnuSeumtrrJPPwTpJJxZ7ZPWeSOwORLQnHFMADgLY0ufnv", - "t9AASJAEJcp27GTGnxKLeDQajUajnx9HqVgWggPXanTwcVRQSZegQeJfNE1FyXXCMvNXBiqVrNBM8NGB", - "/0aUlozPR+MRM78WVC9G4xGnS6jbmP7jkYR/lUxCNjrQsoTxSKULWFIzsF4XpnU10iqZi8QNcWiHOHo5", - "+rThA80yCUp1ofyJ52vCeJqXGRAtKVc0NZ8UOWd6QfSCKeI6E8aJ4EDEjOhFozGZMcgzNfGL/FcJch2s", - "0k3ev6RPNYiJFDl04XwhllPGwUMFFVDVhhAtSAYzbLSgmpgZDKy+oRZEAZXpgsyE3AKqBSKEF3i5HB38", - "OlLAM5C4WymwM/zvTAL8AYmmcg569GEcW9xMg0w0W0aWduSwL0GVuVYE2+Ia5+wMODG9JuR1qTSZAqGc", - "vPvuBXn06NEzs5Al1RoyR2S9q6pnD9dku48ORhnV4D93aY3mcyEpz5Kq/bvvXuD8x26BQ1tRpSB+WA7N", - "F3L0sm8BvmOEhBjXMMd9aFC/6RE5FPXPU5gJCQP3xDa+0k0J57/RXUmpTheFYFxH9oXgV2I/R3lY0H0T", - "D6sAaLQvDKakGfTX/eTZh48Pxg/2P/3br4fJP9yfTx59Grj8F9W4WzAQbZiWUgJP18lcAsXTsqC8i493", - "jh7UQpR5Rhb0DDefLpHVu77E9LWs84zmpaETlkpxmM+FItSRUQYzWuaa+IlJyXPDpsxojtoJU6SQ4oxl", - "kI0N9z1fsHRBUqrsENiOnLM8NzRYKsj6aC2+ug2H6VOIEgPXhfCBC/pykVGvawsmYIXcIElzoSDRYsv1", - "5G8cyjMSXij1XaV2u6zIyQIITm4+2MsWcccNTef5mmjc14xQRSjxV9OYsBlZi5Kc4+bk7BT7u9UYrC2J", - "QRpuTuMeNYe3D30dZESQNxUiB8oRef7cdVHGZ2xeSlDkfAF64e48CaoQXAER039Cqs22//fxT2+IkOQ1", - "KEXn8JampwR4KrL+PXaTxm7wfyphNnyp5gVNT+PXdc6WLALya7piy3JJeLmcgjT75e8HLYgEXUreB5Ad", - "cQudLemqO+mJLHmKm1tP2xDUDCkxVeR0PSFHM7Kkq2/2xw4cRWiekwJ4xvic6BXvFdLM3NvBS6QoeTZA", - "htFmw4JbUxWQshmDjFSjbIDETbMNHsZ3g6eWrAJw/CC94FSzbAGHwypCM+bomi+koHMISGZCfnacC79q", - "cQq8YnBkusZPhYQzJkpVdeqBEafeLF5zoSEpJMxYhMaOHToM97BtHHtdOgEnFVxTxiEznBeBFhosJ+qF", - "KZhw82Ome0VPqYKnj/su8PrrwN2fifaub9zxQbuNjRJ7JCP3ovnqDmxcbGr0H/D4C+dWbJ7YnzsbyeYn", - "5iqZsRyvmX+a/fNoKBUygQYi/MWj2JxTXUo4eM/vm79IQo415RmVmfllaX96XeaaHbO5+Sm3P70Sc5Ye", - "s3kPMitYo68p7La0/5jx4uxYr6KPhldCnJZFuKC08SqdrsnRy75NtmPuSpiH1VM2fFWcrPxLY9ceelVt", - "ZA+QvbgrqGl4CmsJBlqazvCf1Qzpic7kH+afoshNb13MYqg1dOzuW9QNOJ3BYVHkLKUGie/cZ/PVMAGw", - "rwRat9jDC/XgYwBiIUUBUjM7KC2KJBcpzROlqcaR/l3CbHQw+re9WrmyZ7urvWDyV6bXMXYy8qiVcRJa", - "FDuM8dbINWoDszAMGj8hm7BsDyUixu0mGlJihgXncEa5ntTvkQY/qA7wr26mGt9WlLH4br2vehFObMMp", - "KCve2oZ3FAlQTxCtBNGK0uY8F9Pqh7uHRVFjEL8fFoXFB4qGwFDqghVTWt3D5dP6JIXzHL2ckO/DsVHO", - "Fjxfm8vBihrmbpi5W8vdYpXiyK2hHvGOIridQk7M1ng0GBn+KigO3wwLkRupZyutmMY/uLYhmZnfB3X+", - "OkgsxG0/ceErymHOPmDwl+DlcrdFOV3CcbqcCTls970Y2ZhR4gRzIVrZuJ923A14rFB4LmlhAXRf7F3K", - "OL7AbCML6yW56UBGF4U5OMMBrSFUFz5rW89DFBIkhRYMz3ORnv5A1eIKzvzUj9U9fjgNWQDNQJIFVYvJ", - "KCZlhMerHm3IETMN8fVOpsFUk2qJV7W8LUvLqKbB0hy8cbHEoh77IdMDGXm7/IT/oTkxn83ZNqzfDjsh", - "J8jAlD3OzoKQmae8fSDYmUwDVDEIsrSvd2Je3TtB+aKePL5Pg/boW6swcDvkFoE7JFZXfgyei1UMhudi", - "1TkCYgXqKujDjINipIalGgDfSweZwP136KNS0nUXyTj2ECSbBRrRVeFp4OGNb2apNa+HUyEvxn1abIWT", - "Wp9MqBk1YL7jFpKwaVkkjhQjOinboDVQbcLbzDTaw8cw1sDCsaafAQvKjHoVWGgOdNVYEMuC5XAFpL+I", - 
"Mv0pVfDoITn+4fDJg4e/PXzy1JBkIcVc0iWZrjUocte9zYjS6xzudVeGr6My1/HRnz72WsjmuLFxlChl", - "CktadIey2k0rAtlmxLTrYq2JZlx1BeCQw3kChpNbtBOruDegvWTKSFjL6ZVsRh/CsnqWjDhIMthKTLsu", - "r55mHS5RrmV5FU9ZkFLIiH4Nj5gWqciTM5CKiYip5K1rQVwLL94W7d8ttOScKmLmRtVvyVGgiFCWXvHh", - "fN8OfbLiNW42cn673sjq3LxD9qWJfK9JVKQAmegVJxlMy3njJTSTYkkoybAj3tHfgz5e8xS1aldBpP3P", - "tCXjqOJXa54GbzazUTlk88YmXP5t1saK18/Zqe6oCDgGHa/wMz7rX0Ku6ZXLL+0JYrC/8BtpgSWZaYiv", - "4FdsvtCBgPlWCjG7ehhjs8QAxQ9WPM9Nn66Q/kZkYBZbqiu4jOvBalo3expSOJ2KUhNKuMgANSqlil/T", - "PWZ5tAeiGVOHN79eWIl7CoaQUlqa1ZYFQSNdh3PUHROaWupNEDWqx4pRmZ9sKzudNfnmEmhmXvXAiZg6", - "U4EzYuAiKVoYtb/onJAQOUsNuAopUlAKssSpKLaC5ttZJqI34AkBR4CrWYgSZEblpYE9PdsK5ymsE7SH", - "K3L3x1/UvRuAVwtN8y2IxTYx9FYPPmcP6kI9bPpNBNeePCQ7KoF4nmtel4ZB5KChD4U74aR3/9oQdXbx", - "8mg5A4mWmc9K8X6SyxFQBepnpvfLQlsWPV5e7qFzwpaot+OUCwWp4JmKDpZTpZNtbNk0arzGzAoCThjj", - "xDhwj1DyiiptrYmMZ6gEsdcJzmMFFDNFP8C9AqkZ+Rcvi3bHTs09yFWpKsFUlUUhpIYstgYOqw1zvYFV", - "NZeYBWNX0q8WpFSwbeQ+LAXjO2TZlVgEUV0p3Z25vbs4VE2be34dRWUDiBoRmwA59q0C7IaeLj2AMFUj", - "2hIOUy3KqdxrxiOlRVEYbqGTklf9+tB0bFsf6p/rtl3iorq+tzMBZnbtYXKQn1vMWh+nBTVPaByZLOmp", - "kT3wQWzNnl2YzWFMFOMpJJso3xzLY9MqPAJbD2lZzCXNIMkgp+vuoD/bz8R+3jQA7nj98BEaEuvPEt/0", - "mpK9+8CGoQWOp2LCI8EvJDVH0Lw8agJxvbeMnAGOHWNOjo7uVEPhXNEt8uPhsu1WR0bE2/BMaLPjlhwQ", - "YsfQh8Dbg4Zq5ItjAjsn9bOsPcXfQbkJKjFi90nWoPqWUI+/0wJ6lGnODTg4Li3u3mLAUa7Zy8W2sJG+", - "E9uj2XtLpWYpK/Cp8yOsr/zl154gam8iGWjKcshI8MG+AouwP7GOGO0xL/YSHKSE6YLf0cJElpMzhRJP", - "E/hTWOOT+6318DsJ/AKv4CkbGdVcT5QTBNT7DRkJPGwCK5rqfG3kNL2ANTkHCUSV0yXT2rpsNl+6WhRJ", - "OEBUwb1hRmfNsd5xfgeGmJeOcahged2tGI/sk2AzfCetd0EDHe4pUAiRD1AedZARhWCQ4Z8Uwuw6cx7C", - "3o3UU1IDSMe00ZRX3f53VAPNuALyd1GSlHJ8cZUaKpFGSJQTUH40MxgJrJrTmfhrDEEOS7APSfxy/357", - "4ffvuz1niszg3LvVm4ZtdNy/j2qct0LpxuG6AlWhOW5HkesDNf947znnhRZP2W5idiMP2cm3rcErc4E5", - "U0o5wjXLvzQDaJ3M1ZC1hzQyzLyO4w5S6gdDx9aN+37MlmV+VRs+oywvJfRbx96//3W2fP/+A/nOtvSG", - "7bEn8hAd53VYxMzdRqVE1xqSM/O+lYJmRkCI6vZxkXyeVM6ZKgrOUhlw/sedQ8rXrUC+oTCQKaS0tF7J", - "jms7CGr3UDWJyIut3W2jMLqQgerxMtf20g6xOpeiLIiqtt1SgaYaPo+quR46BmV34sA3qP7Y5x5knon5", - "+gpuazsQkVBIUMhbQ/WKsl/FLIy/ccxXrZWGZVcDbbv+1vM+e9f7zhE8ZxySpeCwjoacMg6v8WOst+Xv", - "PZ3xpu3r2xaeG/C3wGrOM4QaL4tf3O2Aob2t/OKuYPPb47aMD2HkESrXIC8IJWnOUPUmuNKyTPV7TvFx", - "Hxy2iP+Af8b0q3te+CZx/VJE/eOGes8p+o5UT/4oX5xBhC9/B+C1Pqqcz0HplpQ4A3jPXSvGScmZxrmW", - "Zr8Su2EFSDTiT2zLJV2TGc1RO/UHSEGmpW4yVwyQUJrlubOEmGmImL3nVJMcDFd9zfjJCofzlkRPMxz0", - "uZCnFRYm0fMwBw6KqSTu5/C9/YouaG75C+eOhtGq9rPVnZvx6yiKNb796wjM/3v3vw5+PUz+QZM/9pNn", - "/7H34ePjT/fud358+Ombb/5f86dHn76591//HtspD3vMfd9BfvTSvSmOXqLgWCvPO7Bfm+J0yXgSJbLQ", - "RNyiLXLXiL+egO411Qp6Ae+5XnFDSGc0ZxnVFyOHNovrnEV7OlpU09iIlhrBr3VHcewSXIZEmEyLNV74", - "Gu+6BsUDZdCa42Jf8LzMSm63slTOooR+4N5FQ8zGVTCUTYJwQDBSZkG9f5H78+GTp6NxHeFSfR+NR+7r", - "hwgls2wVi2PKYBWTst0BwYNxR5GCrhXoOPdA2KPeKNYoHg67BPM8UwtWXD+nUJpN4xzOe9e61/qKH3Hr", - "9mrOD9qG1k7lLGbXD7eWABkUehELjm5ICtiq3k2Alr2+kOIM+JiwCUzar+VsDsr7xeRAZxiki/YNMSRa", - "oDoHltA8VQRYDxcy6Ekaox8Ubh23/jQeuctfXbk87gaOwdWeszIE+b+1IHe+//aE7DmGqe7YkDo7dBAE", - "FdFCOT//hieH4WY2JYSNKXzP3/OXMGOcme8H73lGNd2bUsVStVcqkM9pTnkKk7kgBz504CXV9D3vSFq9", - "WVuCoA1SlNOcpeQ0lIhr8rSR+NFnI83nwjwc20btrvzqporyFztBcs70QpQ6caHGiYRzKmNGA1WFmuLI", - "NlHAplnHxI1tWbELZXbjx3keLQrVDjnrLr8ocrP8gAyVC6gyW0aUFtLLIkZAsdDg/r4R7mKQ9NzHqZcK", - "FPl9SYtfGdcfSPK+3N9/BKQRg/W7u/INTa4LaOgrLxQS19ZV4sLtuwZWWtKkoPMepYEGWuDuo7y8xEd2", - "nhPs1oj98r6tOFS9AI+P/g2wcOwcx4KLO7a9fM6Y+BLwE24htjHiRm0xveh+BdFgF96uVkRZZ5dKvUjM", - "2Y6uShkS9ztTpZKYGyHLm7EVm6OroMu6MQWSLiA9hQwTAMCy0Otxo7v3lHCCpmcdTNlEGTaWA6O5UbU7", - 
"BVIWGXWieEuhZDCsQGvvq/gOTmF9Iupg8F3iaJthnarvoCKlBtKlIdbw2Lox2pvv3HFQ11UUPjoSw2Q8", - "WRxUdOH79B9kK/JewSGOEUUj7LAPEVRGEGGJvwcFF1ioGe9SpB9bnnllTO3NF8mr4Xk/cU3qx5PznAlX", - "g9GU9vsSMOuOOFdkSo3cLlzCGBu6GHCxUtE59EjIoXZ9YIBgQyOPg2y796I3nZi1L7TOfRMF2TZOzJqj", - "lALmiyEVfMy0/KX8TNaAYxWoBPPAOYRNcxSTKscyy3SobFg5bGKrPtDiBAyS1wKHB6OJkVCyWVDlc9lg", - "yh9/lgfJAJ8xFHdTAoajwNUnyOtTKb49z22f087r0qVh8LkXfMKF8Gk5IHmCkfDRuzi2HYKjAJRBDnO7", - "cNvYE0odFlxvkIHjp9ksZxxIEvMaokqJlNlkRPU14+YAIx/fJ8SqgMngEWJkHICNhkkcmLwR4dnk812A", - "5C6smfqx0aQZ/A3xCAzrR2tEHlEYFs54j8e25wDUuZpV91fL4RGHIYyPiWFzZzQ3bM69+OpBOnkAUGxt", - "Rf070/i9PnF2gwbeXiw7rcleRRdZTSgzeaDjAt0GiKdildgQrKjEO11NDb1HXYsxICx2MG3GhTuKTMUK", - "3S3warGurFtg6YfDgxG88FdMIb1iv77b3AKzadrN0lSMChWSjFPnVeTSJ04MmbpHgukjl7tBEoULAdBS", - "dtTpRt3jd+sjtSmedC/z+lYb18mBfNRG7Pj3HaHoLvXgr6uFqdIevG1LLFE9RdNroJnxIRAhY0Rv2ETX", - "SNM1BSnIAR8FSUOISk5jpjvztgG8cY59t0B5gXklKF/fC1xRJMyZ0lAr0c3F7K1C162epJjOSohZ/+p0", - "IWdmfe+EqK4pmy8FOzaWee0rQFfOGZNKJ2iBiC7BNPpO4aP6O9M0Lis1nV1sZkeWxXkDTnsK6yRjeRmn", - "Vzfvjy/NtG8qlqjKKfJbxgnQdEGmmIk06gK3YWrrJblxwa/sgl/RK1vvsNNgmpqJpSGX5hxfyblocd5N", - "7CBCgDHi6O5aL0o3MMggcrHLHQO5yR5OjFycbNK+dg5T5sfe6jbi4yf77ig7UnQtgcJg4yoYmomMWMJ0", - "kMizG1LYcwZoUbBs1dKF2lF7X8x0J4WHz5DUwgLurhtsCwYCvWcsqkGCaibDqgV8m5K1kYtiMggzJ82U", - "VSFDCKdiyicU7yKqinrahqsToPmPsP7FtMXljD6NR5dTncZw7Ubcguu31fZG8YymeatKa1hCdkQ5LQop", - "zmieOAVzH2lKceZIE5t7ffQ1s7q4GvPk28NXbx34n8ajNAcqk0pU6F0Vtiu+mlXZvFs9B8QnLDZvPi+z", - "W1Ey2PwqWVColD5fgEsOG0ijnSx2tcEhOIpOST2LewhtVTk724hd4gYbCRSViaRW31kLSdMqQs8oy73e", - "zEPb482DixuWCjHKFcIBLm1dCYxkyZWym87pjp+Omrq28KRwrg3pa5c2Q7MigrdN6EaERHUckuqSYg46", - "qxXpMideLlGTkKicpXEdK5+i2y23tjPTmGDjHmHUjFiyHlMsL1kwlmmmBjx0W0AGc0SR6fMZ9uFuKlxp", - "jZKzf5VAWAZcm08ST2XroGLSP6dt716nRnbozuUGthr6evjLyBhh/sX2jYdAbBYwQktdB9yX1ZPZL7TS", - "SJkfApPEDgb/cMbOlbjBWO/ow1GzdV5cNC1uYSWMLv8zhGGzJm8vw+Efry4RZM8c0bIaTCUzKf6A+DsP", - "n8eRiBGfcZKhl8sfwAe4mdfanbo6SD1773b3STehFqrppNBD9bjzgVkOU995DTXldqttlvuGr1ucYEKv", - "0j07fk0wDuaOJ25Oz6c0lhfQCBkGpsPaANzQpWtBfGePe6f2Zy4J6IQEtuSqLbPBwAXIOpirm1jkggKD", - "nXawqFBLBki1oUwwtva/XInIMCU/p9wWSzD97FFyvRVY5ZfpdS4khvKruNo/g5QtaR6XHLK0q+LN2JzZ", - "UgGlgiAXvRvI1lixVOTy+VsTe42aoxnZHwfVLtxuZOyMKTbNAVs8sC2mVCEnrxRRVRezPOB6obD5wwHN", - "FyXPJGR6oSxilSCVUIfPm8p4NQV9DsDJPrZ78IzcRbOdYmdwz2DR3c+jgwfPUOlq/9iPXQCuJsgmbpLN", - "wsCXOB2j3dKOYRi3G3USjXq2hZz6GdeG02S7DjlL2NLxuu1naUk5nUPcU2S5BSbbF3cTFWktvPDMViFR", - "Woo1YT0hSKCp4U893ueG/VkwSCqWS6aXzrijxNLQU51o3k7qh7MlTVyOUA+X/4g20sKbiFqPyOtVmtr7", - "LbZqtGS/oUtoonVMqM3fkLPae8FnLiZHPj0MJk2tcqVa3Ji5zNJRzEFnhhkpJOMaHxalniV/I+mCSpoa", - "9jfpAzeZPn0cSRTbTFjIdwP82vEuQYE8i6Ne9pC9lyFcX3KXC54sDUfJ7tXRHsGp7DXmxs12fbbDzUMP", - "FcrMKEkvuZUNcqMBp74U4fENA16SFKv17ESPO6/s2imzlHHyoKXZoZ/fvXJSxlLIWM63+rg7iUOClgzO", - "0HcvvklmzEvuhcwH7cJloL9Zy4MXOQOxzJ/l2EPguYi8Tn3y4kqT7nzVI9qBvmNqPhgymLqhxqSZKPb6", - "jX5e+dw1PpkvHlb8ow3sDW8pItmvoGcTgyTW0e3Mqu+B/ZuS52I1dFNbJ8Rv7BeAmihKSpZnv9RRma0c", - "4ZLydBG1Z01Nx9/qakbV4uz9FE2ttqCcQx4dzsqCv3mZMSLV/lMMnWfJ+MC27bTldrmtxdWAN8H0QPkJ", - "DXqZzs0EIVabAW+VQ3U+FxnBeeo8XjX37Ka7D5IS/6sEpWPBQ/jBOnWh3tK8d21OXAI8w9fihHxvq5Eu", - "gDSytOArzcbHQ+YztFqFelnkgmZjYsY5+fbwFbGz2j62JofNyTvHR0pzFS19VZCicJh7sC+vEQ9dGD7O", - "Zl9qs2qlMWmS0nRZxIJDTYsT3wAjUEMdPj5fQuxMyEv7clT+XWInMfQwY3JpXlzVaFZ2QZow/9Gapgt8", - "kjVYaj/JD08m7alSBQXcqkIsVd4+PHcGbpdP2qaTHhNh3s3nTNkilHAGzXjUKjjbqQR8fGpzebLk3FJK", - "VPbYlDzgImj3wFlHDa/mj0LWQvyOArnNxb5rbu1j7BXNI9RO1N2p3GajG6sCG764cEq54CzFLD6xq9kV", - "tBxiAxuQ8KitZPVH3J3QyOGKpgev3OQcFnsThntG6BDXVcIHX82mWuqwf2qsnLigmsxBK8fZIBv7LPdO", - 
"D8i4ApeHEWubBnxSyIZdETlk1FSdVCaNHckIw2J6HnbfmW9v3LMf/cVPGUcB36HNuaZbTR3W29PmVcA0", - "mQtQbj3N2GD1q+kzwTDZDFYfJr4+H45hzXJm2dYG3R3q0FuknQXYtH1h2tpUJvXPDQ9kO+lhUbhJ+2sg", - "ROUBveK9CI5YFhNv2gmQW40fjraB3Da6kuB9aggNztAQDQXewx3CqOoBtGrNGKHVUhS2INaFK5rBgPEI", - "GK8Yh7p6ZOSCSKNXAm4MnteefiqVVFsRcBBPOwGao/U5xtCUdqaHyw7V2mBECa7Rz9G/jXUpgx7GUTWo", - "BTfK11XRSkPdgTDxAqvlOkR2CxOgVOWEqAwjClqlCmKMwzBunwqpeQF0j0FXJrLdtaT25OxyE/UFiU7L", - "bA46oVkWy4v5HL8S/OoTRcEK0rLKn1gUJMWcKM0kMV1qcxOlgqtyuWEu3+CS0wW1PyLUENYf8TuMQSjT", - "Nf4bSx7YvzPOCWNnN0DvceGKJewoNzdH6ki9hqYTxebJcEzgnXJ5dNRTX4zQ6/5XSum5mDcBuebUEJu4", - "XLhHMf72rbk4wswJnYyY9mqpEhug053wFdvw2ViF5Da5El5lnRSZaOypct5tVkD013Ya4+XX43obJMSg", - "9n611sM+B9y011+cahe5pinZyIJ6o4Gs946N+0Eo4prTPo8d67BjPnd6D5MMO3I2jr0Rod4VrAvQj97P", - "lBSUOdN4zSy6mHUe6f3qwk2Hrt7g9iKcn3evxu7Hsz6fbKIYn+dA8Hu7Gs4puHD2qhy6Xav3SvJPQvur", - "q0Zqx6u84qPr73on4FQ3qwbtVdqeuMzrdpnuTf7jL9aHjQDXcv0FqHA7m96pJdSVdq16qm5Cqqy9g7L4", - "Nm7FeFmg/vxHdc4jpKdCKFZnio7VCxro63aCJX+C/E3dsbyjyRmkGtOD1wZ0CbBLNiczWVCL7jYPUs/b", - "sXIJdOmPNuU86uYE33KhdcKSgtA6m095MjzDz2HlJoVMCavBzYG7cnDNgIPBbs+zGaSanW0JA/ufBfAg", - "xGjslRC2rGsQFcYqN1rMIrK7iq0GaFOU1kZ4gmx+lwanLwjkFNZ3FGlQQzTB89jfKxdJIIEYQO6QGBIR", - "KuaGYLWmzjLMVEUZiAXv9mO7Q52Kq7c0TBDUeMG5PEmaG7cOdNwwZbw2xaC5TNedwn/RI7QvUqyb275f", - "2H6JpQRUVbbNJ6AIn6TkKJL92SWwwKC9ylDgU1mA8r/5CF07S85OISxeg2aZcyoz3yKqZ/AqjGTDfdQJ", - "74qmrKbKOlE6O3jlpNkN6IkkfkJX3DQXmO65z5+56RcZ1nhH7w+8DjD9NMI1A+mKfKGwlwsFiRbeqXMT", - "HJtQ4eqRXwQJqjfZogWuNwXKuzrHCyadpZjyhDrPlnCBRMKSGuhkkImlf85NyH5hv/sIFp90dKs6paLX", - "ZGsqFe+ey1QHiSHVz4i7LbdHxlxEs8I4tyVFVSwtCzeoDFX/hRRZmdoLOjwYlfZpcNKjDawkqpRIu6vs", - "vC9zTAH2KogzPIX1nhX90wXldS625rG2IpRdQxDX39rtK1U6xd/X+dwuYH4lcN6k4mY8KoTIkx5d/1E3", - "u0z7DJyy9BQyYu4O79jWU12D3EUVc2XMPV+sfTaVogAO2b0JIYfcuhJ7u24zvXFrcn5Hb5p/hbNmpU34", - "5HRKk/c87pOJqZjkJfmbH2YzV1NgmN8lp7KDbMldsurJbCPpeaTWzGToo7RraW3X/6iJykIRk1IuGMg+", - "6Hx39UoR0g9KH2x+/YR5LmoHOmnVkygt1eUgmsLL61rrOKwIg++wBbzwURyUYfDcyIFzw15uryukBEvp", - "pYTG8re9s90Ca74UbJHCsAizTJt1yHpINPclUKKoF5VuIo7nrgoDk1oIjol+uqoPhepqzBccEo45l/KM", - "5tevvsBsJ4eID1cSMb7Q8P0bItmiUl3M1eQVHTR38Na9uqn5W1S3/A+YPYraGdxQTu9Ylb/w2lnMa0dz", - "kou6GBIOSc5xTGuYePCUTJ2bfCEhZYq1IojOfSrT6rmHmb3rSpmb35fb1vmL0JcgY/dAEAV5U6dF1ALv", - "hxrC+ojeMFPpOblRKo9RX4csIviL8agwXn3LdXHasFjYNLMtVxwh4YotF4EPwo6Wi24k/tDlWe28uXRK", - "Bd11Dr6tG7iNXNT12oaa3brI3ZQ7b4i1LJ4S03RHc51FCOaTJQgq+f3B70TCDAtGCHL/Pk5w//7YNf39", - "YfOzOc7378crcl6Xoc7iyI3h5o1RzC99rpvWPbHHS7i1HyXLs22E0fD5rkuuoFfzby7q40aKvvxm9and", - "o+oS7+/iItDeBERMZK2NyYOpAm/uAY7crlvEbRtfJmkpmV5jMgqvfmO/RU2K31cae2fxqcKX3d2nxSlU", - "6Uxq/X6p/O36vaA53kdGpkYHDY1VGL9d0WWRgzso39yZ/ic8+tvjbP/Rg/+c/m3/yX4Kj58829+nzx7T", - "B88ePYCHf3vyeB8ezJ4+mz7MHj5+OH388PHTJ8/SR48fTB8/ffafdwwfMiBbQEc+9HH0v1gZKTl8e5Sc", - "GGBrnNCCVcVXDRn78g40xZMIS8ry0YH/6f/4EzZJxbIe3v86cpFVo4XWhTrY2zs/P5+EXfbmqNBLtCjT", - "xZ6fp1v08u1R5R1vTcG4o9bx2ZACbqojhUP89u7b4xNy+PZoUhPM6GC0P9mfPMBiZgVwWrDRwegR/oSn", - "Z4H7vueIbXTw8dN4tLcAmqP9y/yxBC1Z6j+pczqfg5y4Ohfmp7OHe16U2PvolJmfzKjzWJoO6+cfOHd3", - "yz84wwg6S1k//kY6ZeWy+46rJNtO18AzdL+2+kHD2ipkHWV1NsmjmlH5nBo2ydjBr5HqXzM2L2Wr3nnl", - "qeIy8DNF/vv4pzdESOKeNG9pehqarpEg/1WCXNcE41hZmB3LJ0R2jtBLNS+aXoO1mBQrLBuro4Ezm30O", - "KLWyK9ScSMsSQkhqvmp45X7y7MPHJ3/7NBoACBq5FGDs9O80z3+3BelhhZYCn33ERZePI8l/UTwe13pq", - "7FBv0xjdHquvYX2Hqk3T2f53Ljj83rcNDrDoPtA8Nw0Fh9gefMDoXqQEPEQP9/evrDBMFV9inSerUTxJ", - "XGCgLoexnyK1Hn19mJ5Cj4+vcKFN77BLL7c9XGfRz2mGOfdBabuUB1/tUo442pkNxyf2Rvs0Hj35ivfm", - "iBueQ3OCLYPUGd1b5Gd+ysU59y2NNFMul1SuUVYJCoO0YtfoXKHKElmkPduNUgCjD596r7S9MNP53seG", - 
"qTK71IXXKfJw9HLLHXhH9XHObuK5ViJ1873Kk43GLJctHjN3q3sT8n3YG7k3xnHbKOlS8roMdiHFGcsM", - "H3ZeHj7dTQ3bHRWGuEdv5ED/e3s5f9bL+bCpmmhkLosB0yDxjTB1fBkuezt24w9adbAuVGcqyFd+gayv", - "n7UYR+tlaGf6EHu4beXCt7jrwV2fDBTAW4lDzTzzn5/ven/f6ppo3AefkSt/5RLda5obOgmW24qFtOn8", - "biW9v4ykV7m32XKWPoPt5WQ/rGWx99GnaLwCec+lqBwg6YVv5qBvkELwboud3JvYfIthm4vxDOfPtlWG", - "w8SZt9Lb55beuhlnY2DUeURvTmJDGBZ1StpdCk02KsjslDr3KxXR/sLI6pXJDKTbpbEL8MaOpOU48Wfj", - "mX9KCcsh7Va2+kvLVpUL+aWkq0bOaBeUEFiXLqV3a+vVmK7ErGYYQcDZqgqd7giP6/oWhsVgyhEfba7G", - "/tmHlk37IrSbNe48Crvy0/cQvj6fr49ebhOdviIlzuDUV5FbIL43n5uXRg0G767HYDCMNz3ef3x9EIS7", - "8EZo8h3e4p+ZQ35WlhYnq11Z2CaOtDe1STk3cSXeYkvIKOpkmwGPwpzmYUJP6yhx11WSC4Ok702IT/2p", - "qkTmLjR3LmhepyChcm47GR5nkEDu+D8PcPw7E/KdkIRxrcbo76Vdlmtyh3F98ODho8euiaTn1p2q3W76", - "9PHB4TffuGZ1olf7vuk0V1oeLCDPhevg7obuuObDwf/+/R+TyeTOVnYqVs/Xb2xWpS+Fp3afdeHG9+3W", - "V75JsVe6y3a1FXXXYnB/LlZR7i9Wt7fPjd0+Bvt/iltn2iQj9wCt1JONUNQrvIXsMdnlHhr7xKmG71SX", - "yYS8ES4rQJlTSYTMQLrKD/OSSso1QDbxlEpmGP6LUdBpzoBr82DEXPYyUSwDG0w5LyVkJGdLLPYo4Qzd", - "1HF6fMs3INjO6NGZ9Ytl8q/pKqzSXV3TWrglY9z1kq58NQ3MFy8k/vTNN2R/XL9a8twMkFSIiTHXJV2N", - "rlHbVxHbIBfwZsLrrT6yOPYQzVEt/djCRbSZXfevzbm/Wondkrvb2CvinDtbc2prTag/cLH3GzUHVrCz", - "tTaw+MOaVLGxRsrzIlScxZkZhioFvmDbwFaVdPTx2Ubv7SG+ffxfipW0CWpHtoGBn2rvI9oyQp7RObcY", - "uPYnsoEGBiEplt4iJMgMdLpwAbEtvEZ4j8+l3c94NlVSu2qRBbeom8o1zGuGFb4GBsoHsYpolQMZodCf", - "fFpL85nNMN1BlSfdFwxEexPzNXSq8jmuyBhT3r3ex82aXdwJyhf15F1pC9FyFUbNWwTvhuAO5/vW12pB", - "jLlF/Bkc8P07MSFvRB2W7dKE/xntiZ/z2v7cC3ojOFjDuRFrLS3e2kgrmQL184gUn4/DPk6qhK0Xli/2", - "fNWhjULGD7bmz0ZBY8jtbSb7Kq/wH6L1PBu3jFnbZGuygXq0IczZNLS5VZtZVW/wiXIj/PQLfLfcBMe6", - "HhaDh9TzGScW8KtlOpjixhLzXpVQs48DxXMUD+ZGWlS+ZdG0wlPIBZ+rL5MVbaKOOF4iVFJlb46naP7r", - "nd0XmD2HC5+o0uVTUoynYKtq+cLKS6aU84B8vP+364NQs6XPQcfDUNIb5i5P9h9d3/THIM9YCuQEloWQ", - "VLJ8TX7mVQW0y3A7TEBd5Tfzqt5oznE0JTXzbqVhkqCLM8GGP9pHvWLZp+3MMMiRtyMfZDzgg2HeQ1oU", - "QOXFGeB2u9RJa8ajl6HLbyMvcpWxKgKKQdGOXu//MRqod8IodDFzl1/JLaA+u5ZjE84fV8zGleeLkQLE", - "7IC85/eJWtAnDx7+9vDJU//nwydPezRnZh6XFKerO6sHMp/tMEMUaF+uru9qRfIKeQfXvZW77dB4xLJV", - "NAlqXeggPBfOMQf5xB1FCrruzZ1cbCnUEA5bF224/kyBSrPpIvp48m+bqpTjEX9ePXFtOjtX3+C2QENP", - "uEPARAyh1ZUaKqxvLtqwQVRskWWVHf+6X551WIC9xTzyZOtCuVEpVt/UCzTBByhwL7U00XJzAiMm6h0H", - "huqqOC56nZRFIaSuTreaDJLloM/g1hDl+gh3J0ktpTpdlMXeR/wPpsf6VIcK2HJ0gYXO/W4LR+9Z+/sm", - "Ie7YtrjkndiSlq3VXzaZk8/U5nwCxIy8ZqkUh5j/2V03aq00LDvp9FzX3zaVJI5eTYLnjEOyFDyW5O0n", - "/PoaP0YzTAtN877OJ+ZjX98Wc2zC3wKrOc8QznhZ/H4h7+xL6Ydaq5VgjnFdtMfS/45HzR+aNU+7J2nN", - "0+4xa1QO6vl572PjT+d941qqRakzcR70xded5UVDDO9B8unhSvHqwdNK4qxIBsoQ7dengQrwEDsx1ddI", - "9q8gxXhvArC/qE5qxnjWIhKUKFNxBlJV2grpHWVuFVN/HsXU4H3ficfaVJbbOFqprlYieSMysOM2s8fG", - "Aj25yMBl3OwKIpUMFn/v+1upbtd6gaW0nC80KQuiReytV3dMaGqZrK0tprYVY7KtfNGRMyA0l0CzNZkC", - "cCKmZtHNonaEKnRy9w9GJ2nGawrVcBVSpKAUZIkPbN0GWpXHFJ+XegOeEHAEuJqFKEFmVF4a2NOzrXBW", - "ub8VufvjL+reDcBrRcHNiLWutRH0Vh4+TtrrQj1s+k0E1548JDsqgXjRAPVbYlnk4DRcERTuhJPe/WtD", - "1NnFy6MFVUDsM1O8n+RyBFSB+pnp/bLQlkVi7u9I1TP79YQtURLjlAsFqeCZ6q9NuI0tY/2NYC3KrCDg", - "hDFOjAP3PDhfUaXfOUtGWMIpqPNhpthQTLEvx7wZ+Zcqw3xn7NTch1yVqkpD7xQYkMXWwGG1Ya43sKrm", - "QlOSH7vSkGhBSgXbRu7DUjC+Q5YKqyPqwAaEVTi6i8NsJNQpKLqobABRI2ITIMe+VYDd0D7RAwhTNaKr", - "kmdNypkKkQPlVtEsisJwC52UvOrXh6Zj2/pQ/1y37RKXqwSE93YmQIXaKwf5ucWswnCLBVXEwUGW9NQp", - "uOYuW1MXZnMYE7Q6J5so3xzLY9MqPAJbD2lZzCXNsGoejahSfrafif28aQDccU+eWJI0mcIsWtXDbHpN", - "ybJXRVQNLXA8FRMesYKpIqk5gjMsM+MJxPXeMnIGPeVTT4KSbq45zhXdIj8eLttudY9ayoxhdtySA0Ls", - "GPoQeHvQUI18cUxg56TWHrSn+DsoN0ElRuw+yRpU3xLq8XdaQFubF95fjYuixd1bDDjKNXu52BY20ndi", - 
"Y/rDrzIcr222/YwOZ039afD+m1zkbbt3TplOZkK6otB0pkFGVHmtMgKUaR/tZw0oWjh3CIIjuGvTjeOK", - "C9cpMxwTsSAQX8+SLSMZeMxU3wk5KGSn6btGmSYl1ywPwparl/KXpy+81QHc6gBudQC3OoBbHcCtDuBW", - "B3CrA7jVAdzqAG51ALc6gL+sDuCmwvQSL3B4/2YueMJhTjU7gyp+7zZt0J8qrKW6qrxOArUY55Rpl4ST", - "UC8G4JfLRfVpoDnigOXIYwuherMbYSVlJUqZAkkNhIyTIqfmaQArXaWEayYb9emPXS1lzF9KFTx6SI5/", - "OPQO+gvnSN5se/fQpRFXep3DPZeXoSp26hM0ADdId/kZqL8SfOo4l0iP5UCUQe+32PolnEEuCpDW95do", - "WUY0PidA8xcON1sUPo1ylma038cNPZND25IWQc14XCtVhGIwR6sa5Yzmqr8cpR1vSYtY9rbq4rOqIOQm", - "z0W2bp0Qs2t7uIHNs1G76TNO5ToSf9M5ER3S0MLwK0dYXV3WpysPJukSbZfMtlFYTFqXoKLneBOVR6Mo", - "qg3rDGUjeWYtOonWYm6HDowqAIc4wBp69ntC3tl+NxuHjhC5I1Yz8y/Gb7DZsmIa2NY8Ihzr+VqDxj3i", - "o6cXz/7YEHZWpkCYVsTHo2y/XsajVWJGmgNPHANKpiJbJw32NWrcQhlTVClYTrffRCH/dPmK3eVjvmy+", - "p27mGnkZLG4TTw6JZpU4BtzDndcaBvPmCls4omPPAcY/N4vuY6MhCMTxp5hSqV0lZkemV0+zvmV8t4wv", - "OI0tiYBxF7/XZiKTz8j45FqWvJ/nfbuCtDTAhSf5Lmrn0SQHK92wa2YwLedzzLvcsdGZpQGOxwS/IVZo", - "lzuUC+5GQXbwKhfnZTNEtYfrcpcgVu2ukGQuRVncswWm+BqNGcuC8rU3+UKi2LLMLQ5tVrurZbQ2xK7r", - "CIDmWKf769Nqv/Uqv0B3667a5u8WLeScKmL3FzJS8sxFDnUCcVd8eM5nO/TJitdsemPWZ7veyOrcvEOu", - "CL/LLsSlMnMXIBO94vZANROz24Bfe3Int/lm/xrXxltbyK2HwXaDV2uGcEW3hwz4Gl4fQf6ROhSuWSXL", - "1vDrCxwJk5HYllfqPNIZvulDElTQszZSyAtCfTGAVHClZZnq95yijSZY2KTrX+K10f387YVvEjcTRqx4", - "bqj3nGKu+MpyE+VzM4iYKb4D8GxUlfM5KMMrQyKZAbznrhXjpOTmpSVmZMlSKRIbhmrOkJFPJrblkq7J", - "jOZoZPwDpCBTc7MHu24VxkqzPHcOLWYaImbvOdUkB6o0ec0MlzXD+URhlScX6HMhTyssxNNXzIGDYiqJ", - "K1++t18xQ4RbvlfyocLSfq4ju683NYSHnWW9kB+9NHBTzHSTM6VrH4gO7Ndm/14ynkSJ7GQBxLmEtWmL", - "3DWM1xPQvaZ1SC/gPTc3nBYEuTrVFyOHtpmncxbt6WhRTWMjWtYgv9ZBT7wr4TIkwmRuTSt/osDMgA68", - "+RI3HqvItPd+RzPKxsKUsa8uXVhPI/dIAP/ZniK8482yIC0l02u0Q9CC/XYK5v8fPn0w3+SZN1GUMh8d", - "jBZaFwd7e1hxciGU3ht9GoffVOvjh2rlH721oZDsDHNUf/j0/wMAAP//y2yOEiA2AQA=", + "H4sIAAAAAAAC/+x9a3PbuJLoX0FptyqPFeW898RVU3udZB7eSTKp2DO754xzZyCyJeGYAngA0JYmN//9", + "FhoACZKgRNmOncz4U2IRj0aj0Wj08+MoFctCcOBajfY/jgoq6RI0SPyLpqkouU5YZv7KQKWSFZoJPtr3", + "34jSkvH5aDxi5teC6sVoPOJ0CXUb0388kvCvkknIRvtaljAeqXQBS2oG1uvCtK5GWiVzkbghDuwQh69G", + "nzZ8oFkmQakulD/xfE0YT/MyA6Il5Yqm5pMi50wviF4wRVxnwjgRHIiYEb1oNCYzBnmmJn6R/ypBroNV", + "usn7l/SpBjGRIocunC/Fcso4eKigAqraEKIFyWCGjRZUEzODgdU31IIooDJdkJmQW0C1QITwAi+Xo/1f", + "Rwp4BhJ3KwV2hv+dSYA/INFUzkGPPoxji5tpkIlmy8jSDh32Jagy14pgW1zjnJ0BJ6bXhLwplSZTIJST", + "99+9JI8fP35uFrKkWkPmiKx3VfXs4Zps99H+KKMa/OcurdF8LiTlWVK1f//dS5z/yC1waCuqFMQPy4H5", + "Qg5f9S3Ad4yQEOMa5rgPDeo3PSKHov55CjMhYeCe2MZXuinh/De6KynV6aIQjOvIvhD8SuznKA8Lum/i", + "YRUAjfaFwZQ0g/76IHn+4ePD8cMHn/7t14PkH+7Pp48/DVz+y2rcLRiINkxLKYGn62QugeJpWVDexcd7", + "Rw9qIco8Iwt6hptPl8jqXV9i+lrWeUbz0tAJS6U4yOdCEerIKIMZLXNN/MSk5LlhU2Y0R+2EKVJIccYy", + "yMaG+54vWLogKVV2CGxHzlmeGxosFWR9tBZf3YbD9ClEiYHrQvjABX25yKjXtQUTsEJukKS5UJBoseV6", + "8jcO5RkJL5T6rlK7XVbkeAEEJzcf7GWLuOOGpvN8TTTua0aoIpT4q2lM2IysRUnOcXNydor93WoM1pbE", + "IA03p3GPmsPbh74OMiLImwqRA+WIPH/uuijjMzYvJShyvgC9cHeeBFUIroCI6T8h1Wbb//vop7dESPIG", + "lKJzeEfTUwI8FRlkE3I4I1zogDQcLSEOTc++dTi4Ypf8P5UwNLFU84Kmp/EbPWdLFlnVG7piy3JJeLmc", + "gjRb6q8QLYgEXUreB5AdcQspLumqO+mxLHmK+19P25DlDLUxVeR0jQhb0tU3D8YOHEVonpMCeMb4nOgV", + "75XjzNzbwUukKHk2QMzRZk+Di1UVkLIZg4xUo2yAxE2zDR7Gd4OnFr4CcPwgveBUs2wBh8MqQjPmdJsv", + "pKBzCEhmQn52zA2/anEKvCJ0Ml3jp0LCGROlqjr1wIhTb5bAudCQFBJmLEJjRw4dhsHYNo4DL50MlAqu", + "KeOQGeaMQAsNlln1whRMuPm9073Fp1TBsyd9d3z9deDuz0R71zfu+KDdxkaJPZKRq9N8dQc2Llk1+g94", + "H4ZzKzZP7M+djWTzY3PbzFiON9E/zf55NJQKmUADEf5uUmzOqS4l7J/w++YvkpAjTXlGZWZ+Wdqf3pS5", + "Zkdsbn7K7U+vxZylR2zeg8wK1uiDC7st7T9mvDg71qvou+K1EKdlES4obTxcp2ty+Kpvk+2YuxLmQfXa", + 
"DR8exyv/GNm1h15VG9kDZC/uCmoansJagoGWpjP8ZzVDeqIz+Yf5pyhy01sXsxhqDR27KxnVB06tcFAU", + "OUupQeJ799l8NUwA7EOC1i328ELd/xiAWEhRgNTMDkqLIslFSvNEaapxpH+XMBvtj/5tr9a/7Nnuai+Y", + "/LXpdYSdjMhqxaCEFsUOY7wzoo/awCwMg8ZPyCYs20OhiXG7iYaUmGHBOZxRrif1k6XBD6oD/Kubqca3", + "lXYsvltPsF6EE9twCspKwLbhHUUC1BNEK0G0okA6z8W0+uHuQVHUGMTvB0Vh8YHSIzAUzGDFlFb3cPm0", + "PknhPIevJuT7cGwUxQXP1+ZysKKGuRtm7tZyt1ilW3JrqEe8owhup5ATszUeDUbMvwqKw2fFQuRG6tlK", + "K6bxD65tSGbm90Gdvw4SC3HbT1z40HKYs28c/CV43NxtUU6XcJy6Z0IO2n0vRjZmlDjBXIhWNu6nHXcD", + "HisUnktaWADdF3uXMo6PNNvIwnpJbjqQ0UVhDs5wQGsI1YXP2tbzEIUESaEFw4tcpKc/ULW4gjM/9WN1", + "jx9OQxZAM5BkQdViMopJGeHxqkcbcsRMQ3zgk2kw1aRa4lUtb8vSMqppsDQHb1wssajHfsj0QEbeLj/h", + "f2hOzGdztg3rt8NOyDEyMGWPszMyZOa1bx8IdibTALUQgiztA5+YV/dOUL6sJ4/v06A9+tbqFNwOuUXg", + "DonVlR+DF2IVg+GFWHWOgFiBugr6MOOgGKlhqQbA98pBJnD/HfqolHTdRTKOPQTJZoFGdFV4Gnh445tZ", + "auXswVTIi3GfFlvhpFY5E2pGDZjvuIUkbFoWiSPFiNrKNmgNVFv5NjON9vAxjDWwcKTpZ8CCMqNeBRaa", + "A101FsSyYDlcAekvokx/ShU8fkSOfjh4+vDRb4+ePjMkWUgxl3RJpmsNitx1bzOi9DqHe92V4euozHV8", + "9GdPvKKyOW5sHCVKmcKSFt2hrALUikC2GTHtulhrohlXXQE45HAeg+HkFu3E6vYNaK+YMhLWcnolm9GH", + "sKyeJSMOkgy2EtOuy6unWYdLlGtZXsVTFqQUMqJfwyOmRSry5AykYiJiTXnnWhDXwou3Rft3Cy05p4qY", + "uVH1W3IUKCKUpVd8ON+3Qx+veI2bjZzfrjeyOjfvkH1pIt9rEhUpQCZ6xUkG03LeeAnNpFgSSjLsiHf0", + "96CP1jxFrdpVEGn/M23JOKr41ZqnwZvNbFQO2byxCZd/m7Wx4vVzdqo7KgKOQcdr/IzP+leQa3rl8kt7", + "ghjsL/1GWmBJZhriK/g1my90IGC+k0LMrh7G2CwxQPGDFc9z06crpL8VGZjFluoKLuN6sJrWzZ6GFE6n", + "otSEEi4yQI1KqeLXdI/lHk2GaOnU4c2vF1binoIhpJSWZrVlQdCO1+EcdceEppZ6E0SN6rFiVOYn28pO", + "Z63CuQSamVc9cCKmzlTgjBi4SIpGSO0vOickRM5SA65CihSUgixxKoqtoPl2lonoDXhCwBHgahaiBJlR", + "eWlgT8+2wnkK6wRN5orc/fEXde8G4NVC03wLYrFNDL3Vg8/Zg7pQD5t+E8G1Jw/Jjkognuea16VhEDlo", + "6EPhTjjp3b82RJ1dvDxazkCiZeazUryf5HIEVIH6men9stCWRY8jmHvoHLMl6u045UJBKnimooPlVOlk", + "G1s2jRqvMbOCgBPGODEO3COUvKZKW2si4xkqQex1gvNYAcVM0Q9wr0BqRv7Fy6LdsVNzD3JVqkowVWVR", + "CKkhi62Bw2rDXG9hVc0lZsHYlfSrBSkVbBu5D0vB+A5ZdiUWQVRXSndnbu8uDlXT5p5fR1HZAKJGxCZA", + "jnyrALuhM0wPIEzViLaEw1SLcioPnPFIaVEUhlvopORVvz40HdnWB/rnum2XuKiu7+1MgEIfHNfeQX5u", + "MWvdoBbUPKFxZLKkp0b2wAexNXt2YTaHMVGMp5BsonxzLI9Mq/AIbD2kZTGXNIMkg5yuu4P+bD8T+3nT", + "ALjj9cNHaEisP0t802tK9u4DG4YWOJ6KCY8Ev5DUHEHz8qgJxPXeMnIGOHaMOTk6ulMNhXNFt8iPh8u2", + "Wx0ZEW/DM6HNjltyQIgdQx8Cbw8aqpEvjgnsnNTPsvYUfwflJqjEiN0nWYPqW0I9/k4L6FGmOU/h4Li0", + "uHuLAUe5Zi8X28JG+k5sj2bvHZWapazAp86PsL7yl197gqi9iWSgKcshI8EH+woswv7EOmK0x7zYS3CQ", + "EqYLfkcLE1lOzhRKPE3gT2GNT+531sPvOPALvIKnbGRUcz1RThBQ7zdkJPCwCaxoqvO1kdP0AtbkHCQQ", + "VU6XTGvrudt86WpRJOEAUQX3hhmdNcd6x/kdGGJeOsKhguV1t2I8sk+CzfAdt94FDXS4p0AhRD5AedRB", + "RhSCQYZ/Ugiz68w5EXs3Uk9JDSAd00ZTXnX731ENNOMKyN9FSVLK8cVVaqhEGiFRTkD50cxgJLBqTmfi", + "rzEEOSzBPiTxy/377YXfv+/2nCkyg3PveW8attFx/z6qcd4JpRuH6wpUhea4HUauD9T8473nnBdaPGW7", + "idmNPGQn37UGr8wF5kwp5QjXLP/SDKB1MldD1h7SyDDzOo47SKkfDB1bN+77EVuWOdVXYb7YKI9W7wm2", + "XELGqIZ8TQoJKVjvaiNgKQuLAY1Yv6t0Qfkc5Wopyrlz/LHjIGMsldVgyJJ3hogKH3rFk7kUZRFjlM7Z", + "0zvYG7EDqHn5BIjEzlbOP6fVfC6mYsgN5hEe7M73Zsw+q8J41PswNEg9qx+GFjnNKIE4FjDsIVFlmgJE", + "XYBjT65qqa1oyDq+xQ1oxIZSWh8oQlNd0jykOnI4I5Svm2GSlOXKcEGmCLYznWu/2rFdm49hmdHc2mYj", + "QRXhSWlIfMHO1yhto2Kg3QGJxEhDXcoICdAcL0PGn0eHXw8dg7I7ceB0VX/s87sy7+98fQVikB2ISCgk", + "KLy0Qr2Vsl/FLIx9creaWisNy65q33b9rYfRvO99QAqeMw7JUnBYR8N9GYc3+DHKOPDi7OmMIkxf3/ar", + "pAF/C6zmPEOo8bL4xd0OeNG7yuHwCja/PW7LqhNGfaHWEvKCUJLmDHWagisty1SfcIpak+CwRRwz/Puw", + "X4/20jeJK+4iejU31Amn6JRT6VKixuQZRBQH3wF4dZoq53NQLf5JZgAn3LVinJScaZxrafYrsRtWgETv", + "iIltuaRrwwJR7fcHSEGmpW7yZIw8UdqwS2tiMtMQMTvhVJMczJv6DePHKxzOm2g9zXDQ50KeVliIXyFz", + 
"4KCYSuIOJN/br+jb55a/cH5+GClsP1ujhBm/Dk9Zo1Kljn79v3f/a//Xg+QfNPnjQfL8P/Y+fHzy6d79", + "zo+PPn3zzf9r/vT40zf3/uvfYzvlYY/FRTjID1+5x9rhK5TIa6tEB/Zr00gvGU+iRBba3lu0Re5iDKAj", + "oHtNfY1ewAnXK24I6YzmLDMi10XIoc3iOmfRno4W1TQ2oqWf8WvdUc69BJchESbTYo0Xvsa7PlfxCCQ0", + "k7mgIjwvs5LbrfSCrnWw974vYjauosxsAop9giFIC+odt9yfj54+G43r0KHq+2g8cl8/RCiZZauodAir", + "2PPFHRA8GHcUKehaQY8AirBH3Xyst0E47BLMu1ctWHH9nEJpNo1zOO+27NQgK37IrT+xOT9odFs7Xb6Y", + "XT/cWho5vNCLWGB6Q1LAVvVuArQcIQopzoCPCZvApK2GyMzTzDkc5UBnGCCNDz0xJAyjOgeW0DxVBFgP", + "FzLorR+jHxRuHbf+NB65y19duTzuBo7B1Z6zsrD5v7Ugd77/9pjsOYap7thYRTt0EF0WebW6AIqGi4zh", + "ZjYdhw3WPOEn/BXMGGfm+/4Jz6ime1OqWKr2SgXyBc0pT2EyF2Tfx2S8opqe8I6k1ZsxJ4iGIUU5zVlK", + "TkOJuCZPmwWhO8LJya80n4uTkw8db4Gu/OqmivIXO0FyzvRClDpxMdyJhHMqY9YYVcXw4sg2ScOmWcfE", + "jW1ZsYsRd+PHeR4tCtWO5esuvyhys/yADJWLVDNbRpQW0ssiRkCx0OD+vhXuYpD03KswSgWK/L6kxa+M", + "6w8kOSkfPHgMpBHc9ru78g1NrgsYrMjojTVs6y9w4fZdAystaVLQeczqc3LyqwZa4O6jvLzER3aeE+zW", + "CKrzTsM4VL0Aj4/+DbBw7BwghIs7sr18vp74EvATbiG2MeJGbYq+6H4FYXYX3q5WqF5nl0q9SMzZjq5K", + "GRL3O1Ol8ZgbIcv7Byg2Rx9Ml/FkCiRdQHrqUlHAstDrcaO7d0FxgqZnHUzZJCU2SAbD5FFnPgVSFhl1", + "onhbgzRdEwVaeyfQ93AK62NRR9nvEqDcjJdVfQcVKTWQLg2xhsfWjdHefOfnhCquovBhpxh/5Mliv6IL", + "36f/IFuR9woOcYwoGvGcfYigMoIIS/w9KLjAQs14lyL92PLMK2Nqb75IwhLP+4lrUj+enEtSuBpUcNvv", + "S8CMR+JckSk1crtwyXpsTGjAxUpF59AjIYdmi4GRlw1TBw6y7d6L3nRi1r7QOvdNFGTbODFrjlIKmC+G", + "VPAx03JE8zNZy5gzAmAOPoewaY5iUuWxZ5kOlQ3zkU0q1gdanIBB8lrg8GA0MRJKNguqfB4hTLfkz/Ig", + "GeAzxjhvymwRKvSDnEqVft3z3PY57bwuXX4Ln9TCZ7IIn5YDslIYCR/dtmPbITgKQBnkMLcLt409odTx", + "1vUGGTh+ms1yxoEkMXcsqpRImU0EVV8zbg4w8vF9QqwKmAweIUbGAdho8cWByVsRnk0+3wVI7uLFqR8b", + "bcXB3xAPbbEOykbkEYVh4azHgJR6DkCdD191f7U8SXEYwviYGDZ3RnPD5tyLrx6kk2ABxdZWOgXnc3Cv", + "T5zdoIG3F8tOa7JX0UVWE8pMHui4QLcB4qlYJTa2LSrxTldTQ+9Rn22MtIsdTJvK4o4iU7FCPxa8WqyP", + "8BZY+uHwYAQv/BVTSK/Yr+82t8BsmnazNBWjQoUk49R5Fbn0iRNDpu6RYPrI5W6QneJCALSUHXWqV/f4", + "3fpIbYon3cu8vtXGddYlHw4TO/59Ryi6Sz3462phqnwS79oSS1RP0XTHaKbSCETIGNEbNtE10nRNQQpy", + "wEdB0hCiktOY6c68bQBvnCPfLVBeYMIOytf3Ah8fCXOmNNRKdO+ScBPqSYp5woSY9a9OF3Jm1vdeiOqa", + "solosGNjmde+AvSRnTGpdIIWiOgSTKPvFD6qvzNN47JS04vIZtVkWZw34LSnsE4ylpdxenXz/vjKTPu2", + "YomqnCK/Zdz6hkwxC2zUt3DD1Nb9dOOCX9sFv6ZXtt5hp8E0NRNLQy7NOb6Sc9HivJvYQYQAY8TR3bVe", + "lG5gkEFIaJc7BnKTPZwYEjrZpH3tHKbMj73VbcQHpvbdUXak6FoChcHGVTA0ExmxhOkgiWo3VrPnDNCi", + "YNmqpQu1o/a+mOlOCg+feqqFBdxdN9gWDAR6z1i4iATVzDJWC/g2HW4jycdkEGaOm7nAQoYQTsWUT+be", + "RVQVTrYNV8dA8x9h/Ytpi8sZfRqPLqc6jeHajbgF1++q7Y3iGU3zVpXWsITsiHJaFFKc0TxxCuY+0pTi", + "zJEmNvf66GtmdXE15vG3B6/fOfA/jUdpDlQmlajQuypsV3w1q7IJzXoOiE8Wbd58Xma3omSw+VUWplAp", + "fb4Al3U3kEY76QFrg0NwFJ2Sehb3ENqqcna2EbvEDTYSKCoTSa2+sxaSplWEnlGWe72Zh7bHmwcXNyzH", + "ZJQrhANc2roSGMmSK2U3ndMdPx01dW3hSeFcG/ICL23qa0UEb5vQ0b14XTir+5Jicj+rFekyJ14uUZOQ", + "qJylcR0rnypDHNzazkxjgo17hFEzYsl6TLG8ZMFYppka8NBtARnMEUWmTxTZh7upcGVNSs7+VQJhGXBt", + "Pkk8la2DitkUnba9e50a2aE7lxvYaujr4S8jY4SJLds3HgKxWcAILXUdcF9VT2a/0Eojhe7WtUliB4N/", + "OGPnStxgrHf04ajZOi8umha3sApJl/8ZwrDpqLeXQPGPV5dhs2eOaEkTppKZFH9A/J2Hz+NIKI5P5cnQ", + "y+UP4AN8zmvtTl2ZpZ69d7v7pJtQC9V0Uuihetz5wCyHOQW9hppyu9W2wkDD1y1OMKFX6Z4dvyYYB3PH", + "Ezen51MaS7hohAwD00FtAG7o0rUgvrPHvaoCG+zsJLAlV22ZjbIuQNZRct2MLRcUGOy0g0WFWjJAqg1l", + "grG1/+VKRIYp+TnltlCF6WePkuutwCq/TK9zITFHgoqr/TNI2ZLmcckhS7sq3ozNma3BUCoIkvy7gWx9", + "G0tFrlBCFa7jUHM4Iw/GQaURtxsZO2OKTXPAFg9tiylVyMkrRVTVxSwPuF4obP5oQPNFyTMJmV4oi1gl", + "SCXU4fOmMl5NQZ8DcPIA2z18Tu6i2U6xM7hnsOju59H+w+eodLV/PIhdAK6GxiZukiE7+R/HTuJ0jHZL", + "O4Zh3G7USTSc3BbR6mdcG06T7TrkLGFLx+u2n6Ul5XQOcU+R5RaYbF/cTVSktfDCM1sBRmkp1oTp+Pyg", + 
"qeFPPd7nhv1ZMEgqlkuml864o8TS0FOdwd9O6oez5WRc8lUPl/+INtLCm4haj8jrVZra+y22arRkv6VL", + "aKJ1TKhNjJGz2nvBp4Qmhz7vDmajrZLQWtyYuczSUcxBZ4YZKSTjGh8WpZ4lfyPpgkqaGvY36QM3mT57", + "EsnA28wEyXcD/NrxLkGBPIujXvaQvZchXF9ylwueLA1Hye7V0R7Bqew15sbNdn22w81DDxXKzChJL7mV", + "DXKjAae+FOHxDQNekhSr9exEjzuv7Nops5Rx8qCl2aGf3792UsZSyFgyvfq4O4lDgpYMztB3L75JZsxL", + "7oXMB+3CZaC/WcuDFzkDscyf5dhD4IWIvE59VuhKk+581SPagb5jaj4YMpi6ocakmYH3+o1+XvncNT6Z", + "Lx5W/KMN7A1vKSLZr6BnE4Ps4NHtzKrvgf2bkhdiNXRTWyfEb+wXgJooSkqWZ7/UUZmt5OuS8nQRtWdN", + "Tcff6jJR1eLs/RTNWbegnEMeHc7Kgr95mTEi1f5TDJ1nyfjAtu188Ha5rcXVgDfB9ED5CQ16mc7NBCFW", + "mwFvlUN1PhcZwXnqBGk19+zWEQiyPf+rBKVjwUP4wTp1od7SvHdtsmECPMPX4oR8byvBLoA00t/gK63K", + "IuBS31qFelnkgmZjTORw/O3Ba2JntX1ssROb7HiOj5TmKlr6qiD34zD3YF+3JB66MHyczb7UZtVKYzYq", + "pemyiAWHmhbHvgFGoIY6fHy+hNiZkFdBTUcbR2qGMPQwY3JpXlzVaFZ2QZow/9Gapgt8kjVYaj/JD8/S", + "7alSBZXxqgo3VUJEPHcGbpeo2+bpHhNh3s3nTNkCoHAGzXjUKjjbqQR8fGpzebLk3FJKVPbYlDzgImj3", + "wFlHDa/mj0LWQvyOArlNcr9r0vIj7BVN0NTOgN4piWejG6vKJb6wc0q54CzF9Eixq9lVCh1iAxuQSaqt", + "ZPVH3J3QyOGK5l2v3OQcFnszsXtG6BDXVcIHX82mWuqwf2osSbmgmsxBK8fZIBv78gFOD8i4ApfgEuvK", + "BnxSyIZdETlk1FSdVCaNHckIw2J6HnbfmW9v3bMf/cVPGUcB36HNuaZbTR0WMtTmVcA0mQtQbj3N2GD1", + "q+kzwTDZDFYfJr7woc0Gg2Y5s2xrg+4OdeAt0s4CbNq+NG1dnqDq54YHsp30oCjcpP3FJaLygF7xXgRH", + "LIuJN+0EyK3GD0fbQG4bXUnwPjWEBmdoiIYC7+EOYVSFFlpFfIzQaikKWxDrwhXNYMB4BIzXjENdljNy", + "QaTRKwE3Bs9rTz+VSqqtCDiIpx0DzdH6HGNoSjvTw2WHaucSMijBNfo5+rexrhHRwziqBrXgRvm6qgZq", + "qDsQJl5iGWKHyG7FB5SqnBCVYURBqwZEjHEYxu2rzDQvgO4x6MpEtruW1J6cXW6iviDRaZnNQSc0y2IZ", + "qV7gV4JffXIpWEFaVokpi4KkmBOlmSSmS21uolRwVS43zOUbXHK6oKhKhBrCwi5+hzEIZbrGf2NZGft3", + "xjlh7OwG6D0uXBWKHeXm5kgdqdfQdKLYPBmOCbxTLo+OeuqLEXrd/0opPRfzJiDXnBpiE5cL9yjG3741", + "F0eYOaGTatReLVViA3S6E74UHj4bq5DcJlfCq6yTexSNPVWprc0KiP6iWWO8/Hpcb4OEGNTer9Z62OeA", + "m/b6i1PtItc0JRtZUG80kPXesXE/CEVcc9rnsWMddsznTu9hkmFHzsaxNyLUu4J1AfrR+5mSgjJnGq+Z", + "RRezziO9X1246dDVG9xehPPz7tXY/XjW55NNFOPzHAh+b5cZOgUXzl7Vmbdr9V5J/klof3VlXu14lVd8", + "dP1d7wSc6mbVoL1K22OX0t4u073Jf/zF+rAR4FquvwAVbmfTO0WautKuVU/VTUiVDnlQeuTGrRivt9Sf", + "/6jOeYT0VAjF6hTcsUJMA33djrGWUpC/qTuWdzQ5g1Rj3vXagC4BdsnmZCYLivzd5kHqeTtWLoEu/dGm", + "nEfdZOtbLrROWFIQWmcTVU+GZ/g5qNykkClhBtw5cFdnrxlwMNjteTaDVLOzLWFg/7MAHoQYjb0SwtbL", + "DaLCWOVGi1lEdlex1QBtitLaCE+Qze/S4PQFgZzC+o4iDWqIZs4e+3vlIgkkEAPIHRJDIkLF3BCs1tRZ", + "hpmqKAOx4N1+bHeoU3H11twJghovOJcnSXPj1oGOG6aMF/0YNJfpulP4L3qE9kWKdYsG9Avbr7BGg6rq", + "4fkEFOGTlBx20/SduwQWGLRXGQp8KgtQ/jcfoWtnydkphFWB0CxzTmXmW0T1DF6FkWy4jzrhXT7hfRvo", + "WTUzq500uwE9kcRP6Iqb5sLIX0mfP3PTLzIsno/eHzblN3p8GrhmIF31NBT2cqEg0cI7dW6CYxMqXKH3", + "iyBB9SZbtMD1pkB5X+d4waSzFFOeUOfZEi6QSFhSA50MMrH0z7kJ2S/tdx/B4pOOblWnVPS6PdG8d89l", + "qoPEkOpnxN2W2yNjLqJZYZzbWq0qlpaFG1SGqv9CiqxM7QUdHoxK+zQ46dEGVhJVSqTdVXbelzmmAHsd", + "xBmewnrPiv4+Vb/fyhB6K0LZNQRx/a3dvlKlU/x9nc/tAuZXAudNKm7Go0KIPOnR9R92s8u0z8ApS08h", + "I+bu8I5tPWVLyF1UMVfG3PPF2mdTKQrgkN2bEHLArSuxt+s20xu3Jud39Kb5VzhrVtqET06nNDnhcZ9M", + "TMUkL8nf/DCbuZoCw/wuOZUdZEvuklVPZhtJzyNFfCZDH6VdS2u7sEpNVBaKmJSypYRFxIrsayL4Chs+", + "YkWLJUu7VRQ6osQMq1ElNDL4YcXAx41agaxVuMPnGLJlGlJqBTjzeKAsLyW4yAFbNqeVTr+geuG3zzTv", + "ilnmygaFbv02JTtV9lHgHyeuZk/7XIgiyeEMGoYEF85Qpikoxc4grPdjO5MMoMCnevsCiWnIQ7pq8RC3", + "9iTQsQ7BbpSpWMTanSJbOEZPMfbEkocaSkIGojOWlbSBP3WJUiwDa7uHsA48ITsfjvjiOkfDlUtJqmRu", + "MUWmCyfxW2joty7t0hKQghIs1Zg9tSgrLFxGGOlFbRyzF0uiMYgeujrtyJEJyq5s1ryEOXZq511pTSP4", + "UvOnrr2lb+rTOKwAjO+wBbxQIReUgPGSkAPnhj1s31RICZbSSwmN5W/T8bkF1uwr2CLLu80ybcYz653V", + "3JdAgateVnrRvrpMbfUpJtQRHJOMddWuCk1lmKs8JBzDu+UZza9fdYqZlg4QH67ObXyhoe4tRLJFpbqY", + 
"m9trOmjuQM92dVPzd6jq/R8wexS1cbqhnM2jkhW8ZQhZJs1JLuoKdzgkOccxrVH04TMydSE6hYSUKdaK", + "Xjz3aZQrVRNWFajLH2/WbW1b5y9CX4KMZ158IW/rlKxa4I1RQ1gf0RtmKj0nN0rlMerrkEUEfzEeFebK", + "2HJdnDaspTbFdcsNUEi4Yqtp4P+0o9W0mwVk6PKsZdBcOqWC7joH39YN3EYu6nptQ03+XeRuyts5xFIf", + "T8druqOrgEUI5rImCCr5/eHvRMIMi9UIcv8+TnD//tg1/f1R87M5zvfvx8ssX5eTgMWRG8PNG6OYX/rc", + "xq1rdE+EQms/SpZn2wijEW9Sl3vCiIrfXMTZjRSc+s3acrpH1RX92MU9qb0JiJjIWhuTB1MFkSQDgkhc", + "t0jICGpF0lIyvcZEOF71z36LujN8X1kLnbW5Sp3g7j4tTqFKpVTbFkvlb9fvBc3xPjIyNTqHaSyt++2K", + "Losc3EH55s70P+Hx355kDx4//M/p3x48fZDCk6fPHzygz5/Qh88fP4RHf3v65AE8nD17Pn2UPXryaPrk", + "0ZNnT5+nj588nD559vw/7xg+ZEC2gI582PXof7EqW3Lw7jA5NsDWOKEFqypqGzL2pWVoiicRlpTlo33/", + "0//xJ2ySimU9vP915KI6RwutC7W/t3d+fj4Ju+zN0ZiQaFGmiz0/T7eS8bvDKjLHPi1xR23QhVcZeFI4", + "wG/vvz06JgfvDidBpcz90YPJg8lDLKRYAKcFG+2PHuNPeHoWuO97jthG+x8/jUd7C6A52t7NH0vQkqX+", + "kzqn8znIiauxY346e7TnRYm9j86Q8smMOo+lCLIxRkFgSbf0jDPKoqOmjSFqpHJXLrP4uErw77QWPMPQ", + "D2ubMKytQtZhVmeyPawZlc/nYxMc7v8aqTw4Y3Pzjm5UVG3VajUv7/8++uktEZK4J807mp5WbjPkcGZz", + "M0hxxjCiIAvCUEzPiafZf5Ug1zVNOW4XJu/z+dpdnMZSzYumU3MtScUKisfK/ODMhhQCYq7MnjWz0rKE", + "EJKa9Rp2+iB5/uHj0799Gg0ABG3wCjC1w+80z3+3FWJhhYZMnxzJJb8YR3KTowQ9rs1o2KHeyTF6ZVdf", + "w/IzVZtmLNDvXHD4vW8bHGDRfaB5bhoKDrE9+IDJB5BY8Jw9evDgyupWVeFv1re7GsWTxAUG6jIh+ylS", + "itaXr+qpQ/vkChfadF699HLbw3UW/YJmWBIElLZLefjVLuWQoxuMuRSIvfQ+jUdPv+K9OeSG59CcYMsg", + "s0/3ovmZn3Jxzn1LI/CUyyWVaxRngrpFrdBaOldoUUEWac92o1LJ6MOn3ltvLyzEsPex4UmRXepO7NSg", + "OXy15Zq8o/o4ZzcvZqvOg/lepfFHW7srZoGFBdS9Cfk+7I3cG9NM2CQOpeSQeUcIf+tVebN8Nq4atjsq", + "zMARvbQDFfHt/X3T9/dBU8HRyL0YA6ZxCjbC1PHGuuwF2o2galXyu1ClvKDiwgXyVn/WckKt96Wd6UPs", + "+beVUd/irgd3fWJSAG8lMTUrZXx+1uwjFqqbpHFlfEbG/ZULfW9obugkWG4rmtsmJL0VBv8ywmDloGsL", + "8voc3JcTD7Eaz95Hn2T2CkRCl2R3gDAYPquDvkES1LstdnJvYjPGhm0uxjOcR+5WMQ9T/94KeF+AgNdN", + "qx0Do06WfHNCHcKwqPNu71JNt1Ema6f84F+pFPcXRlav2GYg3S6wXYB9doQxx6w/G1v9UwphDmm34tdf", + "Wvyq4mQuJYA1EuO7yKvAjHUp7V1bO8d0JYk1Y6UCzlaVIXZHeFwX8TEsBvMq+ZQaauxfhmhCtY9Gu1nj", + "zruxK2J9D+ED9cX68NU26eor0vMMzu8XuQXie/O5eWnU7PD+eswOw3jTkwdPrg+CcBfeCk2+w1v8M3PI", + "z8rS4mS1KwvbxJH2pjbz8CauxFtsCRlFnVE44FFYuCHMWmw9Mu66cplhJoh7E+LzG6uqWoPLPzAXNK/z", + "LFE5t50MjzNIIHf8n/s4/p0J+Q4jArQao2OZdqn8yR3G9f7DR4+fuCaSnlu/rXa76bMn+wfffOOa1dms", + "7fum01xpub+APBeug7sbuuOaD/v/+/d/TCaTO1vZqVi9WL+1qeO+FJ46jsUTVBvft1tf+SbFXukupd9W", + "1F2L2f6FWEW5v1jd3j43dvsY7P8pbp1pk4zcA7TSYDbi7a/wFrLHZJd7aOyzQxu+U10mE/JWuNQnZU4l", + "ETID6crbzEsqKdcA2cRTKkatKZvqIc0ZcG0ejFiwQyaKZWAjxuelhIzkbIkVbSWcoT88To9v+QYE2xk9", + "es1+sUz+DV0F6RCm1TWthVsyqjuXdOVLBmFRDCHxp2++IQ/G9aslz80ASYWYGHNd0tXoGrV9FbEN8jVv", + "ZvXf6oyLYw/RHNXSj63ORpspxP/anPurldgtubuNvSLOubPBpzbohPoDl2Bko+bACna2oBBWuFnX4bxG", + "yvMiVJzFmRmGKgW+YNvAVpV09PHZRu/tIb59/F+KlbQJake2gRGmau8j2jJCntE5txgh99cykwY2IymW", + "3mgkyAx0unDBuS3UR9iTrynQz5s2VZS8aqkGd7Gb0jrM74iVDgcmDAniJtFwBzJCxD/59L7mM5vZDBW+", + "XoQvnIomKeZriVVlxFyxRaa8H7+P4TW7uBOUL+vJuwIZouUq7J63CN4NwR3m+K2vWYUYc4v4M3j6+6dk", + "Qt6KOkTclUv4M5ocP+fN/rkX9FZwsLZ1I/laWrw1o1ZiB6rwESk+N4h9v1SJqy8sguz56msb5ZAfbO2z", + "jbLIkNvbTPZVXuE/ROsaN24Zs7bJ1sQH9WhDmLNpaHNMN7NL3+Ar5kb46Rf4tLkJjnU9LAYPqeczTizg", + "V8t0MN2OJea9KrFwHweK52ofzI20qNzPounVp5ALPldfJivaRB1xvESopMpiH09V/9c7uy8xk4958lrP", + "R5fbSTGegq0u6AvMu8RrFsK/XR+Emi19Lk4exqzeMHd5+uDx9U1/BPKMpUCOYVkISSXL1+RnXlWCvAy3", + "w0T8Va41rw2O1l5Aa1MzB1gaJiy6OBNsuKx91CuWfdrODIOMfTvyQcYDPhjmF6RFAVRenAFuN10dt2Y8", + "fBV6BTfyw1fZsyKgGBTt6Bj/H6OBeicMdxczd/mV3ALqM305NuFcdsVsXDnHGClAzPbJCb9P1II+ffjo", + "t0dPn/k/Hz191qM5M/O4BD1d3Vk9kPlshxmiQPuq1YFXK7VX+N2/7t3ebRPHI5atovmi65ownay5Tiy7", + 
"o0hB171p5ostNW3CYev6Ntef2FBpNl1E31f++VNVvT3kL6pXsM2+50rB3Nay6QmaCPiMIbS6qE2F9c31", + "bTZIky2yrAqJXPfjtA4usBedR55s3Tk3Kujqm3qkJvhGBe4FmyZabk6mxJzm48DcXdURR9+VsiiE1NXp", + "VpNB4h70me0a0l4f4e4kzKVUp4uy2PuI/8FsXp/qgANbuTOw87nfbY39PWvF3yTnHdkWl7wTWwK19R1o", + "5VL3ieWcZ4GYkTcsleIAU+W760atlYZlJ/uf6/rbpurt0atJ8JxxSJaCx3LS/YRf3+DHaDJ+oWne1/nY", + "fOzr22KOTfhbYDXnGcIZL4vfL+QpfikVUmu1EswxruubWfrf8aj5Q7PmafckrXnaPWaNIms9P+99bPzp", + "fHhcS7UodSbOg774ALS8aIj5PsiVPVxvXr2JWjmnFclAGaL9+pRUAR5iJ6b6GslEFmRE701G9hdVW80Y", + "z1pEghJlKs5AqkqhIb27za3u6s+juxq87zvxWJt5cxtHK9XVSiRvRQZ23Gay21i4KBcZuAShXUGkksHi", + "731/K9XtWi+wlJbzhSZlQbSIvfXqjglNLZO1ZRjVtrp1tpWvtnIGhOYSaLYmUwBOxNQsuln/k1CFrvL+", + "wegkzXj5tRquQooUlIIs8eGx20Cr0q7i81JvwBMCjgBXsxAlyIzKSwN7erYVzipVuSJ3f/xF3bsBeK0o", + "uBmx1kE3gt7KCchJe12oh02/ieDak4dkRyUQLxqgfkssixychiuCwp1w0rt/bYg6u3h5tKAKiH1miveT", + "XI6AKlA/M71fFtqySMz9HSkQab8esyVKYpxyoSAVPFP9ZVy3sWUsFxKsRZkVBJwwxolx4J4H52uq9Htn", + "7AiregVlScwUG+rO9qXENyP/UiXE74ydmvuQq1JVWfOdAiNeWYvDasNcb2FVzYXWJj92pSHRgpQKto3c", + "h6VgfIcsFRaS1YGZCIuGdBeHOU2oU1B0UdkAokbEJkCOfKtGybjahNEDCFM1oqvqkE3KCcpjKS2KAqvW", + "JSWv+vWh6ci2PtA/1227xOUKF+G9nQlQofbKQX5uMaswaGNBFXFwkCU9dQquucv5FCnpxZaQoGE62UT5", + "5lgemVbhEdh6SMtiLmmGBUZpRJXys/1M7OdNA+COe/LE6s3JFGbRIiRm02tKlr0qompogeOpmPCIxZ4V", + "Sc0RnGFVHE8grveWkTPoqTR9HFS/dM1xrugW+fFw2Xare9RSZgyz45YcEGLH0IfA24OGauSLYwI7J7X2", + "oD3F30G5CSoxYvdJ1qD6llCPv9MC2tq88P5qXBQt7t5iwFGu2cvFtrCRvhMb0x9+lUF9bbPtZ/RJa+pP", + "g/ff5CJv271zynQyE9LVz6czDTKiymuVNKBM+5hBa0DRwnlMEBzBXZtuHFeHvU684ZiIBYH4Kp1sGcnj", + "Y6b6TshBgT9N9zbKNCm5ZnkQ/Fy9lL88feGtDuBWB3CrA7jVAdzqAG51ALc6gFsdwK0O4FYHcKsDuNUB", + "/GV1ADcVyZd4gcP7N3PBEw5zqtkZVCF+t8mH/lSRL9VV5XUSqMU4p0y7VJ6EejEAv1wu8E8DzREHLEce", + "WwjVmyMJCz8rUcoUSGogZJwUOTVPA1jpKrFcM2WpT6LsSj9jFlSq4PEjcvTDgXfQXzhH8mbbuwcuGbnS", + "6xzuudQNVW1Wn8MBuEG6S+FA/ZXgE9C5dHwsB6IMer/F1q/gDHJRgLS+v0TLMqLxOQaav3S42aLwaZTW", + "NKP9Pm7omRzalrQIStzjWqki1EZtNCtjzmiu+ktj2vGWtIjlgKsuPqsKQm7yQmTr1gkxu7aHG9g8G7Wb", + "PuNUriMhOp0T0SENLQy/coTV1WV9uvJgki7RdslsG4XFpHUJKnqON1F5NIqi2rDOUDbYZ9aik2jp6Hbo", + "wKgCcIgDrKFnvyfkve13s6HqCJE7YjUz/2L8BpstK6aBbc0jwrGerzWu3CM+enrx7I8NYWdlCoRpRXw8", + "yvbrZTxaJWakOfDEMaBkKrJ10mBfo8YtlDFFlYLldPtNFPJPl/XYXT7my+Z76maukVfB4jbx5JBoVolj", + "wD3cea1hMG+usIUjOvYcYPxzs+g+NhqCQBx/iimV2rVmdmR69TTrW8Z3y/iC09iSCBh38XttJjL5jIxP", + "rmXJ+3netytISwNceJLvonYeTXKw0g27ZgbTcj7H7M0dG51ZGuB4TPAbYoV2uUO54G4UZAevMnpeNolU", + "e7gudwli1e4KSeZSlMU9W6aKr9GYsSwoX3uTLySKLcvc4tAmvrtaRmtD7LqOAGiOdbq/Pq32O6/yC3S3", + "7qpt/m7RQs6pInZ/ISMlz1zkUCcQd8WHZ462Qx+veM2mN+aOtuuNrM7NO+SK8LvsQlwqM3cBMtErbg9U", + "M727Dfi1J3dym7X2r3FtvLMZF3oYbDd4tWYIV3R7yICv4fURpCipQ+GatbZsJcC+wJEwX4lteaXOI53h", + "mz4kQR0+ayOFvCDUlxRIBVdalqk+4RRtNMHCJl3/Eq+N7udvL32TuJkwYsVzQ51wihnnK8tNlM/NIGKm", + "+A7As1FVzuegDK8MiWQGcMJdK8ZJyc1LS8zIkqVSJDYM1ZwhI59MbMslXZMZzdHI+AdIQabmZg923SqM", + "lWZ57hxazDREzE441SQHqjR5wwyXNcP5XGKVJxfocyFPKyzE01fMgYNiKokrX763XzFDhFu+V/KhwtJ+", + "riO7rzc1hIedZb2QH74ycFNMhpMzpWsfiA7s12b/XjKeRInseAHEuYS1aYvcxVwxjoDuNa1DegEn3Nxw", + "WhDk6lRfjBzaZp7OWbSno0U1jY1oWYP8Wgc98a6Ey5AIk7k1rfyJAjMDOvDmS9x4rEXT3vsdzSgby1vG", + "vrqMYj2N3CMB/Gd7ivCON8uCtJRMr9EOQQv22ymY/3/49MF8k2feRFHKfLQ/Wmhd7O/tYd3KhVB6b/Rp", + "HH5TrY8fqpV/9NaGQrIzTGP94dP/DwAA//8ICcmexzwBAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go index a95cdb1458..3fd51bda92 100644 --- 
a/daemon/algod/api/server/v2/generated/participating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go @@ -158,167 +158,173 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9e3PcNrIo/lVQc06VH7/hSH4ku1ZV6vxkO8nqxvG6LCV7z7F9EwzZM4MVCXABUJqJ", - "r777LTQAEiRBDvWIvKnKX7aGeDQajUa/8XmWiqIUHLhWs6PPs5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPB", - "Z0f+G1FaMr6ezWfM/FpSvZnNZ5wW0LQx/eczCf+qmIRsdqRlBfOZSjdQUDOw3pWmdT3SNlmLxA1xbIc4", - "eT27GvlAs0yCUn0o/87zHWE8zasMiJaUK5qaT4pcMr0hesMUcZ0J40RwIGJF9KbVmKwY5Jla+EX+qwK5", - "C1bpJh9e0lUDYiJFDn04X4liyTh4qKAGqt4QogXJYIWNNlQTM4OB1TfUgiigMt2QlZB7QLVAhPACr4rZ", - "0YeZAp6BxN1KgV3gf1cS4DdINJVr0LNP89jiVhpkolkRWdqJw74EVeVaEWyLa1yzC+DE9FqQHyulyRII", - "5eT9d6/Is2fPXpiFFFRryByRDa6qmT1ck+0+O5plVIP/3Kc1mq+FpDxL6vbvv3uF85+6BU5tRZWC+GE5", - "Nl/IyeuhBfiOERJiXMMa96FF/aZH5FA0Py9hJSRM3BPb+E43JZz/i+5KSnW6KQXjOrIvBL8S+znKw4Lu", - "YzysBqDVvjSYkmbQD4fJi0+fn8yfHF79x4fj5H/cn189u5q4/Ff1uHswEG2YVlICT3fJWgLF07KhvI+P", - "944e1EZUeUY29AI3nxbI6l1fYvpa1nlB88rQCUulOM7XQhHqyCiDFa1yTfzEpOK5YVNmNEfthClSSnHB", - "Msjmhvtebli6ISlVdghsRy5ZnhsarBRkQ7QWX93IYboKUWLguhE+cEH/vsho1rUHE7BFbpCkuVCQaLHn", - "evI3DuUZCS+U5q5S17usyNkGCE5uPtjLFnHHDU3n+Y5o3NeMUEUo8VfTnLAV2YmKXOLm5Owc+7vVGKwV", - "xCANN6d1j5rDO4S+HjIiyFsKkQPliDx/7voo4yu2riQocrkBvXF3ngRVCq6AiOU/IdVm2//X6d/fEiHJ", - "j6AUXcM7mp4T4KnIhvfYTRq7wf+phNnwQq1Lmp7Hr+ucFSwC8o90y4qqILwqliDNfvn7QQsiQVeSDwFk", - "R9xDZwXd9ic9kxVPcXObaVuCmiElpsqc7hbkZEUKuv3mcO7AUYTmOSmBZ4yvid7yQSHNzL0fvESKimcT", - "ZBhtNiy4NVUJKVsxyEg9yggkbpp98DB+PXgaySoAxw8yCE49yx5wOGwjNGOOrvlCSrqGgGQW5CfHufCr", - "FufAawZHljv8VEq4YKJSdacBGHHqcfGaCw1JKWHFIjR26tBhuIdt49hr4QScVHBNGYfMcF4EWmiwnGgQ", - "pmDCcWWmf0UvqYKvnw9d4M3Xibu/Et1dH93xSbuNjRJ7JCP3ovnqDmxcbGr1n6D8hXMrtk7sz72NZOsz", - "c5WsWI7XzD/N/nk0VAqZQAsR/uJRbM2priQcfeSPzV8kIaea8ozKzPxS2J9+rHLNTtna/JTbn96INUtP", - "2XoAmTWsUW0KuxX2HzNenB3rbVRpeCPEeVWGC0pbWulyR05eD22yHfO6hHlcq7KhVnG29ZrGdXvobb2R", - "A0AO4q6kpuE57CQYaGm6wn+2K6QnupK/mX/KMje9dbmKodbQsbtv0TbgbAbHZZmzlBokvnefzVfDBMBq", - "CbRpcYAX6tHnAMRSihKkZnZQWpZJLlKaJ0pTjSP9p4TV7Gj2HweNceXAdlcHweRvTK9T7GTkUSvjJLQs", - "rzHGOyPXqBFmYRg0fkI2YdkeSkSM2000pMQMC87hgnK9aPSRFj+oD/AHN1ODbyvKWHx39KtBhBPbcAnK", - "ire24QNFAtQTRCtBtKK0uc7Fsv7h4XFZNhjE78dlafGBoiEwlLpgy5RWj3D5tDlJ4Twnrxfk+3BslLMF", - "z3fmcrCihrkbVu7WcrdYbThya2hGfKAIbqeQC7M1Hg1Ghr8LikOdYSNyI/XspRXT+G+ubUhm5vdJnf8Y", - "JBbidpi4UItymLMKDP4SaC4PO5TTJxxny1mQ427fm5GNGSVOMDeildH9tOOO4LFG4aWkpQXQfbF3KeOo", - "gdlGFtZbctOJjC4Kc3CGA1pDqG581vaehygkSAodGF7mIj3/G1WbOzjzSz9W//jhNGQDNANJNlRtFrOY", - "lBEer2a0KUfMNETtnSyDqRb1Eu9qeXuWllFNg6U5eONiiUU99kOmBzKiu/wd/0NzYj6bs21Yvx12Qc6Q", - "gSl7nJ0HITOqvFUQ7EymAZoYBCms9k6M1n0tKF81k8f3adIefWsNBm6H3CJwh8T2zo/BS7GNwfBSbHtH", - "QGxB3QV9mHFQjNRQqAnwvXaQCdx/hz4qJd31kYxjT0GyWaARXRWeBh7e+GaWxvJ6vBTyZtynw1Y4aezJ", - "hJpRA+Y77yAJm1Zl4kgxYpOyDToDNS68cabRHT6GsRYWTjX9HbCgzKh3gYX2QHeNBVGULIc7IP1NlOkv", - "qYJnT8np346/evL0l6dffW1IspRiLWlBljsNijx0uhlRepfDo/7KUDuqch0f/evn3grZHjc2jhKVTKGg", - "ZX8oa920IpBtRky7PtbaaMZV1wBOOZxnYDi5RTuxhnsD2mumjIRVLO9kM4YQljWzZMRBksFeYrru8ppp", - "duES5U5Wd6HKgpRCRuxreMS0SEWeXIBUTERcJe9cC+JaePG27P5uoSWXVBEzN5p+K44CRYSy9JZP5/t2", - "6LMtb3AzyvnteiOrc/NO2Zc28r0lUZESZKK3nGSwrNYtTWglRUEoybAj3tHfgz7d8RStandBpMNqWsE4", - "mvjVjqeBzmY2Kods3dqE2+tmXax4+5yd6oGKgGPQ8QY/o1r/GnJN71x+6U4Qg/2V30gLLMlMQ9SC37D1", - "RgcC5jspxOruYYzNEgMUP1jxPDd9+kL6W5GBWWyl7uAybgZraN3saUjhdCkqTSjhIgO0qFQqfk0PuOXR", - 
"H4huTB3e/HpjJe4lGEJKaWVWW5UEnXQ9ztF0TGhqqTdB1KgBL0btfrKt7HTW5ZtLoJnR6oETsXSuAufE", - "wEVS9DBqf9E5ISFyllpwlVKkoBRkiTNR7AXNt7NMRI/gCQFHgOtZiBJkReWtgT2/2AvnOewS9Icr8vCH", - "n9WjLwCvFprmexCLbWLorRU+5w/qQz1t+jGC604ekh2VQDzPNdqlYRA5aBhC4bVwMrh/XYh6u3h7tFyA", - "RM/M70rxfpLbEVAN6u9M77eFtioHoryconPGCrTbccqFglTwTEUHy6nSyT62bBq1tDGzgoATxjgxDjwg", - "lLyhSltvIuMZGkHsdYLzWAHFTDEM8KBAakb+2cui/bFTcw9yValaMFVVWQqpIYutgcN2ZK63sK3nEqtg", - "7Fr61YJUCvaNPISlYHyHLLsSiyCqa6O7c7f3F4emaXPP76KobAHRIGIMkFPfKsBuGOkyAAhTDaIt4TDV", - "oZw6vGY+U1qUpeEWOql43W8ITae29bH+qWnbJy6qm3s7E2Bm1x4mB/mlxayNcdpQo0LjyKSg50b2QIXY", - "uj37MJvDmCjGU0jGKN8cy1PTKjwCew9pVa4lzSDJIKe7/qA/2c/Efh4bAHe8UXyEhsTGs8Q3vaFkHz4w", - "MrTA8VRMeCT4haTmCBrNoyEQ13vPyBng2DHm5OjoQT0UzhXdIj8eLttudWREvA0vhDY7bskBIXYMfQq8", - "A2ioR745JrBz0qhl3Sn+G5SboBYjrj/JDtTQEprxr7WAAWOaCwMOjkuHu3cYcJRrDnKxPWxk6MQOWPbe", - "UalZykpUdX6A3Z1rft0Jov4mkoGmLIeMBB+sFliG/YkNxOiOeTNNcJIRpg9+zwoTWU7OFEo8beDPYYcq", - "9zsb4XcWxAXegSobGdVcT5QTBNTHDRkJPGwCW5rqfGfkNL2BHbkECURVy4JpbUM225quFmUSDhA1cI/M", - "6Lw5NjrO78AU99IpDhUsr78V85lVCcbhO+voBS10OFWgFCKfYDzqISMKwSTHPymF2XXmIoR9GKmnpBaQ", - "jmmjK6++/R+oFppxBeS/RUVSylHjqjTUIo2QKCeg/GhmMBJYPadz8TcYghwKsIokfnn8uLvwx4/dnjNF", - "VnDpw+pNwy46Hj9GM847oXTrcN2BqdAct5PI9YGWf7z3XPBCh6fsdzG7kafs5LvO4LW7wJwppRzhmuXf", - "mgF0TuZ2ytpDGpnmXsdxJxn1g6Fj68Z9P2VFld/Vhq8oyysJw96xjx8/rIqPHz+R72xL79ieeyIP0XHZ", - "pEWs3G1USQytITkz+q0UNDMCQtS2j4vk66QOzlRRcAplwPmHO4eU7zqJfFNhIEtIaWWjkh3XdhA04aFq", - "EZEXO7vbRWF0IRPN41Wu7aUdYnUtRVUSVW+7pQJNNfw+puZm6BiU/YmD2KDm41B4kFET890d3NZ2ICKh", - "lKCQt4bmFWW/ilWYf+OYr9opDUXfAm27/jKgn70f1HMEzxmHpBAcdtGUU8bhR/wY6235+0BnvGmH+naF", - "5xb8HbDa80yhxtviF3c7YGjv6ri4O9j87rgd50OYeYTGNchLQkmaMzS9Ca60rFL9kVNU7oPDFokf8GrM", - "sLnnlW8Sty9FzD9uqI+cYuxIrfJH+eIKInz5OwBv9VHVeg1Kd6TEFcBH7loxTirONM5VmP1K7IaVINGJ", - "v7AtC7ojK5qjdeo3kIIsK91mrpggoTTLc+cJMdMQsfrIqSY5GK76I+NnWxzOexI9zXDQl0Ke11hYRM/D", - "GjgoppJ4nMP39iuGoLnlb1w4Gmar2s/Wdm7Gb7Iodqj7NxmY/+fhfx19OE7+hya/HSYv/r+DT5+fXz16", - "3Pvx6dU33/zf9k/Prr559F//GdspD3ssfN9BfvLa6RQnr1FwbIznPdjvzXBaMJ5EiSx0EXdoizw04q8n", - "oEdts4LewEeut9wQ0gXNWUb1zcihy+J6Z9Gejg7VtDaiY0bwa72mOHYLLkMiTKbDGm98jfdDg+KJMujN", - "cbkveF5WFbdbWSnnUcI4cB+iIVbzOhnKFkE4Ipgps6E+vsj9+fSrr2fzJsOl/j6bz9zXTxFKZtk2lseU", - "wTYmZbsDggfjgSIl3SnQce6BsEejUaxTPBy2AKOeqQ0r759TKM2WcQ7no2udtr7lJ9yGvZrzg76hnTM5", - "i9X9w60lQAal3sSSo1uSArZqdhOg468vpbgAPidsAYuutpytQfm4mBzoCpN00b8hpmQL1OfAEpqnigDr", - "4UImqaQx+kHh1nHrq/nMXf7qzuVxN3AMru6ctSPI/60FefD9t2fkwDFM9cCm1NmhgySoiBXKxfm3IjkM", - "N7MlIWxO4Uf+kb+GFePMfD/6yDOq6cGSKpaqg0qBfElzylNYrAU58qkDr6mmH3lP0hqs2hIkbZCyWuYs", - "JeehRNyQp83Ej6qNNF8Lozh2ndp9+dVNFeUvdoLkkumNqHTiUo0TCZdUxpwGqk41xZFtoYCxWefEjW1Z", - "sUtlduPHeR4tS9VNOesvvyxzs/yADJVLqDJbRpQW0ssiRkCx0OD+vhXuYpD00uepVwoU+bWg5QfG9SeS", - "fKwOD58BaeVg/equfEOTuxJa9sobpcR1bZW4cKvXwFZLmpR0PWA00EBL3H2UlwtUsvOcYLdW7pePbcWh", - "mgV4fAxvgIXj2nksuLhT28vXjIkvAT/hFmIbI240HtOb7leQDXbj7epklPV2qdKbxJzt6KqUIXG/M3Up", - "ibURsrwbW7E1hgq6qhtLIOkG0nPIsAAAFKXezVvdfaSEEzQ962DKFsqwuRyYzY2m3SWQqsyoE8U7BiWD", - "YQVa+1jF93AOuzPRJINfJ4+2ndaphg4qUmogXRpiDY+tG6O7+S4cB21dZemzIzFNxpPFUU0Xvs/wQbYi", - "7x0c4hhRtNIOhxBBZQQRlvgHUHCDhZrxbkX6seUZLWNpb75IXQ3P+4lr0ihPLnImXA1mU9rvBWDVHXGp", - "yJIauV24gjE2dTHgYpWiaxiQkEPr+sQEwZZFHgfZd+9Fbzqx6l5ovfsmCrJtnJg1RykFzBdDKqjMdOKl", - "/EzWgWMNqATrwDmELXMUk+rAMst0qGx5OWxhqyHQ4gQMkjcChwejjZFQstlQ5WvZYMkff5YnyQC/Yyru", - "WAGGkyDUJ6jrUxu+Pc/tntOedunKMPjaC77gQqhaTiieYCR8jC6ObYfgKABlkMPaLtw29oTSpAU3G2Tg", - "+PtqlTMOJIlFDVGlRMpsMaLmmnFzgJGPHxNiTcBk8ggxMg7ARsckDkzeivBs8vV1gOQurZn6sdGlGfwN", - 
"8QwMG0drRB5RGhbO+EDEtucA1IWa1fdXJ+ARhyGMz4lhcxc0N2zOaXzNIL06ACi2drL+nWv80ZA4O2KB", - "txfLtdZkr6KbrCaUmTzQcYFuBOKl2CY2BSsq8S63S0Pv0dBiTAiLHUxbceGBIkuxxXALvFpsKOseWIbh", - "8GAEGv6WKaRX7Dd0m1tgxqYdl6ZiVKiQZJw5ryaXIXFiytQDEswQuTwMiijcCICOsaMpN+qU371Kals8", - "6V/mza02b4oD+ayN2PEfOkLRXRrAX98KU5c9eNeVWKJ2inbUQLviQyBCxojesIm+k6bvClKQAyoFSUuI", - "Ss5jrjuj2wDeOKe+W2C8wLoSlO8eBaEoEtZMaWiM6OZi9l6h+zZPUixnJcRqeHW6lCuzvvdC1NeUrZeC", - "HVvLvPcVYCjnikmlE/RARJdgGn2nUKn+zjSNy0rtYBdb2ZFlcd6A057DLslYXsXp1c37w2sz7duaJapq", - "ifyWcQI03ZAlViKNhsCNTG2jJEcX/MYu+A29s/VOOw2mqZlYGnJpz/EHORcdzjvGDiIEGCOO/q4NonSE", - "QQaZi33uGMhN9nBi5uJizPraO0yZH3tv2IjPnxy6o+xI0bUEBoPRVTB0ExmxhOmgkGc/pXDgDNCyZNm2", - "Ywu1ow5qzPRaBg9fIamDBdxdN9geDAR2z1hWgwTVLobVCPi2JGurFsViEmbO2iWrQoYQTsWULyjeR1Sd", - "9bQPV2dA8x9g97Npi8uZXc1ntzOdxnDtRtyD63f19kbxjK55a0preUKuiXJallJc0DxxBuYh0pTiwpEm", - "Nvf26HtmdXEz5tm3x2/eOfCv5rM0ByqTWlQYXBW2K/8wq7J1twYOiC9YbHQ+L7NbUTLY/LpYUGiUvtyA", - "Kw4bSKO9KnaNwyE4is5IvYpHCO01OTvfiF3iiI8EytpF0pjvrIek7RWhF5Tl3m7moR2I5sHFTSuFGOUK", - "4QC39q4ETrLkTtlN73THT0dDXXt4UjjXSPnawlZoVkTwrgvdiJBojkNSLSjWoLNWkT5z4lWBloRE5SyN", - "21j5EsNuufWdmcYEGw8Io2bEig24YnnFgrFMMzVB0e0AGcwRRaavZziEu6VwT2tUnP2rAsIy4Np8kngq", - "OwcVi/45a3v/OjWyQ38uN7C10DfD30bGCOsvdm88BGJcwAg9dT1wX9cqs19obZEyPwQuiWs4/MMZe1fi", - "iLPe0YejZhu8uGl73MKXMPr8zxCGrZq8/xkOr7y6QpADc0Sf1WAqWUnxG8T1PFSPIxkjvuIkwyiX34BP", - "CDNvrDvN6yDN7IPbPSTdhFaodpDCANXjzgduOSx95y3UlNuttlXuW7FucYIJo0oP7PgNwTiYe5G4Ob1c", - "0lhdQCNkGJiOGwdwy5auBfGdPe6d2Z+5IqALEviS67bMJgOXIJtkrn5hkRsKDHbayaJCIxkg1YYywdz6", - "/3IlIsNU/JJy+1iC6WePkuutwBq/TK9LITGVX8XN/hmkrKB5XHLI0r6JN2NrZp8KqBQEtejdQPaNFUtF", - "rp6/dbE3qDlZkcN58NqF242MXTDFljlgiye2xZIq5OS1IaruYpYHXG8UNn86ofmm4pmETG+URawSpBbq", - "UL2pnVdL0JcAnBxiuycvyEN02yl2AY8MFt39PDt68gKNrvaPw9gF4N4EGeMm2SpMfInTMfot7RiGcbtR", - "F9GsZ/uQ0zDjGjlNtuuUs4QtHa/bf5YKyuka4pEixR6YbF/cTTSkdfDCM/sKidJS7AgbSEECTQ1/Gog+", - "N+zPgkFSURRMF865o0Rh6KkpNG8n9cPZJ01cjVAPl/+IPtLSu4g6SuT9Gk3t/RZbNXqy39IC2midE2rr", - "N+SsiV7wlYvJiS8Pg0VT61qpFjdmLrN0FHMwmGFFSsm4RsWi0qvkryTdUElTw/4WQ+Amy6+fRwrFtgsW", - "8usBfu94l6BAXsRRLwfI3ssQri95yAVPCsNRskdNtkdwKgeduXG33ZDvcHzoqUKZGSUZJLeqRW404NS3", - "Ijw+MuAtSbFez7Xo8doru3fKrGScPGhlduin92+clFEIGav51hx3J3FI0JLBBcbuxTfJjHnLvZD5pF24", - "DfRf1vPgRc5ALPNnOaYIvBQR7dQXL64t6S5WPWIdGDqm5oMhg6Ubak7ahWLv3+nnjc9955P54mHFP7rA", - "fuEtRST7FQxsYlDEOrqdWf098H9T8lJsp25q54T4jf03QE0UJRXLs5+brMxOjXBJebqJ+rOWpuMvzWtG", - "9eLs/RQtrbahnEMeHc7Kgr94mTEi1f5TTJ2nYHxi227ZcrvczuIawNtgeqD8hAa9TOdmghCr7YS3OqA6", - "X4uM4DxNHa+Ge/bL3QdFif9VgdKx5CH8YIO60G5p9F1bE5cAz1BbXJDv7WukGyCtKi2opdn8eMh8hVZr", - "UK/KXNBsTsw4Z98evyF2VtvHvslha/KuUUlpr6JjrwpKFE4LD/bPa8RTF6aPMx5LbVatNBZNUpoWZSw5", - "1LQ48w0wAzW04aP6EmJnQV5bzVF5vcROYuhhxWRhNK56NCu7IE2Y/2hN0w2qZC2WOkzy04tJe6pUwQNu", - "9UMsdd0+PHcGbldP2paTnhNh9OZLpuwjlHAB7XzUOjnbmQR8fmp7ebLi3FJKVPYYKx5wE7R74Gyghjfz", - "RyHrIP6aArmtxX7d2tqn2CtaR6hbqLv3cpvNbqwf2PCPC6eUC85SrOITu5rdg5ZTfGATCh51jaz+iLsT", - "Gjlc0fLgdZicw+JgwXDPCB3i+kb44KvZVEsd9k+NLyduqCZr0MpxNsjmvsq9swMyrsDVYcS3TQM+KWTL", - "r4gcMuqqTmqXxjXJCNNiBhS778y3t07tx3jxc8ZRwHdoc6Hp1lKH7+1poxUwTdYClFtPOzdYfTB9Fpgm", - "m8H208K/z4djWLecWbb1QfeHOvYeaecBNm1fmba2lEnzcysC2U56XJZu0uE3EKLygN7yQQRHPIuJd+0E", - "yK3HD0cbIbfRUBK8Tw2hwQU6oqHEe7hHGPV7AJ23ZozQaikKWxAbwhWtYMB4BIw3jEPzemTkgkijVwJu", - "DJ7XgX4qlVRbEXASTzsDmqP3OcbQlHauh9sO1dlgRAmu0c8xvI3NUwYDjKNu0AhulO/qRysNdQfCxCt8", - "Ldchsv8wAUpVTojKMKOg81RBjHEYxu1LIbUvgP4x6MtEtruW1J6c69xEQ0miyypbg05olsXqYr7ErwS/", - "+kJRsIW0qusnliVJsSZKu0hMn9rcRKngqipG5vINbjld8PZHhBrC90f8DmMSynKH/8aKBw7vjAvCuHYY", - 
"oI+4cI8lXFNubo/Uk3oNTSeKrZPpmMA75fboaKa+GaE3/e+U0nOxbgNyz6UhxrhcuEcx/vatuTjCygm9", - "ipj2aqkLG2DQnfAvtqHaWKfktrkSXmW9Epno7Klr3o0bIIbfdprj5TcQehsUxKD2frXew6EA3HQwXpxq", - "l7mmKRllQYPZQDZ6x+b9IBRxy+lQxI4N2DGfe72nSYY9ORvHHkWoDwXrA/SDjzMlJWXONd4wiz5mXUT6", - "sLlw7NA1G9xdhIvzHrTY/XAxFJNNFOPrHAh+776Gcw4unb1+Dt2u1UcleZXQ/upeI7Xj1VHx0fX3oxNw", - "qi9rBh002p65yut2mU4n/+FnG8NGgGu5+zcw4fY2vfeWUF/ateappgmpq/ZOquLbuhXjzwIN1z9qah4h", - "PZVCsaZSdOy9oImxbmf45E9Qv6k/lg80uYBUY3nwxoEuAa5TzclMFrxF92cdpAHdsQ4JdOWPxmoe9WuC", - "77nQemlJQWqdrae8mF7h57gOk0KmhK/BrYG75+DaCQeTw55XK0g1u9iTBvaPDfAgxWjujRD2WdcgK4zV", - "YbRYReT6JrYGoLEsrVF4gmp+twZnKAnkHHYPFGlRQ7TA89zfKzcpIIEYQO6QGBIRKhaGYK2mzjPMVE0Z", - "iAUf9mO7Q1OKa/BpmCCp8YZzeZI0N26T6DgyZfxtiklzma7XSv/FiNChTLF+bfthYfs1PiWg6mfbfAGK", - "UCUlJ5Hqz66ABSbt1Y4CX8oClP/NZ+jaWXJ2DuHjNeiWuaQy8y2idgZvwkhG7qNeele0ZDVVNojS+cHr", - "IM1+Qk+k8BOG4qa5wHLPQ/HM7bjI8I13jP7A6wDLTyNcK5DukS8U9nKhINHCB3WOwTGGCvce+U2QoAaL", - "LVrgBkugvG9qvGDRWYolT6iLbAkXSCQU1EAng0osw3OOIfuV/e4zWHzR0b3mlJpek72lVHx4LlM9JIZU", - "vyLuttyfGXMTywrj3D4pqmJlWbhBZWj6L6XIqtRe0OHBqK1Pk4sejbCSqFEi7a+yp1/mWALsTZBneA67", - "Ayv6pxvKm1ps7WNtRSi7hiCvv7Pbd2p0iuvX+douYH0ncH5Jw818VgqRJwO2/pN+dZnuGThn6TlkxNwd", - "PrBt4HUN8hBNzLUz93Kz89VUyhI4ZI8WhBxzG0rs/brt8sadyfkDPTb/FmfNKlvwydmUFh95PCYTSzHJ", - "W/I3P8w4V1NgmN8tp7KD7Kldsh2obCPpZeStmcVUpbTvae2+/9EQlYUiJqXcMJF90vnu25UipB88fTCu", - "/YR1LpoAOmnNkygtNc9BtIWXHxur47RHGHyHPeCFSnHwDIPnRg6cLxzl9mONlGApg5TQWv4+PdstsOFL", - "wRYpTIswy7RVh2yERHtfAiOKelXbJuJ47pswsKiF4Fjop2/6UGiuxnrBIeGYcykvaH7/5gusdnKM+HBP", - "IsYXGuq/IZItKtXNQk3e0ElzB7ru3U3N36G55R9g9ijqZ3BDObtj/fyFt85iXTuak1w0jyHhkOQSx7SO", - "iSdfk6ULky8lpEyxTgbRpS9lWqt7WNm7eSlzXL/ct86fhb4FGTsFQZTkbVMWUQu8HxoImyP6hZnKwMmN", - "UnmM+npkEcFfjEeF+ep7rovzlsfClpnthOIICXfsuQhiEK7puehn4k9dnrXOm0unUtBf5+TbuoXbyEXd", - "rG2q262P3LHaeVO8ZfGSmKY7uussQrCeLEFQya9PfiUSVvhghCCPH+MEjx/PXdNfn7Y/m+P8+HH8Rc77", - "ctRZHLkx3Lwxivl5KHTThicORAl39qNiebaPMFox382TKxjV/IvL+vgij778Yu2p/aPqCu9fJ0SguwmI", - "mMhaW5MHUwXR3BMCuV23SNg2aiZpJZneYTEKb35jv0Rdit/XFnvn8anTl93dp8U51OVMGvt+pfzt+r2g", - "Od5HRqbGAA2NrzB+u6VFmYM7KN88WP4Fnv31eXb47Mlfln89/OowhedfvTg8pC+e0ycvnj2Bp3/96vkh", - "PFl9/WL5NHv6/Ony+dPnX3/1In32/Mny+dcv/vLA8CEDsgV05lMfZ/8bX0ZKjt+dJGcG2AYntGT146uG", - "jP3zDjTFkwgFZfnsyP/0//sTtkhF0Qzvf525zKrZRutSHR0cXF5eLsIuB2s06CVaVOnmwM/Tf/Ty3Ukd", - "HW9dwbijNvDZkAJuqiOFY/z2/tvTM3L87mTREMzsaHa4OFw8wcfMSuC0ZLOj2TP8CU/PBvf9wBHb7Ojz", - "1Xx2sAGao//L/FGAliz1n9QlXa9BLtw7F+ani6cHXpQ4+OyMmVdj3w7CkrEHn1s232xPTywpefDZV0oY", - "b90qReBs3UGHiVCMNTtYYgLW1KaggsbDS0EFQx18RhF58PcDl5US/4iqij0DB94xEm/ZwtJnvTWwdnq4", - "15sPPuN/kCYDsGwMWB9cm61xYB+U6/+842n0x/5AvcLma4gmmmDKBx17YRhPgT1AJxnyNd17MBmrpFqz", - "Jx6Op4eHf4y3k59fE9BRm0oraisCzEuaEZ/zg3M/ub+5Tzh6WA2vI5aXIwTP7w+CdknaH2BH3gpNvkOl", - "42o+++o+d+KEGxGI5gRbBhUn+kfkJ37OxSX3LY0QUBUFlbvJx0fTtUKjn2QX1IlgQZXy2Se0Mts8sPZR", - "O86yHtFbYQiUfimy3QjGCrUuXYx2g7RGFmTcLKGvTPaffes9cHwOO2J9cN7WygW+Z99IaVpWcHVLnvCH", - "fYv5T57yJ0+Rdvpn9zf9KcgLlgI5g6IUkkqW78hPvM6wuzGPO86yaHxT++jv5XFGz05FBmvgiWNgyVJk", - "O19FrDXBOVi1ryfIHHxulwK2IuAsgxx0NHbD/F4/0tZfxHJHTl73JBzbrct5X+6waVBi9+jDZ6s3GaWg", - "UWu6IPY4Y1jdtcubPsW55hjZm4WshSYWC5lb1J+M6E9GdCvhZvLhmSLfRLUPm79Oe3f23Keix4qQUN0H", - "ZYqO8kWP751sfF//iek7Nk4MMhJ8sAHNXTT/ySL+ZBG3YxHfQ+Qw4ql1TCNCdNfTh6YyDAyRyboPbqDr", - "wDevciqJgqlmjmMc0Rk37oNr3LdSF8WV1ekob94kimzg3ep5f7K8P1neH4flHe9nNG3B5Naa0TnsClrW", - "+pDaVDoTl4EnAWGxkT19O3D9BGDr74NLynSyEtJlHWBB2n5nDTQ/cPU0Or82Kay9L5iXG/wY2Mrjvx7U", - "9b6jH7tOiNhXZ4QfaOSrIfnPjRMydOoha6/deR8+GbaM1SQd1298VEcHBxjJuxFKH8yu5p87/qvw46ea", - 
"BD7Xd4UjhatPV/8vAAD//xVsHQFjwgAA", + "H4sIAAAAAAAC/+y9e3PcNrIo/lVQc06VH7+hJD+SXasqdX6ynXh143hdlpK951i+CYbsmcGKBLgAKM3E", + "V9/9FhoACZIgh3pE3lT5L1tDPBqNRqPf+DxLRVEKDlyr2eHnWUklLUCDxL9omoqK64Rl5q8MVCpZqZng", + "s0P/jSgtGV/N5jNmfi2pXs/mM04LaNqY/vOZhH9VTEI2O9SygvlMpWsoqBlYb0vTuh5pk6xE4oY4skMc", + "v55djXygWSZBqT6Uf+f5ljCe5lUGREvKFU3NJ0UumV4TvWaKuM6EcSI4ELEket1qTJYM8kzt+UX+qwK5", + "DVbpJh9e0lUDYiJFDn04X4liwTh4qKAGqt4QogXJYImN1lQTM4OB1TfUgiigMl2TpZA7QLVAhPACr4rZ", + "4ceZAp6BxN1KgV3gf5cS4HdINJUr0LNP89jilhpkolkRWdqxw74EVeVaEWyLa1yxC+DE9NojP1VKkwUQ", + "ysmHH16RZ8+evTALKajWkDkiG1xVM3u4Jtt9djjLqAb/uU9rNF8JSXmW1O0//PAK5z9xC5zaiioF8cNy", + "ZL6Q49dDC/AdIyTEuIYV7kOL+k2PyKFofl7AUkiYuCe28Z1uSjj/F92VlOp0XQrGdWRfCH4l9nOUhwXd", + "x3hYDUCrfWkwJc2gHw+SF58+P5k/Obj6j49Hyf+4P795djVx+a/qcXdgINowraQEnm6TlQSKp2VNeR8f", + "Hxw9qLWo8oys6QVuPi2Q1bu+xPS1rPOC5pWhE5ZKcZSvhCLUkVEGS1rlmviJScVzw6bMaI7aCVOklOKC", + "ZZDNDfe9XLN0TVKq7BDYjlyyPDc0WCnIhmgtvrqRw3QVosTAdSN84IL+fZHRrGsHJmCD3CBJc6Eg0WLH", + "9eRvHMozEl4ozV2lrndZkdM1EJzcfLCXLeKOG5rO8y3RuK8ZoYpQ4q+mOWFLshUVucTNydk59nerMVgr", + "iEEabk7rHjWHdwh9PWREkLcQIgfKEXn+3PVRxpdsVUlQ5HINeu3uPAmqFFwBEYt/QqrNtv+vk7+/I0KS", + "n0ApuoL3ND0nwFORQbZHjpeECx2QhqMlxKHpObQOB1fskv+nEoYmCrUqaXoev9FzVrDIqn6iG1ZUBeFV", + "sQBpttRfIVoQCbqSfAggO+IOUizopj/pqax4ivvfTNuS5Qy1MVXmdIsIK+jmu4O5A0cRmuekBJ4xviJ6", + "wwflODP3bvASKSqeTRBztNnT4GJVJaRsySAj9SgjkLhpdsHD+PXgaYSvABw/yCA49Sw7wOGwidCMOd3m", + "CynpCgKS2SM/O+aGX7U4B14TOlls8VMp4YKJStWdBmDEqcclcC40JKWEJYvQ2IlDh2Ewto3jwIWTgVLB", + "NWUcMsOcEWihwTKrQZiCCcf1nf4tvqAKvn0+dMc3Xyfu/lJ0d310xyftNjZK7JGMXJ3mqzuwccmq1X+C", + "fhjOrdgqsT/3NpKtTs1ts2Q53kT/NPvn0VApZAItRPi7SbEVp7qScHjGH5u/SEJONOUZlZn5pbA//VTl", + "mp2wlfkptz+9FSuWnrDVADJrWKMKF3Yr7D9mvDg71puoXvFWiPOqDBeUthTXxZYcvx7aZDvmdQnzqNZ2", + "Q8XjdOOVkev20Jt6IweAHMRdSU3Dc9hKMNDSdIn/bJZIT3Qpfzf/lGVueutyGUOtoWN3JaP5wJkVjsoy", + "Zyk1SPzgPpuvhgmAVSRo02IfL9TDzwGIpRQlSM3soLQsk1ykNE+UphpH+k8Jy9nh7D/2G/vLvu2u9oPJ", + "35peJ9jJiKxWDEpoWV5jjPdG9FEjzMIwaPyEbMKyPRSaGLebaEiJGRacwwXleq9RWVr8oD7AH91MDb6t", + "tGPx3VHBBhFObMMFKCsB24YPFAlQTxCtBNGKAukqF4v6h4dHZdlgEL8flaXFB0qPwFAwgw1TWj3C5dPm", + "JIXzHL/eI2/CsVEUFzzfmsvBihrmbli6W8vdYrVtya2hGfGBIridQu6ZrfFoMGL+XVAcqhVrkRupZyet", + "mMZ/c21DMjO/T+r85yCxELfDxIWKlsOc1XHwl0C5edihnD7hOHPPHjnq9r0Z2ZhR4gRzI1oZ3U877gge", + "axReSlpaAN0Xe5cyjkqabWRhvSU3ncjoojAHZzigNYTqxmdt53mIQoKk0IHhZS7S879Rtb6DM7/wY/WP", + "H05D1kAzkGRN1XpvFpMywuPVjDbliJmGqOCTRTDVXr3Eu1rejqVlVNNgaQ7euFhiUY/9kOmBjOguf8f/", + "0JyYz+ZsG9Zvh90jp8jAlD3OzsmQGW3fKgh2JtMArRCCFFbBJ0brvhaUr5rJ4/s0aY++tzYFt0NuEbhD", + "YnPnx+Cl2MRgeCk2vSMgNqDugj7MOChGaijUBPheO8gE7r9DH5WSbvtIxrGnINks0IiuCk8DD298M0tj", + "nD1aCHkz7tNhK5w0JmdCzagB8513kIRNqzJxpBgxW9kGnYEaL9840+gOH8NYCwsnmv4BWFBm1LvAQnug", + "u8aCKEqWwx2Q/jrK9BdUwbOn5ORvR988efrr02++NSRZSrGStCCLrQZFHjrdjCi9zeFRf2WoHVW5jo/+", + "7XNvqGyPGxtHiUqmUNCyP5Q1gFoRyDYjpl0fa20046prAKcczlMwnNyinVjbvgHtNVNGwioWd7IZQwjL", + "mlky4iDJYCcxXXd5zTTbcIlyK6u7UGVBSiEj9jU8YlqkIk8uQComIt6U964FcS28eFt2f7fQkkuqiJkb", + "Tb8VR4EiQll6w6fzfTv06YY3uBnl/Ha9kdW5eafsSxv53pKoSAky0RtOMlhUq5YmtJSiIJRk2BHv6Deg", + "T7Y8RavaXRDpsJpWMI4mfrXlaaCzmY3KIVu1NuH2ulkXK94+Z6d6oCLgGHS8xc+o1r+GXNM7l1+6E8Rg", + "f+U30gJLMtMQteC3bLXWgYD5XgqxvHsYY7PEAMUPVjzPTZ++kP5OZGAWW6k7uIybwRpaN3saUjhdiEoT", + "SrjIAC0qlYpf0wOee3QZoqdThze/XluJewGGkFJamdVWJUE/Xo9zNB0TmlrqTRA1asCLUbufbCs7nfUK", + "5xJoZrR64EQsnKvAOTFwkRSdkNpfdE5IiJylFlylFCkoBVniTBQ7QfPtLBPRI3hCwBHgehaiBFlSeWtg", + "zy92wnkO2wRd5oo8/PEX9egLwKuFpvkOxGKbGHprhc/5g/pQT5t+jOC6k4dkRyUQz3ONdmkYRA4ahlB4", + 
"LZwM7l8Xot4u3h4tFyDRM/OHUryf5HYEVIP6B9P7baGtyoFAMKfonLIC7XaccqEgFTxT0cFyqnSyiy2b", + "Ri1tzKwg4IQxTowDDwglb6nS1pvIeIZGEHud4DxWQDFTDAM8KJCakX/xsmh/7NTcg1xVqhZMVVWWQmrI", + "YmvgsBmZ6x1s6rnEMhi7ln61IJWCXSMPYSkY3yHLrsQiiOra6O7c7f3FoWna3PPbKCpbQDSIGAPkxLcK", + "sBsGwwwAwlSDaEs4THUop47Amc+UFmVpuIVOKl73G0LTiW19pH9u2vaJi+rm3s4EKIzBce0d5JcWszYM", + "ak2NCo0jk4KeG9kDFWLr9uzDbA5johhPIRmjfHMsT0yr8AjsPKRVuZI0gySDnG77g/5sPxP7eWwA3PFG", + "8REaEhvPEt/0hpJ9+MDI0ALHUzHhkeAXkpojaDSPhkBc7x0jZ4Bjx5iTo6MH9VA4V3SL/Hi4bLvVkRHx", + "NrwQ2uy4JQeE2DH0KfAOoKEe+eaYwM5Jo5Z1p/hvUG6CWoy4/iRbUENLaMa/1gIGjGkuUjg4Lh3u3mHA", + "Ua45yMV2sJGhEztg2XtPpWYpK1HV+RG2d675dSeI+ptIBpqyHDISfLBaYBn2JzYQozvmzTTBSUaYPvg9", + "K0xkOTlTKPG0gT+HLarc722E32kQF3gHqmxkVHM9UU4QUB83ZCTwsAlsaKrzrZHT9Bq25BIkEFUtCqa1", + "jdxta7palEk4QNTAPTKj8+bY6Di/A1PcSyc4VLC8/lbMZ1YlGIfvtKMXtNDhVIFSiHyC8aiHjCgEkxz/", + "pBRm15kLIvZhpJ6SWkA6po2uvPr2f6BaaMYVkP8WFUkpR42r0lCLNEKinIDyo5nBSGD1nM7F32AIcijA", + "KpL45fHj7sIfP3Z7zhRZwqWPvDcNu+h4/BjNOO+F0q3DdQemQnPcjiPXB1r+8d5zwQsdnrLbxexGnrKT", + "7zuD1+4Cc6aUcoRrln9rBtA5mZspaw9pZJp7HcedZNQPho6tG/f9hBVVTvVduC9G5dFan2BFARmjGvIt", + "KSWkYKOrjYClLCwGNGLjrtI15SuUq6WoVi7wx46DjLFS1oIhK94bIip86A1PVlJUZYxRumBPH2BvxA6g", + "RvMJEImdrZx/Sev5XE7FlBvMIzzYnTdmzCGvwnw2qBgapF40iqFFTjtLII4FTHtIVJWmANEQ4JjKVS+1", + "kw3Z5Le4AY3YUEkbA0Voqiuah1RHjpeE8m07TZKyXBkuyBTBdqZzE1c7t2vzOSxLmlvfbCSpIjwpLYkv", + "2PkGpV1UTPQ7IJEYaahPGSEBmuNlyPiPseE3Q8eg7E8cBF01H4firoz+nW/vQAyyAxEJpQSFl1Zot1L2", + "q1iGuU/uVlNbpaHom/Zt118HGM2HQQVS8JxxSArBYRtN92UcfsKPUcaBF+dAZxRhhvp2tZIW/B2w2vNM", + "ocbb4hd3O+BF7+uAwzvY/O64Ha9OmPWFVkvIS0JJmjO0aQqutKxSfcYpWk2CwxYJzPD64bAd7ZVvEjfc", + "RexqbqgzTjEop7alRJ3JS4gYDn4A8OY0Va1WoDr8kywBzrhrxTipONM4V2H2K7EbVoLE6Ig927KgW8MC", + "0ez3O0hBFpVu82TMPFHasEvrYjLTELE841STHIxO/RPjpxsczrtoPc1w0JdCntdYiF8hK+CgmEriASRv", + "7FeM7XPLX7s4P8wUtp+tU8KM36SnbNGo0mS//p+H/3X48Sj5H5r8fpC8+P/2P31+fvXoce/Hp1ffffd/", + "2z89u/ru0X/9Z2ynPOyxvAgH+fFrp6wdv0aJvPFK9GC/N4t0wXgSJbLQ996hLfIQcwAdAT1q22v0Gs64", + "3nBDSBc0Z5kRuW5CDl0W1zuL9nR0qKa1ER37jF/rNeXcW3AZEmEyHdZ442u8H3MVz0BCN5lLKsLzsqy4", + "3Uov6NoAex/7IpbzOsvMFqA4JJiCtKY+cMv9+fSbb2fzJnWo/j6bz9zXTxFKZtkmKh3CJqa+uAOCB+OB", + "IiXdKhgQQBH2aJiPjTYIhy3A6L1qzcr75xRKs0Wcw/mwZWcG2fBjbuOJzflBp9vW2fLF8v7h1tLI4aVe", + "xxLTW5ICtmp2E6ATCFFKcQF8Ttge7HXNEJlRzVzAUQ50iQnSqOiJKWkY9TmwhOapIsB6uJBJun6MflC4", + "ddz6aj5zl7+6c3ncDRyDqztn7WHzf2tBHrz5/pTsO4apHthcRTt0kF0W0VpdAkUrRMZwM1uOwyZrnvEz", + "/hqWjDPz/fCMZ1TT/QVVLFX7lQL5kuaUp7C3EuTQ52S8ppqe8Z6kNVgxJ8iGIWW1yFlKzkOJuCFPWwWh", + "P8LZ2Uear8TZ2adetEBffnVTRfmLnSC5ZHotKp24HO5EwiWVMW+MqnN4cWRbpGFs1jlxY1tW7HLE3fhx", + "nkfLUnVz+frLL8vcLD8gQ+Uy1cyWEaWF9LKIEVAsNLi/74S7GCS99CaMSoEivxW0/Mi4/kSSs+rg4BmQ", + "VnLbb+7KNzS5LWGyIWMw17Brv8CFW70GNlrSpKSrmNfn7OyjBlri7qO8XKCSnecEu7WS6nzQMA7VLMDj", + "Y3gDLBzXThDCxZ3YXr5eT3wJ+Am3ENsYcaNxRd90v4I0uxtvVydVr7dLlV4n5mxHV6UMifudqct4rIyQ", + "5eMDFFthDKareLIAkq4hPXelKKAo9Xbe6u5DUJyg6VkHU7ZIiU2SwTR5tJkvgFRlRp0o3rUgLbZEgdY+", + "CPQDnMP2VDRZ9tdJUG7ny6qhg4qUGkiXhljDY+vG6G6+i3NCE1dZ+rRTzD/yZHFY04XvM3yQrch7B4c4", + "RhStfM4hRFAZQYQl/gEU3GChZrxbkX5seUbLWNibL1KwxPN+4po0ypMLSQpXgwZu+70ArHgkLhVZUCO3", + "C1esx+aEBlysUnQFAxJy6LaYmHnZcnXgILvuvehNJ5bdC61330RBto0Ts+YopYD5YkgFlZlOIJqfyXrG", + "nBMAa/A5hC1yFJPqiD3LdKhsuY9sUbEh0OIEDJI3AocHo42RULJZU+XrCGG5JX+WJ8kAf2CO81hli9Cg", + "H9RUqu3rnud2z2lPu3T1LXxRC1/JIlQtJ1SlMBI+hm3HtkNwFIAyyGFlF24be0Jp8q2bDTJw/H25zBkH", + "ksTCsahSImW2EFRzzbg5wMjHjwmxJmAyeYQYGQdgo8cXBybvRHg2+eo6QHKXL0792OgrDv6GeGqLDVA2", + "Io8oDQtnAw6k1HMA6mL46vurE0mKwxDG58SwuQuaGzbnNL5mkF6BBRRbO+UUXMzBoyFxdsQCby+Wa63J", + 
"XkU3WU0oM3mg4wLdCMQLsUlsbltU4l1sFobeozHbmGkXO5i2lMUDRRZig3EseLXYGOEdsAzD4cEINPwN", + "U0iv2G/oNrfAjE07Lk3FqFAhyThzXk0uQ+LElKkHJJghcnkYVKe4EQAdY0dT6tUpvzuV1LZ40r/Mm1tt", + "3lRd8ukwseM/dISiuzSAv74Vpq4n8b4rsUTtFO1wjHYpjUCEjBG9YRN9J03fFaQgB1QKkpYQlZzHXHdG", + "twG8cU58t8B4gQU7KN8+CmJ8JKyY0tAY0X1IwpcwT1KsEybEcnh1upRLs74PQtTXlC1Egx1by7z3FWCM", + "7JJJpRP0QESXYBr9oFCp/sE0jctK7SgiW1WTZXHegNOewzbJWF7F6dXN++NrM+27miWqaoH8lnEbG7LA", + "KrDR2MKRqW346eiC39oFv6V3tt5pp8E0NRNLQy7tOf4k56LDecfYQYQAY8TR37VBlI4wyCAltM8dA7nJ", + "Hk5MCd0bs772DlPmx94ZNuITU4fuKDtSdC2BwWB0FQzdREYsYToootrP1Rw4A7QsWbbp2ELtqIMaM72W", + "wcOXnupgAXfXDbYDA4HdM5YuIkG1q4w1Ar4th9sq8rE3CTOn7VpgIUMIp2LKF3PvI6pOJ9uFq1Og+Y+w", + "/cW0xeXMruaz25lOY7h2I+7A9ft6e6N4Rte8NaW1PCHXRDktSykuaJ44A/MQaUpx4UgTm3t79D2zurgZ", + "8/T7o7fvHfhX81maA5VJLSoMrgrblX+aVdmCZgMHxBeLNjqfl9mtKBlsfl2FKTRKX67BVd0NpNFeecDG", + "4RAcRWekXsYjhHaanJ1vxC5xxEcCZe0iacx31kPS9orQC8pybzfz0A5E8+DiptWYjHKFcIBbe1cCJ1ly", + "p+ymd7rjp6Ohrh08KZxrpC5wYUtfKyJ414WO4cXb0nndC4rF/axVpM+ceFWgJSFROUvjNla+UIY4uPWd", + "mcYEGw8Io2bEig24YnnFgrFMMzVB0e0AGcwRRaYvFDmEu4Vwz5pUnP2rAsIy4Np8kngqOwcVqyk6a3v/", + "OjWyQ38uN7C10DfD30bGCAtbdm88BGJcwAg9dT1wX9cqs19obZHCcOvGJXENh384Y+9KHHHWO/pw1GyD", + "F9dtj1v4Ckmf/xnCsOWodz+B4pVXV2FzYI7okyZMJUspfoe4nofqcSQVx5fyZBjl8jvwCTHnjXWneZml", + "mX1wu4ekm9AK1Q5SGKB63PnALYc1Bb2FmnK71faFgVasW5xgwqjSfTt+QzAO5l4kbk4vFzRWcNEIGQam", + "o8YB3LKla0F8Z497VSc22NlJ4Euu2zKbZV2CbLLk+hVbbigw2GkniwqNZIBUG8oEc+v/y5WIDFPxS8rt", + "QxWmnz1KrrcCa/wyvS6FxBoJKm72zyBlBc3jkkOW9k28GVsx+wZDpSAo8u8Gsu/bWCpyDyXU6ToONcdL", + "cjAPXhpxu5GxC6bYIgds8cS2WFCFnLw2RNVdzPKA67XC5k8nNF9XPJOQ6bWyiFWC1EIdqje182oB+hKA", + "kwNs9+QFeYhuO8Uu4JHBorufZ4dPXqDR1f5xELsA3BsaY9wkQ3byD8dO4nSMfks7hmHcbtS9aDq5fURr", + "mHGNnCbbdcpZwpaO1+0+SwXldAXxSJFiB0y2L+4mGtI6eOGZfQFGaSm2hOn4/KCp4U8D0eeG/VkwSCqK", + "gunCOXeUKAw9NRX87aR+OPucjCu+6uHyH9FHWnoXUUeJvF+jqb3fYqtGT/Y7WkAbrXNCbWGMnDXRC74k", + "NDn2dXewGm1dhNbixsxllo5iDgYzLEkpGdeoWFR6mfyVpGsqaWrY394QuMni2+eRCrztSpD8eoDfO94l", + "KJAXcdTLAbL3MoTrSx5ywZPCcJTsUZPtEZzKQWdu3G035DscH3qqUGZGSQbJrWqRGw049a0Ij48MeEtS", + "rNdzLXq89srunTIrGScPWpkd+vnDWydlFELGiuk1x91JHBK0ZHCBsXvxTTJj3nIvZD5pF24D/Zf1PHiR", + "MxDL/FmOKQIvRUQ79VWha0u6i1WPWAeGjqn5YMhg4Yaak3YF3vt3+nnjc9/5ZL54WPGPLrBfeEsRyX4F", + "A5sYVAePbmdWfw/835S8FJupm9o5IX5j/w1QE0VJxfLslyYrs1N8XVKerqP+rIXp+GvzTFS9OHs/RWvW", + "rSnnkEeHs7Lgr15mjEi1/xRT5ykYn9i2Ww/eLrezuAbwNpgeKD+hQS/TuZkgxGo74a0OqM5XIiM4T1Mg", + "reGe/XcEgmrP/6pA6VjyEH6wQV1otzT6ri02TIBnqC3ukTf2Jdg1kFb5G9TS6ioCrvStNahXZS5oNsdC", + "DqffH70ldlbbxz52Yosdr1BJaa+iY68Kaj9OCw/275bEUxemjzMeS21WrTRWo1KaFmUsOdS0OPUNMAM1", + "tOGj+hJiZ4+8Dt50tHmkZghDD0smC6Nx1aNZ2QVpwvxHa5quUSVrsdRhkp9epdtTpQpexqtfuKkLIuK5", + "M3C7Qt22TvecCKM3XzJlHwCFC2jno9bJ2c4k4PNT28uTFeeWUqKyx1jxgJug3QNnAzW8mT8KWQfx1xTI", + "bZH76xYtP8Fe0QJN3QrovSfxbHZj/XKJf9g5pVxwlmJ5pNjV7F4KneIDm1BJqmtk9UfcndDI4YrWXa/D", + "5BwWByuxe0boENc3wgdfzaZa6rB/anySck01WYFWjrNBNvfPBzg7IOMKXIFLfFc24JNCtvyKyCGjruqk", + "dmlck4wwLWZAsfvBfHvn1H6MFz9nHAV8hzYXmm4tdfiQoTZaAdNkJUC59bRzg9VH02cP02Qz2Hza8w8f", + "2mow6JYzy7Y+6P5QR94j7TzApu0r09bVCap/bkUg20mPytJNOvy4RFQe0Bs+iOCIZzHxrp0AufX44Wgj", + "5DYaSoL3qSE0uEBHNJR4D/cIo35oofOIjxFaLUVhC2JDuKIVDBiPgPGWcWie5YxcEGn0SsCNwfM60E+l", + "kmorAk7iaadAc/Q+xxia0s71cNuhurWEDEpwjX6O4W1s3ogYYBx1g0Zwo3xbvwZqqDsQJl7hM8QOkf0X", + "H1CqckJUhhkFnTcgYozDMG7/ykz7Augfg75MZLtrSe3Juc5NNJQkuqiyFeiEZlmsItVL/Erwqy8uBRtI", + "q7owZVmSFGuitIvE9KnNTZQKrqpiZC7f4JbTBY+qRKghfNjF7zAmoSy2+G+sKuPwzrggjGuHAfqIC/cK", + "xTXl5vZIPanX0HSi2CqZjgm8U26PjmbqmxF60/9OKT0XqzYg91waYozLhXsU42/fm4sjrJzQKzVqr5a6", + 
"sAEG3Qn/FB6qjXVKbpsr4VXWqz2Kzp76qa1xA8Two1lzvPwGQm+DghjU3q/WezgUgJsOxotT7TLXNCWj", + "LGgwG8hG79i8H4QibjkditixATvmc6/3NMmwJ2fj2KMI9aFgfYB+9HGmpKTMucYbZtHHrItIHzYXjh26", + "ZoO7i3Bx3oMWux8vhmKyiWJ8lQPB791nhs7BpbPX78zbtfqoJK8S2l/dM692vDoqPrr+fnQCTvVlzaCD", + "RttTV9LeLtPp5D/+YmPYCHAtt/8GJtzepvceaepLu9Y81TQhdTnkSeWRW7di/L2l4fpHTc0jpKdSKNaU", + "4I49xDQx1u0U31IK6jf1x/KBJheQaqy73jjQJcB1qjmZyYJH/r7WQRrQHeuQQFf+aKzmUb/Y+o4LrZeW", + "FKTW2ULVe9Mr/BzVYVLIlLAC7gq4e2evnXAwOex5uYRUs4sdaWD/WAMPUozm3ghh38sNssJYHUaLVUSu", + "b2JrABrL0hqFJ6jmd2twhpJAzmH7QJEWNUQrZ8/9vXKTAhKIAeQOiSERoWJhCNZq6jzDTNWUgVjwYT+2", + "OzSluAbf3AmSGm84lydJc+M2iY4jU8Yf/Zg0l+l6rfRfjAgdyhTrPxowLGy/xjcaVP0eni9AEaqk5Lhf", + "pu/SFbDApL3aUeBLWYDyv/kMXTtLzs4hfBUI3TKXVGa+RdTO4E0Yych91Evv8gXvu0Av65lZE6TZT+iJ", + "FH7CUNw0F0b+SobimdtxkeHj+Rj9YUt+Y8SngWsJ0r2ehsJeLhQkWvigzjE4xlDhHnq/CRLUYLFFC9xg", + "CZQPTY0XLDpLseQJdZEt4QKJhIIa6GRQiWV4zjFkv7LffQaLLzq605xS0+vuQvM+PJepHhJDql8Sd1vu", + "zoy5iWWFcW7falWxsizcoDI0/ZdSZFVqL+jwYNTWp8lFj0ZYSdQokfZX2dMvcywB9jbIMzyH7b4V/X2p", + "fr+VIfRWhLJrCPL6O7t9p0anuH6dr+wCVncC55c03MxnpRB5MmDrP+5Xl+megXOWnkNGzN3hA9sGni0h", + "D9HEXDtzL9dbX02lLIFD9miPkCNuQ4m9X7dd3rgzOX+gx+bf4KxZZQs+OZvS3hmPx2RiKSZ5S/7mhxnn", + "agoM87vlVHaQHbVLNgOVbSS9jDziszdVKe17WrsPqzREZaGISSk7nrCIeJH9mwj+hQ2fsaJFwdL+Kwo9", + "UWKJr1ElNDL4cc3A5623Alnn4Q5fY8g+05BSK8AZ5YGyvJLgMgfsszmdcvol1Wu/faZ5X8wyVzYoDOu3", + "JdmpskqBV07cmz3dcyHKJIcLaDkSXDpDlaagFLuA8L0f25lkACWq6t0LJGYhD+mqw0Pc2pPAxjoFu1Gm", + "YhFrd4rs4BgDj7EnljzUVBIyEF2wrKIt/KlbPMUy8W33ENaJJ+TahyO+uN7RcM+lJHUxt5gh06WT+C00", + "9Ns87dIRkIInWOoxB96irLFwG2FkELVxzN6siMYkeujbtCNHJnh2ZdzyEtbYaYJ3pXWNoKbmT113S39q", + "TuO0B2B8hx3ghQa54AkYLwk5cL5whO1PNVKCpQxSQmv5u2x8boEN+wq2yPJus0xb8cxGZ7X3JTDgqle1", + "XXToXaau+RQL6giORcb6ZleFrjKsVR4SjuHd8oLm9286xUpLR4gP985tfKGh7S1EskWlulmY21s6ae7A", + "znZ3U/P3aOr9B5g9ivo43VDO51HLCt4zhCyT5iQXzQt3OCS5xDGtU/TJt2ThUnRKCSlTrJO9eOnLKNem", + "JnxVoHn+eNy2tWudvwh9CzJeevGFvGtKsmqBN0YDYXNEvzBTGTi5USqPUV+PLCL4i/GosFbGjuvivOUt", + "tSWuO2GAQsIde02D+Kdrek37VUCmLs96Bs2lUynor3Pybd3CbeSibtY21eXfR+5Y3c4pnvp4OV7THUMF", + "LEKwljVBUMlvT34jEpb4WI0gjx/jBI8fz13T3562P5vj/Phx/Jnl+woSsDhyY7h5YxTzy1DYuA2NHshQ", + "6OxHxfJsF2G08k2a554wo+JXl3H2RR6c+tX6cvpH1T36cZ3wpO4mIGIia21NHkwVZJJMSCJx3SIpI2gV", + "SSvJ9BYL4XjTP/s1Gs7wpvYWOm9zXTrB3X1anENdSqnxLVbK365vBM3xPjIyNQaHaXxa9/sNLcoc3EH5", + "7sHiL/Dsr8+zg2dP/rL468E3Byk8/+bFwQF98Zw+efHsCTz96zfPD+DJ8tsXi6fZ0+dPF8+fPv/2mxfp", + "s+dPFs+/ffGXB4YPGZAtoDOfdj373/gqW3L0/jg5NcA2OKElq1/UNmTsn5ahKZ5EKCjLZ4f+p//fn7C9", + "VBTN8P7XmcvqnK21LtXh/v7l5eVe2GV/hc6ERIsqXe/7efovGb8/rjNzrGqJO2qTLrzJwJPCEX778P3J", + "KTl6f7wXvJR5ODvYO9h7gg8plsBpyWaHs2f4E56eNe77viO22eHnq/lsfw00R9+7+aMALVnqP6lLulqB", + "3HNv7JifLp7ue1Fi/7NzpFyNfdsPy1Xvf275m7IdPbGc7f5nX6VlvHWrDIrzswUdJkIx1mx/gcmfU5uC", + "ChoPLwUVDLX/GUXkwd/3XUZc/COqKvYM7HunbLxlC0uf9cbA2unhnuTf/4z/QZoMwLLxp31wbabYvn3M", + "sv/zlqfRH/sD9R5VWEE0yQ3TzejYs/F4CuwBOs6Qr+neK/hYodlaOfBwPD04+HM8iP/8moCO2lRaEaMR", + "YF7SjPh8Q5z7yf3NfcwxusPwOmJ5OULw/P4gaJfD/hG25J3Q5AdUOq7ms2/ucyeOuRGBaE6wZVDtpn9E", + "fubnXFxy39IIAVVRULmdfHw0XSl0OEh2QZ0IFryQMPuEHi6bg9o+akdZ1iN6KwyB0i9Fth3BWKFWpcsP", + "aZDWyIKMmyX0lcn+k5O9V+vPYUus/987CrjIYBZKaVpWcHVLnvCnfWD/K0/5ylOknf7Z/U1/AvKCpUBO", + "oSiFpJLlW/Izr7N7b8zjjrIsGlvZPvo7eZzRs1ORwQp44hhYshDZ1lcwbE1wDlbt6wky+5/bZcitCDjL", + "IAcdjRszv9cPRPYXsdiS49c9Ccd263Lel1tsGpT3Pvz42epNRilo1JouiD3OGFaW7vKmT3GuOUb2ZiEr", + "oYnFQuYW9ZURfWVEtxJuJh+eKfJNVPuwtTNo786e+zIYsQJIVPdBmaKjfNHjeycb39d/YvqOjVGFjAQf", + "bDJFF81fWcRXFnE7FvEGIocRT61jGhGiu54+NJVhYLBN1n3sB10HvnmVU0kUTDVzHOGIzrhxH1zjvpW6", + 
"KK6sTkd58x5aZAPvVs/7yvK+srw/D8s72s1o2oLJrTWjc9gWtKz1IbWudCYuA08CwmIje/p24Pr50dbf", + "+5eU6WQppMt4wmLY/c4aaL7vavl0fm3S53tfsCZA8GMYrhj9db9+ayD6seuEiH11RviBRr4Sm//cOCFD", + "px6y9tqd9/GTYctYydZx/cZHdbi/j1kEa6H0/uxq/rnjvwo/fqpJ4HN9VzhSuPp09f8CAAD//9hbr4Fb", + "yAAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go index 0d5e4ac890..92c4525894 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go @@ -177,174 +177,181 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9fXPcNpIw/lXwm7sqv9xwJL9lY1Wl7ifbSVYX23FZSnbvLD8JhuyZwYoEGACUZuLH", - "3/0pNAASJMEZjqTYmz3/ZWuIl0aj0eh3fJikoigFB67V5OjDpKSSFqBB4l80TUXFdcIy81cGKpWs1Ezw", - "yZH/RpSWjC8n0wkzv5ZUrybTCacFNG1M/+lEwm8Vk5BNjrSsYDpR6QoKagbWm9K0rkdaJ0uRuCGO7RAn", - "LyYft3ygWSZBqT6UP/J8QxhP8yoDoiXliqbmkyJXTK+IXjFFXGfCOBEciFgQvWo1JgsGeaZmfpG/VSA3", - "wSrd5MNL+tiAmEiRQx/O56KYMw4eKqiBqjeEaEEyWGCjFdXEzGBg9Q21IAqoTFdkIeQOUC0QIbzAq2Jy", - "9G6igGcgcbdSYJf434UE+B0STeUS9OT9NLa4hQaZaFZElnbisC9BVblWBNviGpfsEjgxvWbkVaU0mQOh", - "nLz97jl59OjRU7OQgmoNmSOywVU1s4drst0nR5OMavCf+7RG86WQlGdJ3f7td89x/lO3wLGtqFIQPyzH", - "5gs5eTG0AN8xQkKMa1jiPrSo3/SIHIrm5zkshISRe2Ib3+qmhPN/1l1JqU5XpWBcR/aF4FdiP0d5WNB9", - "Gw+rAWi1Lw2mpBn03WHy9P2HB9MHhx//7d1x8j/uzyePPo5c/vN63B0YiDZMKymBp5tkKYHiaVlR3sfH", - "W0cPaiWqPCMreombTwtk9a4vMX0t67ykeWXohKVSHOdLoQh1ZJTBgla5Jn5iUvHcsCkzmqN2whQppbhk", - "GWRTw32vVixdkZQqOwS2I1cszw0NVgqyIVqLr27LYfoYosTAdS184IL+eZHRrGsHJmCN3CBJc6Eg0WLH", - "9eRvHMozEl4ozV2l9rusyNkKCE5uPtjLFnHHDU3n+YZo3NeMUEUo8VfTlLAF2YiKXOHm5OwC+7vVGKwV", - "xCANN6d1j5rDO4S+HjIiyJsLkQPliDx/7voo4wu2rCQocrUCvXJ3ngRVCq6AiPk/INVm2//r9MfXREjy", - "CpSiS3hD0wsCPBXZ8B67SWM3+D+UMBteqGVJ04v4dZ2zgkVAfkXXrKgKwqtiDtLsl78ftCASdCX5EEB2", - "xB10VtB1f9IzWfEUN7eZtiWoGVJiqszpZkZOFqSg628Opw4cRWiekxJ4xviS6DUfFNLM3LvBS6SoeDZC", - "htFmw4JbU5WQsgWDjNSjbIHETbMLHsb3g6eRrAJw/CCD4NSz7ACHwzpCM+bomi+kpEsISGZGfnKcC79q", - "cQG8ZnBkvsFPpYRLJipVdxqAEafeLl5zoSEpJSxYhMZOHToM97BtHHstnICTCq4p45AZzotACw2WEw3C", - "FEy4XZnpX9FzquCrx0MXePN15O4vRHfXt+74qN3GRok9kpF70Xx1BzYuNrX6j1D+wrkVWyb2595GsuWZ", - "uUoWLMdr5h9m/zwaKoVMoIUIf/EotuRUVxKOzvl98xdJyKmmPKMyM78U9qdXVa7ZKVuan3L700uxZOkp", - "Ww4gs4Y1qk1ht8L+Y8aLs2O9jioNL4W4qMpwQWlLK51vyMmLoU22Y+5LmMe1KhtqFWdrr2ns20Ov640c", - "AHIQdyU1DS9gI8FAS9MF/rNeID3Rhfzd/FOWuemty0UMtYaO3X2LtgFnMzguy5yl1CDxrftsvhomAFZL", - "oE2LA7xQjz4EIJZSlCA1s4PSskxykdI8UZpqHOnfJSwmR5N/O2iMKwe2uzoIJn9pep1iJyOPWhknoWW5", - "xxhvjFyjtjALw6DxE7IJy/ZQImLcbqIhJWZYcA6XlOtZo4+0+EF9gN+5mRp8W1HG4rujXw0inNiGc1BW", - "vLUN7ygSoJ4gWgmiFaXNZS7m9Q93j8uywSB+Py5Liw8UDYGh1AVrprS6h8unzUkK5zl5MSPfh2OjnC14", - "vjGXgxU1zN2wcLeWu8Vqw5FbQzPiHUVwO4Wcma3xaDAy/G1QHOoMK5EbqWcnrZjGf3VtQzIzv4/q/Ocg", - "sRC3w8SFWpTDnFVg8JdAc7nboZw+4Thbzowcd/tej2zMKHGCuRatbN1PO+4WPNYovJK0tAC6L/YuZRw1", - "MNvIwnpDbjqS0UVhDs5wQGsI1bXP2s7zEIUESaEDw7NcpBd/pWp1C2d+7sfqHz+chqyAZiDJiqrVbBKT", - "MsLj1Yw25oiZhqi9k3kw1axe4m0tb8fSMqppsDQHb1wssajHfsj0QEZ0lx/xPzQn5rM524b122Fn5AwZ", - "mLLH2XkQMqPKWwXBzmQaoIlBkMJq78Ro3XtB+byZPL5Po/boW2swcDvkFoE7JNa3fgyeiXUMhmdi3TsC", - "Yg3qNujDjINipIZCjYDvhYNM4P479FEp6aaPZBx7DJLNAo3oqvA08PDGN7M0ltfjuZDX4z4dtsJJY08m", - "1IwaMN9pB0nYtCoTR4oRm5Rt0BmoceFtZxrd4WMYa2HhVNM/AAvKjHobWGgPdNtYEEXJcrgF0l9Fmf6c", - 
"Knj0kJz+9fjJg4e/PHzylSHJUoqlpAWZbzQoctfpZkTpTQ73+itD7ajKdXz0rx57K2R73Ng4SlQyhYKW", - "/aGsddOKQLYZMe36WGujGVddAzjmcJ6B4eQW7cQa7g1oL5gyElYxv5XNGEJY1sySEQdJBjuJad/lNdNs", - "wiXKjaxuQ5UFKYWM2NfwiGmRijy5BKmYiLhK3rgWxLXw4m3Z/d1CS66oImZuNP1WHAWKCGXpNR/P9+3Q", - "Z2ve4GYr57frjazOzTtmX9rI95ZERUqQiV5zksG8WrY0oYUUBaEkw454R38P+nTDU7Sq3QaRDqtpBeNo", - "4lcbngY6m9moHLJlaxNurpt1seLtc3aqOyoCjkHHS/yMav0LyDW9dfmlO0EM9ud+Iy2wJDMNUQt+yZYr", - "HQiYb6QQi9uHMTZLDFD8YMXz3PTpC+mvRQZmsZW6hcu4GayhdbOnIYXTuag0oYSLDNCiUqn4NT3glkd/", - "ILoxdXjz65WVuOdgCCmllVltVRJ00vU4R9Mxoaml3gRRowa8GLX7ybay01mXby6BZkarB07E3LkKnBMD", - "F0nRw6j9ReeEhMhZasFVSpGCUpAlzkSxEzTfzjIRvQVPCDgCXM9ClCALKm8M7MXlTjgvYJOgP1yRuz/8", - "rO59Bni10DTfgVhsE0NvrfA5f1Af6nHTbyO47uQh2VEJxPNco10aBpGDhiEU7oWTwf3rQtTbxZuj5RIk", - "emb+UIr3k9yMgGpQ/2B6vym0VTkQ5eUUnTNWoN2OUy4UpIJnKjpYTpVOdrFl06iljZkVBJwwxolx4AGh", - "5CVV2noTGc/QCGKvE5zHCihmimGABwVSM/LPXhbtj52ae5CrStWCqarKUkgNWWwNHNZb5noN63ousQjG", - "rqVfLUilYNfIQ1gKxnfIsiuxCKK6Nro7d3t/cWiaNvf8JorKFhANIrYBcupbBdgNI10GAGGqQbQlHKY6", - "lFOH10wnSouyNNxCJxWv+w2h6dS2PtY/NW37xEV1c29nAszs2sPkIL+ymLUxTitqVGgcmRT0wsgeqBBb", - "t2cfZnMYE8V4Csk2yjfH8tS0Co/AzkNalUtJM0gyyOmmP+hP9jOxn7cNgDveKD5CQ2LjWeKb3lCyDx/Y", - "MrTA8VRMeCT4haTmCBrNoyEQ13vHyBng2DHm5OjoTj0UzhXdIj8eLttudWREvA0vhTY7bskBIXYMfQy8", - "A2ioR74+JrBz0qhl3Sn+G5SboBYj9p9kA2poCc34ey1gwJjmwoCD49Lh7h0GHOWag1xsBxsZOrEDlr03", - "VGqWshJVnR9gc+uaX3eCqL+JZKApyyEjwQerBZZhf2IDMbpjXk8THGWE6YPfs8JElpMzhRJPG/gL2KDK", - "/cZG+J0FcYG3oMpGRjXXE+UEAfVxQ0YCD5vAmqY63xg5Ta9gQ65AAlHVvGBa25DNtqarRZmEA0QN3Ftm", - "dN4cGx3nd2CMe+kUhwqW19+K6cSqBNvhO+voBS10OFWgFCIfYTzqISMKwSjHPymF2XXmIoR9GKmnpBaQ", - "jmmjK6++/e+oFppxBeS/RUVSylHjqjTUIo2QKCeg/GhmMBJYPadz8TcYghwKsIokfrl/v7vw+/fdnjNF", - "FnDlw+pNwy467t9HM84boXTrcN2CqdAct5PI9YGWf7z3XPBCh6fsdjG7kcfs5JvO4LW7wJwppRzhmuXf", - "mAF0TuZ6zNpDGhnnXsdxRxn1g6Fj68Z9P2VFld/Whi8oyysJw96x8/N3i+L8/D35zrb0ju2pJ/IQHVdN", - "WsTC3UaVxNAakjOj30pBMyMgRG37uEi+TOrgTBUFp1AGnL+5c0j5ppPINxYGMoeUVjYq2XFtB0ETHqpm", - "EXmxs7tdFEYXMtI8XuXaXtohVpdSVCVR9bZbKtBUwx9jam6GjkHZnziIDWo+DoUHGTUx39zCbW0HIhJK", - "CQp5a2heUfarWIT5N475qo3SUPQt0LbrLwP62dtBPUfwnHFICsFhE005ZRxe4cdYb8vfBzrjTTvUtys8", - "t+DvgNWeZww13hS/uNsBQ3tTx8XdwuZ3x+04H8LMIzSuQV4SStKcoelNcKVllepzTlG5Dw5bJH7AqzHD", - "5p7nvkncvhQx/7ihzjnF2JFa5Y/yxQVE+PJ3AN7qo6rlEpTuSIkLgHPuWjFOKs40zlWY/UrshpUg0Yk/", - "sy0LuiELmqN16neQgswr3WaumCChNMtz5wkx0xCxOOdUkxwMV33F+Nkah/OeRE8zHPSVkBc1FmbR87AE", - "DoqpJB7n8L39iiFobvkrF46G2ar2s7Wdm/GbLIoN6v5NBub/ufufR++Ok/+hye+HydP/OHj/4fHHe/d7", - "Pz78+M03/7f906OP39z7z3+P7ZSHPRa+7yA/eeF0ipMXKDg2xvMe7J/McFownkSJLHQRd2iL3DXiryeg", - "e22zgl7BOddrbgjpkuYso/p65NBlcb2zaE9Hh2paG9ExI/i17imO3YDLkAiT6bDGa1/j/dCgeKIMenNc", - "7guel0XF7VZWynmUMA7ch2iIxbROhrJFEI4IZsqsqI8vcn8+fPLVZNpkuNTfJ9OJ+/o+QsksW8fymDJY", - "x6Rsd0DwYNxRpKQbBTrOPRD2aDSKdYqHwxZg1DO1YuWn5xRKs3mcw/noWqetr/kJt2Gv5vygb2jjTM5i", - "8enh1hIgg1KvYsnRLUkBWzW7CdDx15dSXAKfEjaDWVdbzpagfFxMDnSBSbro3xBjsgXqc2AJzVNFgPVw", - "IaNU0hj9oHDruPXH6cRd/urW5XE3cAyu7py1I8j/rQW58/23Z+TAMUx1x6bU2aGDJKiIFcrF+bciOQw3", - "syUhbE7hOT/nL2DBODPfj855RjU9mFPFUnVQKZDPaE55CrOlIEc+deAF1fSc9yStwaotQdIGKat5zlJy", - "EUrEDXnaTPyo2kjzpTCKY9ep3Zdf3VRR/mInSK6YXolKJy7VOJFwRWXMaaDqVFMc2RYK2DbrlLixLSt2", - "qcxu/DjPo2Wpuiln/eWXZW6WH5ChcglVZsuI0kJ6WcQIKBYa3N/Xwl0Mkl75PPVKgSK/FrR8x7h+T5Lz", - "6vDwEZBWDtav7so3NLkpoWWvvFZKXNdWiQu3eg2staRJSZcDRgMNtMTdR3m5QCU7zwl2a+V++dhWHKpZ", - "gMfH8AZYOPbOY8HFndpevmZMfAn4CbcQ2xhxo/GYXne/gmywa29XJ6Ost0uVXiXmbEdXpQyJ+52pS0ks", - "jZDl3diKLTFU0FXdmANJV5BeQIYFAKAo9Wba6u4jJZyg6VkHU7ZQhs3lwGxuNO3OgVRlRp0o3jEoGQwr", - 
"0NrHKr6FC9iciSYZfJ882nZapxo6qEipgXRpiDU8tm6M7ua7cBy0dZWlz47ENBlPFkc1Xfg+wwfZiry3", - "cIhjRNFKOxxCBJURRFjiH0DBNRZqxrsR6ceWZ7SMub35InU1PO8nrkmjPLnImXA1mE1pvxeAVXfElSJz", - "auR24QrG2NTFgItVii5hQEIOresjEwRbFnkcZNe9F73pxKJ7ofXumyjItnFi1hylFDBfDKmgMtOJl/Iz", - "WQeONaASrAPnEDbPUUyqA8ss06Gy5eWwha2GQIsTMEjeCBwejDZGQslmRZWvZYMlf/xZHiUD/IGpuNsK", - "MJwEoT5BXZ/a8O15bvec9rRLV4bB117wBRdC1XJE8QQj4WN0cWw7BEcBKIMclnbhtrEnlCYtuNkgA8eP", - "i0XOOJAkFjVElRIps8WImmvGzQFGPr5PiDUBk9EjxMg4ABsdkzgweS3Cs8mX+wDJXVoz9WOjSzP4G+IZ", - "GDaO1og8ojQsnPGBiG3PAagLNavvr07AIw5DGJ8Sw+YuaW7YnNP4mkF6dQBQbO1k/TvX+L0hcXaLBd5e", - "LHutyV5F11lNKDN5oOMC3RaI52Kd2BSsqMQ7X88NvUdDizEhLHYwbcWFO4rMxRrDLfBqsaGsO2AZhsOD", - "EWj4a6aQXrHf0G1ugdk27XZpKkaFCknGmfNqchkSJ8ZMPSDBDJHL3aCIwrUA6Bg7mnKjTvndqaS2xZP+", - "Zd7catOmOJDP2ogd/6EjFN2lAfz1rTB12YM3XYklaqdoRw20Kz4EImSM6A2b6Dtp+q4gBTmgUpC0hKjk", - "Iua6M7oN4I1z6rsFxgusK0H55l4QiiJhyZSGxohuLmbvFfrU5kmK5ayEWAyvTpdyYdb3Voj6mrL1UrBj", - "a5mffAUYyrlgUukEPRDRJZhG3ylUqr8zTeOyUjvYxVZ2ZFmcN+C0F7BJMpZXcXp18/7wwkz7umaJqpoj", - "v2WcAE1XZI6VSKMhcFumtlGSWxf80i74Jb219Y47DaapmVgacmnP8Sc5Fx3Ou40dRAgwRhz9XRtE6RYG", - "GWQu9rljIDfZw4mZi7Nt1tfeYcr82DvDRnz+5NAdZUeKriUwGGxdBUM3kRFLmA4KefZTCgfOAC1Llq07", - "tlA76qDGTPcyePgKSR0s4O66wXZgILB7xrIaJKh2MaxGwLclWVu1KGajMHPWLlkVMoRwKqZ8QfE+ouqs", - "p124OgOa/wCbn01bXM7k43RyM9NpDNduxB24flNvbxTP6Jq3prSWJ2RPlNOylOKS5okzMA+RphSXjjSx", - "ubdHf2JWFzdjnn17/PKNA//jdJLmQGVSiwqDq8J25Z9mVbbu1sAB8QWLjc7nZXYrSgabXxcLCo3SVytw", - "xWEDabRXxa5xOARH0RmpF/EIoZ0mZ+cbsUvc4iOBsnaRNOY76yFpe0XoJWW5t5t5aAeieXBx40ohRrlC", - "OMCNvSuBkyy5VXbTO93x09FQ1w6eFM61pXxtYSs0KyJ414VuREg0xyGpFhRr0FmrSJ858apAS0KicpbG", - "bax8jmG33PrOTGOCjQeEUTNixQZcsbxiwVimmRqh6HaADOaIItPXMxzC3Vy4pzUqzn6rgLAMuDafJJ7K", - "zkHFon/O2t6/To3s0J/LDWwt9M3wN5ExwvqL3RsPgdguYISeuh64L2qV2S+0tkiZHwKXxB4O/3DG3pW4", - "xVnv6MNRsw1eXLU9buFLGH3+ZwjDVk3e/QyHV15dIciBOaLPajCVLKT4HeJ6HqrHkYwRX3GSYZTL78BH", - "hJk31p3mdZBm9sHtHpJuQitUO0hhgOpx5wO3HJa+8xZqyu1W2yr3rVi3OMGEUaUHdvyGYBzMvUjcnF7N", - "aawuoBEyDEzHjQO4ZUvXgvjOHvfO7M9cEdAZCXzJdVtmk4FLkE0yV7+wyDUFBjvtaFGhkQyQakOZYGr9", - "f7kSkWEqfkW5fSzB9LNHyfVWYI1fpteVkJjKr+Jm/wxSVtA8Ljlkad/Em7Els08FVAqCWvRuIPvGiqUi", - "V8/futgb1JwsyOE0eO3C7UbGLpli8xywxQPbYk4VcvLaEFV3McsDrlcKmz8c0XxV8UxCplfKIlYJUgt1", - "qN7Uzqs56CsATg6x3YOn5C667RS7hHsGi+5+nhw9eIpGV/vHYewCcG+CbOMm2SJMfInTMfot7RiGcbtR", - "Z9GsZ/uQ0zDj2nKabNcxZwlbOl63+ywVlNMlxCNFih0w2b64m2hI6+CFZ/YVEqWl2BA2kIIEmhr+NBB9", - "btifBYOkoiiYLpxzR4nC0FNTaN5O6oezT5q4GqEeLv8RfaSldxF1lMhPazS191ts1ejJfk0LaKN1Sqit", - "35CzJnrBVy4mJ748DBZNrWulWtyYuczSUczBYIYFKSXjGhWLSi+Sr0m6opKmhv3NhsBN5l89jhSKbRcs", - "5PsB/snxLkGBvIyjXg6QvZchXF9ylwueFIajZPeabI/gVA46c+NuuyHf4fahxwplZpRkkNyqFrnRgFPf", - "iPD4lgFvSIr1evaix71X9skps5Jx8qCV2aGf3r50UkYhZKzmW3PcncQhQUsGlxi7F98kM+YN90Lmo3bh", - "JtB/Xs+DFzkDscyf5Zgi8ExEtFNfvLi2pLtY9Yh1YOiYmg+GDOZuqClpF4r99E4/b3zuO5/MFw8r/tEF", - "9jNvKSLZr2BgE4Mi1tHtzOrvgf+bkmdiPXZTOyfEb+w/AWqiKKlYnv3cZGV2aoRLytNV1J81Nx1/aV4z", - "qhdn76doabUV5Rzy6HBWFvzFy4wRqfYfYuw8BeMj23bLltvldhbXAN4G0wPlJzToZTo3E4RYbSe81QHV", - "+VJkBOdp6ng13LNf7j4oSvxbBUrHkofwgw3qQrul0XdtTVwCPENtcUa+t6+RroC0qrSglmbz4yHzFVqt", - "Qb0qc0GzKTHjnH17/JLYWW0f+yaHrcm7RCWlvYqOvSooUTguPNg/rxFPXRg/zvZYarNqpbFoktK0KGPJ", - "oabFmW+AGaihDR/VlxA7M/LCao7K6yV2EkMPCyYLo3HVo1nZBWnC/Edrmq5QJWux1GGSH19M2lOlCh5w", - "qx9iqev24bkzcLt60rac9JQIozdfMWUfoYRLaOej1snZziTg81Pby5MV55ZSorLHtuIB10G7B84Gangz", - "fxSyDuL3FMhtLfZ9a2ufYq9oHaFuoe7ey202u7F+YMM/LpxSLjhLsYpP7Gp2D1qO8YGNKHjUNbL6I+5O", - "aORwRcuD12FyDouDBcM9I3SI6xvhg69mUy112D81vpy4oposQSvH2SCb+ir3zg7IuAJXhxHfNg34pJAt", - 
"vyJyyKirOqldGnuSEabFDCh235lvr53aj/HiF4yjgO/Q5kLTraUO39vTRitgmiwFKLeedm6wemf6zDBN", - "NoP1+5l/nw/HsG45s2zrg+4Pdew90s4DbNo+N21tKZPm51YEsp30uCzdpMNvIETlAb3mgwiOeBYT79oJ", - "kFuPH462hdy2hpLgfWoIDS7REQ0l3sM9wqjfA+i8NWOEVktR2ILYEK5oBQPGI2C8ZBya1yMjF0QavRJw", - "Y/C8DvRTqaTaioCjeNoZ0By9zzGGprRzPdx0qM4GI0pwjX6O4W1snjIYYBx1g0Zwo3xTP1ppqDsQJp7j", - "a7kOkf2HCVCqckJUhhkFnacKYozDMG5fCql9AfSPQV8mst21pPbk7HMTDSWJzqtsCTqhWRari/kMvxL8", - "6gtFwRrSqq6fWJYkxZoo7SIxfWpzE6WCq6rYMpdvcMPpgrc/ItQQvj/idxiTUOYb/DdWPHB4Z1wQxt5h", - "gD7iwj2WsKfc3B6pJ/Uamk4UWybjMYF3ys3R0Ux9PUJv+t8qpedi2QbkE5eG2Mblwj2K8bdvzcURVk7o", - "VcS0V0td2ACD7oR/sQ3Vxjolt82V8CrrlchEZ09d8267AWL4bacpXn4DobdBQQxq71frPRwKwE0H48Wp", - "dplrmpKtLGgwG8hG79i8H4QibjkditixATvmc6/3OMmwJ2fj2FsR6kPB+gD94ONMSUmZc403zKKPWReR", - "Pmwu3Hbomg3uLsLFeQ9a7H64HIrJJorxZQ4Ev3dfw7kAl85eP4du1+qjkrxKaH91r5Ha8eqo+Oj6+9EJ", - "ONXnNYMOGm3PXOV1u0ynk//ws41hI8C13PwTmHB7m957S6gv7VrzVNOE1FV7R1Xxbd2K8WeBhusfNTWP", - "kJ5KoVhTKTr2XtDIWLczfPInqN/UH8sHmlxCqrE8eONAlwD7VHMykwVv0X2pgzSgO9Yhga780baaR/2a", - "4DsutF5aUpBaZ+spz8ZX+Dmuw6SQKeFrcEvg7jm4dsLB6LDnxQJSzS53pIH9bQU8SDGaeiOEfdY1yApj", - "dRgtVhHZ38TWALQtS2srPEE1vxuDM5QEcgGbO4q0qCFa4Hnq75XrFJBADCB3SAyJCBULQ7BWU+cZZqqm", - "DMSCD/ux3aEpxTX4NEyQ1HjNuTxJmhu3SXTcMmX8bYpRc5mue6X/YkToUKZYv7b9sLD9Ap8SUPWzbb4A", - "RaiSkpNI9WdXwAKT9mpHgS9lAcr/5jN07Sw5u4Dw8Rp0y1xRmfkWUTuDN2EkW+6jXnpXtGQ1VTaI0vnB", - "6yDNfkJPpPAThuKmucByz0PxzO24yPCNd4z+wOsAy08jXAuQ7pEvFPZyoSDRwgd1boNjGyrce+TXQYIa", - "LLZogRssgfK2qfGCRWcpljyhLrIlXCCRUFADnQwqsQzPuQ3Zz+13n8Hii47uNKfU9JrsLKXiw3OZ6iEx", - "pPoFcbfl7syY61hWGOf2SVEVK8vCDSpD038pRVal9oIOD0ZtfRpd9GgLK4kaJdL+Knv6ZY4lwF4GeYYX", - "sDmwon+6orypxdY+1laEsmsI8vo7u32rRqe4fp0v7QKWtwLn5zTcTCelEHkyYOs/6VeX6Z6BC5ZeQEbM", - "3eED2wZe1yB30cRcO3OvVhtfTaUsgUN2b0bIMbehxN6v2y5v3Jmc39Hb5l/jrFllCz45m9LsnMdjMrEU", - "k7whf/PDbOdqCgzzu+FUdpAdtUvWA5VtJL2KvDUzG6uU9j2t3fc/GqKyUMSklGsmso863327UoT0g6cP", - "tms/YZ2LJoBOWvMkSkvNcxBt4eVVY3Uc9wiD77ADvFApDp5h8NzIgfOZo9xe1UgJljJICa3l79Kz3QIb", - "vhRskcK0CLNMW3XIRki09yUwoqjntW0ijue+CQOLWgiOhX76pg+F5mqsFxwSjjmX8pLmn958gdVOjhEf", - "7knE+EJD/TdEskWlul6oyUs6au5A1729qfkbNLf8DcweRf0Mbihnd6yfv/DWWaxrR3OSi+YxJBySXOGY", - "1jHx4Csyd2HypYSUKdbJILrypUxrdQ8rezcvZW7XL3et82ehb0DGTkEQJXndlEXUAu+HBsLmiH5mpjJw", - "cqNUHqO+HllE8BfjUWG++o7r4qLlsbBlZjuhOELCLXsughiEPT0X/Uz8scuz1nlz6VQK+uscfVu3cBu5", - "qJu1jXW79ZG7rXbeGG9ZvCSm6Y7uOosQrCdLEFTy64NfiYQFPhghyP37OMH9+1PX9NeH7c/mON+/H3+R", - "81M56iyO3Bhu3hjF/DwUumnDEweihDv7UbE820UYrZjv5skVjGr+xWV9fJZHX36x9tT+UXWF9/cJEehu", - "AiImstbW5MFUQTT3iEBu1y0Sto2aSVpJpjdYjMKb39gvUZfi97XF3nl86vRld/dpcQF1OZPGvl8pf7t+", - "L2iO95GRqTFAQ+MrjN+uaVHm4A7KN3fmf4FHXz/ODh89+Mv868Mnhyk8fvL08JA+fUwfPH30AB5+/eTx", - "ITxYfPV0/jB7+Pjh/PHDx189eZo+evxg/virp3+5Y/iQAdkCOvGpj5O/48tIyfGbk+TMANvghJasfnzV", - "kLF/3oGmeBKhoCyfHPmf/n9/wmapKJrh/a8Tl1k1WWldqqODg6urq1nY5WCJBr1EiypdHfh5+o9evjmp", - "o+OtKxh31AY+G1LATXWkcIzf3n57ekaO35zMGoKZHE0OZ4ezB/iYWQmclmxyNHmEP+HpWeG+Hzhimxx9", - "+DidHKyA5uj/Mn8UoCVL/Sd1RZdLkDP3zoX56fLhgRclDj44Y+bHbd8OwpKxBx9aNt9sR08sKXnwwVdK", - "2N66VYrA2brNcpex+iHfQ/DmZ1DPumVrm2+8uXZKVP2yeSmZMCdpaq7FDFIJFOleSIxOb14PdfoL2Kfc", - "Xx3/Ha3tr47/Tr4hh1OXtKBQ1YhNb+0ZNQmcZBbsyOu2zzbHtfcgqKN29C72IG3s/Q08QoY+AgqvR2w4", - "mJYVhPW9Gn5seOxh8vT9hydff4zJef133zySBl6f1cJXE0CkFXT9zRDK1vZ04Bp+q0BumkUUdD0JAe77", - "YCJPwC3YspKdR+/rcCX3DANT5L9Of3xNhCROr31D04swfiEGjrvPQoh8VWwXDV+oZdkOHa1x+B7TixEK", - "PMUPDw+/vJH8v+ON5Glraz2NfNndLy9g/2u8gP14T1a21TzcCkAddXb2Ga63Wa/oui4jQwkXPOFY3/8S", - "SKDnPT588Kdd4QnHCBcjaxIrS3+cTp78ibfshBupheYEW9rVPPrTruYU5CVLgZxBUQpJJcs35Cde5ycG", - 
"NYn67O8nfsHFFfeIMGpiVRRUbpyETGueU/EgY3Qr/+k5BxspGrkoXSr0I6H8OWm9Y8OXk/cfvYA/UmvY", - "1uxgjgUTxjYFFTQeVj3QIaAOPqBJe/D3A5dFHv+IrgWrsx74QKZ4y5ZW80GvDaydHinV6aoqDz7gf1CH", - "DMCyORt9cG129YF9ALr/84an0R/7A3UfIor9fPChXQi7hVC1qnQmroK+aDS3Hp/+fPXTMK2/D64o00ZC", - "cNFoWKis31kDzQ9cnmXn1ya1ofcF8zWCHzsyRSlsKnxbV3tLr0IJxUoLoPQzkW22cJt1Mmccj2DIIhpT", - "mP3Y1w/6r96uwNb39N7EiACmRfCOvhY+I7mn9X28ofLRkRvXJxFfEYKJinQ/sMkcptlOBwKOu+eDwEHZ", - "SJR0lfIP+/6RUkkPomc0I752QkJe0dxsOGTk2Mm+LWz80RLF5xcBPvOd/cku2Wf+8ClCMXSjpR21Qr/I", - "UoqmvJw7qGNuVKNCGQawBJ44FpTMRbbxVVAlvdJrG+jRZW4HdTnb6MdbsLH9cxvWdtnTvpixvpixvhg6", - "vpixvuzuFzPWFyPPFyPP/1ojzz6WnZgM6Swbw6Ik1oujrXmt4kabVKWaxYfNpoTpWuDqlwZlekbIGSaC", - "UHNLwCVImmP5dBVkdhUYEqiqNAXIjs550oLEBt6Zie82/7URj+516sN73T5KszwPeXO/Lwqz+MnWTPiG", - "nE/OJ72RJBTiEjKbXxoGxtteO4f9/+pxf+zl2GBqIr6J6kP5iaoWC5Yyi/Jc8CWhS9FE6xq+TbjALyAN", - "cDZTmTA9danvTJErs3hXta8dv98Wy/sSwEmzhTu93R1yiTu6DeHt6eX+jzEu7n9dEfy6SUU35ZJbx+6x", - "zC8s41OwjM/ONP7s/sPA8PcvKUM+Pnz8p11QaCZ+LTT5DsPMbyZr1WVOY9nY15WifM1cb6hrQlXD0E+8", - "Iuugz3fvzUWAbw6427OJZDw6OMB8z5VQ+mBi7rZ2lGP48X0Nsy9GPSklu8TaVu8//r8AAAD//xYAv/aJ", - "0AAA", + "H4sIAAAAAAAC/+x9/XPcNpLov4I3d1W2dUNJ/kh2rarUPdlOsrrYjstSsrtn+WUxZM8MViTABUDNTPz8", + "v1+hAZAgCc5wJMXe3PonW0N8NBqNRn/jwyQVRSk4cK0mJx8mJZW0AA0S/6JpKiquE5aZvzJQqWSlZoJP", + "Tvw3orRkfDGZTpj5taR6OZlOOC2gaWP6TycS/lExCdnkRMsKphOVLqGgZmC9KU3reqR1shCJG+LUDnH2", + "YvJxyweaZRKU6kP5I883hPE0rzIgWlKuaGo+KbJiekn0kiniOhPGieBAxJzoZasxmTPIM3XoF/mPCuQm", + "WKWbfHhJHxsQEyly6MP5XBQzxsFDBTVQ9YYQLUgGc2y0pJqYGQysvqEWRAGV6ZLMhdwBqgUihBd4VUxO", + "3k0U8Awk7lYK7Br/O5cAv0KiqVyAnryfxhY31yATzYrI0s4c9iWoKteKYFtc44JdAyem1yF5VSlNZkAo", + "J2+/e04eP3781CykoFpD5ohscFXN7OGabPfJySSjGvznPq3RfCEk5VlSt3/73XOc/9wtcGwrqhTED8up", + "+ULOXgwtwHeMkBDjGha4Dy3qNz0ih6L5eQZzIWHkntjGd7op4fyfdVdSqtNlKRjXkX0h+JXYz1EeFnTf", + "xsNqAFrtS4MpaQZ9d5w8ff/h4fTh8cd/e3ea/Lf786vHH0cu/3k97g4MRBumlZTA002ykEDxtCwp7+Pj", + "raMHtRRVnpElvcbNpwWyeteXmL6WdV7TvDJ0wlIpTvOFUIQ6MspgTqtcEz8xqXhu2JQZzVE7YYqUUlyz", + "DLKp4b6rJUuXJKXKDoHtyIrluaHBSkE2RGvx1W05TB9DlBi4boQPXNA/LzKade3ABKyRGyRpLhQkWuy4", + "nvyNQ3lGwguluavUfpcVuVgCwcnNB3vZIu64oek83xCN+5oRqggl/mqaEjYnG1GRFW5Ozq6wv1uNwVpB", + "DNJwc1r3qDm8Q+jrISOCvJkQOVCOyPPnro8yPmeLSoIiqyXopbvzJKhScAVEzP4OqTbb/l/nP74mQpJX", + "oBRdwBuaXhHgqcggOyRnc8KFDkjD0RLi0PQcWoeDK3bJ/10JQxOFWpQ0vYrf6DkrWGRVr+iaFVVBeFXM", + "QJot9VeIFkSCriQfAsiOuIMUC7ruT3ohK57i/jfTtmQ5Q21MlTndIMIKuv7meOrAUYTmOSmBZ4wviF7z", + "QTnOzL0bvESKimcjxBxt9jS4WFUJKZszyEg9yhZI3DS74GF8P3ga4SsAxw8yCE49yw5wOKwjNGNOt/lC", + "SrqAgGQOyU+OueFXLa6A14ROZhv8VEq4ZqJSdacBGHHq7RI4FxqSUsKcRWjs3KHDMBjbxnHgwslAqeCa", + "Mg6ZYc4ItNBgmdUgTMGE2/Wd/i0+owq+fjJ0xzdfR+7+XHR3feuOj9ptbJTYIxm5Os1Xd2DjklWr/wj9", + "MJxbsUVif+5tJFtcmNtmznK8if5u9s+joVLIBFqI8HeTYgtOdSXh5JIfmL9IQs415RmVmfmlsD+9qnLN", + "ztnC/JTbn16KBUvP2WIAmTWsUYULuxX2HzNenB3rdVSveCnEVVWGC0pbiutsQ85eDG2yHXNfwjyttd1Q", + "8bhYe2Vk3x56XW/kAJCDuCupaXgFGwkGWprO8Z/1HOmJzuWv5p+yzE1vXc5jqDV07K5kNB84s8JpWeYs", + "pQaJb91n89UwAbCKBG1aHOGFevIhALGUogSpmR2UlmWSi5TmidJU40j/LmE+OZn821Fjfzmy3dVRMPlL", + "0+scOxmR1YpBCS3LPcZ4Y0QftYVZGAaNn5BNWLaHQhPjdhMNKTHDgnO4plwfNipLix/UB/idm6nBt5V2", + "LL47KtggwoltOANlJWDb8J4iAeoJopUgWlEgXeRiVv9w/7QsGwzi99OytPhA6REYCmawZkqrB7h82pyk", + "cJ6zF4fk+3BsFMUFzzfmcrCihrkb5u7WcrdYbVtya2hGvKcIbqeQh2ZrPBqMmH8XFIdqxVLkRurZSSum", + "8Z9c25DMzO+jOv8+SCzE7TBxoaLlMGd1HPwlUG7udyinTzjO3HNITrt9b0Y2ZpQ4wdyIVrbupx13Cx5r", + "FK4kLS2A7ou9SxlHJc02srDekpuOZHRRmIMzHNAaQnXjs7bzPEQhQVLowPAsF+nVn6ha3sGZn/mx+scP", + "pyFLoBlIsqRqeTiJSRnh8WpGG3PETENU8MksmOqwXuJdLW/H0jKqabA0B29cLLGox37I9EBGdJcf8T80", + 
"J+azOduG9dthD8kFMjBlj7NzMmRG27cKgp3JNEArhCCFVfCJ0br3gvJ5M3l8n0bt0bfWpuB2yC0Cd0is", + "7/wYPBPrGAzPxLp3BMQa1F3QhxkHxUgNhRoB3wsHmcD9d+ijUtJNH8k49hgkmwUa0VXhaeDhjW9maYyz", + "pzMhb8Z9OmyFk8bkTKgZNWC+0w6SsGlVJo4UI2Yr26AzUOPl2840usPHMNbCwrmmvwEWlBn1LrDQHuiu", + "sSCKkuVwB6S/jDL9GVXw+BE5/9PpVw8f/fLoq68NSZZSLCQtyGyjQZH7TjcjSm9yeNBfGWpHVa7jo3/9", + "xBsq2+PGxlGikikUtOwPZQ2gVgSyzYhp18daG8246hrAMYfzAgwnt2gn1rZvQHvBlJGwitmdbMYQwrJm", + "low4SDLYSUz7Lq+ZZhMuUW5kdReqLEgpZMS+hkdMi1TkyTVIxUTEm/LGtSCuhRdvy+7vFlqyooqYudH0", + "W3EUKCKUpdd8PN+3Q1+seYObrZzfrjeyOjfvmH1pI99bEhUpQSZ6zUkGs2rR0oTmUhSEkgw74h39Pejz", + "DU/RqnYXRDqsphWMo4lfbXga6Gxmo3LIFq1NuL1u1sWKt8/Zqe6pCDgGHS/xM6r1LyDX9M7ll+4EMdif", + "+420wJLMNEQt+CVbLHUgYL6RQszvHsbYLDFA8YMVz3PTpy+kvxYZmMVW6g4u42awhtbNnoYUTmei0oQS", + "LjJAi0ql4tf0gOceXYbo6dThza+XVuKegSGklFZmtVVJ0I/X4xxNx4SmlnoTRI0a8GLU7ifbyk5nvcK5", + "BJoZrR44ETPnKnBODFwkRSek9hedExIiZ6kFVylFCkpBljgTxU7QfDvLRPQWPCHgCHA9C1GCzKm8NbBX", + "1zvhvIJNgi5zRe7/8LN68Bng1ULTfAdisU0MvbXC5/xBfajHTb+N4LqTh2RHJRDPc412aRhEDhqGULgX", + "Tgb3rwtRbxdvj5ZrkOiZ+U0p3k9yOwKqQf2N6f220FblQCCYU3QuWIF2O065UJAKnqnoYDlVOtnFlk2j", + "ljZmVhBwwhgnxoEHhJKXVGnrTWQ8QyOIvU5wHiugmCmGAR4USM3IP3tZtD92au5BripVC6aqKkshNWSx", + "NXBYb5nrNazrucQ8GLuWfrUglYJdIw9hKRjfIcuuxCKI6tro7tzt/cWhadrc85soKltANIjYBsi5bxVg", + "NwyGGQCEqQbRlnCY6lBOHYEznSgtytJwC51UvO43hKZz2/pU/9S07RMX1c29nQlQGIPj2jvIVxazNgxq", + "SY0KjSOTgl4Z2QMVYuv27MNsDmOiGE8h2Ub55liem1bhEdh5SKtyIWkGSQY53fQH/cl+JvbztgFwxxvF", + "R2hIbDxLfNMbSvbhA1uGFjieigmPBL+Q1BxBo3k0BOJ67xg5Axw7xpwcHd2rh8K5olvkx8Nl262OjIi3", + "4bXQZsctOSDEjqGPgXcADfXIN8cEdk4ataw7xV9BuQlqMWL/STaghpbQjL/XAgaMaS5SODguHe7eYcBR", + "rjnIxXawkaETO2DZe0OlZikrUdX5ATZ3rvl1J4j6m0gGmrIcMhJ8sFpgGfYnNhCjO+bNNMFRRpg++D0r", + "TGQ5OVMo8bSBv4INqtxvbITfRRAXeAeqbGRUcz1RThBQHzdkJPCwCaxpqvONkdP0EjZkBRKIqmYF09pG", + "7rY1XS3KJBwgauDeMqPz5tjoOL8DY9xL5zhUsLz+VkwnViXYDt9FRy9oocOpAqUQ+QjjUQ8ZUQhGOf5J", + "KcyuMxdE7MNIPSW1gHRMG1159e1/T7XQjCsgfxUVSSlHjavSUIs0QqKcgPKjmcFIYPWczsXfYAhyKMAq", + "kvjl4KC78IMDt+dMkTmsfOS9adhFx8EBmnHeCKVbh+sOTIXmuJ1Frg+0/OO954IXOjxlt4vZjTxmJ990", + "Bq/dBeZMKeUI1yz/1gygczLXY9Ye0sg49zqOO8qoHwwdWzfu+zkrqpzqu3BfbJVHa32CFQVkjGrIN6SU", + "kIKNrjYClrKwGNCIjbtKl5QvUK6Wolq4wB87DjLGSlkLhqx4b4io8KHXPFlIUZUxRumCPX2AvRE7gBrN", + "J0AkdrZy/orW87mcijE3mEd4sDvfmzGHvArTyaBiaJB63SiGFjntLIE4FjDtIVFVmgJEQ4BjKle91E42", + "ZJPf4gY0YkMlbQwUoamuaB5SHTmbE8o37TRJynJluCBTBNuZzk1c7dSuzeewzGlufbORpIrwpLQkvmDn", + "G5R2UTHS74BEYqShPmWEBGiOlyHj38aG3wwdg7I/cRB01Xwcirsy+ne+uQMxyA5EJJQSFF5aod1K2a9i", + "HuY+uVtNbZSGom/at11/GWA0bwcVSMFzxiEpBIdNNN2XcXiFH6OMAy/Ogc4owgz17WolLfg7YLXnGUON", + "t8Uv7nbAi97UAYd3sPndcTtenTDrC62WkJeEkjRnaNMUXGlZpfqSU7SaBIctEpjh9cNhO9pz3yRuuIvY", + "1dxQl5xiUE5tS4k6k+cQMRx8B+DNaapaLEB1+CeZA1xy14pxUnGmca7C7FdiN6wEidERh7ZlQTeGBaLZ", + "71eQgswq3ebJmHmitGGX1sVkpiFifsmpJjkYnfoV4xdrHM67aD3NcNArIa9qLMSvkAVwUEwl8QCS7+1X", + "jO1zy1+6OD/MFLafrVPCjN+kp2zQqNJkv/6/+/958u40+W+a/HqcPP2Po/cfnnx8cND78dHHb775/+2f", + "Hn/85sF//ntspzzssbwIB/nZC6esnb1AibzxSvRg/2QW6YLxJEpkoe+9Q1vkPuYAOgJ60LbX6CVccr3m", + "hpCuac4yI3LdhBy6LK53Fu3p6FBNayM69hm/1j3l3FtwGRJhMh3WeONrvB9zFc9AQjeZSyrC8zKvuN1K", + "L+jaAHsf+yLm0zrLzBagOCGYgrSkPnDL/fnoq68n0yZ1qP4+mU7c1/cRSmbZOiodwjqmvrgDggfjniIl", + "3SgYEEAR9miYj402CIctwOi9asnKT88plGazOIfzYcvODLLmZ9zGE5vzg063jbPli/mnh1tLI4eXehlL", + "TG9JCtiq2U2ATiBEKcU18Clhh3DYNUNkRjVzAUc50DkmSKOiJ8akYdTnwBKap4oA6+FCRun6MfpB4dZx", + "64/Tibv81Z3L427gGFzdOWsPm/9bC3Lv+28vyJFjmOqezVW0QwfZZRGt1SVQtEJkDDez5ThssuYlv+Qv", + "YM44M99PLnlGNT2aUcVSdVQpkM9oTnkKhwtBTnxOxguq6SXvSVqDFXOCbBhSVrOcpeQqlIgb8rRVEPoj", + 
"XF6+o/lCXF6+70UL9OVXN1WUv9gJkhXTS1HpxOVwJxJWVMa8MarO4cWRbZGGbbNOiRvbsmKXI+7Gj/M8", + "Wpaqm8vXX35Z5mb5ARkql6lmtowoLaSXRYyAYqHB/X0t3MUg6cqbMCoFivytoOU7xvV7klxWx8ePgbSS", + "2/7mrnxDk5sSRhsyBnMNu/YLXLjVa2CtJU1Kuoh5fS4v32mgJe4+yssFKtl5TrBbK6nOBw3jUM0CPD6G", + "N8DCsXeCEC7u3Pby9XriS8BPuIXYxogbjSv6pvsVpNndeLs6qXq9Xar0MjFnO7oqZUjc70xdxmNhhCwf", + "H6DYAmMwXcWTGZB0CemVK0UBRak301Z3H4LiBE3POpiyRUpskgymyaPNfAakKjPqRPGuBWm2IQq09kGg", + "b+EKNheiybLfJ0G5nS+rhg4qUmogXRpiDY+tG6O7+S7OCU1cZenTTjH/yJPFSU0Xvs/wQbYi7x0c4hhR", + "tPI5hxBBZQQRlvgHUHCDhZrxbkX6seUZLWNmb75IwRLP+4lr0ihPLiQpXA0auO33ArDikVgpMqNGbheu", + "WI/NCQ24WKXoAgYk5NBtMTLzsuXqwEF23XvRm07Muxda776JgmwbJ2bNUUoB88WQCioznUA0P5P1jDkn", + "ANbgcwib5Sgm1RF7lulQ2XIf2aJiQ6DFCRgkbwQOD0YbI6Fks6TK1xHCckv+LI+SAX7DHOdtlS1Cg35Q", + "U6m2r3ue2z2nPe3S1bfwRS18JYtQtRxRlcJI+Bi2HdsOwVEAyiCHhV24bewJpcm3bjbIwPHjfJ4zDiSJ", + "hWNRpUTKbCGo5ppxc4CRjw8IsSZgMnqEGBkHYKPHFwcmr0V4NvliHyC5yxenfmz0FQd/Qzy1xQYoG5FH", + "lIaFswEHUuo5AHUxfPX91YkkxWEI41Ni2Nw1zQ2bcxpfM0ivwAKKrZ1yCi7m4MGQOLvFAm8vlr3WZK+i", + "m6wmlJk80HGBbgvEM7FObG5bVOKdrWeG3qMx25hpFzuYtpTFPUVmYo1xLHi12BjhHbAMw+HBCDT8NVNI", + "r9hv6Da3wGybdrs0FaNChSTjzHk1uQyJE2OmHpBghsjlflCd4kYAdIwdTalXp/zuVFLb4kn/Mm9utWlT", + "dcmnw8SO/9ARiu7SAP76Vpi6nsSbrsQStVO0wzHapTQCETJG9IZN9J00fVeQghxQKUhaQlRyFXPdGd0G", + "8MY5990C4wUW7KB88yCI8ZGwYEpDY0T3IQmfwzxJsU6YEPPh1elSzs363gpRX1O2EA12bC3zk68AY2Tn", + "TCqdoAciugTT6DuFSvV3pmlcVmpHEdmqmiyL8wac9go2ScbyKk6vbt4fXphpX9csUVUz5LeM29iQGVaB", + "jcYWbpnahp9uXfBLu+CX9M7WO+40mKZmYmnIpT3H7+RcdDjvNnYQIcAYcfR3bRClWxhkkBLa546B3GQP", + "J6aEHm6zvvYOU+bH3hk24hNTh+4oO1J0LYHBYOsqGLqJjFjCdFBEtZ+rOXAGaFmybN2xhdpRBzVmupfB", + "w5ee6mABd9cNtgMDgd0zli4iQbWrjDUCvi2H2yrycTgKMxftWmAhQwinYsoXc+8jqk4n24WrC6D5D7D5", + "2bTF5Uw+Tie3M53GcO1G3IHrN/X2RvGMrnlrSmt5QvZEOS1LKa5pnjgD8xBpSnHtSBObe3v0J2Z1cTPm", + "xbenL9848D9OJ2kOVCa1qDC4KmxX/m5WZQuaDRwQXyza6HxeZreiZLD5dRWm0Ci9WoKruhtIo73ygI3D", + "ITiKzkg9j0cI7TQ5O9+IXeIWHwmUtYukMd9ZD0nbK0KvKcu93cxDOxDNg4sbV2MyyhXCAW7tXQmcZMmd", + "spve6Y6fjoa6dvCkcK4tdYELW/paEcG7LnQML96UzuteUCzuZ60ifebEqwItCYnKWRq3sfKZMsTBre/M", + "NCbYeEAYNSNWbMAVyysWjGWaqRGKbgfIYI4oMn2hyCHczYR71qTi7B8VEJYB1+aTxFPZOahYTdFZ2/vX", + "qZEd+nO5ga2Fvhn+NjJGWNiye+MhENsFjNBT1wP3Ra0y+4XWFikMt25cEns4/MMZe1fiFme9ow9HzTZ4", + "cdn2uIWvkPT5nyEMW4569xMoXnl1FTYH5og+acJUMpfiV4jreageR1JxfClPhlEuvwIfEXPeWHeal1ma", + "2Qe3e0i6Ca1Q7SCFAarHnQ/cclhT0FuoKbdbbV8YaMW6xQkmjCo9suM3BONg7kXi5nQ1o7GCi0bIMDCd", + "Ng7gli1dC+I7e9yrOrHBzk4CX3Ldltks6xJkkyXXr9hyQ4HBTjtaVGgkA6TaUCaYWv9frkRkmIqvKLcP", + "VZh+9ii53gqs8cv0WgmJNRJU3OyfQcoKmsclhyztm3gztmD2DYZKQVDk3w1k37exVOQeSqjTdRxqzubk", + "eBq8NOJ2I2PXTLFZDtjioW0xowo5eW2IqruY5QHXS4XNH41ovqx4JiHTS2URqwSphTpUb2rn1Qz0CoCT", + "Y2z38Cm5j247xa7hgcGiu58nJw+fotHV/nEcuwDcGxrbuEmG7OTPjp3E6Rj9lnYMw7jdqIfRdHL7iNYw", + "49pymmzXMWcJWzpet/ssFZTTBcQjRYodMNm+uJtoSOvghWf2BRilpdgQpuPzg6aGPw1Enxv2Z8EgqSgK", + "pgvn3FGiMPTUVPC3k/rh7HMyrviqh8t/RB9p6V1EHSXy0xpN7f0WWzV6sl/TAtponRJqC2PkrIle8CWh", + "yZmvu4PVaOsitBY3Zi6zdBRzMJhhTkrJuEbFotLz5I8kXVJJU8P+DofATWZfP4lU4G1XguT7Af7J8S5B", + "gbyOo14OkL2XIVxfcp8LnhSGo2QPmmyP4FQOOnPjbrsh3+H2occKZWaUZJDcqha50YBT34rw+JYBb0mK", + "9Xr2ose9V/bJKbOScfKgldmhn96+dFJGIWSsmF5z3J3EIUFLBtcYuxffJDPmLfdC5qN24TbQf17Pgxc5", + "A7HMn+WYIvBMRLRTXxW6tqS7WPWIdWDomJoPhgxmbqgpaVfg/fROP2987jufzBcPK/7RBfYzbyki2a9g", + "YBOD6uDR7czq74H/m5JnYj12UzsnxG/sPwFqoiipWJ793GRldoqvS8rTZdSfNTMdf2meiaoXZ++naM26", + "JeUc8uhwVhb8xcuMEan272LsPAXjI9t268Hb5XYW1wDeBtMD5Sc06GU6NxOEWG0nvNUB1flCZATnaQqk", + "Ndyz/45AUO35HxUoHUsewg82qAvtlkbftcWGCfAMtcVD8r19CXYJpFX+BrW0uoqAK31rDepVmQuaTbGQ", + 
"w8W3py+JndX2sY+d2GLHC1RS2qvo2KuC2o/jwoP9uyXx1IXx42yPpTarVhqrUSlNizKWHGpaXPgGmIEa", + "2vBRfQmxc0heBG862jxSM4ShhzmThdG46tGs7II0Yf6jNU2XqJK1WOowyY+v0u2pUgUv49Uv3NQFEfHc", + "GbhdoW5bp3tKhNGbV0zZB0DhGtr5qHVytjMJ+PzU9vJkxbmllKjssa14wE3Q7oGzgRrezB+FrIP4PQVy", + "W+R+36Ll59grWqCpWwG99ySezW6sXy7xDzunlAvOUiyPFLua3UuhY3xgIypJdY2s/oi7Exo5XNG663WY", + "nMPiYCV2zwgd4vpG+OCr2VRLHfZPjU9SLqkmC9DKcTbIpv75AGcHZFyBK3CJ78oGfFLIll8ROWTUVZ3U", + "Lo09yQjTYgYUu+/Mt9dO7cd48SvGUcB3aHOh6dZShw8ZaqMVME0WApRbTzs3WL0zfQ4xTTaD9ftD//Ch", + "rQaDbjmzbOuD7g916j3SzgNs2j43bV2doPrnVgSynfS0LN2kw49LROUBveaDCI54FhPv2gmQW48fjraF", + "3LaGkuB9aggNrtERDSXewz3CqB9a6DziY4RWS1HYgtgQrmgFA8YjYLxkHJpnOSMXRBq9EnBj8LwO9FOp", + "pNqKgKN42gXQHL3PMYamtHM93Haobi0hgxJco59jeBubNyIGGEfdoBHcKN/Ur4Ea6g6Eief4DLFDZP/F", + "B5SqnBCVYUZB5w2IGOMwjNu/MtO+APrHoC8T2e5aUnty9rmJhpJEZ1W2AJ3QLItVpHqGXwl+9cWlYA1p", + "VRemLEuSYk2UdpGYPrW5iVLBVVVsmcs3uOV0waMqEWoIH3bxO4xJKLMN/huryji8My4IY+8wQB9x4V6h", + "2FNubo/Uk3oNTSeKLZLxmMA75fboaKa+GaE3/e+U0nOxaAPyiUtDbONy4R7F+Nu35uIIKyf0So3aq6Uu", + "bIBBd8I/hYdqY52S2+ZKeJX1ao+is6d+amu7AWL40awpXn4DobdBQQxq71frPRwKwE0H48WpdplrmpKt", + "LGgwG8hG79i8H4QibjkditixATvmc6/3OMmwJ2fj2FsR6kPB+gD94ONMSUmZc403zKKPWReRPmwu3Hbo", + "mg3uLsLFeQ9a7H64HorJJorxRQ4Ev3efGboCl85evzNv1+qjkrxKaH91z7za8eqo+Oj6+9EJONXnNYMO", + "Gm0vXEl7u0ynk//ws41hI8C13PwTmHB7m957pKkv7VrzVNOE1OWQR5VHbt2K8feWhusfNTWPkJ5KoVhT", + "gjv2ENPIWLcLfEspqN/UH8sHmlxDqrHueuNAlwD7VHMykwWP/H2pgzSgO9Yhga780baaR/1i6zsutF5a", + "UpBaZwtVH46v8HNah0khU8IKuAvg7p29dsLB6LDn+RxSza53pIH9eQk8SDGaeiOEfS83yApjdRgtVhHZ", + "38TWALQtS2srPEE1v1uDM5QEcgWbe4q0qCFaOXvq75WbFJBADCB3SAyJCBULQ7BWU+cZZqqmDMSCD/ux", + "3aEpxTX45k6Q1HjDuTxJmhu3SXTcMmX80Y9Rc5mue6X/YkToUKZY/9GAYWH7Bb7RoOr38HwBilAlJWf9", + "Mn0rV8ACk/ZqR4EvZQHK/+YzdO0sObuC8FUgdMusqMx8i6idwZswki33US+9yxe87wI9r2dmTZBmP6En", + "UvgJQ3HTXBj5KxmKZ27HRYaP52P0hy35jRGfBq45SPd6Ggp7uVCQaOGDOrfBsQ0V7qH3myBBDRZbtMAN", + "lkB529R4waKzFEueUBfZEi6QSCiogU4GlViG59yG7Of2u89g8UVHd5pTanrdXWjeh+cy1UNiSPVz4m7L", + "3ZkxN7GsMM7tW60qVpaFG1SGpv9SiqxK7QUdHoza+jS66NEWVhI1SqT9Vfb0yxxLgL0M8gyvYHNkRX9f", + "qt9vZQi9FaHsGoK8/s5u36nRKa5f5wu7gMWdwPk5DTfTSSlEngzY+s/61WW6Z+CKpVeQEXN3+MC2gWdL", + "yH00MdfO3NVy46uplCVwyB4cEnLKbSix9+u2yxt3Juf39Lb51zhrVtmCT86mdHjJ4zGZWIpJ3pK/+WG2", + "czUFhvndcio7yI7aJeuByjaSriKP+ByOVUr7ntbuwyoNUVkoYlLKjicsIl5k/yaCf2HDZ6xoUbC0/4pC", + "T5SY42tUCY0MflYz8GnrrUDWebjD1xiyzzSk1ApwRnmgLK8kuMwB+2xOp5x+SfXSb59p3hezzJUNCsP6", + "bUl2qqxS4JUT92ZP91yIMsnhGlqOBJfOUKUpKMWuIXzvx3YmGUCJqnr3AolZyEO66vAQt/YksLGOwW6U", + "qVjE2p0iOzjGwGPsiSUPNZaEDETXLKtoC3/qFk+xjHzbPYR15AnZ+3DEF9c7Gu65lKQu5hYzZLp0Er+F", + "hn6bp106AlLwBEs95sBblDUWbiOMDKI2jtmbFdEYRQ99m3bkyATPrmy3vIQ1dprgXWldI6ip+VPX3dJX", + "zWkc9wCM77ADvNAgFzwB4yUhB85njrB9VSMlWMogJbSWv8vG5xbYsK9giyzvNsu0Fc9sdFZ7XwIDrnpe", + "20WH3mXqmk+xoI7gWGSsb3ZV6CrDWuUh4RjeLa9p/ulNp1hp6RTx4d65jS80tL2FSLaoVDcLc3tJR80d", + "2Nnubmr+Bk29fwazR1EfpxvK+TxqWcF7hpBl0pzkonnhDockKxzTOkUffk1mLkWnlJAyxTrZiytfRrk2", + "NeGrAs3zx9ttW7vW+bPQtyDjuRdfyOumJKsWeGM0EDZH9DMzlYGTG6XyGPX1yCKCvxiPCmtl7Lgurlre", + "UlviuhMGKCTcsdc0iH/a02varwIydnnWM2gunUpBf52jb+sWbiMXdbO2sS7/PnK31e0c46mPl+M13TFU", + "wCIEa1kTBJX87eHfiIQ5PlYjyMEBTnBwMHVN//ao/dkc54OD+DPLnypIwOLIjeHmjVHMz0Nh4zY0eiBD", + "obMfFcuzXYTRyjdpnnvCjIpfXMbZZ3lw6hfry+kfVffoxz7hSd1NQMRE1tqaPJgqyCQZkUTiukVSRtAq", + "klaS6Q0WwvGmf/ZLNJzh+9pb6LzNdekEd/dpcQV1KaXGt1gpf7t+L2iO95GRqTE4TOPTut+uaVHm4A7K", + "N/dmf4DHf3ySHT9++IfZH4+/Ok7hyVdPj4/p0yf04dPHD+HRH796cgwP518/nT3KHj15NHvy6MnXXz1N", + "Hz95OHvy9dM/3DN8yIBsAZ34tOvJX/BVtuT0zVlyYYBtcEJLVr+obcjYPy1DUzyJUFCWT078T//Xn7DD", + 
"VBTN8P7XicvqnCy1LtXJ0dFqtToMuxwt0JmQaFGlyyM/T/8l4zdndWaOVS1xR23ShTcZeFI4xW9vvz2/", + "IKdvzg6DlzJPJseHx4cP8SHFEjgt2eRk8hh/wtOzxH0/csQ2OfnwcTo5WgLN0fdu/ihAS5b6T2pFFwuQ", + "h+6NHfPT9aMjL0ocfXCOlI/bvh2F5aqPPrT8TdmOnljO9uiDr9KyvXWrDIrzs5nlLmK1i76H4CHnoJZ+", + "y84/23hX0ZQofC/e/FRKJsxJmpprMYNUAkW6FxIzY5onoZ3+Ahz/++r0L+jpe3X6F/INOZ66hCmFqkZs", + "emtLrUngLLNgR54sf7Y5rT2XQQ3Hk3exV8Zjb//gETL0EVB4PWLDwbSsIKwt2PBjw2OPk6fvP3z1x48x", + "Oa//5qRH0sCT4lr4SiaItIKuvxlC2doZ1My4/6hAbppFFHQ9CQHu+38jz0/O2aKSnWd1Ow/2EqbIf53/", + "+JoISZxe+4amV3XslAEZC3RIcc0wrSQLcpFMzyGI3ZUXAu2L9rtknUItynZke43m91j9AAHFg/7o+PjL", + "2/j/Gm/jT1tb62nky+7+79jdvrxASmHONMPUvubK8ddZC8im+rMDd8CFeEj+KiqU6uz7LhArt4YzoDHa", + "z+liIIJQu8YTgl8ODroLPzhwe84UmcMKmSzl2LCLjoMDfBDwyZ6sbKsFuRUfP+rs7DNcb7Ne0XVd5YoS", + "LnjC8fmRayCBKvjk+OHvdoVnHAPwjDhKrLj9cTr56ne8ZWfcCDY0J9jSrubx73Y15yCvWQrkAopSSCpZ", + "viE/8Tp9OiiZ1md/P/ErLlbcI8JoklVRULlxQjSteU7Fg4T2rfynF7vQCNrIRelCoZsbRdRJ65ktvpi8", + "/+h1gJGKxbZmRzOs5zK2Kaig8bB2gj4DdfQBrd6Dvx+5Ihfxj+h9sGrtkY+zjLdsKT4f9NrA2umRUp0u", + "q/LoA/4H1cwALJtS1gfXFn84su/T93/e8DT6Y3+g7jtpsZ+PPrTr9LcQqpaVzsQq6It2desU6s9Xv1zV", + "+vtoRZk2EoILlsU6iv3OGmh+5NLAO782mVe9L5hOFvzYkSlKYSt1tNW5t3R10fIaS1u645nINlu4zTqZ", + "MY5HMGQRjbXMfuzrB/1HuZdgyw97h2NEANOCzKSgWUoVludzBRN6iuHHWyofHblxfRZxJyGYqGv34y7N", + "YTrc6WPAcfd8rzyoaouSrlL+3fHfUirpQfSMZsSXdknIK5qbDYeMnDrZt4WN31qi+PwiwGe+sz/ZJfvM", + "Hz5FKEaWtbQjGYnucTFQ7qCOuVGNCmUYwAJ44lhQMhPZxhdplnSl1zYOrcvcjupq29GPd2CG++e2ve0y", + "uX2xdH2xdH2xhXyxdH3Z3S+WrpGWri92oC92oH9JO9A+xp+YmOmMH8PSJla8pK15rW5Hm2TLmsW3o/2Z", + "rmWyfnFjpg8JucBUNmpuCbgGSXN8AEIFuakFBhZizgBkJ5c8aUFiw/fMxPeb/9q4Sfe+/vGDbh+lWZ6H", + "vLnfF+Vd/GSrvnxDLieXk95IEgpxDZnNkA9Te2yvncP+n3rcH3tZgphcja86+9QCoqr5nKXMojwXfEHo", + "QjQxv4ZvEy7wC0gDnK21QJieuuIdTJGVWbyrO9rOQGpL7n0J4KzZwp0+8w65xN3lhvD29JX/xxhH+b+0", + "lH6LZIVbMdKtY/e46heu8im4ymfnK793L2RgPvxfKWY+OX7yu11QaGx+LTT5DuPZbyeO1bWcYyUnbipo", + "+cLg3tzXxMSGMaZ4i9bRpe/em4sAH1ZxF2wTMnlydIRJ7Uuh9NHEXH/tcMrw4/saZl9xf1JKdo0F/N5/", + "/J8AAAD//7X6ZKDq1gAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 15599daa24..3af95fa1c5 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -98,7 +98,7 @@ type NodeInterface interface { GenesisID() string GenesisHash() crypto.Digest BroadcastSignedTxGroup(txgroup []transactions.SignedTxn) error - Simulate(txgroup []transactions.SignedTxn) (vb *ledgercore.ValidatedBlock, missingSignatures bool, err error) + Simulate(txgroup []transactions.SignedTxn) (result simulation.Result, err error) GetPendingTransaction(txID transactions.Txid) (res node.TxnWithStatus, found bool) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) SuggestedFee() basics.MicroAlgos @@ -918,9 +918,30 @@ func (v2 *Handlers) RawTransaction(ctx echo.Context) error { return ctx.JSON(http.StatusOK, model.PostTransactionsResponse{TxId: txid.String()}) } +// preEncodedSimulateTxnResult mirrors model.SimulateTransactionResult +type preEncodedSimulateTxnResult struct { + Txn PreEncodedTxInfo `codec:"txn-result"` + MissingSignature *bool `codec:"missing-signature,omitempty"` +} + +// preEncodedSimulateTxnGroupResult mirrors model.SimulateTransactionGroupResult +type preEncodedSimulateTxnGroupResult struct { + Txns []preEncodedSimulateTxnResult `codec:"txn-results"` + FailureMessage *string `codec:"failure-message,omitempty"` + FailedAt *[]uint64 `codec:"failed-at,omitempty"` +} + +// 
preEncodedSimulateResponse mirrors model.SimulateResponse +type preEncodedSimulateResponse struct { + Version uint64 `codec:"version"` + LastRound uint64 `codec:"last-round"` + TxnGroups []preEncodedSimulateTxnGroupResult `codec:"txn-groups"` + WouldSucceed bool `codec:"would-succeed"` +} + // SimulateTransaction simulates broadcasting a raw transaction to the network, returning relevant simulation results. // (POST /v2/transactions/simulate) -func (v2 *Handlers) SimulateTransaction(ctx echo.Context) error { +func (v2 *Handlers) SimulateTransaction(ctx echo.Context, params model.SimulateTransactionParams) error { if !v2.Node.Config().EnableExperimentalAPI { // Right now this is a redundant/useless check at runtime, since experimental APIs are not registered when EnableExperimentalAPI=false. // However, this endpoint won't always be experimental, so I've left this here as a reminder to have some other flag guarding its usage. @@ -942,31 +963,30 @@ func (v2 *Handlers) SimulateTransaction(ctx echo.Context) error { return badRequest(ctx, err, err.Error(), v2.Log) } - var res model.SimulationResponse - // Simulate transaction - _, missingSignatures, err := v2.Node.Simulate(txgroup) + simulationResult, err := v2.Node.Simulate(txgroup) if err != nil { - var invalidTxErr *simulation.InvalidTxGroupError - var evalErr *simulation.EvalFailureError + var invalidTxErr simulation.InvalidTxGroupError switch { case errors.As(err, &invalidTxErr): return badRequest(ctx, invalidTxErr, invalidTxErr.Error(), v2.Log) - case errors.As(err, &evalErr): - res.FailureMessage = evalErr.Error() default: return internalError(ctx, err, err.Error(), v2.Log) } } - res.MissingSignatures = missingSignatures - // Return msgpack response - msgpack, err := encode(protocol.CodecHandle, &res) + response := convertSimulationResult(simulationResult) + + handle, contentType, err := getCodecHandle((*string)(params.Format)) + if err != nil { + return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) + } + data, err := encode(handle, &response) if err != nil { return internalError(ctx, err, errFailedToEncodeResponse, v2.Log) } - return ctx.Blob(http.StatusOK, "application/msgpack", msgpack) + return ctx.Blob(http.StatusOK, contentType, data) } // TealDryrun takes transactions and additional simulated ledger state and returns debugging information. 
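The handler hunk above changes SimulateTransaction to pick its response codec from the optional format query parameter (via the package's getCodecHandle helper) and to serialize the hand-written preEncodedSimulateResponse, whose codec tags mirror the generated model.SimulateResponse while embedding transactions as native types. As a minimal, self-contained sketch of that encoding path — assuming only the go-codec library that the tests below already use via codec.NewEncoderBytes, and with simulateSketch as a hypothetical stand-in for the unexported response struct (only a subset of its fields is shown) — the tag-driven dual encoding looks roughly like this:

package main

import (
	"fmt"

	"github.com/algorand/go-codec/codec"
)

// simulateSketch is a hypothetical stand-in for the unexported
// preEncodedSimulateResponse; the codec tags supply the field names
// for both the JSON and the msgpack encodings.
type simulateSketch struct {
	Version      uint64 `codec:"version"`
	LastRound    uint64 `codec:"last-round"`
	WouldSucceed bool   `codec:"would-succeed"`
}

func main() {
	resp := simulateSketch{Version: 1, LastRound: 1234, WouldSucceed: true}

	// format=json (also the default when no format parameter is given)
	var jsonBuf []byte
	if err := codec.NewEncoderBytes(&jsonBuf, new(codec.JsonHandle)).Encode(&resp); err != nil {
		panic(err)
	}
	fmt.Println(string(jsonBuf)) // e.g. {"version":1,"last-round":1234,"would-succeed":true}

	// format=msgpack
	var msgpBuf []byte
	if err := codec.NewEncoderBytes(&msgpBuf, new(codec.MsgpackHandle)).Encode(&resp); err != nil {
		panic(err)
	}
	fmt.Printf("msgpack: %d bytes\n", len(msgpBuf))
}

The reason for maintaining these mirror structs by hand, as the tests below note, is that the generated model types represent a transaction as map[string]interface{}, whereas the pre-encoded types embed transactions.SignedTxn directly and so preserve its canonical encoding; the tag-graph tests then guard against the two representations drifting apart.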
diff --git a/daemon/algod/api/server/v2/handlers_test.go b/daemon/algod/api/server/v2/handlers_test.go index 2a703e2553..42570db4c1 100644 --- a/daemon/algod/api/server/v2/handlers_test.go +++ b/daemon/algod/api/server/v2/handlers_test.go @@ -17,10 +17,15 @@ package v2 import ( + "fmt" "math" + "reflect" + "strings" "testing" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -39,3 +44,166 @@ func TestApplicationBoxesMaxKeys(t *testing.T) { // Response size _not_ limited require.Equal(t, uint64(math.MaxUint64), applicationBoxesMaxKeys(0, 0)) } + +type tagNode struct { + children map[string]*tagNode +} + +func (node *tagNode) addChild(name string, value *tagNode) { + if _, ok := node.children[name]; ok { + panic(fmt.Sprintf("child already present: %s", name)) + } + node.children[name] = value +} + +func (node *tagNode) assertEquals(t *testing.T, other *tagNode, path []string, seen map[*tagNode]bool) { + t.Helper() + + if seen[node] { + return + } + + seen[node] = true + + nodeTags := make(map[string]bool) + otherTags := make(map[string]bool) + commonTags := make(map[string]bool) + + for tag := range node.children { + nodeTags[tag] = true + _, ok := other.children[tag] + if ok { + commonTags[tag] = true + } + } + + for tag := range other.children { + otherTags[tag] = true + } + + assert.Equal(t, nodeTags, otherTags, "different tags at path [%s]", strings.Join(path, ", ")) + + for tag := range commonTags { + childPath := append(path, tag) + node.children[tag].assertEquals(t, other.children[tag], childPath, seen) + } +} + +func (node *tagNode) AssertEquals(t *testing.T, other *tagNode) { + t.Helper() + node.assertEquals(t, other, nil, make(map[*tagNode]bool)) +} + +// makeTagGraph creates a graph of encoding keys that an object uses when encoded as JSON or +// msgpack. 
TODO: also represent if fields have "omitempty" +func makeTagGraph(rootType reflect.Type, seen map[reflect.Type]*tagNode) *tagNode { + if value, ok := seen[rootType]; ok { + return value + } + + node := &tagNode{ + children: make(map[string]*tagNode), + } + seen[rootType] = node + + switch rootType.Kind() { + case reflect.Map: + keyGraph := makeTagGraph(rootType.Key(), seen) + node.addChild("", keyGraph) + fallthrough + case reflect.Array, reflect.Slice: + valueGraph := makeTagGraph(rootType.Elem(), seen) + node.addChild("", valueGraph) + case reflect.Ptr: + // Directly embed value type graph + node = makeTagGraph(rootType.Elem(), seen) + case reflect.Struct: + for i := 0; i < rootType.NumField(); i++ { + field := rootType.Field(i) + subgraph := makeTagGraph(field.Type, seen) + if field.Anonymous { + // merge subgraph into this node + for name, value := range subgraph.children { + node.addChild(name, value) + } + } else { + codecTagValue, codecOk := field.Tag.Lookup("codec") + jsonTagValue, jsonOk := field.Tag.Lookup("json") + var tagValue string + if codecOk { + components := strings.Split(codecTagValue, ",") + // remove any ,omitempty or other modifiers + tagValue = components[0] + } else if jsonOk { + components := strings.Split(jsonTagValue, ",") + // remove any ,omitempty or other modifiers + tagValue = components[0] + } else { + tagValue = field.Name + } + if len(tagValue) != 0 { + // ignore any empty tags + node.addChild(tagValue, subgraph) + } + } + } + } + + return node +} + +// TestPendingTransactionResponseStruct ensures that the hand-written PreEncodedTxInfo has the same +// encoding structure as the generated model.PendingTransactionResponse +func TestPendingTransactionResponseStruct(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + generatedResponseType := reflect.TypeOf(model.PendingTransactionResponse{}) + generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode)) + + customResponseType := reflect.TypeOf(PreEncodedTxInfo{}) + customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode)) + + expectedGeneratedTxnGraph := map[string]*tagNode{ + "": {children: make(map[string]*tagNode)}, + "": {children: make(map[string]*tagNode)}, + } + if assert.Equal(t, expectedGeneratedTxnGraph, generatedResponseGraph.children["txn"].children) { + // The generated response type uses map[string]interface{} to represent a transaction, while + // the custom response type uses transactions.SignedTxn. Let's copy that into the generated + // type. 
+ generatedResponseGraph.children["txn"].children = customResponseGraph.children["txn"].children + } + + generatedResponseGraph.AssertEquals(t, customResponseGraph) +} + +// TestSimulateResponseStruct ensures that the hand-written preEncodedSimulateResponse has the same +// encoding structure as the generated model.SimulateResponse +func TestSimulateResponseStruct(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + generatedResponseType := reflect.TypeOf(model.SimulateResponse{}) + generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode)) + + customResponseType := reflect.TypeOf(preEncodedSimulateResponse{}) + customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode)) + + expectedGeneratedTxnGraph := map[string]*tagNode{ + "": {children: make(map[string]*tagNode)}, + "": {children: make(map[string]*tagNode)}, + } + preEncodedTxPath := func(graph *tagNode) *tagNode { + // Resolve the field model.SimulationResponse{}.TxnGroups[0].TxnResults[0].TxnResult.Txn + return graph.children["txn-groups"].children[""].children["txn-results"].children[""].children["txn-result"].children["txn"] + } + if assert.Equal(t, expectedGeneratedTxnGraph, preEncodedTxPath(generatedResponseGraph).children) { + // The generated response type uses map[string]interface{} to represent a transaction, while + // the custom response type uses transactions.SignedTxn. Let's copy that into the generated + // type. + preEncodedTxPath(generatedResponseGraph).children = preEncodedTxPath(customResponseGraph).children + } + + generatedResponseGraph.AssertEquals(t, customResponseGraph) +} diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index 728f75ff45..fe02164f2d 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -50,6 +50,9 @@ import ( "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/data/transactions/logic/mocktracer" + "github.com/algorand/go-algorand/data/txntest" + simulationtesting "github.com/algorand/go-algorand/ledger/simulation/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/node" "github.com/algorand/go-algorand/protocol" @@ -693,21 +696,374 @@ func TestPostTransaction(t *testing.T) { postTransactionTest(t, 0, 200) } -func simulateTransactionTest(t *testing.T, txnToUse, expectedCode int, enableTransactionSimulator bool) { +func simulateTransactionTest(t *testing.T, txnToUse int, format string, expectedCode int, enableTransactionSimulator bool) { handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode, enableTransactionSimulator) defer releasefunc() - err := handler.SimulateTransaction(c) + err := handler.SimulateTransaction(c, model.SimulateTransactionParams{Format: (*model.SimulateTransactionParamsFormat)(&format)}) require.NoError(t, err) require.Equal(t, expectedCode, rec.Code) } +func TestPostSimulateTransaction(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + testCases := []struct { + txnIndex int + format string + expectedStatus int + enableTransactionSimulator bool + }{ + { + txnIndex: -1, + format: "json", + expectedStatus: 400, + enableTransactionSimulator: true, + }, + { + txnIndex: 0, + format: "json", + expectedStatus: 404, + enableTransactionSimulator: false, + }, + { 
+ txnIndex: 0, + format: "msgpack", + expectedStatus: 404, + enableTransactionSimulator: false, + }, + { + txnIndex: 0, + format: "bad format", + expectedStatus: 404, + enableTransactionSimulator: false, + }, + { + txnIndex: 0, + format: "json", + expectedStatus: 200, + enableTransactionSimulator: true, + }, + { + txnIndex: 0, + format: "msgpack", + expectedStatus: 200, + enableTransactionSimulator: true, + }, + { + txnIndex: 0, + format: "bad format", + expectedStatus: 400, + enableTransactionSimulator: true, + }, + } + + for i, testCase := range testCases { + testCase := testCase + t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) { + t.Parallel() + simulateTransactionTest(t, testCase.txnIndex, testCase.format, testCase.expectedStatus, testCase.enableTransactionSimulator) + }) + } +} + +func copyInnerTxnGroupIDs(t *testing.T, dst, src *model.PendingTransactionResponse) { + t.Helper() + + // msgpack decodes to map[interface{}]interface{} while JSON decodes to map[string]interface{} + txn := dst.Txn["txn"] + switch dstTxnMap := txn.(type) { + case map[string]interface{}: + srcTxnMap := src.Txn["txn"].(map[string]interface{}) + groupID, hasGroupID := srcTxnMap["grp"] + if hasGroupID { + dstTxnMap["grp"] = groupID + } + case map[interface{}]interface{}: + srcTxnMap := src.Txn["txn"].(map[interface{}]interface{}) + groupID, hasGroupID := srcTxnMap["grp"] + if hasGroupID { + dstTxnMap["grp"] = groupID + } + } + + if dst.InnerTxns == nil || src.InnerTxns == nil { + return + } + + assert.Equal(t, len(*dst.InnerTxns), len(*src.InnerTxns)) + + for innerIndex := range *dst.InnerTxns { + if innerIndex == len(*src.InnerTxns) { + break + } + dstInner := &(*dst.InnerTxns)[innerIndex] + srcInner := &(*src.InnerTxns)[innerIndex] + copyInnerTxnGroupIDs(t, dstInner, srcInner) + } +} + +func assertSimulationResultsEqual(t *testing.T, expectedError string, expected, actual model.SimulateResponse) { + t.Helper() + + if len(expectedError) != 0 { + require.NotNil(t, actual.TxnGroups[0].FailureMessage) + require.Contains(t, *actual.TxnGroups[0].FailureMessage, expectedError) + require.False(t, expected.WouldSucceed, "Test case WouldSucceed value is not consistent with expected failure") + // if it matched the expected error, copy the actual one so it will pass the equality check below + expected.TxnGroups[0].FailureMessage = actual.TxnGroups[0].FailureMessage + } + + // Copy inner txn groups IDs, since the mocktracer scenarios don't populate them + assert.Equal(t, len(expected.TxnGroups), len(actual.TxnGroups)) + for groupIndex := range expected.TxnGroups { + if groupIndex == len(actual.TxnGroups) { + break + } + expectedGroup := &expected.TxnGroups[groupIndex] + actualGroup := &actual.TxnGroups[groupIndex] + assert.Equal(t, len(expectedGroup.TxnResults), len(actualGroup.TxnResults)) + for txnIndex := range expectedGroup.TxnResults { + if txnIndex == len(actualGroup.TxnResults) { + break + } + expectedTxn := &expectedGroup.TxnResults[txnIndex] + actualTxn := &actualGroup.TxnResults[txnIndex] + if expectedTxn.TxnResult.InnerTxns == nil || actualTxn.TxnResult.InnerTxns == nil { + continue + } + assert.Equal(t, len(*expectedTxn.TxnResult.InnerTxns), len(*actualTxn.TxnResult.InnerTxns)) + for innerIndex := range *expectedTxn.TxnResult.InnerTxns { + if innerIndex == len(*actualTxn.TxnResult.InnerTxns) { + break + } + expectedInner := &(*expectedTxn.TxnResult.InnerTxns)[innerIndex] + actualInner := &(*actualTxn.TxnResult.InnerTxns)[innerIndex] + copyInnerTxnGroupIDs(t, expectedInner, actualInner) + } + } + } + + 
require.Equal(t, expected, actual) +} + +func makePendingTxnResponse(t *testing.T, txn transactions.SignedTxnWithAD, handle codec.Handle) model.PendingTransactionResponse { + t.Helper() + preEncoded := v2.ConvertInnerTxn(&txn) + + // encode to bytes + var encodedBytes []byte + encoder := codec.NewEncoderBytes(&encodedBytes, handle) + err := encoder.Encode(&preEncoded) + require.NoError(t, err) + + // decode to model.PendingTransactionResponse + var response model.PendingTransactionResponse + decoder := codec.NewDecoderBytes(encodedBytes, handle) + err = decoder.Decode(&response) + require.NoError(t, err) + + return response +} + func TestSimulateTransaction(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - simulateTransactionTest(t, -1, 400, true) - simulateTransactionTest(t, 0, 404, false) - simulateTransactionTest(t, 0, 200, true) + // prepare node and handler + numAccounts := 5 + offlineAccounts := true + mockLedger, roots, _, _, releasefunc := testingenvWithBalances(t, 999_998, 999_999, numAccounts, 1, offlineAccounts) + defer releasefunc() + dummyShutdownChan := make(chan struct{}) + mockNode := makeMockNode(mockLedger, t.Name(), nil, false) + mockNode.config.EnableExperimentalAPI = true + handler := v2.Handlers{ + Node: mockNode, + Log: logging.Base(), + Shutdown: dummyShutdownChan, + } + + hdr, err := mockLedger.BlockHdr(mockLedger.Latest()) + require.NoError(t, err) + txnInfo := simulationtesting.TxnInfo{LatestHeader: hdr} + + scenarios := mocktracer.GetTestScenarios() + + for name, scenarioFn := range scenarios { + t.Run(name, func(t *testing.T) { //nolint:paralleltest // Uses shared testing env + sender := roots[0] + futureAppID := basics.AppIndex(2) + + payTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Address(), + Receiver: futureAppID.Address(), + Amount: 700_000, + }) + appCallTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Address(), + ClearStateProgram: `#pragma version 6 +int 1`, + }) + scenario := scenarioFn(mocktracer.TestScenarioInfo{ + CallingTxn: appCallTxn.Txn(), + MinFee: basics.MicroAlgos{Raw: txnInfo.CurrentProtocolParams().MinTxnFee}, + CreatedAppID: futureAppID, + }) + appCallTxn.ApprovalProgram = scenario.Program + + txntest.Group(&payTxn, &appCallTxn) + + stxns := []transactions.SignedTxn{ + payTxn.Txn().Sign(sender.Secrets()), + appCallTxn.Txn().Sign(sender.Secrets()), + } + + // build request body + var body io.Reader + var bodyBytes []byte + for _, stxn := range stxns { + bodyBytes = append(bodyBytes, protocol.Encode(&stxn)...) 
+ } + + msgpackFormat := model.SimulateTransactionParamsFormatMsgpack + jsonFormat := model.SimulateTransactionParamsFormatJson + responseFormats := []struct { + name string + params model.SimulateTransactionParams + handle codec.Handle + }{ + { + name: "msgpack", + params: model.SimulateTransactionParams{ + Format: &msgpackFormat, + }, + handle: protocol.CodecHandle, + }, + { + name: "json", + params: model.SimulateTransactionParams{ + Format: &jsonFormat, + }, + handle: protocol.JSONStrictHandle, + }, + { + name: "default", + params: model.SimulateTransactionParams{ + Format: nil, // should default to JSON + }, + handle: protocol.JSONStrictHandle, + }, + } + + for _, responseFormat := range responseFormats { + t.Run(string(responseFormat.name), func(t *testing.T) { //nolint:paralleltest // Uses shared testing env + body = bytes.NewReader(bodyBytes) + req := httptest.NewRequest(http.MethodPost, "/", body) + rec := httptest.NewRecorder() + + e := echo.New() + c := e.NewContext(req, rec) + + // simulate transaction + err := handler.SimulateTransaction(c, responseFormat.params) + require.NoError(t, err) + require.Equal(t, 200, rec.Code, rec.Body.String()) + + // decode actual response + var actualBody model.SimulateResponse + decoder := codec.NewDecoderBytes(rec.Body.Bytes(), responseFormat.handle) + err = decoder.Decode(&actualBody) + require.NoError(t, err) + + var expectedFailedAt *[]uint64 + if len(scenario.FailedAt) != 0 { + clone := make([]uint64, len(scenario.FailedAt)) + copy(clone, scenario.FailedAt) + clone[0]++ + expectedFailedAt = &clone + } + expectedBody := model.SimulateResponse{ + Version: 1, + TxnGroups: []model.SimulateTransactionGroupResult{ + { + FailedAt: expectedFailedAt, + TxnResults: []model.SimulateTransactionResult{ + { + TxnResult: makePendingTxnResponse(t, transactions.SignedTxnWithAD{ + SignedTxn: stxns[0], + // expect no ApplyData info + }, responseFormat.handle), + }, + { + TxnResult: makePendingTxnResponse(t, transactions.SignedTxnWithAD{ + SignedTxn: stxns[1], + ApplyData: scenario.ExpectedSimulationAD, + }, responseFormat.handle), + }, + }, + }, + }, + WouldSucceed: scenario.Outcome == mocktracer.ApprovalOutcome, + } + assertSimulationResultsEqual(t, scenario.ExpectedError, expectedBody, actualBody) + }) + } + }) + } +} + +func TestSimulateTransactionVerificationFailure(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // prepare node and handler + numAccounts := 5 + offlineAccounts := true + mockLedger, roots, _, _, releasefunc := testingenv(t, numAccounts, 1, offlineAccounts) + defer releasefunc() + dummyShutdownChan := make(chan struct{}) + mockNode := makeMockNode(mockLedger, t.Name(), nil, false) + mockNode.config.EnableExperimentalAPI = true + handler := v2.Handlers{ + Node: mockNode, + Log: logging.Base(), + Shutdown: dummyShutdownChan, + } + + hdr, err := mockLedger.BlockHdr(mockLedger.Latest()) + require.NoError(t, err) + txnInfo := simulationtesting.TxnInfo{LatestHeader: hdr} + + sender := roots[0] + receiver := roots[1] + + txn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Address(), + Receiver: receiver.Address(), + Amount: 0, + }) + + stxn := txn.Txn().Sign(sender.Secrets()) + // make signature invalid + stxn.Sig[0] += byte(1) // will wrap if > 255 + + // build request body + bodyBytes := protocol.Encode(&stxn) + body := bytes.NewReader(bodyBytes) + req := httptest.NewRequest(http.MethodPost, "/", body) + rec := httptest.NewRecorder() + + e := echo.New() + c := e.NewContext(req, rec) + + // 
simulate transaction + err = handler.SimulateTransaction(c, model.SimulateTransactionParams{}) + require.NoError(t, err) + require.Equal(t, 400, rec.Code, rec.Body.String()) } func startCatchupTest(t *testing.T, catchpoint string, nodeError error, expectedCode int) { diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index 272e05154d..8b3ac5f8fc 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -37,6 +37,7 @@ import ( "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/simulation" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/node" "github.com/algorand/go-algorand/node/indexer" @@ -193,8 +194,9 @@ func (m *mockNode) BroadcastSignedTxGroup(txgroup []transactions.SignedTxn) erro return m.err } -func (m *mockNode) Simulate(txgroup []transactions.SignedTxn) (*ledgercore.ValidatedBlock, bool, error) { - return nil, false, m.err +func (m *mockNode) Simulate(txgroup []transactions.SignedTxn) (simulation.Result, error) { + simulator := simulation.MakeSimulator(m.ledger.(*data.Ledger)) + return simulator.Simulate(txgroup) } func (m *mockNode) GetPendingTransaction(txID transactions.Txid) (res node.TxnWithStatus, found bool) { @@ -277,10 +279,14 @@ var retOneProgram = []byte{2, 0x20, 1, 1, 0x22} var proto = config.Consensus[protocol.ConsensusCurrentVersion] func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*data.Ledger, []account.Root, []account.Participation, []transactions.SignedTxn, func()) { + minMoneyAtStart := 100000 // min money start + maxMoneyAtStart := 1000000 // max money start + return testingenvWithBalances(t, minMoneyAtStart, maxMoneyAtStart, numAccounts, numTxs, offlineAccounts) +} + +func testingenvWithBalances(t testing.TB, minMoneyAtStart, maxMoneyAtStart, numAccounts, numTxs int, offlineAccounts bool) (*data.Ledger, []account.Root, []account.Participation, []transactions.SignedTxn, func()) { P := numAccounts // n accounts TXs := numTxs // n txns - maxMoneyAtStart := 1000000 // max money start - minMoneyAtStart := 100000 // max money start transferredMoney := 100 // max money/txn maxFee := 10 // max maxFee/txn lastValid := basics.Round(500) // max round diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go index cf2648e8d4..b2b4e4a351 100644 --- a/daemon/algod/api/server/v2/utils.go +++ b/daemon/algod/api/server/v2/utils.go @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/simulation" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/node" "github.com/algorand/go-algorand/protocol" @@ -90,6 +91,13 @@ func byteOrNil(data []byte) *[]byte { return &data } +func trueOrNil(b bool) *bool { + if !b { + return nil + } + return &b +} + func nilToZero(numPtr *uint64) uint64 { if numPtr == nil { return 0 @@ -311,12 +319,13 @@ func convertLogs(txn node.TxnWithStatus) *[][]byte { func convertInners(txn *node.TxnWithStatus) *[]PreEncodedTxInfo { inner := make([]PreEncodedTxInfo, len(txn.ApplyData.EvalDelta.InnerTxns)) for i := range txn.ApplyData.EvalDelta.InnerTxns { - inner[i] = convertInnerTxn(&txn.ApplyData.EvalDelta.InnerTxns[i]) + 
inner[i] = ConvertInnerTxn(&txn.ApplyData.EvalDelta.InnerTxns[i]) } return &inner } -func convertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo { +// ConvertInnerTxn converts an inner SignedTxnWithAD to PreEncodedTxInfo for the REST API +func ConvertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo { // This copies from handlers.PendingTransactionInformation, with // simplifications because we have a SignedTxnWithAD rather than // TxnWithStatus, and we know this txn has committed. @@ -344,6 +353,48 @@ func convertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo { return response } +func convertTxnResult(txnResult simulation.TxnResult) preEncodedSimulateTxnResult { + return preEncodedSimulateTxnResult{ + Txn: ConvertInnerTxn(&txnResult.Txn), + MissingSignature: trueOrNil(txnResult.MissingSignature), + } +} + +func convertTxnGroupResult(txnGroupResult simulation.TxnGroupResult) preEncodedSimulateTxnGroupResult { + txnResults := make([]preEncodedSimulateTxnResult, len(txnGroupResult.Txns)) + for i, txnResult := range txnGroupResult.Txns { + txnResults[i] = convertTxnResult(txnResult) + } + + encoded := preEncodedSimulateTxnGroupResult{ + Txns: txnResults, + FailureMessage: strOrNil(txnGroupResult.FailureMessage), + } + + if len(txnGroupResult.FailedAt) > 0 { + failedAt := make([]uint64, len(txnGroupResult.FailedAt)) + copy(failedAt, txnGroupResult.FailedAt) + encoded.FailedAt = &failedAt + } + + return encoded +} + +func convertSimulationResult(result simulation.Result) preEncodedSimulateResponse { + encodedSimulationResult := preEncodedSimulateResponse{ + Version: result.Version, + LastRound: uint64(result.LastRound), + WouldSucceed: result.WouldSucceed, + TxnGroups: make([]preEncodedSimulateTxnGroupResult, len(result.TxnGroups)), + } + + for i, txnGroup := range result.TxnGroups { + encodedSimulationResult.TxnGroups[i] = convertTxnGroupResult(txnGroup) + } + + return encodedSimulationResult +} + // printableUTF8OrEmpty checks to see if the entire string is a UTF8 printable string. // If this is the case, the string is returned as is. Otherwise, the empty string is returned. 
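// The converters above lean on a small pattern: the generated REST models use
// pointer fields for optional values, and helpers such as trueOrNil and
// strOrNil return nil for zero values so the codec's omitempty handling drops
// the field from the encoded response entirely. A minimal illustration of the
// pattern (hypothetical type, not part of this change):
//
//	type example struct {
//		MissingSignature *bool `codec:"missing-signature,omitempty"`
//	}
//	a := example{MissingSignature: trueOrNil(false)} // field omitted when encoded
//	b := example{MissingSignature: trueOrNil(true)}  // field present, value true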
func printableUTF8OrEmpty(in string) string { diff --git a/data/transactions/logic/debugger_eval_test.go b/data/transactions/logic/debugger_eval_test.go index 55d2c9f766..4e6b37b859 100644 --- a/data/transactions/logic/debugger_eval_test.go +++ b/data/transactions/logic/debugger_eval_test.go @@ -248,22 +248,22 @@ func TestDebuggerInnerAppEval(t *testing.T) { expectedStack := []basics.TealValue{} switch { case scenarioName == "none": - expectedUpdateCount = 26 + expectedUpdateCount = 32 expectedStack = []basics.TealValue{{Type: basics.TealUintType, Uint: 1}} case strings.HasPrefix(scenarioName, "before inners"): - expectedUpdateCount = 2 + expectedUpdateCount = 4 expectedStack = []basics.TealValue{{Type: basics.TealUintType}} case strings.HasPrefix(scenarioName, "first inner"): - expectedUpdateCount = 10 - case strings.HasPrefix(scenarioName, "between inners"): expectedUpdateCount = 12 + case strings.HasPrefix(scenarioName, "between inners"): + expectedUpdateCount = 16 expectedStack = []basics.TealValue{{Type: basics.TealUintType}} case scenarioName == "second inner": - expectedUpdateCount = 25 + expectedUpdateCount = 29 case scenarioName == "third inner": - expectedUpdateCount = 25 + expectedUpdateCount = 29 case strings.HasPrefix(scenarioName, "after inners"): - expectedUpdateCount = 26 + expectedUpdateCount = 32 if scenario.Outcome == mocktracer.RejectionOutcome { expectedStack = []basics.TealValue{{Type: basics.TealUintType}} } diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 2440148735..c0e94e71c9 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -331,6 +331,12 @@ type EvalParams struct { caller *EvalContext } +// GetCaller returns the calling EvalContext if this is an inner transaction evaluation. Otherwise, +// this returns nil. +func (ep *EvalParams) GetCaller() *EvalContext { + return ep.caller +} + func copyWithClearAD(txgroup []transactions.SignedTxnWithAD) []transactions.SignedTxnWithAD { copy := make([]transactions.SignedTxnWithAD, len(txgroup)) for i := range txgroup { @@ -596,6 +602,11 @@ type EvalContext struct { programHashCached crypto.Digest } +// GroupIndex returns the group index of the transaction being evaluated +func (cx *EvalContext) GroupIndex() int { + return cx.groupIndex +} + // RunMode returns the evaluation context's mode (signature or application) func (cx *EvalContext) RunMode() RunMode { return cx.runModeFlags @@ -1026,6 +1037,11 @@ func (cx *EvalContext) Cost() int { return cx.cost } +// AppID returns the ID of the currently executing app. For LogicSigs it returns 0. 
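// A sketch (hypothetical helper, not part of this change) of what these new
// accessors make possible from outside the logic package: walking the caller
// links to recover the path of group indexes from the outermost transaction
// down to the inner transaction currently being evaluated. This assumes
// EvalContext embeds *EvalParams, so GetCaller is reachable from an
// EvalContext value.
//
//	func groupIndexPath(cx *logic.EvalContext) []uint64 {
//		var path []uint64
//		for c := cx; c != nil; c = c.GetCaller() {
//			path = append([]uint64{uint64(c.GroupIndex())}, path...)
//		}
//		return path
//	}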
+func (cx *EvalContext) AppID() basics.AppIndex { + return cx.appID +} + func (cx *EvalContext) remainingBudget() int { if cx.runModeFlags == ModeSig { return int(cx.Proto.LogicSigMaxCost) - cx.cost diff --git a/data/transactions/logic/mocktracer/scenarios.go b/data/transactions/logic/mocktracer/scenarios.go index df540efeb7..f89a1d1396 100644 --- a/data/transactions/logic/mocktracer/scenarios.go +++ b/data/transactions/logic/mocktracer/scenarios.go @@ -17,6 +17,7 @@ package mocktracer import ( + "encoding/hex" "fmt" "math" @@ -29,6 +30,9 @@ import ( ) const programTemplate string = `#pragma version 6 +pushbytes "a" +log + %s itxn_begin @@ -42,6 +46,9 @@ pushbytes 0x068101 // #pragma version 6; int 1; itxn_field ClearStateProgram itxn_submit +pushbytes "b" +log + %s itxn_begin @@ -60,8 +67,15 @@ global CurrentApplicationAddress itxn_field Receiver itxn_submit +pushbytes "c" +log + %s` +func fillProgramTemplate(beforeInnersOps, innerApprovalProgram, betweenInnersOps string, innerPay1Amount, innerPay2Amount uint64, afterInnersOps string) string { + return fmt.Sprintf(programTemplate, beforeInnersOps, innerApprovalProgram, betweenInnersOps, innerPay1Amount, innerPay2Amount, afterInnersOps) +} + // TestScenarioInfo holds arguments used to call a TestScenarioGenerator type TestScenarioInfo struct { CallingTxn transactions.Transaction @@ -74,9 +88,11 @@ func expectedApplyData(info TestScenarioInfo) transactions.ApplyData { Type: protocol.ApplicationCallTx, Sender: info.CreatedAppID.Address(), ApprovalProgram: `#pragma version 6 -int 1`, +pushbytes "x" +log +pushint 1`, ClearStateProgram: `#pragma version 6 -int 1`, +pushint 1`, FirstValid: info.CallingTxn.FirstValid, LastValid: info.CallingTxn.LastValid, @@ -87,6 +103,7 @@ int 1`, EvalDelta: transactions.EvalDelta{ GlobalDelta: basics.StateDelta{}, LocalDeltas: map[uint64]basics.StateDelta{}, + Logs: []string{"x"}, }, } expectedInnerPay1 := txntest.Txn{ @@ -130,6 +147,7 @@ int 1`, ApplyData: expectedInnerPay2AD, }, }, + Logs: []string{"a", "b", "c"}, }, } } @@ -148,10 +166,12 @@ const ( // TestScenario represents a testing scenario. See GetTestScenarios for more details. type TestScenario struct { - Outcome TestScenarioOutcome - Program string - ExpectedError string - ExpectedEvents []Event + Outcome TestScenarioOutcome + Program string + ExpectedError string + FailedAt []uint64 + ExpectedEvents []Event + ExpectedSimulationAD transactions.ApplyData } // TestScenarioGenerator is a function which instantiates a TestScenario @@ -171,34 +191,38 @@ type TestScenarioGenerator func(info TestScenarioInfo) TestScenario // groups, and after all inners. For app call failures, there are scenarios for both rejection and // runtime errors, which should invoke tracer hooks slightly differently. 
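// The "a"/"b"/"c" log markers added to programTemplate bracket the two inner
// transaction groups, which is what lets the failure scenarios below assert
// truncated Logs slices: only "a" when failing before the inners, "a" and "b"
// when failing between the inner groups or inside the second one. One way to
// sanity-check the handwritten inner-program bytecode below against its
// commented source, as a sketch (assuming logic.Disassemble round-trips
// assembled programs):
//
//	text, err := logic.Disassemble([]byte{0x06, 0x80, 0x01, 0x78, 0xb0, 0x81, 0x01})
//	// expect err == nil and text containing: pushbytes ("x"); log; pushint 1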
func GetTestScenarios() map[string]TestScenarioGenerator { + successInnerProgramBytes := []byte{0x06, 0x80, 0x01, 0x78, 0xb0, 0x81, 0x01} // #pragma version 6; pushbytes "x"; log; pushint 1 + successInnerProgram := "0x" + hex.EncodeToString(successInnerProgramBytes) + noFailureName := "none" noFailure := func(info TestScenarioInfo) TestScenario { expectedAD := expectedApplyData(info) - program := fmt.Sprintf(programTemplate, "", "0x068101", "", 1, 2, "pushint 1") + program := fillProgramTemplate("", successInnerProgram, "", 1, 2, "pushint 1") return TestScenario{ Outcome: ApprovalOutcome, Program: program, + FailedAt: nil, ExpectedError: "", // no error ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(9, false), + OpcodeEvents(11, false), { BeforeOpcode(), BeforeTxnGroup(1), // start first itxn group BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(1, false), + OpcodeEvents(3, false), { AfterProgram(logic.ModeApp, false), AfterTxn(protocol.ApplicationCallTx, expectedAD.EvalDelta.InnerTxns[0].ApplyData, false), AfterTxnGroup(1, false), // end first itxn group AfterOpcode(false), }, - OpcodeEvents(14, false), + OpcodeEvents(16, false), { BeforeOpcode(), BeforeTxnGroup(2), // start second itxn group @@ -209,12 +233,13 @@ func GetTestScenarios() map[string]TestScenarioGenerator { AfterTxnGroup(2, false), // end second itxn group AfterOpcode(false), }, - OpcodeEvents(1, false), + OpcodeEvents(3, false), { AfterProgram(logic.ModeApp, false), AfterTxn(protocol.ApplicationCallTx, expectedAD, false), }, }), + ExpectedSimulationAD: expectedAD, } } @@ -226,7 +251,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator { shouldError := shouldError failureOps := "pushint 0\nreturn" singleFailureOp := "pushint 0" - failureInnerProgram := "0x068100" + failureInnerProgramBytes := []byte{0x06, 0x80, 0x01, 0x78, 0xb0, 0x81, 0x00} // #pragma version 6; pushbytes "x"; log; pushint 0 failureMessage := "transaction rejected by ApprovalProgram" outcome := RejectionOutcome if shouldError { @@ -234,32 +259,39 @@ func GetTestScenarios() map[string]TestScenarioGenerator { // trace event consistency with rejections. 
failureOps = "pushint 0\nerr" singleFailureOp = "err" - failureInnerProgram = "0x0600" + failureInnerProgramBytes = []byte{0x06, 0x80, 0x01, 0x78, 0xb0, 0x00} // #pragma version 6; pushbytes "x"; log; err failureMessage = "err opcode executed" outcome = ErrorOutcome } + failureInnerProgram := "0x" + hex.EncodeToString(failureInnerProgramBytes) beforeInnersName := fmt.Sprintf("before inners,error=%t", shouldError) beforeInners := func(info TestScenarioInfo) TestScenario { expectedAD := expectedApplyData(info) - program := fmt.Sprintf(programTemplate, failureOps, "0x068101", "", 1, 2, "pushint 1") + program := fillProgramTemplate(failureOps, successInnerProgram, "", 1, 2, "pushint 1") // EvalDeltas are removed from failed app call transactions - expectedAD.EvalDelta = transactions.EvalDelta{} + expectedADNoED := expectedAD + expectedADNoED.EvalDelta = transactions.EvalDelta{} + // Only first log happens + expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:1] + expectedAD.EvalDelta.InnerTxns = nil return TestScenario{ Outcome: outcome, Program: program, ExpectedError: failureMessage, + FailedAt: []uint64{0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(2, shouldError), + OpcodeEvents(4, shouldError), { AfterProgram(logic.ModeApp, shouldError), - AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true), }, }), + ExpectedSimulationAD: expectedAD, } } scenarios[beforeInnersName] = beforeInners @@ -267,37 +299,48 @@ func GetTestScenarios() map[string]TestScenarioGenerator { firstInnerName := fmt.Sprintf("first inner,error=%t", shouldError) firstInner := func(info TestScenarioInfo) TestScenario { expectedAD := expectedApplyData(info) + // EvalDeltas are removed from failed app call transactions expectedInnerAppCallADNoEvalDelta := expectedAD.EvalDelta.InnerTxns[0].ApplyData expectedInnerAppCallADNoEvalDelta.EvalDelta = transactions.EvalDelta{} - expectedAD.EvalDelta = transactions.EvalDelta{} - program := fmt.Sprintf(programTemplate, "", failureInnerProgram, "", 1, 2, "pushint 1") + expectedADNoED := expectedAD + expectedADNoED.EvalDelta = transactions.EvalDelta{} + + // Only first log happens + expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:1] + + expectedAD.EvalDelta.InnerTxns = expectedAD.EvalDelta.InnerTxns[:1] + expectedAD.EvalDelta.InnerTxns[0].Txn.ApprovalProgram = failureInnerProgramBytes + + program := fillProgramTemplate("", failureInnerProgram, "", 1, 2, "pushint 1") return TestScenario{ Outcome: outcome, Program: program, ExpectedError: failureMessage, + FailedAt: []uint64{0, 0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(9, false), + OpcodeEvents(11, false), { BeforeOpcode(), BeforeTxnGroup(1), // start first itxn group BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(1, shouldError), + OpcodeEvents(3, shouldError), { AfterProgram(logic.ModeApp, shouldError), AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallADNoEvalDelta, true), AfterTxnGroup(1, true), // end first itxn group AfterOpcode(true), AfterProgram(logic.ModeApp, true), - AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true), }, }), + ExpectedSimulationAD: expectedAD, } } scenarios[firstInnerName] = firstInner @@ -306,38 +349,48 @@ func GetTestScenarios() 
map[string]TestScenarioGenerator { betweenInners := func(info TestScenarioInfo) TestScenario { expectedAD := expectedApplyData(info) expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData + // EvalDeltas are removed from failed app call transactions - expectedAD.EvalDelta = transactions.EvalDelta{} - program := fmt.Sprintf(programTemplate, "", "0x068101", failureOps, 1, 2, "pushint 1") + expectedADNoED := expectedAD + expectedADNoED.EvalDelta = transactions.EvalDelta{} + + // Only first two logs happen + expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:2] + + expectedAD.EvalDelta.InnerTxns = expectedAD.EvalDelta.InnerTxns[:1] + + program := fillProgramTemplate("", successInnerProgram, failureOps, 1, 2, "pushint 1") return TestScenario{ Outcome: outcome, Program: program, ExpectedError: failureMessage, + FailedAt: []uint64{0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(9, false), + OpcodeEvents(11, false), { BeforeOpcode(), BeforeTxnGroup(1), // start first itxn group BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(1, false), + OpcodeEvents(3, false), { AfterProgram(logic.ModeApp, false), AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), AfterTxnGroup(1, false), // end first itxn group AfterOpcode(false), }, - OpcodeEvents(2, shouldError), + OpcodeEvents(4, shouldError), { AfterProgram(logic.ModeApp, shouldError), - AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true), }, }), + ExpectedSimulationAD: expectedAD, } } scenarios[betweenInnersName] = betweenInners @@ -348,33 +401,42 @@ func GetTestScenarios() map[string]TestScenarioGenerator { expectedAD := expectedApplyData(info) expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData + // EvalDeltas are removed from failed app call transactions - expectedAD.EvalDelta = transactions.EvalDelta{} - program := fmt.Sprintf(programTemplate, "", "0x068101", "", uint64(math.MaxUint64), 2, "pushint 1") + expectedADNoED := expectedAD + expectedADNoED.EvalDelta = transactions.EvalDelta{} + + // Only first two logs happen + expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:2] + + expectedAD.EvalDelta.InnerTxns[1].Txn.Amount.Raw = math.MaxUint64 + + program := fillProgramTemplate("", successInnerProgram, "", math.MaxUint64, 2, "pushint 1") return TestScenario{ Outcome: ErrorOutcome, Program: program, ExpectedError: "overspend", + FailedAt: []uint64{0, 1}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(9, false), + OpcodeEvents(11, false), { BeforeOpcode(), BeforeTxnGroup(1), // start first itxn group BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(1, false), + OpcodeEvents(3, false), { AfterProgram(logic.ModeApp, false), AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), AfterTxnGroup(1, false), // end first itxn group AfterOpcode(false), }, - OpcodeEvents(14, false), + OpcodeEvents(16, false), { BeforeOpcode(), BeforeTxnGroup(2), // start second itxn group @@ -383,9 +445,10 @@ func GetTestScenarios() map[string]TestScenarioGenerator { AfterTxnGroup(2, true), // end second itxn group AfterOpcode(true), AfterProgram(logic.ModeApp, true), - AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + 
AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true), }, }), + ExpectedSimulationAD: expectedAD, } } scenarios[secondInnerName] = secondInner @@ -393,36 +456,46 @@ func GetTestScenarios() map[string]TestScenarioGenerator { thirdInnerName := "third inner" thirdInner := func(info TestScenarioInfo) TestScenario { expectedAD := expectedApplyData(info) + expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData expectedInnerPay2AD := expectedAD.EvalDelta.InnerTxns[2].ApplyData + // EvalDeltas are removed from failed app call transactions - expectedAD.EvalDelta = transactions.EvalDelta{} - program := fmt.Sprintf(programTemplate, "", "0x068101", "", 1, uint64(math.MaxUint64), "pushint 1") + expectedADNoED := expectedAD + expectedADNoED.EvalDelta = transactions.EvalDelta{} + + // Only first two logs happen + expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:2] + + expectedAD.EvalDelta.InnerTxns[2].Txn.Amount.Raw = math.MaxUint64 + + program := fillProgramTemplate("", successInnerProgram, "", 1, math.MaxUint64, "pushint 1") return TestScenario{ Outcome: ErrorOutcome, Program: program, ExpectedError: "overspend", + FailedAt: []uint64{0, 2}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(9, false), + OpcodeEvents(11, false), { BeforeOpcode(), BeforeTxnGroup(1), // start first itxn group BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(1, false), + OpcodeEvents(3, false), { AfterProgram(logic.ModeApp, false), AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), AfterTxnGroup(1, false), // end first itxn group AfterOpcode(false), }, - OpcodeEvents(14, false), + OpcodeEvents(16, false), { BeforeOpcode(), BeforeTxnGroup(2), // start second itxn group @@ -433,9 +506,10 @@ func GetTestScenarios() map[string]TestScenarioGenerator { AfterTxnGroup(2, true), // end second itxn group AfterOpcode(true), AfterProgram(logic.ModeApp, true), - AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true), }, }), + ExpectedSimulationAD: expectedAD, } } scenarios[thirdInnerName] = thirdInner @@ -448,32 +522,34 @@ func GetTestScenarios() map[string]TestScenarioGenerator { expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData expectedInnerPay2AD := expectedAD.EvalDelta.InnerTxns[2].ApplyData // EvalDeltas are removed from failed app call transactions - expectedAD.EvalDelta = transactions.EvalDelta{} - program := fmt.Sprintf(programTemplate, "", "0x068101", "", 1, 2, singleFailureOp) + expectedADNoED := expectedAD + expectedADNoED.EvalDelta = transactions.EvalDelta{} + program := fillProgramTemplate("", successInnerProgram, "", 1, 2, singleFailureOp) return TestScenario{ Outcome: outcome, Program: program, ExpectedError: failureMessage, + FailedAt: []uint64{0}, ExpectedEvents: FlattenEvents([][]Event{ { BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(9, false), + OpcodeEvents(11, false), { BeforeOpcode(), BeforeTxnGroup(1), // start first itxn group BeforeTxn(protocol.ApplicationCallTx), BeforeProgram(logic.ModeApp), }, - OpcodeEvents(1, false), + OpcodeEvents(3, false), { AfterProgram(logic.ModeApp, false), AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false), AfterTxnGroup(1, false), // end first itxn group AfterOpcode(false), }, - OpcodeEvents(14, false), + OpcodeEvents(16, 
false), { BeforeOpcode(), BeforeTxnGroup(2), // start second itxn group @@ -484,12 +560,13 @@ func GetTestScenarios() map[string]TestScenarioGenerator { AfterTxnGroup(2, false), // end second itxn group AfterOpcode(false), }, - OpcodeEvents(1, shouldError), + OpcodeEvents(3, shouldError), { AfterProgram(logic.ModeApp, shouldError), - AfterTxn(protocol.ApplicationCallTx, expectedAD, true), + AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true), }, }), + ExpectedSimulationAD: expectedAD, } } scenarios[afterInnersName] = afterInners diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 683a2701c8..1d36055210 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -416,6 +416,20 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g return nil } +// LogicSigError represents a LogicSig evaluation which rejected or errored +type LogicSigError struct { + GroupIndex int + err error +} + +func (e LogicSigError) Error() string { + return e.err.Error() +} + +func (e LogicSigError) Unwrap() error { + return e.err +} + // logicSigVerify checks that the signature is valid, executing the program. func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, evalTracer logic.EvalTracer) error { err := LogicSigSanityCheck(txn, groupIndex, groupCtx) @@ -436,11 +450,11 @@ func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *Group pass, cx, err := logic.EvalSignatureFull(groupIndex, &ep) if err != nil { logicErrTotal.Inc(nil) - return fmt.Errorf("transaction %v: rejected by logic err=%v", txn.ID(), err) + return LogicSigError{groupIndex, fmt.Errorf("transaction %v: rejected by logic err=%w", txn.ID(), err)} } if !pass { logicRejTotal.Inc(nil) - return fmt.Errorf("transaction %v: rejected by logic", txn.ID()) + return LogicSigError{groupIndex, fmt.Errorf("transaction %v: rejected by logic", txn.ID())} } logicGoodTotal.Inc(nil) logicCostTotal.AddUint64(uint64(cx.Cost()), nil) diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go new file mode 100644 index 0000000000..0af8b5ae01 --- /dev/null +++ b/ledger/simulation/simulation_eval_test.go @@ -0,0 +1,1531 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
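// Because LogicSigError wraps the underlying failure and implements Unwrap,
// callers can detect logic-signature rejections with the standard errors
// package and recover the offending group index. A minimal sketch of a
// hypothetical caller (not part of this change):
//
//	var lsigErr verify.LogicSigError
//	if errors.As(err, &lsigErr) {
//		log.Printf("logic sig failed at group index %d: %v", lsigErr.GroupIndex, lsigErr)
//	}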
+
+package simulation_test
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"testing"
+
+	"github.com/algorand/go-algorand/crypto"
+	"github.com/algorand/go-algorand/data/basics"
+	"github.com/algorand/go-algorand/data/transactions"
+	"github.com/algorand/go-algorand/data/transactions/logic"
+	"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
+	"github.com/algorand/go-algorand/data/txntest"
+
+	"github.com/algorand/go-algorand/ledger/simulation"
+	simulationtesting "github.com/algorand/go-algorand/ledger/simulation/testing"
+	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+	"github.com/algorand/go-algorand/protocol"
+	"github.com/algorand/go-algorand/test/partitiontest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// attachGroupID calculates and assigns the ID for a transaction group.
+// Mutates the group directly.
+func attachGroupID(txns []transactions.SignedTxn) {
+	txgroup := transactions.TxGroup{
+		TxGroupHashes: make([]crypto.Digest, len(txns)),
+	}
+	for i, txn := range txns {
+		txn.Txn.Group = crypto.Digest{}
+		txgroup.TxGroupHashes[i] = crypto.Digest(txn.ID())
+	}
+	group := crypto.HashObj(txgroup)
+
+	for i := range txns {
+		txns[i].Txn.Header.Group = group
+	}
+}
+
+func uint64ToBytes(num uint64) []byte {
+	ibytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(ibytes, num)
+	return ibytes
+}
+
+type simulationTestCase struct {
+	input         []transactions.SignedTxn
+	expected      simulation.Result
+	expectedError string
+}
+
+func normalizeEvalDeltas(t *testing.T, actual, expected *transactions.EvalDelta) {
+	t.Helper()
+	for _, evalDelta := range []*transactions.EvalDelta{actual, expected} {
+		// The difference between a nil container and a 0-length one is not meaningful for these tests
+		if len(evalDelta.GlobalDelta) == 0 {
+			evalDelta.GlobalDelta = nil
+		}
+		if len(evalDelta.LocalDeltas) == 0 {
+			evalDelta.LocalDeltas = nil
+		}
+	}
+	// Use assert instead of require here so that we get a more useful error message later
+	assert.Equal(t, len(expected.InnerTxns), len(actual.InnerTxns))
+	for innerIndex := range expected.InnerTxns {
+		if innerIndex == len(actual.InnerTxns) {
+			break
+		}
+		expectedTxn := &expected.InnerTxns[innerIndex]
+		actualTxn := &actual.InnerTxns[innerIndex]
+		if expectedTxn.SignedTxn.Txn.Type == "" {
+			// Use Type as a marker for whether the transaction was specified or not. If not
+			// specified, replace it with the actual inner txn
+			expectedTxn.SignedTxn = actualTxn.SignedTxn
+		} else if expectedTxn.SignedTxn.Txn.Group.IsZero() {
+			// Inner txn IDs are very difficult to calculate, so copy from actual
+			expectedTxn.SignedTxn.Txn.Group = actualTxn.SignedTxn.Txn.Group
+		}
+		normalizeEvalDeltas(t, &actualTxn.EvalDelta, &expectedTxn.EvalDelta)
+	}
+}
+
+func validateSimulationResult(t *testing.T, result simulation.Result) {
+	t.Helper()
+
+	shouldHaveBlock := true
+	if !result.WouldSucceed {
+		// WouldSucceed might be false because of missing signatures, in which case a block would
+		// still be generated. The only reason for no block would be an eval error.
+ for _, groupResult := range result.TxnGroups { + if len(groupResult.FailureMessage) != 0 { + shouldHaveBlock = false + break + } + } + } + if !shouldHaveBlock { + assert.Nil(t, result.Block) + return + } + require.NotNil(t, result.Block) + + blockGroups, err := result.Block.Block().DecodePaysetGroups() + require.NoError(t, err) + + if !assert.Equal(t, len(blockGroups), len(result.TxnGroups)) { + return + } + + for i, groupResult := range result.TxnGroups { + if i == len(blockGroups) { + break + } + blockGroup := blockGroups[i] + + if !assert.Equal(t, len(blockGroup), len(groupResult.Txns), "mismatched number of txns in group %d", i) { + continue + } + + for j, txnResult := range groupResult.Txns { + blockTxn := blockGroup[j] + assert.Equal(t, blockTxn.ApplyData, txnResult.Txn.ApplyData, "transaction %d of group %d has a simulation ApplyData that does not match what appears in a block", i, j) + } + } +} + +func simulationTest(t *testing.T, f func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase) { + t.Helper() + l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t) + defer l.Close() + s := simulation.MakeSimulator(l) + + testcase := f(accounts, txnInfo) + + actual, err := s.Simulate(testcase.input) + require.NoError(t, err) + + validateSimulationResult(t, actual) + + require.Len(t, testcase.expected.TxnGroups, 1, "Test case must expect a single txn group") + require.Len(t, testcase.expected.TxnGroups[0].Txns, len(testcase.input), "Test case expected a different number of transactions than its input") + + for i, inputTxn := range testcase.input { + if testcase.expected.TxnGroups[0].Txns[i].Txn.Txn.Type == "" { + // Use Type as a marker for whether the transaction was specified or not. If not + // specified, replace it with the input txn + testcase.expected.TxnGroups[0].Txns[i].Txn.SignedTxn = inputTxn + } + normalizeEvalDeltas(t, &actual.TxnGroups[0].Txns[i].Txn.EvalDelta, &testcase.expected.TxnGroups[0].Txns[i].Txn.EvalDelta) + } + + if len(testcase.expectedError) != 0 { + require.Contains(t, actual.TxnGroups[0].FailureMessage, testcase.expectedError) + require.False(t, testcase.expected.WouldSucceed, "Test case WouldSucceed value is not consistent with expected failure") + // if it matched the expected error, copy the actual one so it will pass the equality check below + testcase.expected.TxnGroups[0].FailureMessage = actual.TxnGroups[0].FailureMessage + } + + // Do not attempt to compare blocks + actual.Block = nil + require.Equal(t, testcase.expected, actual) +} + +func TestPayTxn(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + t.Run("simple", func(t *testing.T) { + t.Parallel() + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + receiver := accounts[1] + + txn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: receiver.Addr, + Amount: 1_000_000, + }).SignedTxn() + + if signed { + txn = txn.Txn.Sign(sender.Sk) + } + + return simulationTestCase{ + input: []transactions.SignedTxn{txn}, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: !signed, + }, + }, + }, + }, + WouldSucceed: signed, + }, + } + }) + }) + } 
+ }) + + t.Run("close to", func(t *testing.T) { + t.Parallel() + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + receiver := accounts[1] + closeTo := accounts[2] + amount := uint64(1_000_000) + + txn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: receiver.Addr, + Amount: amount, + CloseRemainderTo: closeTo.Addr, + }).SignedTxn() + + if signed { + txn = txn.Txn.Sign(sender.Sk) + } + + expectedClosingAmount := sender.AcctData.MicroAlgos.Raw + expectedClosingAmount -= amount + txn.Txn.Fee.Raw + + return simulationTestCase{ + input: []transactions.SignedTxn{txn}, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ClosingAmount: basics.MicroAlgos{Raw: expectedClosingAmount}, + }, + }, + MissingSignature: !signed, + }, + }, + }, + }, + WouldSucceed: signed, + }, + } + }) + }) + } + }) + + t.Run("overspend", func(t *testing.T) { + t.Parallel() + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + receiver := accounts[1] + amount := sender.AcctData.MicroAlgos.Raw + 100 + + txn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: receiver.Addr, + Amount: amount, + }).SignedTxn() + + if signed { + txn = txn.Txn.Sign(sender.Sk) + } + + return simulationTestCase{ + input: []transactions.SignedTxn{txn}, + expectedError: fmt.Sprintf("tried to spend {%d}", amount), + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: !signed, + }, + }, + FailedAt: simulation.TxnPath{0}, + }, + }, + WouldSucceed: false, + }, + } + }) + }) + } + }) +} + +func TestWrongAuthorizerTxn(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + authority := accounts[1] + + txn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + Amount: 0, + }) + + var stxn transactions.SignedTxn + if signed { + stxn = txn.Txn().Sign(authority.Sk) + } else { + stxn = txn.SignedTxn() + stxn.AuthAddr = authority.Addr + } + + return simulationTestCase{ + input: []transactions.SignedTxn{stxn}, + expectedError: fmt.Sprintf("should have been authorized by %s but was actually authorized by %s", sender.Addr, authority.Addr), + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: !signed, + }, + }, + FailedAt: simulation.TxnPath{0}, + }, + }, + WouldSucceed: false, + }, + } + }) + }) + } +} + +func TestRekey(t 
*testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + authority := accounts[1] + + txn1 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + Amount: 1, + RekeyTo: authority.Addr, + }) + txn2 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + Amount: 2, + }) + + txntest.Group(&txn1, &txn2) + + var stxn1 transactions.SignedTxn + var stxn2 transactions.SignedTxn + if signed { + stxn1 = txn1.Txn().Sign(sender.Sk) + stxn2 = txn2.Txn().Sign(authority.Sk) + } else { + stxn1 = txn1.SignedTxn() + stxn2 = txn2.SignedTxn() + stxn2.AuthAddr = authority.Addr + } + return simulationTestCase{ + input: []transactions.SignedTxn{stxn1, stxn2}, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: !signed, + }, + { + MissingSignature: !signed, + }, + }, + }, + }, + WouldSucceed: signed, + }, + } + }) + }) + } +} + +func TestStateProofTxn(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + l, _, txnInfo := simulationtesting.PrepareSimulatorTest(t) + defer l.Close() + s := simulation.MakeSimulator(l) + + txgroup := []transactions.SignedTxn{ + txnInfo.NewTxn(txntest.Txn{ + Type: protocol.StateProofTx, + // No need to fill out StateProofTxnFields, this should fail at signature verification + }).SignedTxn(), + } + + _, err := s.Simulate(txgroup) + require.ErrorContains(t, err, "cannot simulate StateProof transactions") +} + +func TestSimpleGroupTxn(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t) + defer l.Close() + s := simulation.MakeSimulator(l) + sender1 := accounts[0].Addr + sender1Balance := accounts[0].AcctData.MicroAlgos + sender2 := accounts[1].Addr + sender2Balance := accounts[1].AcctData.MicroAlgos + + // Send money back and forth + txgroup := []transactions.SignedTxn{ + txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender1, + Receiver: sender2, + Amount: 1_000_000, + }).SignedTxn(), + txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender2, + Receiver: sender1, + Amount: 0, + }).SignedTxn(), + } + + // Should fail if there is no group parameter + result, err := s.Simulate(txgroup) + require.NoError(t, err) + require.False(t, result.WouldSucceed) + require.Len(t, result.TxnGroups, 1) + require.Len(t, result.TxnGroups[0].Txns, 2) + require.Contains(t, result.TxnGroups[0].FailureMessage, "had zero Group but was submitted in a group of 2") + + // Add group parameter + attachGroupID(txgroup) + + // Check balances before transaction + sender1Data, _, err := l.LookupWithoutRewards(l.Latest(), sender1) + require.NoError(t, err) + require.Equal(t, sender1Balance, sender1Data.MicroAlgos) + + sender2Data, _, err := l.LookupWithoutRewards(l.Latest(), sender2) + require.NoError(t, err) + require.Equal(t, sender2Balance, sender2Data.MicroAlgos) + + // Should now pass + result, err = s.Simulate(txgroup) + require.NoError(t, err) + require.False(t, result.WouldSucceed) + require.Len(t, result.TxnGroups, 1) + require.Len(t, result.TxnGroups[0].Txns, 
2) + require.Zero(t, result.TxnGroups[0].FailureMessage) + + // Confirm balances have not changed + sender1Data, _, err = l.LookupWithoutRewards(l.Latest(), sender1) + require.NoError(t, err) + require.Equal(t, sender1Balance, sender1Data.MicroAlgos) + + sender2Data, _, err = l.LookupWithoutRewards(l.Latest(), sender2) + require.NoError(t, err) + require.Equal(t, sender2Balance, sender2Data.MicroAlgos) +} + +func TestLogicSig(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + op, err := logic.AssembleString(`#pragma version 8 +arg 0 +btoi`) + require.NoError(t, err) + program := logic.Program(op.Program) + lsigAddr := basics.Address(crypto.HashObj(&program)) + + testCases := []struct { + name string + arguments [][]byte + expectedError string + }{ + { + name: "approval", + arguments: [][]byte{{1}}, + expectedError: "", // no error + }, + { + name: "rejection", + arguments: [][]byte{{0}}, + expectedError: "rejected by logic", + }, + { + name: "error", + arguments: [][]byte{}, + expectedError: "rejected by logic err=cannot load arg[0] of 0", + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + payTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: lsigAddr, + Amount: 1_000_000, + }) + appCallTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: lsigAddr, + ApprovalProgram: `#pragma version 8 +byte "hello" +log +int 1`, + ClearStateProgram: `#pragma version 8 +int 1`, + }) + + txntest.Group(&payTxn, &appCallTxn) + + signedPayTxn := payTxn.Txn().Sign(sender.Sk) + signedAppCallTxn := appCallTxn.SignedTxn() + signedAppCallTxn.Lsig = transactions.LogicSig{ + Logic: program, + Args: testCase.arguments, + } + + expectedSuccess := len(testCase.expectedError) == 0 + var expectedAppCallAD transactions.ApplyData + expectedFailedAt := simulation.TxnPath{1} + if expectedSuccess { + expectedAppCallAD = transactions.ApplyData{ + ApplicationID: 2, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"hello"}, + }, + } + expectedFailedAt = nil + } + + return simulationTestCase{ + input: []transactions.SignedTxn{signedPayTxn, signedAppCallTxn}, + expectedError: testCase.expectedError, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + {}, + { + Txn: transactions.SignedTxnWithAD{ + ApplyData: expectedAppCallAD, + }, + }, + }, + FailedAt: expectedFailedAt, + }, + }, + WouldSucceed: expectedSuccess, + }, + } + }) + }) + } +} + +func TestSimpleAppCall(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + // Create program and call it + futureAppID := basics.AppIndex(1) + createTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: 0, + ApprovalProgram: `#pragma version 6 +txn ApplicationID +bz create +byte "app call" +log +b end +create: +byte "app creation" +log +end: +int 1 +`, + ClearStateProgram: `#pragma version 6 +int 0 +`, + }) + 
callTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: futureAppID, + }) + + txntest.Group(&createTxn, &callTxn) + + signedCreateTxn := createTxn.SignedTxn() + signedCallTxn := callTxn.SignedTxn() + + if signed { + signedCreateTxn = signedCreateTxn.Txn.Sign(sender.Sk) + signedCallTxn = signedCallTxn.Txn.Sign(sender.Sk) + } + + return simulationTestCase{ + input: []transactions.SignedTxn{signedCreateTxn, signedCallTxn}, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ApplicationID: futureAppID, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"app creation"}, + }, + }, + }, + MissingSignature: !signed, + }, + { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + EvalDelta: transactions.EvalDelta{ + Logs: []string{"app call"}, + }, + }, + }, + MissingSignature: !signed, + }, + }, + }, + }, + WouldSucceed: signed, + }, + } + }) + }) + } +} + +func TestRejectAppCall(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + futureAppID := basics.AppIndex(1) + createTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: 0, + ApprovalProgram: `#pragma version 6 +byte "app creation" +log +int 0 + `, + ClearStateProgram: `#pragma version 6 +int 0 +`, + }) + + signedCreateTxn := createTxn.SignedTxn() + + if signed { + signedCreateTxn = createTxn.Txn().Sign(sender.Sk) + } + + return simulationTestCase{ + input: []transactions.SignedTxn{signedCreateTxn}, + expectedError: "transaction rejected by ApprovalProgram", + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ApplicationID: futureAppID, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"app creation"}, + }, + }, + }, + MissingSignature: !signed, + }, + }, + FailedAt: simulation.TxnPath{0}, + }, + }, + WouldSucceed: false, + }, + } + }) + }) + } +} + +func TestErrorAppCall(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + futureAppID := basics.AppIndex(1) + createTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: 0, + ApprovalProgram: `#pragma version 6 +byte "app creation" +log +err + `, + ClearStateProgram: `#pragma version 6 +int 0 +`, + }) + + signedCreateTxn := createTxn.SignedTxn() + + if signed { + signedCreateTxn = createTxn.Txn().Sign(sender.Sk) + } + + return simulationTestCase{ + input: []transactions.SignedTxn{signedCreateTxn}, + expectedError: "err opcode executed", + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: 
[]simulation.TxnGroupResult{
+						{
+							Txns: []simulation.TxnResult{
+								{
+									Txn: transactions.SignedTxnWithAD{
+										ApplyData: transactions.ApplyData{
+											ApplicationID: futureAppID,
+											EvalDelta: transactions.EvalDelta{
+												Logs: []string{"app creation"},
+											},
+										},
+									},
+									MissingSignature: !signed,
+								},
+							},
+							FailedAt: simulation.TxnPath{0},
+						},
+					},
+					WouldSucceed: false,
+				},
+			}
+		})
+	})
+	}
+}
+
+func TestSignatureCheck(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t)
+	defer l.Close()
+	s := simulation.MakeSimulator(l)
+	sender := accounts[0].Addr
+
+	txgroup := []transactions.SignedTxn{
+		txnInfo.NewTxn(txntest.Txn{
+			Type:     protocol.PaymentTx,
+			Sender:   sender,
+			Receiver: sender,
+			Amount:   0,
+		}).SignedTxn(),
+	}
+
+	// should catch missing signature
+	result, err := s.Simulate(txgroup)
+	require.NoError(t, err)
+	require.False(t, result.WouldSucceed)
+	require.Len(t, result.TxnGroups, 1)
+	require.Len(t, result.TxnGroups[0].Txns, 1)
+	require.True(t, result.TxnGroups[0].Txns[0].MissingSignature)
+	require.Zero(t, result.TxnGroups[0].FailureMessage)
+
+	// add signature
+	signatureSecrets := accounts[0].Sk
+	txgroup[0] = txgroup[0].Txn.Sign(signatureSecrets)
+
+	// should not error now that we have a signature
+	result, err = s.Simulate(txgroup)
+	require.NoError(t, err)
+	require.True(t, result.WouldSucceed)
+	require.Len(t, result.TxnGroups, 1)
+	require.Len(t, result.TxnGroups[0].Txns, 1)
+	require.False(t, result.TxnGroups[0].Txns[0].MissingSignature)
+	require.Zero(t, result.TxnGroups[0].FailureMessage)
+
+	// should error with invalid signature
+	txgroup[0].Sig[0] += byte(1) // will wrap if > 255
+	result, err = s.Simulate(txgroup)
+	require.ErrorAs(t, err, &simulation.InvalidTxGroupError{})
+	require.ErrorContains(t, err, "one signature didn't pass")
+}
+
+// TestInvalidTxGroup tests that a transaction group with invalid transactions
+// is rejected by the simulator as an InvalidTxGroupError instead of an EvalFailureError.
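// The checks above pin down Simulate's reporting contract: a merely missing
// signature comes back in the Result (MissingSignature set, WouldSucceed
// false) with no error, while a present-but-invalid signature is surfaced as
// an error. A sketch of how a caller might branch on this (hypothetical,
// using only behavior exercised by these tests):
//
//	result, err := s.Simulate(txgroup)
//	if errors.As(err, &simulation.InvalidTxGroupError{}) {
//		// inadmissible group: bad signature, malformed group, etc.
//	} else if err == nil && !result.WouldSucceed {
//		// evaluable, but would fail or lacks signatures; inspect result.TxnGroups
//	}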
+func TestInvalidTxGroup(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t) + defer l.Close() + s := simulation.MakeSimulator(l) + receiver := accounts[0].Addr + + txgroup := []transactions.SignedTxn{ + txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: ledgertesting.PoolAddr(), + Receiver: receiver, + Amount: 0, + }).SignedTxn(), + } + + // should error with invalid transaction group error + _, err := s.Simulate(txgroup) + require.ErrorAs(t, err, &simulation.InvalidTxGroupError{}) + require.ErrorContains(t, err, "transaction from incentive pool is invalid") +} + +// TestBalanceChangesWithApp sends a payment transaction to a new account and confirms its balance +// within a subsequent app call +func TestBalanceChangesWithApp(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, signed := range []bool{true, false} { + signed := signed + t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + senderBalance := sender.AcctData.MicroAlgos.Raw + sendAmount := senderBalance - 500_000 // Leave 0.5 Algos in the sender account + receiver := accounts[1] + receiverBalance := receiver.AcctData.MicroAlgos.Raw + + futureAppID := basics.AppIndex(1) + createTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApprovalProgram: `#pragma version 6 +txn ApplicationID // [appId] +bz end // [] +int 1 // [1] +balance // [bal[1]] +itob // [itob(bal[1])] +txn ApplicationArgs 0 // [itob(bal[1]), args[0]] +== // [itob(bal[1])=?=args[0]] +assert +end: +int 1 // [1] +`, + ClearStateProgram: `#pragma version 6 +int 1`, + }) + checkStartingBalanceTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: futureAppID, + Accounts: []basics.Address{receiver.Addr}, + ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance)}, + }) + paymentTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: receiver.Addr, + Amount: sendAmount, + }) + checkEndingBalanceTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: futureAppID, + Accounts: []basics.Address{receiver.Addr}, + // Receiver's balance should have increased by sendAmount + ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance + sendAmount)}, + }) + + txntest.Group(&createTxn, &checkStartingBalanceTxn, &paymentTxn, &checkEndingBalanceTxn) + + signedCreateTxn := createTxn.SignedTxn() + signedCheckStartingBalanceTxn := checkStartingBalanceTxn.SignedTxn() + signedPaymentTxn := paymentTxn.SignedTxn() + signedCheckEndingBalanceTxn := checkEndingBalanceTxn.SignedTxn() + + if signed { + signedCreateTxn = createTxn.Txn().Sign(sender.Sk) + signedCheckStartingBalanceTxn = checkStartingBalanceTxn.Txn().Sign(sender.Sk) + signedPaymentTxn = paymentTxn.Txn().Sign(sender.Sk) + signedCheckEndingBalanceTxn = checkEndingBalanceTxn.Txn().Sign(sender.Sk) + } + + return simulationTestCase{ + input: []transactions.SignedTxn{ + signedCreateTxn, + signedCheckStartingBalanceTxn, + signedPaymentTxn, + signedCheckEndingBalanceTxn, + }, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + Txn: 
transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ApplicationID: futureAppID, + }, + }, + MissingSignature: !signed, + }, + { + MissingSignature: !signed, + }, + { + MissingSignature: !signed, + }, + { + MissingSignature: !signed, + }, + }, + }, + }, + WouldSucceed: signed, + }, + } + }) + }) + } +} + +func TestPartialMissingSignatures(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + txn1 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.AssetConfigTx, + Sender: sender.Addr, + AssetParams: basics.AssetParams{ + Total: 10, + Decimals: 0, + Manager: sender.Addr, + UnitName: "A", + }, + }) + txn2 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.AssetConfigTx, + Sender: sender.Addr, + AssetParams: basics.AssetParams{ + Total: 10, + Decimals: 0, + Manager: sender.Addr, + UnitName: "B", + }, + }) + + txntest.Group(&txn1, &txn2) + + // add signature to second transaction only + signedTxn1 := txn1.SignedTxn() + signedTxn2 := txn2.Txn().Sign(sender.Sk) + + return simulationTestCase{ + input: []transactions.SignedTxn{signedTxn1, signedTxn2}, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: true, + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ConfigAsset: 1, + }, + }, + }, { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ConfigAsset: 2, + }, + }, + }, + }, + }, + }, + WouldSucceed: false, + }, + } + }) +} + +// TestPooledFeesAcrossSignedAndUnsigned tests that the simulator's transaction group checks +// allow for pooled fees across a mix of signed and unsigned transactions. +// Transaction 1 is a signed transaction with not enough fees paid on its own. +// Transaction 2 is an unsigned transaction with enough fees paid to cover transaction 1. 
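// Worked numbers for the scenario below: with minimum fee f, pay1 offers
// f-100 and pay2 offers f+100, so the pooled total (f-100)+(f+100) = 2f
// exactly meets the group-wide requirement of 2*f, even though pay1 underpays
// on its own. A simplified sketch of that check (the real rule lives in the
// ledger's fee-pooling logic, not in this test):
//
//	feesPaid := pay1Fee + pay2Fee            // (f-100) + (f+100) = 2f
//	required := uint64(len(group)) * minFee  // 2 * f
//	sufficient := feesPaid >= required       // true: the group pays enough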
+func TestPooledFeesAcrossSignedAndUnsigned(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender1 := accounts[0] + sender2 := accounts[1] + + pay1 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender1.Addr, + Receiver: sender2.Addr, + Amount: 1_000_000, + Fee: txnInfo.CurrentProtocolParams().MinTxnFee - 100, + }) + pay2 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender2.Addr, + Receiver: sender1.Addr, + Amount: 0, + Fee: txnInfo.CurrentProtocolParams().MinTxnFee + 100, + }) + + txntest.Group(&pay1, &pay2) + + // sign pay1 only + signedPay1 := pay1.Txn().Sign(sender1.Sk) + signedPay2 := pay2.SignedTxn() + + return simulationTestCase{ + input: []transactions.SignedTxn{signedPay1, signedPay2}, + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + {}, { + MissingSignature: true, + }, + }, + }, + }, + WouldSucceed: false, + }, + } + }) +} + +const logAndFail = `#pragma version 6 +byte "message" +log +int 0 +` + +func makeItxnSubmitToCallInner(t *testing.T, program string) string { + t.Helper() + ops, err := logic.AssembleString(program) + require.NoError(t, err) + programBytesHex := hex.EncodeToString(ops.Program) + itxnSubmit := fmt.Sprintf(`byte "starting inner txn" +log + +itxn_begin +int appl +itxn_field TypeEnum +int NoOp +itxn_field OnCompletion +byte 0x068101 +itxn_field ClearStateProgram +byte 0x%s +itxn_field ApprovalProgram +itxn_submit + +byte "finished inner txn" +log +`, programBytesHex) + return itxnSubmit +} + +func wrapCodeWithVersionAndReturn(code string) string { + return fmt.Sprintf(`#pragma version 6 +%s +int 1 +return`, code) +} + +func makeProgramToCallInner(t *testing.T, program string) string { + t.Helper() + itxnSubmitCode := makeItxnSubmitToCallInner(t, program) + return wrapCodeWithVersionAndReturn(itxnSubmitCode) +} + +func TestAppCallInnerTxnApplyDataOnFail(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + singleInnerLogAndFail := makeProgramToCallInner(t, logAndFail) + nestedInnerLogAndFail := makeProgramToCallInner(t, singleInnerLogAndFail) + + // fund outer app + pay1 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: basics.AppIndex(3).Address(), + Amount: 401_000, // 400_000 min balance plus 1_000 for 1 txn + }) + // fund inner app + pay2 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: basics.AppIndex(4).Address(), + Amount: 401_000, // 400_000 min balance plus 1_000 for 1 txn + }) + // create app + appCall := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationArgs: [][]byte{uint64ToBytes(uint64(1))}, + ApprovalProgram: nestedInnerLogAndFail, + ClearStateProgram: `#pragma version 6 +int 1`, + }) + + txgroup := txntest.Group(&pay1, &pay2, &appCall) + + return simulationTestCase{ + input: txgroup, + expectedError: "rejected by ApprovalProgram", + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: true, + }, { + MissingSignature: true, + }, { + Txn: 
transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ApplicationID: 3, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"starting inner txn"}, + InnerTxns: []transactions.SignedTxnWithAD{ + { + ApplyData: transactions.ApplyData{ + ApplicationID: 4, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"starting inner txn"}, + InnerTxns: []transactions.SignedTxnWithAD{ + { + ApplyData: transactions.ApplyData{ + ApplicationID: 5, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"message"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + MissingSignature: true, + }, + }, + FailedAt: simulation.TxnPath{2, 0, 0}, + }, + }, + WouldSucceed: false, + }, + } + }) +} + +const createAssetCode = `byte "starting asset create" +log + +itxn_begin +int acfg +itxn_field TypeEnum +itxn_submit + +byte "finished asset create" +log +` + +func TestNonAppCallInnerTxnApplyDataOnFail(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + logAndFailItxnCode := makeItxnSubmitToCallInner(t, logAndFail) + approvalProgram := wrapCodeWithVersionAndReturn(createAssetCode + logAndFailItxnCode) + + // fund outer app + pay1 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: basics.AppIndex(2).Address(), + Amount: 401_000, // 400_000 min balance plus 1_000 for 1 txn + }) + // create app + appCall := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationArgs: [][]byte{uint64ToBytes(uint64(1))}, + ApprovalProgram: approvalProgram, + ClearStateProgram: `#pragma version 6 +int 1`, + }) + + txgroup := txntest.Group(&pay1, &appCall) + + return simulationTestCase{ + input: txgroup, + expectedError: "rejected by ApprovalProgram", + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: true, + }, { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ApplicationID: 2, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"starting asset create", "finished asset create", "starting inner txn"}, + InnerTxns: []transactions.SignedTxnWithAD{ + { + ApplyData: transactions.ApplyData{ + ConfigAsset: 3, + }, + }, + { + ApplyData: transactions.ApplyData{ + ApplicationID: 4, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"message"}, + }, + }, + }, + }, + }, + }, + }, + MissingSignature: true, + }, + }, + FailedAt: simulation.TxnPath{1, 1}, + }, + }, + WouldSucceed: false, + }, + } + }) +} + +const configAssetCode = `byte "starting asset config" +log + +itxn_begin +int acfg +itxn_field TypeEnum +int %d +itxn_field ConfigAsset +itxn_submit + +byte "finished asset config" +log +` + +func TestInnerTxnNonAppCallFailure(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + // configAssetCode should fail because createAssetCode does not set an asset manager + approvalProgram := wrapCodeWithVersionAndReturn(createAssetCode + fmt.Sprintf(configAssetCode, 3)) + + // fund outer app + pay1 := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: basics.AppIndex(2).Address(), + Amount: 402_000, // 400_000 min balance plus 2_000 for 2 
inners + }) + // create app + appCall := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationArgs: [][]byte{uint64ToBytes(uint64(1))}, + ApprovalProgram: approvalProgram, + ClearStateProgram: `#pragma version 6 +int 1`, + }) + + txgroup := txntest.Group(&pay1, &appCall) + + return simulationTestCase{ + input: txgroup, + expectedError: "logic eval error: this transaction should be issued by the manager", + expected: simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { + MissingSignature: true, + }, { + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + ApplicationID: 2, + EvalDelta: transactions.EvalDelta{ + Logs: []string{"starting asset create", "finished asset create", "starting asset config"}, + InnerTxns: []transactions.SignedTxnWithAD{ + { + ApplyData: transactions.ApplyData{ + ConfigAsset: 3, + }, + }, + {}, + }, + }, + }, + }, + MissingSignature: true, + }, + }, + FailedAt: simulation.TxnPath{1, 1}, + }, + }, + WouldSucceed: false, + }, + } + }) +} + +func TestMockTracerScenarios(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + scenarios := mocktracer.GetTestScenarios() + + for name, scenarioFn := range scenarios { + scenarioFn := scenarioFn + t.Run(name, func(t *testing.T) { + t.Parallel() + simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase { + sender := accounts[0] + + futureAppID := basics.AppIndex(2) + payTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: futureAppID.Address(), + Amount: 2_000_000, + }) + appCallTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ClearStateProgram: `#pragma version 6 + int 1`, + }) + scenario := scenarioFn(mocktracer.TestScenarioInfo{ + CallingTxn: appCallTxn.Txn(), + MinFee: basics.MicroAlgos{Raw: txnInfo.CurrentProtocolParams().MinTxnFee}, + CreatedAppID: futureAppID, + }) + appCallTxn.ApprovalProgram = scenario.Program + + txntest.Group(&payTxn, &appCallTxn) + + signedPayTxn := payTxn.Txn().Sign(sender.Sk) + signedAppCallTxn := appCallTxn.Txn().Sign(sender.Sk) + + expectedFailedAt := scenario.FailedAt + if len(expectedFailedAt) != 0 { + // The mocktracer scenario txn is second in our group, so add 1 to the top-level index + expectedFailedAt[0]++ + } + expected := simulation.Result{ + Version: 1, + LastRound: txnInfo.LatestRound(), + TxnGroups: []simulation.TxnGroupResult{ + { + FailedAt: expectedFailedAt, + Txns: []simulation.TxnResult{ + {}, + { + Txn: transactions.SignedTxnWithAD{ + ApplyData: scenario.ExpectedSimulationAD, + }, + }, + }, + }, + }, + WouldSucceed: scenario.Outcome == mocktracer.ApprovalOutcome, + } + + return simulationTestCase{ + input: []transactions.SignedTxn{signedPayTxn, signedAppCallTxn}, + expectedError: scenario.ExpectedError, + expected: expected, + } + }) + }) + } +} diff --git a/ledger/simulation/simulator.go b/ledger/simulation/simulator.go index b559e989f4..5ae93b0195 100644 --- a/ledger/simulation/simulator.go +++ b/ledger/simulation/simulator.go @@ -30,10 +30,6 @@ import ( "github.com/algorand/go-algorand/protocol" ) -// ============================== -// > Simulator Ledger -// ============================== - // simulatorLedger patches the ledger interface to use a constant latest round. 
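 // A minimal sketch of how the pinning behaves (construction assumed here;
 // the simulator builds this wrapper via MakeSimulator):
 //
 //	sl := simulatorLedger{dataLedger, dataLedger.Latest()}
 //	_ = sl.Latest() // keeps answering with the pinned sl.start round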
type simulatorLedger struct { *data.Ledger @@ -55,22 +51,6 @@ func (l simulatorLedger) LookupLatest(addr basics.Address) (basics.AccountData, return basics.AccountData{}, 0, basics.MicroAlgos{}, err } -// ============================== -// > Simulator Tracer -// ============================== - -type evalTracer struct { - logic.NullEvalTracer -} - -func makeTracer() logic.EvalTracer { - return &evalTracer{} -} - -// ============================== -// > Simulator Errors -// ============================== - // SimulatorError is the base error type for all simulator errors. type SimulatorError struct { err error @@ -94,10 +74,6 @@ type EvalFailureError struct { SimulatorError } -// ============================== -// > Simulator -// ============================== - // Simulator is a transaction group simulator for the block evaluator. type Simulator struct { ledger simulatorLedger @@ -128,10 +104,10 @@ var proxySigner = crypto.PrivateKey{ // check verifies that the transaction is well-formed and has valid or missing signatures. // An invalid transaction group error is returned if the transaction is not well-formed or there are invalid signatures. // To make things easier, we support submitting unsigned transactions and will respond whether signatures are missing. -func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.SignedTxn, debugger logic.EvalTracer) (bool, error) { +func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.SignedTxn, debugger logic.EvalTracer) ([]int, error) { proxySignerSecrets, err := crypto.SecretKeyToSignatureSecrets(proxySigner) if err != nil { - return false, err + return nil, err } // Find and prep any transactions that are missing signatures. We will modify a copy of these @@ -147,7 +123,7 @@ func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.Sig txnsToVerify := make([]transactions.SignedTxn, len(txgroup)) for i, stxn := range txgroup { if stxn.Txn.Type == protocol.StateProofTx { - return false, errors.New("cannot simulate StateProof transactions") + return nil, errors.New("cannot simulate StateProof transactions") } if txnHasNoSignature(stxn) { missingSigs = append(missingSigs, i) @@ -165,10 +141,9 @@ func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.Sig // Verify the signed transactions are well-formed and have valid signatures _, err = verify.TxnGroupWithTracer(txnsToVerify, &hdr, nil, s.ledger, debugger) if err != nil { - return false, InvalidTxGroupError{SimulatorError{err}} + err = InvalidTxGroupError{SimulatorError{err}} } - - return len(missingSigs) != 0, nil + return missingSigs, err } func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, stxns []transactions.SignedTxn, tracer logic.EvalTracer) (*ledgercore.ValidatedBlock, error) { @@ -196,22 +171,52 @@ func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, stxns []transactions.Si return vb, nil } -// Simulate simulates a transaction group using the simulator. Will error if the transaction group is not well-formed. 
-func (s Simulator) Simulate(txgroup []transactions.SignedTxn) (*ledgercore.ValidatedBlock, bool, error) { +func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer logic.EvalTracer) (*ledgercore.ValidatedBlock, []int, error) { prevBlockHdr, err := s.ledger.BlockHdr(s.ledger.start) if err != nil { - return nil, false, err + return nil, nil, err } nextBlock := bookkeeping.MakeBlock(prevBlockHdr) hdr := nextBlock.BlockHeader - simulatorTracer := makeTracer() // check that the transaction is well-formed and mark whether signatures are missing - missingSignatures, err := s.check(hdr, txgroup, simulatorTracer) + missingSignatures, err := s.check(hdr, txgroup, tracer) if err != nil { - return nil, false, err + return nil, missingSignatures, err } - vb, err := s.evaluate(hdr, txgroup, simulatorTracer) + vb, err := s.evaluate(hdr, txgroup, tracer) return vb, missingSignatures, err } + +// Simulate simulates a transaction group using the simulator. Will error if the transaction group is not well-formed. +func (s Simulator) Simulate(txgroup []transactions.SignedTxn) (Result, error) { + simulatorTracer := makeEvalTracer(s.ledger.start, txgroup) + block, missingSigIndexes, err := s.simulateWithTracer(txgroup, simulatorTracer) + if err != nil { + simulatorTracer.result.WouldSucceed = false + + var lsigError verify.LogicSigError + switch { + case errors.As(err, &lsigError): + simulatorTracer.result.TxnGroups[0].FailureMessage = lsigError.Error() + simulatorTracer.result.TxnGroups[0].FailedAt = TxnPath{uint64(lsigError.GroupIndex)} + case errors.As(err, &EvalFailureError{}): + simulatorTracer.result.TxnGroups[0].FailureMessage = err.Error() + simulatorTracer.result.TxnGroups[0].FailedAt = simulatorTracer.failedAt + default: + // error is not related to evaluation + return Result{}, err + } + } + + simulatorTracer.result.Block = block + + // mark whether signatures are missing + for _, index := range missingSigIndexes { + simulatorTracer.result.TxnGroups[0].Txns[index].MissingSignature = true + simulatorTracer.result.WouldSucceed = false + } + + return *simulatorTracer.result, nil +} diff --git a/ledger/simulation/simulator_test.go b/ledger/simulation/simulator_test.go index d029109197..856fabe25b 100644 --- a/ledger/simulation/simulator_test.go +++ b/ledger/simulation/simulator_test.go @@ -14,129 +14,25 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . 
-package simulation_test +package simulation import ( - "encoding/binary" - "fmt" "reflect" "testing" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" - "github.com/algorand/go-algorand/data" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" - - "github.com/algorand/go-algorand/ledger" + "github.com/algorand/go-algorand/data/transactions/logic/mocktracer" + "github.com/algorand/go-algorand/data/txntest" "github.com/algorand/go-algorand/ledger/internal" - "github.com/algorand/go-algorand/ledger/simulation" - ledgertesting "github.com/algorand/go-algorand/ledger/testing" - "github.com/algorand/go-algorand/libgoal" - "github.com/algorand/go-algorand/logging" + simulationtesting "github.com/algorand/go-algorand/ledger/simulation/testing" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) -// ============================== -// > Simulation Test Helpers -// ============================== - -type account struct { - addr basics.Address - sk *crypto.SignatureSecrets - acctData basics.AccountData -} - -func prepareSimulatorTest(t *testing.T) (l *data.Ledger, accounts []account, makeTxnHeader func(sender basics.Address) transactions.Header) { - genesisInitState, keys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100) - - // Prepare ledger - const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = true - log := logging.TestingLog(t) - log.SetLevel(logging.Warn) - realLedger, err := ledger.OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) - require.NoError(t, err, "could not open ledger") - - l = &data.Ledger{Ledger: realLedger} - require.NotNil(t, l) - - // Reformat accounts - accounts = make([]account, len(keys)-2) // -2 for pool and sink accounts - i := 0 - for addr, key := range keys { - if addr == ledgertesting.PoolAddr() || addr == ledgertesting.SinkAddr() { - continue - } - - acctData := genesisInitState.Accounts[addr] - accounts[i] = account{addr, key, acctData} - i++ - } - - // txn header generator - hdr, err := l.BlockHdr(l.Latest()) - require.NoError(t, err) - makeTxnHeader = func(sender basics.Address) transactions.Header { - return transactions.Header{ - Fee: basics.MicroAlgos{Raw: 1000}, - FirstValid: hdr.Round, - GenesisID: hdr.GenesisID, - GenesisHash: hdr.GenesisHash, - LastValid: hdr.Round + basics.Round(1000), - Note: []byte{240, 134, 38, 55, 197, 14, 142, 132}, - Sender: sender, - } - } - - return -} - -func makeTestClient() libgoal.Client { - c, err := libgoal.MakeClientFromConfig(libgoal.ClientConfig{ - AlgodDataDir: "NO_DIR", - }, libgoal.DynamicClient) - if err != nil { - panic(err) - } - - return c -} - -// Attach group ID to a transaction group. Mutates the group directly. 
-func attachGroupID(txgroup []transactions.SignedTxn) error { - txnArray := make([]transactions.Transaction, len(txgroup)) - for i, txn := range txgroup { - txnArray[i] = txn.Txn - } - - client := makeTestClient() - groupID, err := client.GroupID(txnArray) - if err != nil { - return err - } - - for i := range txgroup { - txgroup[i].Txn.Header.Group = groupID - } - - return nil -} - -func uint64ToBytes(num uint64) []byte { - ibytes := make([]byte, 8) - binary.BigEndian.PutUint64(ibytes, num) - return ibytes -} - -// ============================== -// > Sanity Tests -// ============================== - // We want to be careful that the Algod ledger does not move on to another round // so we confirm here that all ledger methods which implicitly access the current round // are overriden within the `simulatorLedger`. @@ -144,7 +40,7 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - l, _, _ := prepareSimulatorTest(t) + l, _, _ := simulationtesting.PrepareSimulatorTest(t) // methods overriden by `simulatorLedger`` overridenMethods := []string{ @@ -202,545 +98,70 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) { } } -// ============================== -// > Simulation Tests -// ============================== - -func TestPayTxn(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - for _, signed := range []bool{true, false} { - signed := signed - t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { - t.Parallel() - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0] - - txn := transactions.SignedTxn{ - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender.addr), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender.addr, - Amount: basics.MicroAlgos{Raw: 0}, - }, - }, - } - - if signed { - txn = txn.Txn.Sign(sender.sk) - } - - txgroup := []transactions.SignedTxn{txn} - - _, _, err := s.Simulate(txgroup) - require.NoError(t, err) - }) - } -} - -func TestOverspendPayTxn(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - for _, signed := range []bool{true, false} { - signed := signed - t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { - t.Parallel() - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0] - senderBalance := sender.acctData.MicroAlgos - amount := senderBalance.Raw + 100 - - txn := transactions.SignedTxn{ - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender.addr), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender.addr, - Amount: basics.MicroAlgos{Raw: amount}, // overspend - }, - }, - } - - if signed { - txn = txn.Txn.Sign(sender.sk) - } - - txgroup := []transactions.SignedTxn{txn} - - _, _, err := s.Simulate(txgroup) - require.ErrorAs(t, err, &simulation.EvalFailureError{}) - require.ErrorContains(t, err, fmt.Sprintf("tried to spend {%d}", amount)) - }) - } -} - -func TestAuthAddrTxn(t *testing.T) { +// TestSimulateWithTrace is a simple test to ensure that the debugger hooks are called. 
More +// complicated tests are in data/transactions/logic/tracer_test.go +func TestSimulateWithTrace(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - for _, signed := range []bool{true, false} { - signed := signed - t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) { - t.Parallel() - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0] - authority := accounts[1] - - txn := transactions.SignedTxn{ - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender.addr), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender.addr, - Amount: basics.MicroAlgos{Raw: 0}, - }, - }, - AuthAddr: authority.addr, - } - - if signed { - txn = txn.Txn.Sign(authority.sk) - } - - txgroup := []transactions.SignedTxn{txn} - - _, _, err := s.Simulate(txgroup) - require.ErrorContains(t, err, fmt.Sprintf("should have been authorized by %s but was actually authorized by %s", sender.addr, authority.addr)) - }) - } -} - -func TestStateProofTxn(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, _, makeTxnHeader := prepareSimulatorTest(t) + l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t) defer l.Close() - s := simulation.MakeSimulator(l) - - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.StateProofTx, - Header: makeTxnHeader(transactions.StateProofSender), - // No need to fill out StateProofTxnFields, this should fail at signature verification - }, - }, - } - - _, _, err := s.Simulate(txgroup) - require.ErrorContains(t, err, "cannot simulate StateProof transactions") -} - -func TestSimpleGroupTxn(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender1 := accounts[0].addr - sender1Balance := accounts[0].acctData.MicroAlgos - sender2 := accounts[1].addr - sender2Balance := accounts[1].acctData.MicroAlgos - - // Send money back and forth - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender1), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender2, - Amount: basics.MicroAlgos{Raw: 1000000}, - }, - }, - }, - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender2), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender1, - Amount: basics.MicroAlgos{Raw: 0}, - }, - }, - }, - } - - // Should fail if there is no group parameter - _, _, err := s.Simulate(txgroup) - require.ErrorAs(t, err, &simulation.EvalFailureError{}) - require.ErrorContains(t, err, "had zero Group but was submitted in a group of 2") - - // Add group parameter - err = attachGroupID(txgroup) - require.NoError(t, err) - - // Check balances before transaction - sender1Data, _, err := l.LookupWithoutRewards(l.Latest(), sender1) - require.NoError(t, err) - require.Equal(t, sender1Balance, sender1Data.MicroAlgos) - - sender2Data, _, err := l.LookupWithoutRewards(l.Latest(), sender2) - require.NoError(t, err) - require.Equal(t, sender2Balance, sender2Data.MicroAlgos) - - // Should now pass - _, _, err = s.Simulate(txgroup) - require.NoError(t, err) - - // Confirm balances have not changed - sender1Data, _, err = l.LookupWithoutRewards(l.Latest(), sender1) - require.NoError(t, err) - require.Equal(t, sender1Balance, sender1Data.MicroAlgos) - - sender2Data, _, err = 
l.LookupWithoutRewards(l.Latest(), sender2) - require.NoError(t, err) - require.Equal(t, sender2Balance, sender2Data.MicroAlgos) -} - -const trivialAVMProgram = `#pragma version 2 -int 1` -const rejectAVMProgram = `#pragma version 2 -int 0` - -func TestSimpleAppCall(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0].addr - - // Compile AVM program - ops, err := logic.AssembleString(trivialAVMProgram) - require.NoError(t, err, ops.Errors) - prog := ops.Program - - // Create program and call it - futureAppID := 1 - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: makeTxnHeader(sender), - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: 0, - ApprovalProgram: prog, - ClearStateProgram: prog, - LocalStateSchema: basics.StateSchema{ - NumUint: 0, - NumByteSlice: 0, - }, - GlobalStateSchema: basics.StateSchema{ - NumUint: 0, - NumByteSlice: 0, - }, - }, - }, - }, - { - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: makeTxnHeader(sender), - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: basics.AppIndex(futureAppID), - }, - }, - }, - } - - err = attachGroupID(txgroup) - require.NoError(t, err) - - _, _, err = s.Simulate(txgroup) - require.NoError(t, err) -} - -func TestRejectAppCall(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0].addr - - // Compile AVM program - ops, err := logic.AssembleString(rejectAVMProgram) - require.NoError(t, err, ops.Errors) - prog := ops.Program - - // Create program and call it - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: makeTxnHeader(sender), - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: 0, - ApprovalProgram: prog, - ClearStateProgram: prog, - LocalStateSchema: basics.StateSchema{ - NumUint: 0, - NumByteSlice: 0, - }, - GlobalStateSchema: basics.StateSchema{ - NumUint: 0, - NumByteSlice: 0, - }, - }, - }, - }, - } - - err = attachGroupID(txgroup) - require.NoError(t, err) - - _, _, err = s.Simulate(txgroup) - require.ErrorAs(t, err, &simulation.EvalFailureError{}) - require.ErrorContains(t, err, "transaction rejected by ApprovalProgram") -} - -func TestSignatureCheck(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0].addr - - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender, - Amount: basics.MicroAlgos{Raw: 0}, - }, - }, - }, - } - - // should catch missing signature - _, missingSignatures, err := s.Simulate(txgroup) - require.NoError(t, err) - require.True(t, missingSignatures) - - // add signature - signatureSecrets := accounts[0].sk - txgroup[0] = txgroup[0].Txn.Sign(signatureSecrets) - - // should not error now that we have a signature - _, missingSignatures, err = s.Simulate(txgroup) - require.NoError(t, err) - require.False(t, missingSignatures) - - // should error with invalid signature - 
txgroup[0].Sig[0] += byte(1) // will wrap if > 255 - _, _, err = s.Simulate(txgroup) - require.ErrorAs(t, err, &simulation.InvalidTxGroupError{}) - require.ErrorContains(t, err, "one signature didn't pass") -} - -// TestInvalidTxGroup tests that a transaction group with invalid transactions -// is rejected by the simulator as an InvalidTxGroupError instead of a EvalFailureError. -func TestInvalidTxGroup(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - receiver := accounts[0].addr - - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - // invalid sender - Header: makeTxnHeader(ledgertesting.PoolAddr()), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: receiver, - Amount: basics.MicroAlgos{Raw: 0}, - }, - }, - }, - } - - // should error with invalid transaction group error - _, _, err := s.Simulate(txgroup) - require.ErrorAs(t, err, &simulation.InvalidTxGroupError{}) - require.ErrorContains(t, err, "transaction from incentive pool is invalid") -} - -const accountBalanceCheckProgram = `#pragma version 4 - txn ApplicationID // [appId] - bz end // [] - int 1 // [1] - balance // [bal[1]] - itob // [itob(bal[1])] - txn ApplicationArgs 0 // [itob(bal[1]), args[0]] - == // [itob(bal[1])=?=args[0]] - assert - b end -end: - int 1 // [1] -` - -func TestBalanceChangesWithApp(t *testing.T) { - // Send a payment transaction to a new account and confirm its balance within an app call - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender := accounts[0].addr - senderBalance := accounts[0].acctData.MicroAlgos.Raw - sendAmount := senderBalance - 500000 - receiver := accounts[1].addr - receiverBalance := accounts[1].acctData.MicroAlgos.Raw - - // Compile approval program - ops, err := logic.AssembleString(accountBalanceCheckProgram) - require.NoError(t, err, ops.Errors) - approvalProg := ops.Program - - // Compile clear program - ops, err = logic.AssembleString(trivialAVMProgram) - require.NoError(t, err, ops.Errors) - clearStateProg := ops.Program - - futureAppID := 1 - txgroup := []transactions.SignedTxn{ - // create app - { - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: makeTxnHeader(sender), - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: 0, - ApprovalProgram: approvalProg, - ClearStateProgram: clearStateProg, - LocalStateSchema: basics.StateSchema{ - NumUint: 0, - NumByteSlice: 0, - }, - GlobalStateSchema: basics.StateSchema{ - NumUint: 0, - NumByteSlice: 0, - }, - }, - }, - }, - // check balance - { - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: makeTxnHeader(sender), - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: basics.AppIndex(futureAppID), - Accounts: []basics.Address{receiver}, - ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance)}, - }, - }, - }, - // send payment - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: makeTxnHeader(sender), - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: receiver, - Amount: basics.MicroAlgos{Raw: sendAmount}, - }, - }, - }, - // check balance changed - { - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: makeTxnHeader(sender), - ApplicationCallTxnFields: 
transactions.ApplicationCallTxnFields{ - ApplicationID: basics.AppIndex(futureAppID), - Accounts: []basics.Address{receiver}, - ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance + sendAmount)}, - }, - }, - }, - } - - err = attachGroupID(txgroup) - require.NoError(t, err) - - _, _, err = s.Simulate(txgroup) - require.NoError(t, err) -} - -// TestBalanceChangesWithApp tests that the simulator's transaction group checks -// allow for pooled fees across a mix of signed and unsigned transactions. -// Transaction 1 is a signed transaction with not enough fees paid on its own. -// Transaction 2 is an unsigned transaction with enough fees paid to cover transaction 1. -func TestPooledFeesAcrossSignedAndUnsigned(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - l, accounts, makeTxnHeader := prepareSimulatorTest(t) - defer l.Close() - s := simulation.MakeSimulator(l) - sender1 := accounts[0].addr - sender2 := accounts[1].addr - - txnHeader1 := makeTxnHeader(sender1) - txnHeader2 := makeTxnHeader(sender2) - txnHeader1.Fee = basics.MicroAlgos{Raw: txnHeader1.Fee.Raw - 100} - txnHeader2.Fee = basics.MicroAlgos{Raw: txnHeader2.Fee.Raw + 100} - - // Send money back and forth - txgroup := []transactions.SignedTxn{ - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: txnHeader1, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender2, - Amount: basics.MicroAlgos{Raw: 1000000}, - }, - }, - }, - { - Txn: transactions.Transaction{ - Type: protocol.PaymentTx, - Header: txnHeader2, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: sender1, - Amount: basics.MicroAlgos{Raw: 0}, - }, - }, - }, - } - - err := attachGroupID(txgroup) - require.NoError(t, err) - - // add signature to txn 1 - signatureSecrets := accounts[0].sk - txgroup[0] = txgroup[0].Txn.Sign(signatureSecrets) - - _, _, err = s.Simulate(txgroup) - require.NoError(t, err) + s := MakeSimulator(l) + sender := accounts[0] + + op, err := logic.AssembleString(`#pragma version 8 +int 1`) + require.NoError(t, err) + program := logic.Program(op.Program) + lsigAddr := basics.Address(crypto.HashObj(&program)) + + payTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: lsigAddr, + Amount: 1_000_000, + }) + appCallTxn := txnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: lsigAddr, + ApprovalProgram: `#pragma version 8 +int 1`, + ClearStateProgram: `#pragma version 8 +int 1`, + }) + + txntest.Group(&payTxn, &appCallTxn) + + signedPayTxn := payTxn.Txn().Sign(sender.Sk) + signedAppCallTxn := appCallTxn.SignedTxn() + signedAppCallTxn.Lsig.Logic = program + + txgroup := []transactions.SignedTxn{signedPayTxn, signedAppCallTxn} + + mockTracer := &mocktracer.Tracer{} + block, _, err := s.simulateWithTracer(txgroup, mockTracer) + require.NoError(t, err) + + payset := block.Block().Payset + require.Len(t, payset, 2) + + expectedEvents := []mocktracer.Event{ + // LogicSig evaluation + mocktracer.BeforeProgram(logic.ModeSig), + mocktracer.BeforeOpcode(), + mocktracer.AfterOpcode(false), + mocktracer.AfterProgram(logic.ModeSig, false), + // Txn evaluation + mocktracer.BeforeTxnGroup(2), + mocktracer.BeforeTxn(protocol.PaymentTx), + mocktracer.AfterTxn(protocol.PaymentTx, payset[0].ApplyData, false), + mocktracer.BeforeTxn(protocol.ApplicationCallTx), + mocktracer.BeforeProgram(logic.ModeApp), + mocktracer.BeforeOpcode(), + mocktracer.AfterOpcode(false), + mocktracer.AfterProgram(logic.ModeApp, false), + 
mocktracer.AfterTxn(protocol.ApplicationCallTx, payset[1].ApplyData, false), + mocktracer.AfterTxnGroup(2, false), + } + require.Equal(t, expectedEvents, mockTracer.Events) } diff --git a/ledger/simulation/testing/utils.go b/ledger/simulation/testing/utils.go new file mode 100644 index 0000000000..a18a791e1c --- /dev/null +++ b/ledger/simulation/testing/utils.go @@ -0,0 +1,135 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package simulationtesting + +import ( + "math/rand" + "testing" + "time" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/txntest" + "github.com/algorand/go-algorand/ledger" + ledgertesting "github.com/algorand/go-algorand/ledger/testing" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" + "github.com/stretchr/testify/require" +) + +// Account contains public and private keys, as well as the state of an account +type Account struct { + Addr basics.Address + Sk *crypto.SignatureSecrets + AcctData basics.AccountData +} + +// TxnInfo contains information about the network used for instantiating txntest.Txns +type TxnInfo struct { + LatestHeader bookkeeping.BlockHeader +} + +// LatestRound returns the round number of the most recently committed block +func (info TxnInfo) LatestRound() basics.Round { + return info.LatestHeader.Round +} + +// CurrentProtocolParams returns the consensus parameters that the network is currently using +func (info TxnInfo) CurrentProtocolParams() config.ConsensusParams { + return config.Consensus[info.LatestHeader.CurrentProtocol] +} + +// NewTxn sets network-specific values to the given transaction +func (info TxnInfo) NewTxn(txn txntest.Txn) txntest.Txn { + txn.FirstValid = info.LatestHeader.Round + txn.GenesisID = info.LatestHeader.GenesisID + txn.GenesisHash = info.LatestHeader.GenesisHash + txn.FillDefaults(info.CurrentProtocolParams()) + return txn +} + +// InnerTxn sets network- and parent-specific values to the given inner transaction. This is only +// useful for creating an expected inner transaction to compare against. 
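+// A hedged usage sketch (signedParent and the payment inner are assumptions
+// for illustration, not taken from this patch):
+//
+//	expected := info.InnerTxn(signedParent, txntest.Txn{Type: protocol.PaymentTx})
+//	// expected.SignedTxn() can then be compared against an EvalDelta.InnerTxns entry.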
+func (info TxnInfo) InnerTxn(parent transactions.SignedTxn, inner txntest.Txn) txntest.Txn { + inner.FirstValid = parent.Txn.FirstValid + inner.LastValid = parent.Txn.LastValid + inner.FillDefaults(info.CurrentProtocolParams()) + return inner +} + +// PrepareSimulatorTest creates an environment to test transaction simulations +func PrepareSimulatorTest(t *testing.T) (l *data.Ledger, accounts []Account, txnInfo TxnInfo) { + genesisInitState, keys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100) + + // Prepare ledger + const inMem = true + cfg := config.GetDefaultLocal() + cfg.Archival = true + log := logging.TestingLog(t) + log.SetLevel(logging.Warn) + realLedger, err := ledger.OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + + l = &data.Ledger{Ledger: realLedger} + require.NotNil(t, l) + + // Reformat accounts + accounts = make([]Account, len(keys)-2) // -2 for pool and sink accounts + i := 0 + for addr, key := range keys { + if addr == ledgertesting.PoolAddr() || addr == ledgertesting.SinkAddr() { + continue + } + + acctData := genesisInitState.Accounts[addr] + accounts[i] = Account{ + Addr: addr, + Sk: key, + AcctData: acctData, + } + i++ + } + + latest := l.Latest() + latestHeader, err := l.BlockHdr(latest) + require.NoError(t, err) + + rand.Seed(time.Now().UnixNano()) + + // append a random number of blocks to ensure simulation results have a valid LastRound field + numBlocks := rand.Intn(4) + for i := 0; i < numBlocks; i++ { + nextBlock := bookkeeping.MakeBlock(latestHeader) + err = l.AddBlock(nextBlock, agreement.Certificate{}) + require.NoError(t, err) + + // round has advanced by 1 + require.Equal(t, latest+1, l.Latest()) + latest++ + + latestHeader = nextBlock.BlockHeader + } + + txnInfo = TxnInfo{latestHeader} + + return +} diff --git a/ledger/simulation/trace.go b/ledger/simulation/trace.go new file mode 100644 index 0000000000..d9c2e5f6a0 --- /dev/null +++ b/ledger/simulation/trace.go @@ -0,0 +1,94 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package simulation + +import ( + "fmt" + + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/ledgercore" +) + +// TxnPath is a "transaction path": e.g. [0, 0, 1] means the second inner txn of the first inner txn of the first txn. +// You can use this transaction path to find the txn data in the `TxnResults` list. 
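+// For example, resolving TxnPath{0, 0, 1} against a Result (a sketch using the
+// types defined below; SignedTxnWithAD embeds ApplyData):
+//
+//	top := result.TxnGroups[0].Txns[0].Txn            // first top-level txn
+//	first := top.ApplyData.EvalDelta.InnerTxns[0]     // its first inner txn
+//	second := first.ApplyData.EvalDelta.InnerTxns[1]  // that inner's second inner txn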
+type TxnPath []uint64 + +// TxnResult contains the simulation result for a single transaction +type TxnResult struct { + Txn transactions.SignedTxnWithAD + MissingSignature bool +} + +// TxnGroupResult contains the simulation result for a single transaction group +type TxnGroupResult struct { + Txns []TxnResult + FailureMessage string + + // FailedAt is the path to the txn that failed inside of this group + FailedAt TxnPath +} + +func makeTxnGroupResult(txgroup []transactions.SignedTxn) TxnGroupResult { + groupResult := TxnGroupResult{Txns: make([]TxnResult, len(txgroup))} + for i, tx := range txgroup { + groupResult.Txns[i] = TxnResult{Txn: transactions.SignedTxnWithAD{ + SignedTxn: tx, + }} + } + return groupResult +} + +// ResultLatestVersion is the latest version of the Result struct +const ResultLatestVersion = uint64(1) + +// Result contains the result from a call to Simulator.Simulate +type Result struct { + Version uint64 + LastRound basics.Round + TxnGroups []TxnGroupResult // this is a list so that supporting multiple in the future is not breaking + WouldSucceed bool // true iff no failure message, no missing signatures, and the budget was not exceeded + Block *ledgercore.ValidatedBlock +} + +func makeSimulationResultWithVersion(lastRound basics.Round, txgroups [][]transactions.SignedTxn, version uint64) (Result, error) { + if version != ResultLatestVersion { + return Result{}, fmt.Errorf("invalid SimulationResult version: %d", version) + } + + groups := make([]TxnGroupResult, len(txgroups)) + + for i, txgroup := range txgroups { + groups[i] = makeTxnGroupResult(txgroup) + } + + return Result{ + Version: version, + LastRound: lastRound, + TxnGroups: groups, + WouldSucceed: true, + }, nil +} + +func makeSimulationResult(lastRound basics.Round, txgroups [][]transactions.SignedTxn) Result { + result, err := makeSimulationResultWithVersion(lastRound, txgroups, ResultLatestVersion) + if err != nil { + // this should never happen, since we pass in ResultLatestVersion + panic(err) + } + return result +} diff --git a/ledger/simulation/tracer.go b/ledger/simulation/tracer.go new file mode 100644 index 0000000000..7f5840753f --- /dev/null +++ b/ledger/simulation/tracer.go @@ -0,0 +1,193 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package simulation + +import ( + "fmt" + + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" +) + +// cursorEvalTracer is responsible for maintaining a TxnPath that points to the currently executing +// transaction. The absolutePath() function is used to get this path. 
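+// Worked example (sketch): if an earlier inner group under top-level txn 1 has
+// already contributed two inner txns, then while the first txn of the next
+// inner group executes, relativeCursor is [1, 0] and previousInnerTxns is [2],
+// so absolutePath() returns TxnPath{1, 2}.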
+type cursorEvalTracer struct { + logic.NullEvalTracer + + relativeCursor []int + previousInnerTxns []int +} + +func (tracer *cursorEvalTracer) BeforeTxnGroup(ep *logic.EvalParams) { + tracer.relativeCursor = append(tracer.relativeCursor, -1) // will go to 0 in BeforeTxn +} + +func (tracer *cursorEvalTracer) BeforeTxn(ep *logic.EvalParams, groupIndex int) { + top := len(tracer.relativeCursor) - 1 + tracer.relativeCursor[top]++ + tracer.previousInnerTxns = append(tracer.previousInnerTxns, 0) +} + +func (tracer *cursorEvalTracer) AfterTxn(ep *logic.EvalParams, groupIndex int, ad transactions.ApplyData, evalError error) { + tracer.previousInnerTxns = tracer.previousInnerTxns[:len(tracer.previousInnerTxns)-1] +} + +func (tracer *cursorEvalTracer) AfterTxnGroup(ep *logic.EvalParams, evalError error) { + top := len(tracer.relativeCursor) - 1 + if len(tracer.previousInnerTxns) != 0 { + tracer.previousInnerTxns[len(tracer.previousInnerTxns)-1] += tracer.relativeCursor[top] + 1 + } + tracer.relativeCursor = tracer.relativeCursor[:top] +} + +func (tracer *cursorEvalTracer) relativeGroupIndex() int { + top := len(tracer.relativeCursor) - 1 + return tracer.relativeCursor[top] +} + +func (tracer *cursorEvalTracer) absolutePath() TxnPath { + path := make(TxnPath, len(tracer.relativeCursor)) + for i, relativeGroupIndex := range tracer.relativeCursor { + absoluteIndex := uint64(relativeGroupIndex) + if i > 0 { + absoluteIndex += uint64(tracer.previousInnerTxns[i-1]) + } + path[i] = absoluteIndex + } + return path +} + +// evalTracer is responsible for populating a Result during a simulation evaluation. It saves +// EvalDelta & inner transaction changes as they happen, so if an error occurs during evaluation, we +// can return a partially-built ApplyData with as much information as possible at the time of the +// error. +type evalTracer struct { + cursorEvalTracer + + result *Result + failedAt TxnPath +} + +func makeEvalTracer(lastRound basics.Round, txgroup []transactions.SignedTxn) *evalTracer { + result := makeSimulationResult(lastRound, [][]transactions.SignedTxn{txgroup}) + return &evalTracer{result: &result} +} + +func (tracer *evalTracer) handleError(evalError error) { + if evalError != nil && tracer.failedAt == nil { + tracer.failedAt = tracer.absolutePath() + } +} + +func (tracer *evalTracer) getApplyDataAtPath(path TxnPath) (*transactions.ApplyData, error) { + if len(path) == 0 { + return nil, fmt.Errorf("simulator debugger error: path is empty") + } + + applyDataCursor := &tracer.result.TxnGroups[0].Txns[path[0]].Txn.ApplyData + + for _, index := range path[1:] { + innerTxns := applyDataCursor.EvalDelta.InnerTxns + if index >= uint64(len(innerTxns)) { + return nil, fmt.Errorf("simulator debugger error: index %d out of range with length %d. 
Full path: %v", index, len(innerTxns), path) + } + applyDataCursor = &innerTxns[index].ApplyData + } + + return applyDataCursor, nil +} + +func (tracer *evalTracer) mustGetApplyDataAtPath(path TxnPath) *transactions.ApplyData { + ad, err := tracer.getApplyDataAtPath(path) + if err != nil { + panic(err) + } + return ad +} + +// Copy the inner transaction group to the ApplyData.EvalDelta.InnerTxns of the calling transaction +func (tracer *evalTracer) populateInnerTransactions(txgroup []transactions.SignedTxnWithAD) { + applyDataOfCallingTxn := tracer.mustGetApplyDataAtPath(tracer.absolutePath()) // this works because the cursor has not been updated yet by `BeforeTxn` + applyDataOfCallingTxn.EvalDelta.InnerTxns = append(applyDataOfCallingTxn.EvalDelta.InnerTxns, txgroup...) +} + +func (tracer *evalTracer) BeforeTxnGroup(ep *logic.EvalParams) { + if ep.GetCaller() != nil { + // If this is an inner txn group, save the txns + tracer.populateInnerTransactions(ep.TxnGroup) + } + tracer.cursorEvalTracer.BeforeTxnGroup(ep) +} + +func (tracer *evalTracer) AfterTxnGroup(ep *logic.EvalParams, evalError error) { + tracer.handleError(evalError) + tracer.cursorEvalTracer.AfterTxnGroup(ep, evalError) +} + +func (tracer *evalTracer) saveApplyData(applyData transactions.ApplyData) { + applyDataOfCurrentTxn := tracer.mustGetApplyDataAtPath(tracer.absolutePath()) + // Copy everything except the EvalDelta, since that has been kept up-to-date after every op + evalDelta := applyDataOfCurrentTxn.EvalDelta + *applyDataOfCurrentTxn = applyData + applyDataOfCurrentTxn.EvalDelta = evalDelta +} + +func (tracer *evalTracer) AfterTxn(ep *logic.EvalParams, groupIndex int, ad transactions.ApplyData, evalError error) { + tracer.handleError(evalError) + tracer.saveApplyData(ad) + tracer.cursorEvalTracer.AfterTxn(ep, groupIndex, ad, evalError) +} + +func (tracer *evalTracer) saveEvalDelta(evalDelta transactions.EvalDelta, appIDToSave basics.AppIndex) { + applyDataOfCurrentTxn := tracer.mustGetApplyDataAtPath(tracer.absolutePath()) + // Copy everything except the inner transactions, since those have been kept up-to-date when we + // traced those transactions. + inners := applyDataOfCurrentTxn.EvalDelta.InnerTxns + applyDataOfCurrentTxn.EvalDelta = evalDelta + applyDataOfCurrentTxn.EvalDelta.InnerTxns = inners +} + +func (tracer *evalTracer) BeforeOpcode(cx *logic.EvalContext) { + if cx.RunMode() != logic.ModeApp { + // do nothing for LogicSig ops + return + } + groupIndex := tracer.relativeGroupIndex() + var appIDToSave basics.AppIndex + if cx.TxnGroup[groupIndex].SignedTxn.Txn.ApplicationID == 0 { + // app creation + appIDToSave = cx.AppID() + } + tracer.saveEvalDelta(cx.TxnGroup[groupIndex].EvalDelta, appIDToSave) +} + +func (tracer *evalTracer) AfterOpcode(cx *logic.EvalContext, evalError error) { + if cx.RunMode() != logic.ModeApp { + // do nothing for LogicSig ops + return + } + tracer.handleError(evalError) +} + +func (tracer *evalTracer) AfterProgram(cx *logic.EvalContext, evalError error) { + if cx.RunMode() != logic.ModeApp { + // do nothing for LogicSig programs + return + } + tracer.handleError(evalError) +} diff --git a/ledger/simulation/tracer_test.go b/ledger/simulation/tracer_test.go new file mode 100644 index 0000000000..b4429be414 --- /dev/null +++ b/ledger/simulation/tracer_test.go @@ -0,0 +1,257 @@ +// Copyright (C) 2019-2023 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package simulation + +import ( + "fmt" + "testing" + + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/data/transactions/logic/mocktracer" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func TestCursorEvalTracer(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + type step struct { + action mocktracer.EventType + expectedPath TxnPath + } + + type testCase struct { + name string + timeline []step + expectedPathAtEnd TxnPath + } + + testCases := []testCase{ + { + name: "empty", + timeline: []step{}, + expectedPathAtEnd: TxnPath{}, + }, + { + name: "single txn", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "two txns", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{1}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{1}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "many txns", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{1}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{1}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{2}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{2}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{3}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{3}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{4}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{4}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "single txn with inner txn", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "single txn with multiple serial inner txns", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + 
{action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "single txn with 2 serial inner txns with another inner txn in the second one", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "single txn with 2 serial inner txns with 2 serial inner txns in the second one", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1, 1}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1, 1}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "single txn with 2 serial inner txns with an inner txn in the first one and 2 serial inner txns in the second one", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 0, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: 
mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 1, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 1}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 2}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0, 2, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 2, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0, 2}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + { + name: "second txn with deep inners", + timeline: []step{ + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{0}}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{1}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{1, 0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{1, 0, 0}}, + {action: mocktracer.BeforeTxnGroupEvent}, + {action: mocktracer.BeforeTxnEvent, expectedPath: TxnPath{1, 0, 0, 0}}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{1, 0, 0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{1, 0, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{1, 0}}, + {action: mocktracer.AfterTxnGroupEvent}, + {action: mocktracer.AfterTxnEvent, expectedPath: TxnPath{1}}, + {action: mocktracer.AfterTxnGroupEvent}, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("%s", tc.name), func(t *testing.T) { + t.Parallel() + var tracer cursorEvalTracer + + // These don't matter so they can be anything + ep := logic.EvalParams{} + groupIndex := 0 + + for i, step := range tc.timeline { + switch step.action { + case mocktracer.BeforeTxnEvent: + tracer.BeforeTxn(&ep, groupIndex) + case mocktracer.AfterTxnEvent: + tracer.AfterTxn(&ep, groupIndex, transactions.ApplyData{}, nil) + case mocktracer.BeforeTxnGroupEvent: + tracer.BeforeTxnGroup(&ep) + case mocktracer.AfterTxnGroupEvent: + tracer.AfterTxnGroup(&ep, nil) + default: + t.Fatalf("unexpected timeline hook: %v", step.action) + } + if step.expectedPath != nil { + switch step.action { + case mocktracer.BeforeTxnGroupEvent, mocktracer.AfterTxnGroupEvent: + t.Fatalf("Path is unspecified for hook: %v", step.action) + } + require.Equalf(t, step.expectedPath, tracer.absolutePath(), "step index %d (action %v), tracer: %#v", i, step.action, tracer) + } + } + + if tc.expectedPathAtEnd != nil { + require.Equal(t, tc.expectedPathAtEnd, tracer.absolutePath()) + } + }) + } +} diff --git a/node/follower_node.go b/node/follower_node.go index 95c2787d81..921ab97f42 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -37,6 +37,7 @@ import ( "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/simulation" 
"github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" @@ -240,7 +241,7 @@ func (node *AlgorandFollowerNode) BroadcastInternalSignedTxGroup(_ []transaction // Simulate speculatively runs a transaction group against the current // blockchain state and returns the effects and/or errors that would result. -func (node *AlgorandFollowerNode) Simulate(_ []transactions.SignedTxn) (vb *ledgercore.ValidatedBlock, missingSignatures bool, err error) { +func (node *AlgorandFollowerNode) Simulate(_ []transactions.SignedTxn) (result simulation.Result, err error) { err = fmt.Errorf("cannot simulate in data mode") return } diff --git a/node/follower_node_test.go b/node/follower_node_test.go index 7be1c620e6..2c936a6f96 100644 --- a/node/follower_node_test.go +++ b/node/follower_node_test.go @@ -86,7 +86,7 @@ func TestErrors(t *testing.T) { node := setupFollowNode(t) require.Error(t, node.BroadcastSignedTxGroup([]transactions.SignedTxn{})) require.Error(t, node.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{})) - _, _, err := node.Simulate([]transactions.SignedTxn{}) + _, err := node.Simulate([]transactions.SignedTxn{}) require.Error(t, err) _, err = node.GetParticipationKey(account.ParticipationID{}) require.Error(t, err) diff --git a/node/node.go b/node/node.go index c27cef8b51..bc78d78566 100644 --- a/node/node.go +++ b/node/node.go @@ -557,7 +557,7 @@ func (node *AlgorandFullNode) broadcastSignedTxGroup(txgroup []transactions.Sign // Simulate speculatively runs a transaction group against the current // blockchain state and returns the effects and/or errors that would result. -func (node *AlgorandFullNode) Simulate(txgroup []transactions.SignedTxn) (vb *ledgercore.ValidatedBlock, missingSignatures bool, err error) { +func (node *AlgorandFullNode) Simulate(txgroup []transactions.SignedTxn) (result simulation.Result, err error) { simulator := simulation.MakeSimulator(node.ledger) return simulator.Simulate(txgroup) } From 60c5a7a4e600a3ca2ffe0de8d6b8a7a2d2d18641 Mon Sep 17 00:00:00 2001 From: AlgoAxel <113933518+AlgoAxel@users.noreply.github.com> Date: Fri, 24 Feb 2023 16:41:32 -0500 Subject: [PATCH 54/81] tests: guard addPeer from adding peers who are closing (#5151) --- network/wsNetwork.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index e04e04b0f6..0280952d19 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -118,6 +118,7 @@ var networkPeerBroadcastDropped = metrics.MakeCounter(metrics.MetricName{Name: " var networkPeerIdentityDisconnect = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_duplicate", Description: "number of times identity challenge cause us to disconnect a peer"}) var networkPeerIdentityError = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_error", Description: "number of times an error occurs (besides expected) when processing identity challenges"}) +var networkPeerAlreadyClosed = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_peer_already_closed", Description: "number of times a peer would be added but the peer connection is already closed"}) var networkSlowPeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_slow_drops_total", Description: "number of peers dropped for being slow to send to"}) var networkIdlePeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_idle_drops_total", Description: "number of peers dropped due to idle connection"}) 
@@ -2462,6 +2463,12 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) { func (wn *WebsocketNetwork) addPeer(peer *wsPeer) { wn.peersLock.Lock() defer wn.peersLock.Unlock() + // guard against peers which are closed or closing + if atomic.LoadInt32(&peer.didSignalClose) == 1 { + networkPeerAlreadyClosed.Inc(nil) + wn.log.Debugf("peer closing %s", peer.conn.RemoteAddr().String()) + return + } // simple duplicate *pointer* check. should never trigger given the callers to addPeer // TODO: remove this after making sure it is safe to do so for _, p := range wn.peers { From 51d592515f03ea46b942b97c85b540c79b73defa Mon Sep 17 00:00:00 2001 From: Will Winder Date: Sat, 25 Feb 2023 12:23:49 -0500 Subject: [PATCH 55/81] devmode: Allow DevMode + Follower configurations. (#5157) --- netdeploy/networkTemplate.go | 57 +++++++++++-- netdeploy/networkTemplates_test.go | 129 +++++++++++++++++++++++++++++ node/follower_node.go | 3 +- node/follower_node_test.go | 40 ++++++++- node/node.go | 6 +- 5 files changed, 216 insertions(+), 19 deletions(-) diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index 34cd01e1b6..865ad9c30a 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -209,7 +209,6 @@ func (t NetworkTemplate) Validate() error { } // No wallet can be assigned to more than one node - // At least one relay is required wallets := make(map[string]bool) for _, cfg := range t.Nodes { for _, wallet := range cfg.Wallets { @@ -221,17 +220,49 @@ func (t NetworkTemplate) Validate() error { } } + // At least one relay is required if len(t.Nodes) > 1 && countRelayNodes(t.Nodes) == 0 { return fmt.Errorf("invalid template: at least one relay is required when more than a single node presents") } + // Validate JSONOverride decoding + for _, cfg := range t.Nodes { + local := config.GetDefaultLocal() + err := decodeJSONOverride(cfg.ConfigJSONOverride, &local) + if err != nil { + return fmt.Errorf("invalid template: unable to decode JSONOverride: %w", err) + } + } + + // Follow nodes cannot be relays + for _, cfg := range t.Nodes { + if cfg.IsRelay && isEnableFollowMode(cfg.ConfigJSONOverride) { + return fmt.Errorf("invalid template: follower nodes may not be relays") + } + } + if t.Genesis.DevMode && len(t.Nodes) != 1 { - return fmt.Errorf("invalid template: DevMode should only have a single node") + if countRelayNodes(t.Nodes) != 1 { + return fmt.Errorf("invalid template: devmode configurations may have at most one relay") + } + + for _, cfg := range t.Nodes { + if !cfg.IsRelay && !isEnableFollowMode(cfg.ConfigJSONOverride) { + return fmt.Errorf("invalid template: devmode configurations may only contain one relay and follower nodes") + } + } } return nil } +func isEnableFollowMode(JSONOverride string) bool { + local := config.GetDefaultLocal() + // decode error is checked elsewhere + _ = decodeJSONOverride(JSONOverride, &local) + return local.EnableFollowMode +} + // countRelayNodes counts the total number of relays func countRelayNodes(nodeCfgs []remote.NodeConfigGoal) (relayCount int) { for _, cfg := range nodeCfgs { @@ -242,6 +273,18 @@ func countRelayNodes(nodeCfgs []remote.NodeConfigGoal) (relayCount int) { return } +func decodeJSONOverride(override string, cfg *config.Local) error { + if override != "" { + reader := strings.NewReader(override) + dec := json.NewDecoder(reader) + dec.DisallowUnknownFields() + if err := dec.Decode(&cfg); err != nil { + return err + } + } + return nil +} + func createConfigFile(node remote.NodeConfigGoal, 
configFile string, numNodes int, relaysCount int) error { cfg := config.GetDefaultLocal() cfg.GossipFanout = numNodes @@ -266,12 +309,10 @@ func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes in cfg.DeadlockDetection = node.DeadlockDetection } - if node.ConfigJSONOverride != "" { - reader := strings.NewReader(node.ConfigJSONOverride) - dec := json.NewDecoder(reader) - if err := dec.Decode(&cfg); err != nil { - return err - } + err := decodeJSONOverride(node.ConfigJSONOverride, &cfg) + if err != nil { + return err } + return cfg.SaveToFile(configFile) } diff --git a/netdeploy/networkTemplates_test.go b/netdeploy/networkTemplates_test.go index 62bf8be5fe..e4bc38e77b 100644 --- a/netdeploy/networkTemplates_test.go +++ b/netdeploy/networkTemplates_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/gen" + "github.com/algorand/go-algorand/netdeploy/remote" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -98,6 +100,133 @@ func TestValidate(t *testing.T) { a.NoError(err) } +func TestDevModeValidate(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // same genesis configuration for all tests. + devmodeGenesis := gen.GenesisData{ + DevMode: true, + Wallets: []gen.WalletData{ + { + Stake: 100, + }, + }, + } + + t.Run("DevMode two relays", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: true, + }, + { + IsRelay: true, + }, + }, + } + require.ErrorContains(t, tmpl.Validate(), "devmode configurations may have at most one relay") + }) + + t.Run("FollowMode relay", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: true, + ConfigJSONOverride: "{\"EnableFollowMode\":true}", + }, + }, + } + require.ErrorContains(t, tmpl.Validate(), "follower nodes may not be relays") + }) + + t.Run("DevMode multiple regular nodes", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: true, + }, + {}, + }, + } + require.ErrorContains(t, tmpl.Validate(), "devmode configurations may only contain one relay and follower nodes") + }) + + t.Run("ConfigJSONOverride does not parse", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: false, + ConfigJSONOverride: "DOES NOT PARSE", + }, + }, + } + require.ErrorContains(t, tmpl.Validate(), "unable to decode JSONOverride") + }) + + t.Run("ConfigJSONOverride unknown key", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: false, + ConfigJSONOverride: "{\"Unknown Key\": \"Valid JSON\"}", + }, + }, + } + require.ErrorContains(t, tmpl.Validate(), "json: unknown field \"Unknown Key\"") + }) + + t.Run("Valid multi-node DevMode", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: true, + }, + { + IsRelay: false, + ConfigJSONOverride: "{\"EnableFollowMode\":true}", + }, + }, + } + // this one is fine. 
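// (annotation, not part of the patch) "fine" because the devmode checks added
// above allow exactly one relay, and every additional node must opt into
// EnableFollowMode via its ConfigJSONOverride, as this template does.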
+ require.NoError(t, tmpl.Validate()) + }) + + t.Run("Valid two-follower DevMode", func(t *testing.T) { + t.Parallel() + tmpl := NetworkTemplate{ + Genesis: devmodeGenesis, + Nodes: []remote.NodeConfigGoal{ + { + IsRelay: true, + }, + { + IsRelay: false, + ConfigJSONOverride: "{\"EnableFollowMode\":true}", + }, + { + IsRelay: false, + ConfigJSONOverride: "{\"EnableFollowMode\":true}", + }, + }, + } + // this one is fine. + require.NoError(t, tmpl.Validate()) + }) +} + type overlayTestStruct struct { A string B string diff --git a/node/follower_node.go b/node/follower_node.go index 921ab97f42..34dc48c241 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -87,8 +87,7 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo node.devMode = genesis.DevMode if node.devMode { - log.Errorf("Cannot run follower node in devMode--submitting txns won't work") - return nil, fmt.Errorf("cannot run with both EnableFollowMode and DevMode") + log.Warn("Follower running on a devMode network. Must submit txns to a different node.") } node.config = cfg diff --git a/node/follower_node_test.go b/node/follower_node_test.go index 2c936a6f96..4389ed3e0f 100644 --- a/node/follower_node_test.go +++ b/node/follower_node_test.go @@ -19,6 +19,8 @@ package node import ( "testing" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/agreement" @@ -32,10 +34,8 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -func setupFollowNode(t *testing.T) *AlgorandFollowerNode { - cfg := config.GetDefaultLocal() - cfg.EnableFollowMode = true - genesis := bookkeeping.Genesis{ +func followNodeDefaultGenesis() bookkeeping.Genesis { + return bookkeeping.Genesis{ SchemaID: "go-test-follower-node-genesis", Proto: protocol.ConsensusCurrentVersion, Network: config.Devtestnet, @@ -50,6 +50,12 @@ func setupFollowNode(t *testing.T) *AlgorandFollowerNode { }, }, } +} + +func setupFollowNode(t *testing.T) *AlgorandFollowerNode { + cfg := config.GetDefaultLocal() + cfg.EnableFollowMode = true + genesis := followNodeDefaultGenesis() node, err := MakeFollower(logging.Base(), t.TempDir(), cfg, []string{}, genesis) require.NoError(t, err) return node @@ -95,3 +101,29 @@ func TestErrors(t *testing.T) { _, err = node.InstallParticipationKey([]byte{}) require.Error(t, err) } + +func TestDevModeWarning(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + cfg.EnableFollowMode = true + genesis := followNodeDefaultGenesis() + genesis.DevMode = true + + logger, hook := test.NewNullLogger() + tlogger := logging.NewWrappedLogger(logger) + _, err := MakeFollower(tlogger, t.TempDir(), cfg, []string{}, genesis) + require.NoError(t, err) + + // check for the warning + var foundEntry *logrus.Entry + entries := hook.AllEntries() + for i := range entries { + if entries[i].Level == logrus.WarnLevel { + foundEntry = entries[i] + } + } + require.NotNil(t, foundEntry) + require.Contains(t, foundEntry.Message, "Follower running on a devMode network. 
Must submit txns to a different node.") +} diff --git a/node/node.go b/node/node.go index bc78d78566..be3b8156db 100644 --- a/node/node.go +++ b/node/node.go @@ -183,10 +183,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.genesisID = genesis.ID() node.genesisHash = genesis.Hash() node.devMode = genesis.DevMode - - if node.devMode { - cfg.DisableNetworking = true - } node.config = cfg // tie network, block fetcher, and agreement services together @@ -485,7 +481,7 @@ func (node *AlgorandFullNode) writeDevmodeBlock() (err error) { } // add the newly generated block to the ledger - err = node.ledger.AddValidatedBlock(*vb, agreement.Certificate{}) + err = node.ledger.AddValidatedBlock(*vb, agreement.Certificate{Round: vb.Block().Round()}) return err } From a5258049c7cd84b8cd336782f481e22e5b1f70af Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Mon, 27 Feb 2023 09:37:18 -0800 Subject: [PATCH 56/81] algocfg: profile subcommand (#5069) Co-authored-by: Will Winder --- cmd/algocfg/profileCommand.go | 126 ++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 cmd/algocfg/profileCommand.go diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go new file mode 100644 index 0000000000..ef64dda0be --- /dev/null +++ b/cmd/algocfg/profileCommand.go @@ -0,0 +1,126 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
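// (annotation, not part of the patch) profileCommand.go below uses an
// updater-function pattern: each profile is a plain function from config.Local
// to config.Local that overrides a few defaults, so adding a new profile only
// means registering another entry in the profileNames map.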
+ +package main + +import ( + "bufio" + "fmt" + "github.com/spf13/cobra" + "os" + "path/filepath" + "strings" + + "github.com/algorand/go-algorand/cmd/util/datadir" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/util/codecs" +) + +// profileConfigUpdater updates the provided config for non-defaults in a given profile +type profileConfigUpdater func(cfg config.Local) config.Local + +// defaultConfigUpdater leaves all default values in place +func defaultConfigUpdater(cfg config.Local) config.Local { + return cfg +} + +// relayConfigUpdater alters config values to set up a relay node +func relayConfigUpdater(cfg config.Local) config.Local { + cfg.Archival = true + cfg.EnableLedgerService = true + cfg.EnableBlockService = true + cfg.NetAddress = "4160" + return cfg +} + +var ( + // profileNames are the supported pre-configurations of config values + profileNames = map[string]profileConfigUpdater{ + "relay": relayConfigUpdater, + "default": defaultConfigUpdater, + } + forceUpdate bool +) + +func init() { + rootCmd.AddCommand(profileCmd) + profileCmd.AddCommand(setProfileCmd) + setProfileCmd.Flags().BoolVarP(&forceUpdate, "yes", "y", false, "Force updates to be written") + profileCmd.AddCommand(listProfileCmd) +} + +var profileCmd = &cobra.Command{ + Use: "profile", + Short: "Manipulate config profiles", + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + cmd.HelpFunc()(cmd, args) + }, +} + +var listProfileCmd = &cobra.Command{ + Use: "list", + Short: "List config profiles", + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + var profiles string + for key := range profileNames { + profiles += fmt.Sprintf("%s ", key) + } + reportInfof(profiles) + }, +} + +var setProfileCmd = &cobra.Command{ + Use: "set", + Short: "Set preconfigured config defaults", + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + datadir.OnDataDirs(func(dataDir string) { + cfg, err := getConfigForArg(args[0]) + if err != nil { + reportErrorf("%v", err) + } + file := filepath.Join(dataDir, config.ConfigFilename) + if _, err := os.Stat(file); !forceUpdate && err == nil { + fmt.Printf("A config.json file already exists for this data directory. Would you like to overwrite it? 
(Y/n)") + reader := bufio.NewReader(os.Stdin) + resp, err := reader.ReadString('\n') + resp = strings.TrimSpace(resp) + if err != nil { + reportErrorf("Failed to read response: %v", err) + } + if strings.ToLower(resp) == "n" { + reportInfof("Exiting without overwriting existing config.") + return + } + } + err = codecs.SaveNonDefaultValuesToFile(file, cfg, config.GetDefaultLocal(), nil, true) + if err != nil { + reportErrorf("Error saving updated config file '%s' - %s", file, err) + } + }) + }, +} + +// getConfigForArg returns a Local config w/ options updated acorrding to the profil specified by configType +func getConfigForArg(configType string) (config.Local, error) { + cfg := config.GetDefaultLocal() + if updater, ok := profileNames[configType]; ok { + return updater(cfg), nil + } + return config.Local{}, fmt.Errorf("invalid profile type %v", configType) +} From 8a4012f918a097aed9f71a7a6fadb8553349bfd0 Mon Sep 17 00:00:00 2001 From: Ignacio Corderi Date: Mon, 27 Feb 2023 16:58:30 -0300 Subject: [PATCH 57/81] ledger: cleanup the store package to prepare for the kv impl (#5139) --- Makefile | 2 +- cmd/catchpointdump/database.go | 7 +- cmd/catchpointdump/file.go | 4 +- ledger/acctdeltas.go | 130 +++++------ ledger/acctdeltas_test.go | 211 +++++++++--------- ledger/acctonline.go | 18 +- ledger/acctonline_test.go | 8 +- ledger/acctupdates.go | 20 +- ledger/acctupdates_test.go | 79 +++---- ledger/applications_test.go | 8 +- ledger/archival_test.go | 4 +- ledger/bulletin.go | 4 +- ledger/catchpointtracker.go | 94 ++++---- ledger/catchpointtracker_test.go | 82 +++---- ledger/catchpointwriter.go | 6 +- ledger/catchpointwriter_test.go | 28 +-- ledger/catchupaccessor.go | 132 +++++------ ledger/catchupaccessor_test.go | 20 +- ledger/ledger.go | 11 +- ledger/ledger_test.go | 16 +- ledger/lruaccts.go | 14 +- ledger/lruaccts_test.go | 44 ++-- ledger/lrukv.go | 12 +- ledger/lrukv_test.go | 22 +- ledger/lruonlineaccts.go | 14 +- ledger/lruonlineaccts_test.go | 32 +-- ledger/lruresources.go | 14 +- ledger/lruresources_test.go | 36 +-- ledger/metrics.go | 4 +- ledger/notifier.go | 4 +- ledger/onlineaccountscache.go | 4 +- ledger/onlineaccountscache_test.go | 18 +- ledger/persistedaccts_list.go | 6 +- ledger/persistedaccts_list_test.go | 28 +-- ledger/persistedonlineaccts_list.go | 6 +- ledger/persistedonlineaccts_list_test.go | 28 +-- ledger/store/trackerdb/catchpoint.go | 190 ++++++++++++++++ ledger/store/{ => trackerdb}/data.go | 2 +- ledger/store/{ => trackerdb}/data_test.go | 2 +- ledger/store/{ => trackerdb}/hashing.go | 2 +- .../store/{ => trackerdb}/hashkind_string.go | 2 +- ledger/store/{ => trackerdb}/interface.go | 40 +++- ledger/store/{ => trackerdb}/msgp_gen.go | 2 +- ledger/store/{ => trackerdb}/msgp_gen_test.go | 2 +- ledger/store/trackerdb/params.go | 41 ++++ .../sqlitedriver}/accountsV2.go | 27 +-- .../sqlitedriver}/accountsV2_test.go | 2 +- .../sqlitedriver}/catchpoint.go | 192 ++-------------- .../catchpointPendingHashesIter.go | 2 +- .../sqlitedriver}/catchpoint_test.go | 11 +- .../sqlitedriver}/encodedAccountsIter.go | 9 +- .../{ => trackerdb/sqlitedriver}/kvsIter.go | 2 +- .../sqlitedriver}/merkle_commiter.go | 8 +- .../sqlitedriver}/orderedAccountsIter.go | 39 ++-- .../{ => trackerdb/sqlitedriver}/schema.go | 30 ++- .../sqlitedriver}/schema_test.go | 15 +- .../store/{ => trackerdb/sqlitedriver}/sql.go | 33 +-- .../{ => trackerdb/sqlitedriver}/sql_test.go | 2 +- .../sqlitedriver/store_sqlite_impl.go} | 130 +++-------- .../{ => trackerdb/sqlitedriver}/testing.go | 7 +- 
.../sqlitedriver}/trackerdbV2.go | 116 ++-------- ledger/store/trackerdb/store.go | 108 +++++++++ ledger/store/trackerdb/utils.go | 62 +++++ ledger/store/trackerdb/version.go | 22 ++ ledger/tracker.go | 20 +- ledger/tracker_test.go | 4 +- ledger/trackerdb.go | 12 +- ledger/txtail.go | 12 +- ledger/txtail_test.go | 11 +- 69 files changed, 1245 insertions(+), 1084 deletions(-) create mode 100644 ledger/store/trackerdb/catchpoint.go rename ledger/store/{ => trackerdb}/data.go (99%) rename ledger/store/{ => trackerdb}/data_test.go (99%) rename ledger/store/{ => trackerdb}/hashing.go (99%) rename ledger/store/{ => trackerdb}/hashkind_string.go (97%) rename ledger/store/{ => trackerdb}/interface.go (87%) rename ledger/store/{ => trackerdb}/msgp_gen.go (99%) rename ledger/store/{ => trackerdb}/msgp_gen_test.go (99%) create mode 100644 ledger/store/trackerdb/params.go rename ledger/store/{ => trackerdb/sqlitedriver}/accountsV2.go (96%) rename ledger/store/{ => trackerdb/sqlitedriver}/accountsV2_test.go (99%) rename ledger/store/{ => trackerdb/sqlitedriver}/catchpoint.go (66%) rename ledger/store/{ => trackerdb/sqlitedriver}/catchpointPendingHashesIter.go (99%) rename ledger/store/{ => trackerdb/sqlitedriver}/catchpoint_test.go (92%) rename ledger/store/{ => trackerdb/sqlitedriver}/encodedAccountsIter.go (94%) rename ledger/store/{ => trackerdb/sqlitedriver}/kvsIter.go (98%) rename ledger/store/{ => trackerdb/sqlitedriver}/merkle_commiter.go (91%) rename ledger/store/{ => trackerdb/sqlitedriver}/orderedAccountsIter.go (91%) rename ledger/store/{ => trackerdb/sqlitedriver}/schema.go (97%) rename ledger/store/{ => trackerdb/sqlitedriver}/schema_test.go (96%) rename ledger/store/{ => trackerdb/sqlitedriver}/sql.go (95%) rename ledger/store/{ => trackerdb/sqlitedriver}/sql_test.go (99%) rename ledger/store/{store.go => trackerdb/sqlitedriver/store_sqlite_impl.go} (57%) rename ledger/store/{ => trackerdb/sqlitedriver}/testing.go (94%) rename ledger/store/{ => trackerdb/sqlitedriver}/trackerdbV2.go (84%) create mode 100644 ledger/store/trackerdb/store.go create mode 100644 ledger/store/trackerdb/utils.go create mode 100644 ledger/store/trackerdb/version.go diff --git a/Makefile b/Makefile index b9ad2e319b..d17855e6dc 100644 --- a/Makefile +++ b/Makefile @@ -85,7 +85,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \ UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ )) ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... 
)) -MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./network ./node ./ledger ./ledger/ledgercore ./ledger/store ./ledger/encoded ./stateproof ./data/account ./daemon/algod/api/spec/v2 +MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./network ./node ./ledger ./ledger/ledgercore ./ledger/store/trackerdb ./ledger/encoded ./stateproof ./data/account ./daemon/algod/api/spec/v2 default: build diff --git a/cmd/catchpointdump/database.go b/cmd/catchpointdump/database.go index 29c05e78d1..a45b300e5e 100644 --- a/cmd/catchpointdump/database.go +++ b/cmd/catchpointdump/database.go @@ -26,7 +26,8 @@ import ( "github.com/algorand/go-algorand/crypto/merkletrie" "github.com/algorand/go-algorand/ledger" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver" "github.com/algorand/go-algorand/util/db" ) @@ -107,11 +108,11 @@ func checkDatabase(databaseName string, outFile *os.File) error { var stats merkletrie.Stats err = dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { - committer, err := store.MakeMerkleCommitter(tx, ledgerTrackerStaging) + committer, err := sqlitedriver.MakeMerkleCommitter(tx, ledgerTrackerStaging) if err != nil { return err } - trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(committer, trackerdb.TrieMemoryConfig) if err != nil { return err } diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go index 79a426c0a5..50ae82fe95 100644 --- a/cmd/catchpointdump/file.go +++ b/cmd/catchpointdump/file.go @@ -39,7 +39,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -319,7 +319,7 @@ func printAccountsDatabase(databaseName string, stagingTables bool, fileHeader l totals.RewardsLevel) } return dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + arw := sqlitedriver.NewAccountsSQLReaderWriter(tx) fmt.Printf("\n") printDumpingCatchpointProgressLine(0, 50, 0) diff --git a/ledger/acctdeltas.go b/ledger/acctdeltas.go index 480c165d9a..ea9b8be850 100644 --- a/ledger/acctdeltas.go +++ b/ledger/acctdeltas.go @@ -26,14 +26,14 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" ) // resourceDelta is used as part of the compactResourcesDeltas to describe a change to a single resource. 
type resourceDelta struct { - oldResource store.PersistedResourcesData - newResource store.ResourcesData + oldResource trackerdb.PersistedResourcesData + newResource trackerdb.ResourcesData nAcctDeltas int address basics.Address } @@ -51,8 +51,8 @@ type compactResourcesDeltas struct { } type accountDelta struct { - oldAcct store.PersistedAccountData - newAcct store.BaseAccountData + oldAcct trackerdb.PersistedAccountData + newAcct trackerdb.BaseAccountData nAcctDeltas int address basics.Address } @@ -74,8 +74,8 @@ type compactAccountDeltas struct { // oldAcct represents the "old" state of the account in the DB, and compared against newAcct[0] // to determine if the acct became online or went offline. type onlineAccountDelta struct { - oldAcct store.PersistedOnlineAccountData - newAcct []store.BaseOnlineAccountData + oldAcct trackerdb.PersistedOnlineAccountData + newAcct []trackerdb.BaseOnlineAccountData nOnlineAcctDeltas int address basics.Address updRound []uint64 @@ -100,8 +100,8 @@ const MaxEncodedBaseAccountDataSize = 350 const MaxEncodedBaseResourceDataSize = 20000 // prepareNormalizedBalancesV5 converts an array of encodedBalanceRecordV5 into an equal size array of normalizedAccountBalances. -func prepareNormalizedBalancesV5(bals []encoded.BalanceRecordV5, proto config.ConsensusParams) (normalizedAccountBalances []store.NormalizedAccountBalance, err error) { - normalizedAccountBalances = make([]store.NormalizedAccountBalance, len(bals)) +func prepareNormalizedBalancesV5(bals []encoded.BalanceRecordV5, proto config.ConsensusParams) (normalizedAccountBalances []trackerdb.NormalizedAccountBalance, err error) { + normalizedAccountBalances = make([]trackerdb.NormalizedAccountBalance, len(bals)) for i, balance := range bals { normalizedAccountBalances[i].Address = balance.Address var accountDataV5 basics.AccountData @@ -113,20 +113,20 @@ func prepareNormalizedBalancesV5(bals []encoded.BalanceRecordV5, proto config.Co normalizedAccountBalances[i].NormalizedBalance = accountDataV5.NormalizedOnlineBalance(proto) type resourcesRow struct { aidx basics.CreatableIndex - store.ResourcesData + trackerdb.ResourcesData } var resources []resourcesRow - addResourceRow := func(_ context.Context, _ int64, aidx basics.CreatableIndex, rd *store.ResourcesData) error { + addResourceRow := func(_ context.Context, _ int64, aidx basics.CreatableIndex, rd *trackerdb.ResourcesData) error { resources = append(resources, resourcesRow{aidx: aidx, ResourcesData: *rd}) return nil } - if err = store.AccountDataResources(context.Background(), &accountDataV5, 0, addResourceRow); err != nil { + if err = trackerdb.AccountDataResources(context.Background(), &accountDataV5, 0, addResourceRow); err != nil { return nil, err } normalizedAccountBalances[i].AccountHashes = make([][]byte, 1) - normalizedAccountBalances[i].AccountHashes[0] = store.AccountHashBuilder(balance.Address, accountDataV5, balance.AccountData) + normalizedAccountBalances[i].AccountHashes[0] = trackerdb.AccountHashBuilder(balance.Address, accountDataV5, balance.AccountData) if len(resources) > 0 { - normalizedAccountBalances[i].Resources = make(map[basics.CreatableIndex]store.ResourcesData, len(resources)) + normalizedAccountBalances[i].Resources = make(map[basics.CreatableIndex]trackerdb.ResourcesData, len(resources)) normalizedAccountBalances[i].EncodedResources = make(map[basics.CreatableIndex][]byte, len(resources)) } for _, resource := range resources { @@ -139,8 +139,8 @@ func prepareNormalizedBalancesV5(bals []encoded.BalanceRecordV5, proto config.Co 
} // prepareNormalizedBalancesV6 converts an array of encoded.BalanceRecordV6 into an equal size array of normalizedAccountBalances. -func prepareNormalizedBalancesV6(bals []encoded.BalanceRecordV6, proto config.ConsensusParams) (normalizedAccountBalances []store.NormalizedAccountBalance, err error) { - normalizedAccountBalances = make([]store.NormalizedAccountBalance, len(bals)) +func prepareNormalizedBalancesV6(bals []encoded.BalanceRecordV6, proto config.ConsensusParams) (normalizedAccountBalances []trackerdb.NormalizedAccountBalance, err error) { + normalizedAccountBalances = make([]trackerdb.NormalizedAccountBalance, len(bals)) for i, balance := range bals { normalizedAccountBalances[i].Address = balance.Address err = protocol.Decode(balance.AccountData, &(normalizedAccountBalances[i].AccountData)) @@ -163,19 +163,19 @@ func prepareNormalizedBalancesV6(bals []encoded.BalanceRecordV6, proto config.Co normalizedAccountBalances[i].PartialBalance = true } else { normalizedAccountBalances[i].AccountHashes = make([][]byte, 1+len(balance.Resources)) - normalizedAccountBalances[i].AccountHashes[0] = store.AccountHashBuilderV6(balance.Address, &normalizedAccountBalances[i].AccountData, balance.AccountData) + normalizedAccountBalances[i].AccountHashes[0] = trackerdb.AccountHashBuilderV6(balance.Address, &normalizedAccountBalances[i].AccountData, balance.AccountData) curHashIdx++ } if len(balance.Resources) > 0 { - normalizedAccountBalances[i].Resources = make(map[basics.CreatableIndex]store.ResourcesData, len(balance.Resources)) + normalizedAccountBalances[i].Resources = make(map[basics.CreatableIndex]trackerdb.ResourcesData, len(balance.Resources)) normalizedAccountBalances[i].EncodedResources = make(map[basics.CreatableIndex][]byte, len(balance.Resources)) for cidx, res := range balance.Resources { - var resData store.ResourcesData + var resData trackerdb.ResourcesData err = protocol.Decode(res, &resData) if err != nil { return nil, err } - normalizedAccountBalances[i].AccountHashes[curHashIdx], err = store.ResourcesHashBuilderV6(&resData, balance.Address, basics.CreatableIndex(cidx), resData.UpdateRound, res) + normalizedAccountBalances[i].AccountHashes[curHashIdx], err = trackerdb.ResourcesHashBuilderV6(&resData, balance.Address, basics.CreatableIndex(cidx), resData.UpdateRound, res) if err != nil { return nil, err } @@ -232,7 +232,7 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba newEntry := resourceDelta{ nAcctDeltas: 1, address: res.Addr, - newResource: store.MakeResourcesData(deltaRound * updateRoundMultiplier), + newResource: trackerdb.MakeResourcesData(deltaRound * updateRoundMultiplier), } newEntry.newResource.SetAssetData(res.Params, res.Holding) // baseResources caches deleted entries, and they have addrid = 0 @@ -244,7 +244,7 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba outResourcesDeltas.insert(newEntry) } else { if pad, has := baseAccounts.read(res.Addr); has { - newEntry.oldResource = store.PersistedResourcesData{Addrid: pad.Rowid} + newEntry.oldResource = trackerdb.PersistedResourcesData{Addrid: pad.Rowid} } newEntry.oldResource.Aidx = basics.CreatableIndex(res.Aidx) outResourcesDeltas.insertMissing(newEntry) @@ -270,7 +270,7 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba newEntry := resourceDelta{ nAcctDeltas: 1, address: res.Addr, - newResource: store.MakeResourcesData(deltaRound * updateRoundMultiplier), + newResource: trackerdb.MakeResourcesData(deltaRound * 
updateRoundMultiplier), } newEntry.newResource.SetAppData(res.Params, res.State) baseResourceData, has := baseResources.read(res.Addr, basics.CreatableIndex(res.Aidx)) @@ -280,7 +280,7 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba outResourcesDeltas.insert(newEntry) } else { if pad, has := baseAccounts.read(res.Addr); has { - newEntry.oldResource = store.PersistedResourcesData{Addrid: pad.Rowid} + newEntry.oldResource = trackerdb.PersistedResourcesData{Addrid: pad.Rowid} } newEntry.oldResource.Aidx = basics.CreatableIndex(res.Aidx) outResourcesDeltas.insertMissing(newEntry) @@ -294,7 +294,7 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba // resourcesLoadOld updates the entries on the deltas.oldResource map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. -func (a *compactResourcesDeltas) resourcesLoadOld(tx store.TransactionScope, knownAddresses map[basics.Address]int64) (err error) { +func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope, knownAddresses map[basics.Address]int64) (err error) { if len(a.misses) == 0 { return nil } @@ -334,7 +334,7 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx store.TransactionScope, kno switch err { case nil: if len(resDataBuf) > 0 { - persistedResData := store.PersistedResourcesData{Addrid: addrid, Aidx: aidx} + persistedResData := trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx} err = protocol.Decode(resDataBuf, &persistedResData.Data) if err != nil { return err @@ -346,7 +346,7 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx store.TransactionScope, kno } case sql.ErrNoRows: // we don't have that account, just return an empty record. - a.updateOld(missIdx, store.PersistedResourcesData{Addrid: addrid, Aidx: aidx}) + a.updateOld(missIdx, trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx}) err = nil default: // unexpected error - let the caller know that we couldn't complete the operation. @@ -395,7 +395,7 @@ func (a *compactResourcesDeltas) insertMissing(delta resourceDelta) { } // updateOld updates existing or inserts a new partial entry with only old field filled -func (a *compactResourcesDeltas) updateOld(idx int, old store.PersistedResourcesData) { +func (a *compactResourcesDeltas) updateOld(idx int, old trackerdb.PersistedResourcesData) { a.deltas[idx].oldResource = old } @@ -440,7 +440,7 @@ func makeCompactAccountDeltas(stateDeltas []ledgercore.StateDelta, baseRound bas // it's a new entry. newEntry := accountDelta{ nAcctDeltas: 1, - newAcct: store.BaseAccountData{ + newAcct: trackerdb.BaseAccountData{ UpdateRound: deltaRound * updateRoundMultiplier, }, address: addr, @@ -461,7 +461,7 @@ func makeCompactAccountDeltas(stateDeltas []ledgercore.StateDelta, baseRound bas // accountsLoadOld updates the entries on the deltas.old map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. 
-func (a *compactAccountDeltas) accountsLoadOld(tx store.TransactionScope) (err error) { +func (a *compactAccountDeltas) accountsLoadOld(tx trackerdb.TransactionScope) (err error) { // TODO: this function only needs a reader's scope to the datastore if len(a.misses) == 0 { return nil @@ -480,7 +480,7 @@ func (a *compactAccountDeltas) accountsLoadOld(tx store.TransactionScope) (err e switch err { case nil: if len(acctDataBuf) > 0 { - persistedAcctData := &store.PersistedAccountData{Addr: addr, Rowid: rowid} + persistedAcctData := &trackerdb.PersistedAccountData{Addr: addr, Rowid: rowid} err = protocol.Decode(acctDataBuf, &persistedAcctData.AccountData) if err != nil { return err @@ -488,11 +488,11 @@ func (a *compactAccountDeltas) accountsLoadOld(tx store.TransactionScope) (err e a.updateOld(idx, *persistedAcctData) } else { // to retain backward compatibility, we will treat this condition as if we don't have the account. - a.updateOld(idx, store.PersistedAccountData{Addr: addr, Rowid: rowid}) + a.updateOld(idx, trackerdb.PersistedAccountData{Addr: addr, Rowid: rowid}) } case sql.ErrNoRows: // we don't have that account, just return an empty record. - a.updateOld(idx, store.PersistedAccountData{Addr: addr}) + a.updateOld(idx, trackerdb.PersistedAccountData{Addr: addr}) // Note: the err will be ignored in this case since `err` is being shadowed. // this behaviour is equivalent to `err = nil` default: @@ -543,12 +543,12 @@ func (a *compactAccountDeltas) insertMissing(delta accountDelta) { } // updateOld updates existing or inserts a new partial entry with only old field filled -func (a *compactAccountDeltas) updateOld(idx int, old store.PersistedAccountData) { +func (a *compactAccountDeltas) updateOld(idx int, old trackerdb.PersistedAccountData) { a.deltas[idx].oldAcct = old } func (c *onlineAccountDelta) append(acctDelta ledgercore.AccountData, deltaRound basics.Round) { - var baseEntry store.BaseOnlineAccountData + var baseEntry trackerdb.BaseOnlineAccountData baseEntry.SetCoreAccountData(&acctDelta) c.newAcct = append(c.newAcct, baseEntry) c.updRound = append(c.updRound, uint64(deltaRound)) @@ -603,7 +603,7 @@ func makeCompactOnlineAccountDeltas(accountDeltas []ledgercore.AccountDeltas, ba // accountsLoadOld updates the entries on the deltas.old map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. -func (a *compactOnlineAccountDeltas) accountsLoadOld(tx store.TransactionScope) (err error) { +func (a *compactOnlineAccountDeltas) accountsLoadOld(tx trackerdb.TransactionScope) (err error) { if len(a.misses) == 0 { return nil } @@ -620,7 +620,7 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx store.TransactionScope) switch err { case nil: if len(acctDataBuf) > 0 { - persistedAcctData := &store.PersistedOnlineAccountData{Addr: addr, Rowid: rowid} + persistedAcctData := &trackerdb.PersistedOnlineAccountData{Addr: addr, Rowid: rowid} err = protocol.Decode(acctDataBuf, &persistedAcctData.AccountData) if err != nil { return err @@ -628,11 +628,11 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx store.TransactionScope) a.updateOld(idx, *persistedAcctData) } else { // empty data means offline account - a.updateOld(idx, store.PersistedOnlineAccountData{Addr: addr, Rowid: rowid}) + a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr, Rowid: rowid}) } case sql.ErrNoRows: // we don't have that account, just return an empty record. 
- a.updateOld(idx, store.PersistedOnlineAccountData{Addr: addr}) + a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr}) default: // unexpected error - let the caller know that we couldn't complete the operation. return err @@ -681,7 +681,7 @@ func (a *compactOnlineAccountDeltas) insertMissing(delta onlineAccountDelta) { } // updateOld updates existing or inserts a new partial entry with only old field filled -func (a *compactOnlineAccountDeltas) updateOld(idx int, old store.PersistedOnlineAccountData) { +func (a *compactOnlineAccountDeltas) updateOld(idx int, old trackerdb.PersistedOnlineAccountData) { a.deltas[idx].oldAcct = old } @@ -704,10 +704,10 @@ func accountDataToOnline(address basics.Address, ad *ledgercore.AccountData, pro // accountsNewRound is a convenience wrapper for accountsNewRoundImpl func accountsNewRound( - tx store.TransactionScope, + tx trackerdb.TransactionScope, updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable, proto config.ConsensusParams, lastUpdateRound basics.Round, -) (updatedAccounts []store.PersistedAccountData, updatedResources map[basics.Address][]store.PersistedResourcesData, updatedKVs map[string]store.PersistedKVData, err error) { +) (updatedAccounts []trackerdb.PersistedAccountData, updatedResources map[basics.Address][]trackerdb.PersistedResourcesData, updatedKVs map[string]trackerdb.PersistedKVData, err error) { hasAccounts := updates.len() > 0 hasResources := resources.len() > 0 hasKvPairs := len(kvPairs) > 0 @@ -723,10 +723,10 @@ func accountsNewRound( } func onlineAccountsNewRound( - tx store.TransactionScope, + tx trackerdb.TransactionScope, updates compactOnlineAccountDeltas, proto config.ConsensusParams, lastUpdateRound basics.Round, -) (updatedAccounts []store.PersistedOnlineAccountData, err error) { +) (updatedAccounts []trackerdb.PersistedOnlineAccountData, err error) { hasAccounts := updates.len() > 0 writer, err := tx.MakeOnlineAccountsOptimizedWriter(hasAccounts) @@ -742,11 +742,11 @@ func onlineAccountsNewRound( // accountsNewRoundImpl updates the accountbase and assetcreators tables by applying the provided deltas to the accounts / creatables. // The function returns a persistedAccountData for the modified accounts which can be stored in the base cache. func accountsNewRoundImpl( - writer store.AccountsWriter, + writer trackerdb.AccountsWriter, updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable, proto config.ConsensusParams, lastUpdateRound basics.Round, -) (updatedAccounts []store.PersistedAccountData, updatedResources map[basics.Address][]store.PersistedResourcesData, updatedKVs map[string]store.PersistedKVData, err error) { - updatedAccounts = make([]store.PersistedAccountData, updates.len()) +) (updatedAccounts []trackerdb.PersistedAccountData, updatedResources map[basics.Address][]trackerdb.PersistedResourcesData, updatedKVs map[string]trackerdb.PersistedKVData, err error) { + updatedAccounts = make([]trackerdb.PersistedAccountData, updates.len()) updatedAccountIdx := 0 newAddressesRowIDs := make(map[basics.Address]int64) for i := 0; i < updates.len(); i++ { @@ -777,7 +777,7 @@ func accountsNewRoundImpl( if err == nil { // we deleted the entry successfully. 
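// (annotation, not part of the patch) a successful delete is reported back to
// the caller as a cache entry with Rowid 0 and zeroed account data; a zero
// rowid is the convention this code uses for "no longer present in the DB".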
updatedAccounts[updatedAccountIdx].Rowid = 0 - updatedAccounts[updatedAccountIdx].AccountData = store.BaseAccountData{} + updatedAccounts[updatedAccountIdx].AccountData = trackerdb.BaseAccountData{} if rowsAffected != 1 { err = fmt.Errorf("failed to delete accountbase row for account %v, rowid %d", data.address, data.oldAcct.Rowid) } @@ -807,7 +807,7 @@ func accountsNewRoundImpl( updatedAccountIdx++ } - updatedResources = make(map[basics.Address][]store.PersistedResourcesData) + updatedResources = make(map[basics.Address][]trackerdb.PersistedResourcesData) // the resources update is going to be made in three parts: // on the first loop, we will find out all the entries that need to be deleted, and prepare a pendingResourcesDeletion map. @@ -832,7 +832,7 @@ func accountsNewRoundImpl( } pendingResourcesDeletion[resourceKey{addrid: data.oldResource.Addrid, aidx: data.oldResource.Aidx}] = struct{}{} - entry := store.PersistedResourcesData{Addrid: 0, Aidx: data.oldResource.Aidx, Data: store.MakeResourcesData(0), Round: lastUpdateRound} + entry := trackerdb.PersistedResourcesData{Addrid: 0, Aidx: data.oldResource.Aidx, Data: trackerdb.MakeResourcesData(0), Round: lastUpdateRound} deltas := updatedResources[data.address] deltas = append(deltas, entry) updatedResources[data.address] = deltas @@ -854,7 +854,7 @@ func accountsNewRoundImpl( return } } - var entry store.PersistedResourcesData + var entry trackerdb.PersistedResourcesData if data.oldResource.Data.IsEmpty() { // IsEmpty means we don't have a previous value. Note, can't use oldResource.data.MsgIsZero // because of possibility of empty asset holdings or app local state after opting in, // if we didn't have it before, and we don't have anything now, just skip it. // set zero addrid to mark this entry invalid for subsequent addr to addrid resolution // because the base account might be gone. - entry = store.PersistedResourcesData{Addrid: 0, Aidx: aidx, Data: store.MakeResourcesData(0), Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{Addrid: 0, Aidx: aidx, Data: trackerdb.MakeResourcesData(0), Round: lastUpdateRound} } else { // create a new entry. if !data.newResource.IsApp() && !data.newResource.IsAsset() { @@ -880,7 +880,7 @@ func accountsNewRoundImpl( rowsAffected, err = writer.UpdateResource(addrid, aidx, data.newResource) if err == nil { // rowid doesn't change on update. - entry = store.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} if rowsAffected != 1 { err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, addrid, aidx) } } else { _, err = writer.InsertResource(addrid, aidx, data.newResource) if err == nil { // set the returned persisted account states so that we could store that as the baseResources in commitRound - entry = store.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} } } } @@ -908,7 +908,7 @@ func accountsNewRoundImpl( rowsAffected, err = writer.UpdateResource(addrid, aidx, data.newResource) if err == nil { // rowid doesn't change on update.
- entry = store.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} if rowsAffected != 1 { err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, addrid, aidx) } @@ -943,7 +943,7 @@ func accountsNewRoundImpl( } } - updatedKVs = make(map[string]store.PersistedKVData, len(kvPairs)) + updatedKVs = make(map[string]trackerdb.PersistedKVData, len(kvPairs)) for key, mv := range kvPairs { if mv.data != nil { // reminder: check oldData for nil here, b/c bytes.Equal conflates nil and "". @@ -951,13 +951,13 @@ func accountsNewRoundImpl( continue // changed back within the delta span } err = writer.UpsertKvPair(key, mv.data) - updatedKVs[key] = store.PersistedKVData{Value: mv.data, Round: lastUpdateRound} + updatedKVs[key] = trackerdb.PersistedKVData{Value: mv.data, Round: lastUpdateRound} } else { if mv.oldData == nil { // Came and went within the delta span continue } err = writer.DeleteKvPair(key) - updatedKVs[key] = store.PersistedKVData{Value: nil, Round: lastUpdateRound} + updatedKVs[key] = trackerdb.PersistedKVData{Value: nil, Round: lastUpdateRound} } if err != nil { return @@ -979,9 +979,9 @@ func accountsNewRoundImpl( } func onlineAccountsNewRoundImpl( - writer store.OnlineAccountsWriter, updates compactOnlineAccountDeltas, + writer trackerdb.OnlineAccountsWriter, updates compactOnlineAccountDeltas, proto config.ConsensusParams, lastUpdateRound basics.Round, -) (updatedAccounts []store.PersistedOnlineAccountData, err error) { +) (updatedAccounts []trackerdb.PersistedOnlineAccountData, err error) { for i := 0; i < updates.len(); i++ { data := updates.getByIdx(i) @@ -1005,7 +1005,7 @@ func onlineAccountsNewRoundImpl( normBalance := newAcct.NormalizedOnlineBalance(proto) rowid, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) if err == nil { - updated := store.PersistedOnlineAccountData{ + updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, AccountData: newAcct, Round: lastUpdateRound, @@ -1028,11 +1028,11 @@ func onlineAccountsNewRoundImpl( err = fmt.Errorf("empty voting data but online account %s: %v", data.address.String(), newAcct) } else { var rowid int64 - rowid, err = writer.InsertOnlineAccount(data.address, 0, store.BaseOnlineAccountData{}, updRound, 0) + rowid, err = writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0) if err == nil { - updated := store.PersistedOnlineAccountData{ + updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, - AccountData: store.BaseOnlineAccountData{}, + AccountData: trackerdb.BaseOnlineAccountData{}, Round: lastUpdateRound, Rowid: rowid, UpdRound: basics.Round(updRound), @@ -1048,7 +1048,7 @@ func onlineAccountsNewRoundImpl( normBalance := newAcct.NormalizedOnlineBalance(proto) rowid, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) if err == nil { - updated := store.PersistedOnlineAccountData{ + updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, AccountData: newAcct, Round: lastUpdateRound, diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 3032b741dc..1b405dfc1f 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -41,8 +41,9 @@ import ( "github.com/algorand/go-algorand/data/basics" 
"github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" storetesting "github.com/algorand/go-algorand/ledger/store/testing" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -50,7 +51,7 @@ import ( "github.com/algorand/go-algorand/util/db" ) -func checkAccounts(t *testing.T, tx store.TransactionScope, rnd basics.Round, accts map[basics.Address]basics.AccountData) { +func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round, accts map[basics.Address]basics.AccountData) { arw, err := tx.MakeAccountsReaderWriter() require.NoError(t, err) @@ -97,7 +98,7 @@ func checkAccounts(t *testing.T, tx store.TransactionScope, rnd basics.Round, ac d, err := aor.LookupAccount(ledgertesting.RandomAddress()) require.NoError(t, err) require.Equal(t, rnd, d.Round) - require.Equal(t, d.AccountData, store.BaseAccountData{}) + require.Equal(t, d.AccountData, trackerdb.BaseAccountData{}) proto := config.Consensus[protocol.ConsensusCurrentVersion] @@ -148,11 +149,11 @@ func TestAccountDBInit(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - dbs, _ := store.DbOpenTrackerTest(t, true) + dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true) dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { accts := ledgertesting.RandomAccounts(20, true) newDB := tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) require.True(t, newDB) @@ -209,11 +210,11 @@ func TestAccountDBRound(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - dbs, _ := store.DbOpenTrackerTest(t, true) + dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true) dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { arw, err := tx.MakeAccountsReaderWriter() require.NoError(t, err) @@ -365,11 +366,11 @@ func TestAccountDBInMemoryAcct(t *testing.T) { for i, test := range tests { - dbs, _ := store.DbOpenTrackerTest(t, true) + dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true) dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { accts := ledgertesting.RandomAccounts(1, true) tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) addr := ledgertesting.RandomAddress() @@ -387,14 +388,14 @@ func TestAccountDBInMemoryAcct(t *testing.T) { outAccountDeltas := makeCompactAccountDeltas(stateDeltas, basics.Round(1), true, baseAccounts) require.Equal(t, 1, len(outAccountDeltas.deltas)) - require.Equal(t, accountDelta{newAcct: store.BaseAccountData{UpdateRound: lastRound}, nAcctDeltas: numAcctDeltas, address: addr}, outAccountDeltas.deltas[0]) + require.Equal(t, accountDelta{newAcct: trackerdb.BaseAccountData{UpdateRound: lastRound}, nAcctDeltas: numAcctDeltas, address: addr}, outAccountDeltas.deltas[0]) require.Equal(t, 1, len(outAccountDeltas.misses)) 
outResourcesDeltas := makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources) require.Equal(t, 1, len(outResourcesDeltas.deltas)) require.Equal(t, resourceDelta{ - oldResource: store.PersistedResourcesData{Aidx: 100}, newResource: store.MakeResourcesData(lastRound - 1), + oldResource: trackerdb.PersistedResourcesData{Aidx: 100}, newResource: trackerdb.MakeResourcesData(lastRound - 1), nAcctDeltas: numResDeltas, address: addr, }, outResourcesDeltas.deltas[0], @@ -416,13 +417,13 @@ func TestAccountDBInMemoryAcct(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts require.Equal(t, - store.PersistedAccountData{Addr: addr, Round: basics.Round(lastRound)}, + trackerdb.PersistedAccountData{Addr: addr, Round: basics.Round(lastRound)}, updatedAccts[0], ) require.Equal(t, 1, len(updatesResources[addr])) // we store empty even for deleted resources require.Equal(t, - store.PersistedResourcesData{Addrid: 0, Aidx: 100, Data: store.MakeResourcesData(0), Round: basics.Round(lastRound)}, + trackerdb.PersistedResourcesData{Addrid: 0, Aidx: 100, Data: trackerdb.MakeResourcesData(0), Round: basics.Round(lastRound)}, updatesResources[addr][0], ) @@ -436,11 +437,11 @@ func TestAccountDBInMemoryAcct(t *testing.T) { func TestAccountStorageWithStateProofID(t *testing.T) { partitiontest.PartitionTest(t) - dbs, _ := store.DbOpenTrackerTest(t, true) + dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true) dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { accts := ledgertesting.RandomAccounts(20, false) _ = tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) checkAccounts(t, tx, 0, accts) @@ -597,7 +598,7 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A return } -func benchmarkInitBalances(b testing.TB, numAccounts int, tx store.TransactionScope, proto protocol.ConsensusVersion) (updates map[basics.Address]basics.AccountData) { +func benchmarkInitBalances(b testing.TB, numAccounts int, tx trackerdb.TransactionScope, proto protocol.ConsensusVersion) (updates map[basics.Address]basics.AccountData) { updates = generateRandomTestingAccountBalances(numAccounts) tx.AccountsInitTest(b, updates, proto) return @@ -611,12 +612,12 @@ func cleanupTestDb(dbs db.Pair, dbName string, inMemory bool) { } func benchmarkReadingAllBalances(b *testing.B, inMemory bool) { - dbs, _ := store.DbOpenTrackerTest(b, true) + dbs, _ := sqlitedriver.DbOpenTrackerTest(b, true) dbs.SetLogger(logging.TestingLog(b)) defer dbs.Close() bal := make(map[basics.Address]basics.AccountData) - err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { benchmarkInitBalances(b, b.N, tx, protocol.ConsensusCurrentVersion) arw, err := tx.MakeAccountsReaderWriter() if err != nil { @@ -647,14 +648,14 @@ func BenchmarkReadingAllBalancesDisk(b *testing.B) { } func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) { - dbs, fn := store.DbOpenTrackerTest(b, true) + dbs, fn := sqlitedriver.DbOpenTrackerTest(b, true) dbs.SetLogger(logging.TestingLog(b)) defer dbs.CleanupTest(fn, inMemory) - err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := dbs.Transaction(func(ctx 
context.Context, tx trackerdb.TransactionScope) (err error) {
 		accounts := benchmarkInitBalances(b, b.N, tx, protocol.ConsensusCurrentVersion)
-	ar, err := dbs.MakeAccountsReader()
+	ar, err := dbs.MakeAccountsOptimizedReader()
 	require.NoError(b, err)
 	defer ar.Close()
@@ -696,11 +697,11 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
 	defer dbs.Close()
 	err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
-		store.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
+		sqlitedriver.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
 		return nil
 	})
 	require.NoError(t, err)
-	qs, err := store.AccountsInitDbQueries(dbs.Rdb.Handle)
+	qs, err := sqlitedriver.AccountsInitDbQueries(dbs.Rdb.Handle)
 	require.NoError(t, err)
 	// TODO[store-refactor]: internals are opaque, once we move the remainder of accountdb we can move this too
 	// require.NotNil(t, qs.listCreatablesStmt)
@@ -754,7 +755,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
 		chunk.Balances = make([]encoded.BalanceRecordV6, chunkSize)
 		for i := uint64(0); i < chunkSize; i++ {
 			var randomAccount encoded.BalanceRecordV6
-			accountData := store.BaseAccountData{RewardsBase: accountsLoaded + i}
+			accountData := trackerdb.BaseAccountData{RewardsBase: accountsLoaded + i}
 			accountData.MicroAlgos.Raw = crypto.RandUint63()
 			randomAccount.AccountData = protocol.Encode(&accountData)
 			crypto.RandBytes(randomAccount.Address[:])
@@ -770,7 +771,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
 		normalizedAccountBalances, err := prepareNormalizedBalancesV6(chunk.Balances, proto)
 		require.NoError(b, err)
 		b.StartTimer()
-		err = l.trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) (err error) {
+		err = l.trackerDBs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
 			cw, err := tx.MakeCatchpointWriter()
 			if err != nil {
 				return err
@@ -812,11 +813,11 @@ func TestLookupKeysByPrefix(t *testing.T) {
 	partitiontest.PartitionTest(t)
 	t.Parallel()
-	dbs, fn := store.DbOpenTrackerTest(t, false)
+	dbs, fn := sqlitedriver.DbOpenTrackerTest(t, false)
 	dbs.SetLogger(logging.TestingLog(t))
 	defer dbs.CleanupTest(fn, false)
-	err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) {
+	err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
 		// return account data, initialize DB tables from AccountsInitTest
 		_ = benchmarkInitBalances(t, 1, tx, protocol.ConsensusCurrentVersion)
@@ -824,7 +825,7 @@ func TestLookupKeysByPrefix(t *testing.T) {
 	})
 	require.NoError(t, err)
-	qs, err := dbs.MakeAccountsReader()
+	qs, err := dbs.MakeAccountsOptimizedReader()
 	require.NoError(t, err)
 	defer qs.Close()
@@ -851,7 +852,7 @@ func TestLookupKeysByPrefix(t *testing.T) {
 		{key: []byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`), value: []byte("random Bluh")},
 	}
-	err = dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) {
+	err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
 		// writer is only for kvstore
 		writer, err := tx.MakeAccountsOptimizedWriter(true, true, true, true)
 		if err != nil {
@@ -998,11 +999,11 @@ func TestLookupKeysByPrefix(t *testing.T) {
 func BenchmarkLookupKeyByPrefix(b *testing.B) {
 	// learn something from BenchmarkWritingRandomBalancesDisk
-	dbs, fn := store.DbOpenTrackerTest(b, false)
+	dbs, fn := sqlitedriver.DbOpenTrackerTest(b, false)
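For orientation, the pattern these renamed calls follow: tests open a concrete SQLite-backed store through sqlitedriver, but all further access is programmed against the trackerdb interfaces. A minimal sketch, assuming only the identifiers visible in the hunks above (DbOpenTrackerTest, TransactionScope, MakeAccountsReaderWriter, MakeAccountsOptimizedReader); it is illustrative, not part of the patch:

func sketchTrackerdbUsage(t *testing.T) {
	// Open a test store via the SQLite driver; from here on the handle is
	// used only through the driver-agnostic trackerdb interfaces.
	dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
	dbs.SetLogger(logging.TestingLog(t))
	defer dbs.Close()

	// Read-write work runs inside a TransactionScope...
	err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
		arw, err := tx.MakeAccountsReaderWriter()
		if err != nil {
			return err
		}
		_ = arw // insert or update accounts here
		return nil
	})
	require.NoError(t, err)

	// ...while hot-path point lookups use the optimized reader, which this
	// patch renames from MakeAccountsReader to MakeAccountsOptimizedReader.
	qs, err := dbs.MakeAccountsOptimizedReader()
	require.NoError(t, err)
	defer qs.Close()
}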
dbs.SetLogger(logging.TestingLog(b)) defer dbs.CleanupTest(fn, false) - err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { // return account data, initialize DB tables from AccountsInitTest _ = benchmarkInitBalances(b, 1, tx, protocol.ConsensusCurrentVersion) @@ -1010,7 +1011,7 @@ func BenchmarkLookupKeyByPrefix(b *testing.B) { }) require.NoError(b, err) - qs, err := dbs.MakeAccountsReader() + qs, err := dbs.MakeAccountsOptimizedReader() require.NoError(b, err) defer qs.Close() @@ -1026,7 +1027,7 @@ func BenchmarkLookupKeyByPrefix(b *testing.B) { var prefix string // make writer to DB - err = dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { // writer is only for kvstore writer, err := tx.MakeAccountsOptimizedWriter(true, true, true, true) if err != nil { @@ -1076,7 +1077,7 @@ func (a *compactResourcesDeltas) upsert(delta resourceDelta) { } // upsertOld updates existing or inserts a new partial entry with only old field filled -func (a *compactAccountDeltas) upsertOld(old store.PersistedAccountData) { +func (a *compactAccountDeltas) upsertOld(old trackerdb.PersistedAccountData) { addr := old.Addr if idx, exist := a.cache[addr]; exist { a.deltas[idx].oldAcct = old @@ -1111,7 +1112,7 @@ func TestCompactAccountDeltas(t *testing.T) { a.Zero(ad.len()) a.Panics(func() { ad.getByIdx(0) }) - sample1 := accountDelta{newAcct: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 123}}, address: addr} + sample1 := accountDelta{newAcct: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 123}}, address: addr} ad.upsert(addr, sample1) data, idx = ad.get(addr) a.NotEqual(-1, idx) @@ -1122,7 +1123,7 @@ func TestCompactAccountDeltas(t *testing.T) { a.Equal(addr, data.address) a.Equal(sample1, data) - sample2 := accountDelta{newAcct: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 456}}, address: addr} + sample2 := accountDelta{newAcct: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 456}}, address: addr} ad.upsert(addr, sample2) data, idx = ad.get(addr) a.NotEqual(-1, idx) @@ -1143,7 +1144,7 @@ func TestCompactAccountDeltas(t *testing.T) { a.Equal(addr, data.address) a.Equal(sample2, data) - old1 := store.PersistedAccountData{Addr: addr, AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 789}}} + old1 := trackerdb.PersistedAccountData{Addr: addr, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 789}}} ad.upsertOld(old1) a.Equal(1, ad.len()) data = ad.getByIdx(0) @@ -1151,7 +1152,7 @@ func TestCompactAccountDeltas(t *testing.T) { a.Equal(accountDelta{newAcct: sample2.newAcct, oldAcct: old1, address: addr}, data) addr1 := ledgertesting.RandomAddress() - old2 := store.PersistedAccountData{Addr: addr1, AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 789}}} + old2 := trackerdb.PersistedAccountData{Addr: addr1, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 789}}} ad.upsertOld(old2) a.Equal(2, ad.len()) data = ad.getByIdx(0) @@ -1180,7 +1181,7 @@ func TestCompactAccountDeltas(t *testing.T) { } // upsertOld updates existing or inserts a new partial entry with only old field filled -func (a *compactResourcesDeltas) upsertOld(addr basics.Address, old store.PersistedResourcesData) { +func (a *compactResourcesDeltas) upsertOld(addr 
basics.Address, old trackerdb.PersistedResourcesData) { if idx, exist := a.cache[accountCreatable{address: addr, index: old.Aidx}]; exist { a.deltas[idx].oldResource = old return @@ -1207,7 +1208,7 @@ func TestCompactResourceDeltas(t *testing.T) { a.Zero(ad.len()) a.Panics(func() { ad.getByIdx(0) }) - sample1 := resourceDelta{newResource: store.ResourcesData{Total: 123}, address: addr, oldResource: store.PersistedResourcesData{Aidx: 1}} + sample1 := resourceDelta{newResource: trackerdb.ResourcesData{Total: 123}, address: addr, oldResource: trackerdb.PersistedResourcesData{Aidx: 1}} ad.upsert(sample1) data, idx = ad.get(addr, 1) a.NotEqual(-1, idx) @@ -1218,7 +1219,7 @@ func TestCompactResourceDeltas(t *testing.T) { a.Equal(addr, data.address) a.Equal(sample1, data) - sample2 := resourceDelta{newResource: store.ResourcesData{Total: 456}, address: addr, oldResource: store.PersistedResourcesData{Aidx: 1}} + sample2 := resourceDelta{newResource: trackerdb.ResourcesData{Total: 456}, address: addr, oldResource: trackerdb.PersistedResourcesData{Aidx: 1}} ad.upsert(sample2) data, idx = ad.get(addr, 1) a.NotEqual(-1, idx) @@ -1239,7 +1240,7 @@ func TestCompactResourceDeltas(t *testing.T) { a.Equal(addr, data.address) a.Equal(sample2, data) - old1 := store.PersistedResourcesData{Addrid: 111, Aidx: 1, Data: store.ResourcesData{Total: 789}} + old1 := trackerdb.PersistedResourcesData{Addrid: 111, Aidx: 1, Data: trackerdb.ResourcesData{Total: 789}} ad.upsertOld(addr, old1) a.Equal(1, ad.len()) data = ad.getByIdx(0) @@ -1247,7 +1248,7 @@ func TestCompactResourceDeltas(t *testing.T) { a.Equal(resourceDelta{newResource: sample2.newResource, oldResource: old1, address: addr}, data) addr1 := ledgertesting.RandomAddress() - old2 := store.PersistedResourcesData{Addrid: 222, Aidx: 2, Data: store.ResourcesData{Total: 789}} + old2 := trackerdb.PersistedResourcesData{Addrid: 222, Aidx: 2, Data: trackerdb.ResourcesData{Total: 789}} ad.upsertOld(addr1, old2) a.Equal(2, ad.len()) data = ad.getByIdx(0) @@ -1291,7 +1292,7 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) { } addrsids := make(map[basics.Address]int64) err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { - store.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion) + sqlitedriver.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion) for i := range addrs { res, err := tx.ExecContext(ctx, "INSERT INTO accountbase (address, data) VALUES (?, ?)", addrs[i][:], []byte{12, 3, 4}) @@ -1309,7 +1310,7 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) { require.NoError(t, err) err = dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := store.NewAccountsSQLReaderWriter(tx) + arw := sqlitedriver.NewAccountsSQLReaderWriter(tx) for addr, addrid := range addrsids { retAddr, err := arw.LookupAccountAddressFromAddressID(ctx, addrid) @@ -1417,7 +1418,7 @@ func (m *mockAccountWriter) setResource(addr basics.Address, cidx basics.Creatab return nil } -func (m *mockAccountWriter) Lookup(addr basics.Address) (pad store.PersistedAccountData, ok bool, err error) { +func (m *mockAccountWriter) Lookup(addr basics.Address) (pad trackerdb.PersistedAccountData, ok bool, err error) { rowid, ok := m.addresses[addr] if !ok { return @@ -1433,7 +1434,7 @@ func (m *mockAccountWriter) Lookup(addr basics.Address) (pad store.PersistedAcco return } -func (m *mockAccountWriter) LookupResource(addr basics.Address, cidx 
basics.CreatableIndex) (prd store.PersistedResourcesData, ok bool, err error) { +func (m *mockAccountWriter) LookupResource(addr basics.Address, cidx basics.CreatableIndex) (prd trackerdb.PersistedResourcesData, ok bool, err error) { rowid, ok := m.addresses[addr] if !ok { return @@ -1460,7 +1461,7 @@ func (m *mockAccountWriter) LookupResource(addr basics.Address, cidx basics.Crea return } -func (m *mockAccountWriter) InsertAccount(addr basics.Address, normBalance uint64, data store.BaseAccountData) (rowid int64, err error) { +func (m *mockAccountWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (rowid int64, err error) { rowid, ok := m.addresses[addr] if ok { err = fmt.Errorf("insertAccount: addr %s, rowid %d: UNIQUE constraint failed", addr.String(), rowid) @@ -1487,7 +1488,7 @@ func (m *mockAccountWriter) DeleteAccount(rowid int64) (rowsAffected int64, err return 1, nil } -func (m *mockAccountWriter) UpdateAccount(rowid int64, normBalance uint64, data store.BaseAccountData) (rowsAffected int64, err error) { +func (m *mockAccountWriter) UpdateAccount(rowid int64, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) { if _, ok := m.rowids[rowid]; !ok { return 0, fmt.Errorf("updateAccount: not found rowid %d", rowid) } @@ -1502,13 +1503,13 @@ func (m *mockAccountWriter) UpdateAccount(rowid int64, normBalance uint64, data return 1, nil } -func (m *mockAccountWriter) InsertResource(addrid int64, aidx basics.CreatableIndex, data store.ResourcesData) (rowid int64, err error) { +func (m *mockAccountWriter) InsertResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowid int64, err error) { key := mockResourcesKey{addrid, aidx} if _, ok := m.resources[key]; ok { return 0, fmt.Errorf("insertResource: (%d, %d): UNIQUE constraint failed", addrid, aidx) } // use persistedResourcesData.AccountResource for conversion - prd := store.PersistedResourcesData{Data: data} + prd := trackerdb.PersistedResourcesData{Data: data} new := prd.AccountResource() m.resources[key] = new return 1, nil @@ -1523,14 +1524,14 @@ func (m *mockAccountWriter) DeleteResource(addrid int64, aidx basics.CreatableIn return 1, nil } -func (m *mockAccountWriter) UpdateResource(addrid int64, aidx basics.CreatableIndex, data store.ResourcesData) (rowsAffected int64, err error) { +func (m *mockAccountWriter) UpdateResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) { key := mockResourcesKey{addrid, aidx} old, ok := m.resources[key] if !ok { return 0, fmt.Errorf("updateResource: not found (%d, %d)", addrid, aidx) } // use persistedResourcesData.AccountResource for conversion - prd := store.PersistedResourcesData{Data: data} + prd := trackerdb.PersistedResourcesData{Data: data} new := prd.AccountResource() if new == old { return 0, nil @@ -1762,7 +1763,7 @@ func TestAccountUnorderedUpdates(t *testing.T) { a.NoError(err) a.True(ok) baseAccounts.write(pad) - baseAccounts.write(store.PersistedAccountData{Addr: addr2}) + baseAccounts.write(trackerdb.PersistedAccountData{Addr: addr2}) acctDeltas := makeCompactAccountDeltas(updates, dbRound, false, baseAccounts) a.Empty(acctDeltas.misses) @@ -1855,7 +1856,7 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) { a.NoError(err) a.True(ok) baseAccounts.write(pad) - baseAccounts.write(store.PersistedAccountData{Addr: addr2}) // put an empty record for addr2 to get rid of lookups + 
baseAccounts.write(trackerdb.PersistedAccountData{Addr: addr2}) // put an empty record for addr2 to get rid of lookups acctDeltas := makeCompactAccountDeltas(updates, dbRound, false, baseAccounts) a.Empty(acctDeltas.misses) @@ -1901,7 +1902,7 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) { a.Equal(1, len(upd)) a.Equal(int64(0), upd[0].Addrid) a.Equal(basics.CreatableIndex(aidx), upd[0].Aidx) - a.Equal(store.MakeResourcesData(uint64(0)), upd[0].Data) + a.Equal(trackerdb.MakeResourcesData(uint64(0)), upd[0].Data) } } @@ -1909,7 +1910,7 @@ func BenchmarkLRUResources(b *testing.B) { var baseResources lruResources baseResources.init(nil, 1000, 850) - var data store.PersistedResourcesData + var data trackerdb.PersistedResourcesData var has bool addrs := make([]basics.Address, 850) for i := 0; i < 850; i++ { @@ -1947,7 +1948,7 @@ func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), er tx, err := dbs.Wdb.Handle.Begin() require.NoError(b, err) - _, err = store.AccountsInitLightTest(b, tx, make(map[basics.Address]basics.AccountData), proto) + _, err = sqlitedriver.AccountsInitLightTest(b, tx, make(map[basics.Address]basics.AccountData), proto) require.NoError(b, err) err = tx.Commit() require.NoError(b, err) @@ -1958,7 +1959,7 @@ func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), er for batch := 0; batch <= batchCount; batch++ { tx, err = dbs.Wdb.Handle.Begin() require.NoError(b, err) - writer, err := store.MakeAccountsSQLWriter(tx, false, false, true, false) + writer, err := sqlitedriver.MakeAccountsSQLWriter(tx, false, false, true, false) require.NoError(b, err) for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ { err = writer.UpsertKvPair(fmt.Sprintf("%d", cnt), make([]byte, boxSize)) @@ -2000,7 +2001,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) { require.NoError(b, err) var v sql.NullString for i := 0; i < b.N; i++ { - var pv store.PersistedKVData + var pv trackerdb.PersistedKVData boxName := boxNames[i%totalBoxes] b.StartTimer() err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.Round, &v) @@ -2031,7 +2032,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) { require.NoError(b, err) var v sql.NullString for i := 0; i < b.N+lookback; i++ { - var pv store.PersistedKVData + var pv trackerdb.PersistedKVData boxName := boxNames[i%totalBoxes] err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.Round, &v) require.NoError(b, err) @@ -2078,11 +2079,11 @@ func TestAccountOnlineQueries(t *testing.T) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - dbs, _ := store.DbOpenTrackerTest(t, true) + dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true) dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() - err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { arw, err := tx.MakeAccountsReaderWriter() if err != nil { @@ -2395,7 +2396,7 @@ type mockOnlineAccountsWriter struct { rowid int64 } -func (w *mockOnlineAccountsWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data store.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) { +func (w *mockOnlineAccountsWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) { w.rowid++ return w.rowid, nil } @@ -2422,7 +2423,7 @@ func 
TestAccountOnlineAccountsNewRound(t *testing.T) { // acct B is new and offline deltaB := onlineAccountDelta{ address: addrB, - newAcct: []store.BaseOnlineAccountData{{ + newAcct: []trackerdb.BaseOnlineAccountData{{ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, }}, updRound: []uint64{1}, @@ -2431,9 +2432,9 @@ func TestAccountOnlineAccountsNewRound(t *testing.T) { // acct C is new and online deltaC := onlineAccountDelta{ address: addrC, - newAcct: []store.BaseOnlineAccountData{{ + newAcct: []trackerdb.BaseOnlineAccountData{{ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 500}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 500}, }}, newStatus: []basics.Status{basics.Online}, updRound: []uint64{2}, @@ -2441,15 +2442,15 @@ func TestAccountOnlineAccountsNewRound(t *testing.T) { // acct D is old and went offline deltaD := onlineAccountDelta{ address: addrD, - oldAcct: store.PersistedOnlineAccountData{ + oldAcct: trackerdb.PersistedOnlineAccountData{ Addr: addrD, - AccountData: store.BaseOnlineAccountData{ + AccountData: trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: 400_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 500}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 500}, }, Rowid: 1, }, - newAcct: []store.BaseOnlineAccountData{{ + newAcct: []trackerdb.BaseOnlineAccountData{{ MicroAlgos: basics.MicroAlgos{Raw: 400_000_000}, }}, newStatus: []basics.Status{basics.Offline}, @@ -2459,17 +2460,17 @@ func TestAccountOnlineAccountsNewRound(t *testing.T) { // acct E is old online deltaE := onlineAccountDelta{ address: addrE, - oldAcct: store.PersistedOnlineAccountData{ + oldAcct: trackerdb.PersistedOnlineAccountData{ Addr: addrE, - AccountData: store.BaseOnlineAccountData{ + AccountData: trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: 500_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 500}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 500}, }, Rowid: 2, }, - newAcct: []store.BaseOnlineAccountData{{ + newAcct: []trackerdb.BaseOnlineAccountData{{ MicroAlgos: basics.MicroAlgos{Raw: 500_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 600}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 600}, }}, newStatus: []basics.Status{basics.Online}, updRound: []uint64{4}, @@ -2520,13 +2521,13 @@ func TestAccountOnlineAccountsNewRoundFlip(t *testing.T) { // acct A is new, offline and then online deltaA := onlineAccountDelta{ address: addrA, - newAcct: []store.BaseOnlineAccountData{ + newAcct: []trackerdb.BaseOnlineAccountData{ { MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, }, { MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 100}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 100}, }, }, updRound: []uint64{1, 2}, @@ -2535,10 +2536,10 @@ func TestAccountOnlineAccountsNewRoundFlip(t *testing.T) { // acct B is new and online and then offline deltaB := onlineAccountDelta{ address: addrB, - newAcct: []store.BaseOnlineAccountData{ + newAcct: []trackerdb.BaseOnlineAccountData{ { MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 200}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 200}, }, { MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, @@ -2550,18 +2551,18 @@ func TestAccountOnlineAccountsNewRoundFlip(t *testing.T) { // acct C is old online, then online and then offline deltaC := 
onlineAccountDelta{ address: addrC, - oldAcct: store.PersistedOnlineAccountData{ + oldAcct: trackerdb.PersistedOnlineAccountData{ Addr: addrC, - AccountData: store.BaseOnlineAccountData{ + AccountData: trackerdb.BaseOnlineAccountData{ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 300}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 300}, }, Rowid: 1, }, - newAcct: []store.BaseOnlineAccountData{ + newAcct: []trackerdb.BaseOnlineAccountData{ { MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 301}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 301}, }, { MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, @@ -2595,10 +2596,10 @@ func TestAccountOnlineRoundParams(t *testing.T) { require.NoError(t, err) defer tx.Rollback() - arw := store.NewAccountsSQLReaderWriter(tx) + arw := sqlitedriver.NewAccountsSQLReaderWriter(tx) var accts map[basics.Address]basics.AccountData - store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) + sqlitedriver.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) // entry i is for round i+1 since db initialized with entry for round 0 const maxRounds = 40 // any number @@ -2649,9 +2650,9 @@ func TestOnlineAccountsDeletion(t *testing.T) { defer tx.Rollback() var accts map[basics.Address]basics.AccountData - store.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) + sqlitedriver.AccountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion) - arw := store.NewAccountsSQLReaderWriter(tx) + arw := sqlitedriver.NewAccountsSQLReaderWriter(tx) updates := compactOnlineAccountDeltas{} addrA := ledgertesting.RandomAddress() @@ -2659,17 +2660,17 @@ func TestOnlineAccountsDeletion(t *testing.T) { deltaA := onlineAccountDelta{ address: addrA, - newAcct: []store.BaseOnlineAccountData{ + newAcct: []trackerdb.BaseOnlineAccountData{ { MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 100}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 100}, }, { MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, }, { MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 600}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 600}, }, }, updRound: []uint64{1, 3, 6}, @@ -2678,14 +2679,14 @@ func TestOnlineAccountsDeletion(t *testing.T) { // acct B is new and online and then offline deltaB := onlineAccountDelta{ address: addrB, - newAcct: []store.BaseOnlineAccountData{ + newAcct: []trackerdb.BaseOnlineAccountData{ { MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 300}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 300}, }, { MicroAlgos: basics.MicroAlgos{Raw: 200_000_000}, - BaseVotingData: store.BaseVotingData{VoteFirstValid: 700}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 700}, }, }, updRound: []uint64{3, 7}, @@ -2693,7 +2694,7 @@ func TestOnlineAccountsDeletion(t *testing.T) { } updates.deltas = append(updates.deltas, deltaA, deltaB) - writer, err := store.MakeOnlineAccountsSQLWriter(tx, updates.len() > 0) + writer, err := sqlitedriver.MakeOnlineAccountsSQLWriter(tx, updates.len() > 0) if err != nil { return } @@ -2705,11 +2706,11 @@ func TestOnlineAccountsDeletion(t *testing.T) { require.NoError(t, err) require.Len(t, updated, 5) - queries, err := store.OnlineAccountsInitDbQueries(tx) + queries, err := 
sqlitedriver.OnlineAccountsInitDbQueries(tx)
 	require.NoError(t, err)
 	var count int64
-	var history []store.PersistedOnlineAccountData
+	var history []trackerdb.PersistedOnlineAccountData
 	var validThrough basics.Round
 	for _, rnd := range []basics.Round{1, 2, 3} {
 		err = arw.OnlineAccountsDelete(rnd)
@@ -2766,8 +2767,8 @@ func TestOnlineAccountsDeletion(t *testing.T) {
 	}
 }
-func randomBaseAccountData() store.BaseAccountData {
-	vd := store.BaseVotingData{
+func randomBaseAccountData() trackerdb.BaseAccountData {
+	vd := trackerdb.BaseVotingData{
 		VoteFirstValid:  basics.Round(crypto.RandUint64()),
 		VoteLastValid:   basics.Round(crypto.RandUint64()),
 		VoteKeyDilution: crypto.RandUint64(),
@@ -2776,7 +2777,7 @@ func randomBaseAccountData() store.BaseAccountData {
 	crypto.RandBytes(vd.StateProofID[:])
 	crypto.RandBytes(vd.SelectionID[:])
-	baseAD := store.BaseAccountData{
+	baseAD := trackerdb.BaseAccountData{
 		Status:      basics.Online,
 		MicroAlgos:  basics.MicroAlgos{Raw: crypto.RandUint64()},
 		RewardsBase: crypto.RandUint64(),
@@ -2813,12 +2814,12 @@ func makeString(len int) string {
 	return s
 }
-func randomAssetResourceData() store.ResourcesData {
+func randomAssetResourceData() trackerdb.ResourcesData {
 	currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
 	// resourcesData is suitable for keeping asset params, holding, app params, app local state
 	// but only asset + holding or app + local state can appear there
-	rdAsset := store.ResourcesData{
+	rdAsset := trackerdb.ResourcesData{
 		Total:         crypto.RandUint64(),
 		Decimals:      uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
 		DefaultFrozen: true,
@@ -2839,10 +2840,10 @@ func randomAssetResourceData() store.ResourcesData {
 	return rdAsset
 }
-func randomAppResourceData() store.ResourcesData {
+func randomAppResourceData() trackerdb.ResourcesData {
 	currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
-	rdApp := store.ResourcesData{
+	rdApp := trackerdb.ResourcesData{
 		SchemaNumUint:      crypto.RandUint64(),
 		SchemaNumByteSlice: crypto.RandUint64(),
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index 6a3d8fb390..6e80d1bcfc 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -32,7 +32,7 @@ import (
 	"github.com/algorand/go-algorand/data/basics"
 	"github.com/algorand/go-algorand/data/bookkeeping"
 	"github.com/algorand/go-algorand/ledger/ledgercore"
-	"github.com/algorand/go-algorand/ledger/store"
+	"github.com/algorand/go-algorand/ledger/store/trackerdb"
 	"github.com/algorand/go-algorand/logging"
 	"github.com/algorand/go-algorand/util/metrics"
 )
@@ -53,17 +53,17 @@ type modifiedOnlineAccount struct {
 //
 //msgp:ignore cachedOnlineAccount
 type cachedOnlineAccount struct {
-	store.BaseOnlineAccountData
+	trackerdb.BaseOnlineAccountData
 	updRound basics.Round
 }
 // onlineAccounts tracks history of online accounts
 type onlineAccounts struct {
 	// Connection to the database.
-	dbs store.TrackerStore
+	dbs trackerdb.TrackerStore
 	// Prepared SQL statements for fast accounts DB lookups.
- accountsq store.OnlineAccountsReader + accountsq trackerdb.OnlineAccountsReader // cachedDBRoundOnline is always exactly tracker DB round (and therefore, onlineAccountsRound()), // cached to use in lookup functions @@ -153,7 +153,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou ao.dbs = l.trackerDB() ao.log = l.trackerLog() - err = ao.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) error { + err = ao.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -181,7 +181,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou return } - ao.accountsq, err = ao.dbs.MakeOnlineAccountsReader() + ao.accountsq, err = ao.dbs.MakeOnlineAccountsOptimizedReader() if err != nil { return } @@ -409,7 +409,7 @@ func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error { // commitRound closure is called within the same transaction for all trackers // it receives current offset and dbRound -func (ao *onlineAccounts) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) (err error) { +func (ao *onlineAccounts) commitRound(ctx context.Context, tx trackerdb.TransactionScope, dcc *deferredCommitContext) (err error) { offset := dcc.offset dbRound := dcc.oldBase @@ -629,7 +629,7 @@ func (ao *onlineAccounts) lookupOnlineAccountData(rnd basics.Round, addr basics. var paramsOffset uint64 var rewardsProto config.ConsensusParams var rewardsLevel uint64 - var persistedData store.PersistedOnlineAccountData + var persistedData trackerdb.PersistedOnlineAccountData // the loop serves retrying logic if the database advanced while // the function was analyzing deltas or caches. 
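That retry comment is the heart of these lookup functions: they snapshot the round the in-memory state describes, consult the deltas and caches, fall back to the renamed trackerdb reader, and start over if a commit flushed new rounds to disk in the meantime. A rough sketch of that shape, with locking elided; lookupOnlineFromDB is a hypothetical stand-in for the query through ao.accountsq, not an API introduced by this patch:

func lookupWithRetrySketch(ao *onlineAccounts, rnd basics.Round, addr basics.Address) (trackerdb.PersistedOnlineAccountData, error) {
	for {
		dbRoundBefore := ao.cachedDBRoundOnline // base round the in-memory deltas build on
		// 1) serve from in-memory deltas/caches when they cover rnd (omitted here)
		// 2) otherwise fall back to the database reader
		data, err := lookupOnlineFromDB(ao, addr, rnd)
		if err != nil {
			return trackerdb.PersistedOnlineAccountData{}, err
		}
		if ao.cachedDBRoundOnline == dbRoundBefore {
			return data, nil // the tracker DB did not advance underneath us
		}
		// a commit moved the tracker DB round mid-lookup; retry against the new base
	}
}

// lookupOnlineFromDB stands in for the real read through ao.accountsq.
func lookupOnlineFromDB(ao *onlineAccounts, addr basics.Address, rnd basics.Round) (trackerdb.PersistedOnlineAccountData, error) {
	return trackerdb.PersistedOnlineAccountData{}, nil // placeholder
}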
@@ -828,7 +828,7 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou var accts map[basics.Address]*ledgercore.OnlineAccount start := time.Now() ledgerAccountsonlinetopCount.Inc(nil) - err = ao.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err = ao.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index 03636f1129..98d1c1175b 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -29,7 +29,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -78,7 +78,7 @@ func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracke err := lt.prepareCommit(dcc) require.NoError(t, err) } - err := ml.trackers.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := ml.trackers.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err @@ -808,7 +808,7 @@ func TestAcctOnlineRoundParamsCache(t *testing.T) { var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData var endRound basics.Round - err := ao.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err := ao.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -1297,7 +1297,7 @@ func TestAcctOnlineVotersLongerHistory(t *testing.T) { // DB has all the required history tho var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData var endRound basics.Round - err = oa.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err = oa.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 8e08571667..378cd479dd 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -35,7 +35,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" @@ -152,10 +152,10 @@ type modifiedKvValue struct { type accountUpdates struct { // Connection to the database. - dbs store.TrackerStore + dbs trackerdb.TrackerStore // Optimized reader for fast accounts DB lookups. 
- accountsq store.AccountsReader + accountsq trackerdb.AccountsReader // cachedDBRound is always exactly tracker DB round (and therefore, accountsRound()), // cached to use in lookup functions @@ -931,7 +931,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou start := time.Now() ledgerAccountsinitCount.Inc(nil) - err = au.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) error { + err = au.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -951,7 +951,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou return } - au.accountsq, err = au.dbs.MakeAccountsReader() + au.accountsq, err = au.dbs.MakeAccountsOptimizedReader() if err != nil { return } @@ -1075,8 +1075,8 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account var offset uint64 var rewardsProto config.ConsensusParams var rewardsLevel uint64 - var persistedData store.PersistedAccountData - var persistedResources []store.PersistedResourcesData + var persistedData trackerdb.PersistedAccountData + var persistedResources []trackerdb.PersistedResourcesData var resourceDbRound basics.Round withRewards := true @@ -1300,7 +1300,7 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address, } }() var offset uint64 - var persistedData store.PersistedResourcesData + var persistedData trackerdb.PersistedResourcesData for { currentDbRound := au.cachedDBRound currentDeltaLen := len(au.deltas) @@ -1422,7 +1422,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add } }() var offset uint64 - var persistedData store.PersistedAccountData + var persistedData trackerdb.PersistedAccountData for { currentDbRound := au.cachedDBRound currentDeltaLen := len(au.deltas) @@ -1669,7 +1669,7 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error { // commitRound is called within the same transaction for all trackers it // receives current offset and dbRound -func (au *accountUpdates) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) (err error) { +func (au *accountUpdates) commitRound(ctx context.Context, tx trackerdb.TransactionScope, dcc *deferredCommitContext) (err error) { offset := dcc.offset dbRound := dcc.oldBase diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 10a49ef59e..baa3e8aa81 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -37,7 +37,8 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -49,7 +50,7 @@ var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36} type mockLedgerForTracker struct { - dbs store.TrackerStore + dbs trackerdb.TrackerStore blocks []blockEntry deltas []ledgercore.StateDelta log logging.Logger 
@@ -92,7 +93,7 @@ func setupAccts(niter int) []map[basics.Address]basics.AccountData { } func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, l logging.Logger) *mockLedgerForTracker { - dbs, fileName := store.DbOpenTrackerTest(t, inMemory) + dbs, fileName := sqlitedriver.DbOpenTrackerTest(t, inMemory) dbs.SetLogger(l) blocks := randomInitChain(consensusVersion, initialBlocksCount) @@ -164,7 +165,7 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker { dbs.Rdb.SetLogger(dblogger) dbs.Wdb.SetLogger(dblogger) - newLedgerTracker.dbs = store.CreateTrackerSQLStore(dbs) + newLedgerTracker.dbs = sqlitedriver.CreateTrackerSQLStore(dbs) return newLedgerTracker } @@ -217,7 +218,7 @@ func (ml *mockLedgerForTracker) BlockHdr(rnd basics.Round) (bookkeeping.BlockHea return ml.blocks[int(rnd)].block.BlockHeader, nil } -func (ml *mockLedgerForTracker) trackerDB() store.TrackerStore { +func (ml *mockLedgerForTracker) trackerDB() trackerdb.TrackerStore { return ml.dbs } @@ -261,7 +262,7 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address return } - err = au.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + err = au.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { var err0 error arw, err := tx.MakeAccountsReaderWriter() if err != nil { @@ -567,7 +568,7 @@ func testAcctUpdates(t *testing.T, conf config.Local) { // check the account totals. var dbRound basics.Round - err := ml.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err := ml.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -585,7 +586,7 @@ func testAcctUpdates(t *testing.T, conf config.Local) { expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{}) var actualTotals ledgercore.AccountTotals - err = ml.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err = ml.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -697,26 +698,26 @@ func BenchmarkBalancesChanges(b *testing.B) { func BenchmarkCalibrateNodesPerPage(b *testing.B) { b.Skip("This benchmark was used to tune up the NodesPerPage; it's not really useful otherwise") - defaultNodesPerPage := store.MerkleCommitterNodesPerPage + defaultNodesPerPage := trackerdb.MerkleCommitterNodesPerPage for nodesPerPage := 32; nodesPerPage < 300; nodesPerPage++ { b.Run(fmt.Sprintf("Test_merkleCommitterNodesPerPage_%d", nodesPerPage), func(b *testing.B) { - store.MerkleCommitterNodesPerPage = int64(nodesPerPage) + trackerdb.MerkleCommitterNodesPerPage = int64(nodesPerPage) BenchmarkBalancesChanges(b) }) } - store.MerkleCommitterNodesPerPage = defaultNodesPerPage + trackerdb.MerkleCommitterNodesPerPage = defaultNodesPerPage } func BenchmarkCalibrateCacheNodeSize(b *testing.B) { //b.Skip("This benchmark was used to tune up the TrieCachedNodesCount; it's not really useful otherwise") - defaultTrieCachedNodesCount := store.TrieCachedNodesCount + defaultTrieCachedNodesCount := trackerdb.TrieCachedNodesCount for cacheSize := 3000; cacheSize < 50000; cacheSize += 1000 { b.Run(fmt.Sprintf("Test_cacheSize_%d", cacheSize), func(b *testing.B) { - 
store.TrieCachedNodesCount = cacheSize
+			trackerdb.TrieCachedNodesCount = cacheSize
 			BenchmarkBalancesChanges(b)
 		})
 	}
-	store.TrieCachedNodesCount = defaultTrieCachedNodesCount
+	trackerdb.TrieCachedNodesCount = defaultTrieCachedNodesCount
 }
 // The TestAcctUpdatesUpdatesCorrectness conducts a correctness test for the accounts update in the following way -
@@ -996,12 +997,12 @@ func TestListCreatables(t *testing.T) {
 	numElementsPerSegement := 25
 	// set up the database
-	dbs, _ := store.DbOpenTrackerTest(t, true)
+	dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
 	dblogger := logging.TestingLog(t)
 	dbs.SetLogger(dblogger)
 	defer dbs.Close()
-	err := dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) {
+	err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
 		proto := config.Consensus[protocol.ConsensusCurrentVersion]
 		accts := make(map[basics.Address]basics.AccountData)
@@ -1375,20 +1376,20 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
 			var updates compactAccountDeltas
 			for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
 				addr := ledgertesting.RandomAddress()
-				acctData := store.BaseAccountData{}
+				acctData := trackerdb.BaseAccountData{}
 				acctData.MicroAlgos.Raw = 1
 				updates.upsert(addr, accountDelta{newAcct: acctData})
 				i++
 			}
-			err := ml.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) {
+			err := ml.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
 				_, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
 				return
 			})
 			require.NoError(b, err)
 	}
-	err := ml.dbs.Batch(func(ctx context.Context, tx store.BatchScope) (err error) {
+	err := ml.dbs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
 		aw, err := tx.MakeAccountsWriter()
 		if err != nil {
 			return err
@@ -1460,17 +1461,17 @@ func TestCompactDeltas(t *testing.T) {
 	// check deltas with missing accounts
 	delta, _ := outAccountDeltas.get(addrs[0])
-	require.Equal(t, store.PersistedAccountData{}, delta.oldAcct)
+	require.Equal(t, trackerdb.PersistedAccountData{}, delta.oldAcct)
 	require.NotEmpty(t, delta.newAcct)
 	require.Equal(t, ledgercore.ModifiedCreatable{Creator: addrs[2], Created: true, Ndeltas: 1}, outCreatableDeltas[100])
 	// check deltas without missing accounts
-	baseAccounts.write(store.PersistedAccountData{Addr: addrs[0], AccountData: store.BaseAccountData{}})
+	baseAccounts.write(trackerdb.PersistedAccountData{Addr: addrs[0], AccountData: trackerdb.BaseAccountData{}})
 	outAccountDeltas = makeCompactAccountDeltas(stateDeltas, basics.Round(1), true, baseAccounts)
 	require.Equal(t, 0, len(outAccountDeltas.misses))
 	delta, _ = outAccountDeltas.get(addrs[0])
-	require.Equal(t, store.PersistedAccountData{Addr: addrs[0]}, delta.oldAcct)
-	require.Equal(t, store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 2}, UpdateRound: 2}, delta.newAcct)
+	require.Equal(t, trackerdb.PersistedAccountData{Addr: addrs[0]}, delta.oldAcct)
+	require.Equal(t, trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 2}, UpdateRound: 2}, delta.newAcct)
 	require.Equal(t, ledgercore.ModifiedCreatable{Creator: addrs[2], Created: true, Ndeltas: 1}, outCreatableDeltas[100])
 	baseAccounts.init(nil, 100, 80)
@@ -1482,8 +1483,8 @@ func TestCompactDeltas(t *testing.T) {
 	stateDeltas[1].Creatables[100] = ledgercore.ModifiedCreatable{Creator: addrs[2], Created: false}
 	stateDeltas[1].Creatables[101] = ledgercore.ModifiedCreatable{Creator: addrs[4], Created: true}
-
baseAccounts.write(store.PersistedAccountData{Addr: addrs[0], AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 1}}}) - baseAccounts.write(store.PersistedAccountData{Addr: addrs[3], AccountData: store.BaseAccountData{}}) + baseAccounts.write(trackerdb.PersistedAccountData{Addr: addrs[0], AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: 1}}}) + baseAccounts.write(trackerdb.PersistedAccountData{Addr: addrs[3], AccountData: trackerdb.BaseAccountData{}}) outAccountDeltas = makeCompactAccountDeltas(stateDeltas, basics.Round(1), true, baseAccounts) outCreatableDeltas = compactCreatableDeltas(stateDeltas) @@ -1530,22 +1531,22 @@ func TestCompactDeltasResources(t *testing.T) { delta, _ := outResourcesDeltas.get(addrs[0], 100) require.NotEmpty(t, delta.newResource) require.True(t, !delta.newResource.IsApp() && !delta.newResource.IsAsset()) - require.Equal(t, store.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) + require.Equal(t, trackerdb.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) delta, _ = outResourcesDeltas.get(addrs[1], 101) require.NotEmpty(t, delta.newResource) require.True(t, !delta.newResource.IsApp() && !delta.newResource.IsAsset()) - require.Equal(t, store.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) + require.Equal(t, trackerdb.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) delta, _ = outResourcesDeltas.get(addrs[2], 102) require.NotEmpty(t, delta.newResource) require.True(t, !delta.newResource.IsApp() && !delta.newResource.IsAsset()) - require.Equal(t, store.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) + require.Equal(t, trackerdb.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) delta, _ = outResourcesDeltas.get(addrs[3], 103) require.NotEmpty(t, delta.newResource) require.True(t, !delta.newResource.IsApp() && !delta.newResource.IsAsset()) - require.Equal(t, store.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) + require.Equal(t, trackerdb.ResourceFlagsNotHolding, delta.newResource.ResourceFlags) // check actual data on non-empty input stateDeltas = make([]ledgercore.StateDelta, 1) @@ -1617,19 +1618,19 @@ func TestCompactDeltasResources(t *testing.T) { for i := int64(0); i < 4; i++ { delta, idx := outResourcesDeltas.get(addrs[i], basics.CreatableIndex(100+i)) require.NotEqual(t, -1, idx) - require.Equal(t, store.PersistedResourcesData{Aidx: basics.CreatableIndex(100 + i)}, delta.oldResource) + require.Equal(t, trackerdb.PersistedResourcesData{Aidx: basics.CreatableIndex(100 + i)}, delta.oldResource) if i%2 == 0 { delta, idx = outResourcesDeltas.get(addrs[i], basics.CreatableIndex(200+i)) require.NotEqual(t, -1, idx) - require.Equal(t, store.PersistedResourcesData{Aidx: basics.CreatableIndex(200 + i)}, delta.oldResource) + require.Equal(t, trackerdb.PersistedResourcesData{Aidx: basics.CreatableIndex(200 + i)}, delta.oldResource) } } // check deltas without missing accounts for i := int64(0); i < 4; i++ { - baseResources.write(store.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(100 + i)}, addrs[i]) + baseResources.write(trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(100 + i)}, addrs[i]) if i%2 == 0 { - baseResources.write(store.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(200 + i)}, addrs[i]) + baseResources.write(trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(200 + i)}, addrs[i]) } } @@ -1641,11 +1642,11 @@ func TestCompactDeltasResources(t *testing.T) { for i := 
int64(0); i < 4; i++ {
 		delta, idx := outResourcesDeltas.get(addrs[i], basics.CreatableIndex(100+i))
 		require.NotEqual(t, -1, idx)
-		require.Equal(t, store.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(100 + i)}, delta.oldResource)
+		require.Equal(t, trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(100 + i)}, delta.oldResource)
 		if i%2 == 0 {
 			delta, idx = outResourcesDeltas.get(addrs[i], basics.CreatableIndex(200+i))
 			require.NotEqual(t, -1, idx)
-			require.Equal(t, store.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(200 + i)}, delta.oldResource)
+			require.Equal(t, trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(200 + i)}, delta.oldResource)
 		}
 	}
@@ -1661,7 +1662,7 @@ func TestCompactDeltasResources(t *testing.T) {
 	appLocalState204 := basics.AppLocalState{KeyValue: basics.TealKeyValue{"204": basics.TealValue{Type: basics.TealBytesType, Bytes: "204"}}}
 	stateDeltas[1].Accts.UpsertAppResource(addrs[4], 104, ledgercore.AppParamsDelta{Params: &appParams104}, ledgercore.AppLocalStateDelta{LocalState: &appLocalState204})
-	baseResources.write(store.PersistedResourcesData{Addrid: 5 /* 4+1 */, Aidx: basics.CreatableIndex(104)}, addrs[4])
+	baseResources.write(trackerdb.PersistedResourcesData{Addrid: 5 /* 4+1 */, Aidx: basics.CreatableIndex(104)}, addrs[4])
 	outResourcesDeltas = makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources)
 	require.Equal(t, 0, len(outResourcesDeltas.misses))
@@ -2158,7 +2159,7 @@ func TestAcctUpdatesResources(t *testing.T) {
 	err := au.prepareCommit(dcc)
 	require.NoError(t, err)
-	err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) {
+	err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
 		arw, err := tx.MakeAccountsReaderWriter()
 		if err != nil {
 			return err
@@ -2445,7 +2446,7 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe
 	err := au.prepareCommit(dcc)
 	require.NoError(t, err)
-	err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) {
+	err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
 		arw, err := tx.MakeAccountsReaderWriter()
 		if err != nil {
 			return err
@@ -2746,7 +2747,7 @@ func TestAcctUpdatesLookupStateDelta(t *testing.T) {
 	// Stores KVMods for each round. These are used as the source of truth for comparing retrieved StateDeltas.
 	var roundMods = make(map[basics.Round]map[string]ledgercore.KvValueDelta)
 	// Sets up random keys & values to store.
kvCnt := 1000 kvsPerBlock := 100 curKV := 0 diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 1887da254e..03041dcb50 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -31,7 +31,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -757,12 +757,12 @@ return` pad, err := l.accts.accountsq.LookupAccount(userLocal) a.NoError(err) - a.Equal(store.BaseAccountData{}, pad.AccountData) + a.Equal(trackerdb.BaseAccountData{}, pad.AccountData) a.Zero(pad.Rowid) prd, err := l.accts.accountsq.LookupResources(userLocal, basics.CreatableIndex(appIdx), basics.AppCreatable) a.NoError(err) a.Zero(prd.Addrid) - emptyResourceData := store.MakeResourcesData(0) + emptyResourceData := trackerdb.MakeResourcesData(0) a.Equal(emptyResourceData, prd.Data) } @@ -898,7 +898,7 @@ return` prd, err := l.accts.accountsq.LookupResources(creator, basics.CreatableIndex(appIdx), basics.AppCreatable) a.NoError(err) a.Zero(prd.Addrid) - emptyResourceData := store.MakeResourcesData(0) + emptyResourceData := trackerdb.MakeResourcesData(0) a.Equal(emptyResourceData, prd.Data) } diff --git a/ledger/archival_test.go b/ledger/archival_test.go index d8783d9882..f30821a351 100644 --- a/ledger/archival_test.go +++ b/ledger/archival_test.go @@ -39,8 +39,8 @@ import ( "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/ledger/store/blockdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -76,7 +76,7 @@ func (wl *wrappedLedger) Latest() basics.Round { return wl.l.Latest() } -func (wl *wrappedLedger) trackerDB() store.TrackerStore { +func (wl *wrappedLedger) trackerDB() trackerdb.TrackerStore { return wl.l.trackerDB() } diff --git a/ledger/bulletin.go b/ledger/bulletin.go index 19baf2820b..cea20f1e70 100644 --- a/ledger/bulletin.go +++ b/ledger/bulletin.go @@ -25,7 +25,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ) // notifier is a struct that encapsulates a single-shot channel; it will only be signaled once. 
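The single-shot channel mentioned in that comment is a common Go idiom worth spelling out once; a freestanding sketch of the idea follows (the actual bulletin notifier in this file differs in its details):

import "sync"

type notifierSketch struct {
	once   sync.Once
	signal chan struct{}
}

func makeNotifierSketch() *notifierSketch {
	return &notifierSketch{signal: make(chan struct{})}
}

// notify closes the channel exactly once; repeated calls are harmless no-ops.
func (n *notifierSketch) notify() {
	n.once.Do(func() { close(n.signal) })
}

// wait blocks until notify has fired; a closed channel never blocks, so every
// waiter past the first signal returns immediately.
func (n *notifierSketch) wait() {
	<-n.signal
}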
@@ -116,7 +116,7 @@ func (b *bulletin) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (b *bulletin) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { +func (b *bulletin) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 1f796efde5..154fe86ce0 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -41,7 +41,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" @@ -91,14 +91,14 @@ type catchpointTracker struct { enableGeneratingCatchpointFiles bool // Prepared SQL statements for fast accounts DB lookups. - accountsq store.AccountsReader + accountsq trackerdb.AccountsReader // log copied from ledger log logging.Logger // Connection to the database. - dbs store.TrackerStore - catchpointStore store.CatchpointReaderWriter + dbs trackerdb.TrackerStore + catchpointStore trackerdb.CatchpointReaderWriter // The last catchpoint label that was written to the database. Should always align with what's in the database. // note that this is the last catchpoint *label* and not the catchpoint file. @@ -210,7 +210,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic } } - return ct.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + return ct.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -222,7 +222,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic } // Clear the db record. - return crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateWritingFirstStageInfo, 0) + return crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingFirstStageInfo, 0) }) } @@ -230,7 +230,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic // a crash. func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round) error { v, err := ct.catchpointStore.ReadCatchpointStateUint64( - context.Background(), store.CatchpointStateWritingFirstStageInfo) + context.Background(), trackerdb.CatchpointStateWritingFirstStageInfo) if err != nil { return err } @@ -240,9 +240,9 @@ func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round) er // First, delete the unfinished data file. relCatchpointDataFilePath := filepath.Join( - store.CatchpointDirName, + trackerdb.CatchpointDirName, makeCatchpointDataFilePath(dbRound)) - err = store.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath) if err != nil { return err } @@ -259,9 +259,9 @@ func (ct *catchpointTracker) finishCatchpointsAfterCrash(catchpointLookback uint for _, record := range records { // First, delete the unfinished catchpoint file. 
relCatchpointFilePath := filepath.Join( - store.CatchpointDirName, - store.MakeCatchpointFilePath(basics.Round(record.Round))) - err = store.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath) + trackerdb.CatchpointDirName, + trackerdb.MakeCatchpointFilePath(basics.Round(record.Round))) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath) if err != nil { return err } @@ -285,7 +285,7 @@ func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round) error { ctx := context.Background() catchpointLookback, err := ct.catchpointStore.ReadCatchpointStateUint64( - ctx, store.CatchpointStateCatchpointLookback) + ctx, trackerdb.CatchpointStateCatchpointLookback) if err != nil { return err } @@ -327,20 +327,20 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Rou ct.catchpointDataSlowWriting = make(chan struct{}, 1) close(ct.catchpointDataSlowWriting) - err = ct.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + err = ct.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { return ct.initializeHashes(ctx, tx, dbRound) }) if err != nil { return err } - ct.accountsq, err = ct.dbs.MakeAccountsReader() + ct.accountsq, err = ct.dbs.MakeAccountsOptimizedReader() if err != nil { return } ct.lastCatchpointLabel, err = ct.catchpointStore.ReadCatchpointStateString( - context.Background(), store.CatchpointStateLastCatchpoint) + context.Background(), trackerdb.CatchpointStateLastCatchpoint) if err != nil { return } @@ -482,7 +482,7 @@ func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (ct *catchpointTracker) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) (err error) { +func (ct *catchpointTracker) commitRound(ctx context.Context, tx trackerdb.TransactionScope, dcc *deferredCommitContext) (err error) { treeTargetRound := basics.Round(0) offset := dcc.offset dbRound := dcc.oldBase @@ -503,7 +503,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx store.Transacti } if ct.catchpointEnabled() { - var mc store.MerkleCommitter + var mc trackerdb.MerkleCommitter mc, err = tx.MakeMerkleCommitter(false) if err != nil { return @@ -511,7 +511,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx store.Transacti var trie *merkletrie.Trie if ct.balancesTrie == nil { - trie, err = merkletrie.MakeTrie(mc, store.TrieMemoryConfig) + trie, err = merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) if err != nil { ct.log.Warnf("unable to create merkle trie during committedUpTo: %v", err) return err @@ -543,13 +543,13 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx store.Transacti } if dcc.catchpointFirstStage { - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateWritingFirstStageInfo, 1) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingFirstStageInfo, 1) if err != nil { return err } } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchpointLookback, dcc.catchpointLookback) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchpointLookback, dcc.catchpointLookback) if err != nil { return err } @@ -709,7 +709,7 @@ func repackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestC // Create a catchpoint (a label and possibly a file with db record) and remove // the unfinished catchpoint record. 
-func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound basics.Round, round basics.Round, dataInfo store.CatchpointFirstStageInfo, blockHash crypto.Digest) error { +func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound basics.Round, round basics.Round, dataInfo trackerdb.CatchpointFirstStageInfo, blockHash crypto.Digest) error { startTime := time.Now() label := ledgercore.MakeCatchpointLabel( round, blockHash, dataInfo.TrieBalancesHash, dataInfo.Totals).String() @@ -719,7 +719,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound round, accountsRound, label) err := ct.catchpointStore.WriteCatchpointStateString( - ctx, store.CatchpointStateLastCatchpoint, label) + ctx, trackerdb.CatchpointStateLastCatchpoint, label) if err != nil { return err } @@ -732,7 +732,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound return nil } - catchpointDataFilePath := filepath.Join(ct.dbDirectory, store.CatchpointDirName) + catchpointDataFilePath := filepath.Join(ct.dbDirectory, trackerdb.CatchpointDirName) catchpointDataFilePath = filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound)) @@ -759,7 +759,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound } relCatchpointFilePath := - filepath.Join(store.CatchpointDirName, store.MakeCatchpointFilePath(round)) + filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) absCatchpointFilePath := filepath.Join(ct.dbDirectory, relCatchpointFilePath) err = os.MkdirAll(filepath.Dir(absCatchpointFilePath), 0700) @@ -777,7 +777,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound return err } - err = ct.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = ct.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -868,8 +868,8 @@ func (ct *catchpointTracker) pruneFirstStageRecordsData(ctx context.Context, max for _, round := range rounds { relCatchpointDataFilePath := - filepath.Join(store.CatchpointDirName, makeCatchpointDataFilePath(round)) - err = store.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath) + filepath.Join(trackerdb.CatchpointDirName, makeCatchpointDataFilePath(round)) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath) if err != nil { return err } @@ -941,7 +941,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun for i := 0; i < accountsDeltas.len(); i++ { delta := accountsDeltas.getByIdx(i) if !delta.oldAcct.AccountData.IsEmpty() { - deleteHash := store.AccountHashBuilderV6(delta.address, &delta.oldAcct.AccountData, protocol.Encode(&delta.oldAcct.AccountData)) + deleteHash := trackerdb.AccountHashBuilderV6(delta.address, &delta.oldAcct.AccountData, protocol.Encode(&delta.oldAcct.AccountData)) deleted, err = ct.balancesTrie.Delete(deleteHash) if err != nil { return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), delta.address, err) @@ -954,7 +954,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun } if !delta.newAcct.IsEmpty() { - addHash := store.AccountHashBuilderV6(delta.address, &delta.newAcct, protocol.Encode(&delta.newAcct)) + addHash := 
trackerdb.AccountHashBuilderV6(delta.address, &delta.newAcct, protocol.Encode(&delta.newAcct)) added, err = ct.balancesTrie.Add(addHash) if err != nil { return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), delta.address, err) @@ -971,7 +971,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun resDelta := resourcesDeltas.getByIdx(i) addr := resDelta.address if !resDelta.oldResource.Data.IsEmpty() { - deleteHash, err := store.ResourcesHashBuilderV6(&resDelta.oldResource.Data, addr, resDelta.oldResource.Aidx, resDelta.oldResource.Data.UpdateRound, protocol.Encode(&resDelta.oldResource.Data)) + deleteHash, err := trackerdb.ResourcesHashBuilderV6(&resDelta.oldResource.Data, addr, resDelta.oldResource.Aidx, resDelta.oldResource.Data.UpdateRound, protocol.Encode(&resDelta.oldResource.Data)) if err != nil { return err } @@ -987,7 +987,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun } if !resDelta.newResource.IsEmpty() { - addHash, err := store.ResourcesHashBuilderV6(&resDelta.newResource, addr, resDelta.oldResource.Aidx, resDelta.newResource.UpdateRound, protocol.Encode(&resDelta.newResource)) + addHash, err := trackerdb.ResourcesHashBuilderV6(&resDelta.newResource, addr, resDelta.oldResource.Aidx, resDelta.newResource.UpdateRound, protocol.Encode(&resDelta.newResource)) if err != nil { return err } @@ -1012,7 +1012,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun if mv.data != nil && bytes.Equal(mv.oldData, mv.data) { continue // changed back within the delta span } - deleteHash := store.KvHashBuilderV6(key, mv.oldData) + deleteHash := trackerdb.KvHashBuilderV6(key, mv.oldData) deleted, err = ct.balancesTrie.Delete(deleteHash) if err != nil { return fmt.Errorf("failed to delete kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(deleteHash), key, err) @@ -1025,7 +1025,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun } if mv.data != nil { - addHash := store.KvHashBuilderV6(key, mv.data) + addHash := trackerdb.KvHashBuilderV6(key, mv.data) added, err = ct.balancesTrie.Add(addHash) if err != nil { return fmt.Errorf("attempted to add duplicate kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(addHash), key, err) @@ -1083,7 +1083,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()), } - catchpointDataFilePath := filepath.Join(ct.dbDirectory, store.CatchpointDirName) + catchpointDataFilePath := filepath.Join(ct.dbDirectory, trackerdb.CatchpointDirName) catchpointDataFilePath = filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound)) @@ -1101,7 +1101,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account var catchpointWriter *catchpointWriter start := time.Now() ledgerGeneratecatchpointCount.Inc(nil) - err = ct.dbs.TransactionContext(ctx, func(dbCtx context.Context, tx store.TransactionScope) (err error) { + err = ct.dbs.TransactionContext(ctx, func(dbCtx context.Context, tx trackerdb.TransactionScope) (err error) { catchpointWriter, err = makeCatchpointWriter(dbCtx, catchpointDataFilePath, tx, ResourcesPerCatchpointFileChunk) if err != nil { return @@ -1177,7 +1177,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account return catchpointWriter.totalKVs, 
catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil } -func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx store.TransactionScope, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error { +func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err @@ -1194,7 +1194,7 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx store. return err } if ct.balancesTrie == nil { - trie, err := merkletrie.MakeTrie(mc, store.TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) if err != nil { return err } @@ -1213,7 +1213,7 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx store. return err } - info := store.CatchpointFirstStageInfo{ + info := trackerdb.CatchpointFirstStageInfo{ Totals: accountTotals, TotalAccounts: totalAccounts, TotalKVs: totalKVs, @@ -1232,7 +1232,7 @@ func makeCatchpointDataFilePath(accountsRound basics.Round) string { // after a successful insert operation to the database, it would delete up to 2 old entries, as needed. // deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the // database and storage realign. -func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, crw store.CatchpointReaderWriter, round basics.Round, relCatchpointFilePath string, fileSize int64) (err error) { +func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, crw trackerdb.CatchpointReaderWriter, round basics.Round, relCatchpointFilePath string, fileSize int64) (err error) { if ct.catchpointFileHistoryLength != 0 { err = crw.StoreCatchpoint(ctx, round, relCatchpointFilePath, "", fileSize) if err != nil { @@ -1240,7 +1240,7 @@ func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, crw store return } } else { - err = store.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath) if err != nil { ct.log.Warnf("catchpointTracker.recordCatchpointFile() unable to remove file (%s): %v", relCatchpointFilePath, err) return @@ -1255,7 +1255,7 @@ func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, crw store return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err) } for round, fileToDelete := range filesToDelete { - err = store.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, fileToDelete) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(ct.dbDirectory, fileToDelete) if err != nil { return err } @@ -1275,7 +1275,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS ledgerGetcatchpointCount.Inc(nil) // TODO: we need to generalize this, check @cce PoC PR, he has something // somewhat broken for some KVs.. 
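recordCatchpointFile above is the retention point for finished catchpoint files: a non-zero catchpointFileHistoryLength records the file and prunes the oldest entries, while a zero length drops the file outright. A hedged condensation of that branch, built only from calls visible in the diff (the helper name is hypothetical):

package ledger

import (
	"context"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/store/trackerdb"
)

// storeOrDropCatchpoint mirrors the first branch of recordCatchpointFile:
// with no history retention configured, the freshly written file is removed
// from disk instead of being recorded in the database.
func storeOrDropCatchpoint(ctx context.Context, crw trackerdb.CatchpointReaderWriter,
	dbDir string, round basics.Round, relPath string, size int64, historyLen int) error {
	if historyLen == 0 {
		return trackerdb.RemoveSingleCatchpointFileFromDisk(dbDir, relPath)
	}
	// The empty string appears to be the catchpoint label field, left blank
	// here exactly as in the diff above.
	return crw.StoreCatchpoint(ctx, round, relPath, "", size)
}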
- err := ct.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err := ct.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { cr, err := tx.MakeCatchpointReader() if err != nil { return err @@ -1317,7 +1317,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS // if the database doesn't know about that round, see if we have that file anyway: relCatchpointFilePath := - filepath.Join(store.CatchpointDirName, store.MakeCatchpointFilePath(round)) + filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) absCatchpointFilePath := filepath.Join(ct.dbDirectory, relCatchpointFilePath) file, err := os.OpenFile(absCatchpointFilePath, os.O_RDONLY, 0666) if err == nil && file != nil { @@ -1346,7 +1346,7 @@ func (ct *catchpointTracker) catchpointEnabled() bool { // initializeHashes initializes account/resource/kv hashes. // as part of the initialization, it tests if a hash table matches to account base and updates the former. -func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx store.TransactionScope, rnd basics.Round) error { +func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.TransactionScope, rnd basics.Round) error { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err @@ -1375,7 +1375,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx store.Tran return fmt.Errorf("initializeHashes was unable to makeMerkleCommitter: %v", err) } - trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(committer, trackerdb.TrieMemoryConfig) if err != nil { return fmt.Errorf("initializeHashes was unable to MakeTrie: %v", err) } @@ -1470,7 +1470,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx store.Tran if err != nil { return err } - hash := store.KvHashBuilderV6(string(k), v) + hash := trackerdb.KvHashBuilderV6(string(k), v) trieHashCount++ pendingTrieHashes++ added, err := trie.Add(hash) diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index 4fd95c1f8d..0775d36dad 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -39,7 +39,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -93,7 +93,7 @@ func TestGetCatchpointStream(t *testing.T) { filesToCreate := 4 temporaryDirectory := t.TempDir() - catchpointsDirectory := filepath.Join(temporaryDirectory, store.CatchpointDirName) + catchpointsDirectory := filepath.Join(temporaryDirectory, trackerdb.CatchpointDirName) err := os.Mkdir(catchpointsDirectory, 0777) require.NoError(t, err) @@ -101,7 +101,7 @@ func TestGetCatchpointStream(t *testing.T) { // Create the catchpoint files with dummy data for i := 0; i < filesToCreate; i++ { - fileName := filepath.Join(store.CatchpointDirName, fmt.Sprintf("%d.catchpoint", i)) + fileName := filepath.Join(trackerdb.CatchpointDirName, fmt.Sprintf("%d.catchpoint", i)) data := []byte{byte(i), byte(i + 1), byte(i + 2)} err = os.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666) require.NoError(t, err) @@ -127,7 +127,7 @@ 
func TestGetCatchpointStream(t *testing.T) { require.Equal(t, int64(3), len) // File deleted, but record in the database - err = os.Remove(filepath.Join(temporaryDirectory, store.CatchpointDirName, "2.catchpoint")) + err = os.Remove(filepath.Join(temporaryDirectory, trackerdb.CatchpointDirName, "2.catchpoint")) require.NoError(t, err) reader, err = ct.GetCatchpointStream(basics.Round(2)) require.Equal(t, ledgercore.ErrNoEntry{}, err) @@ -173,7 +173,7 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) { dummyCatchpointFiles := make([]string, dummyCatchpointFilesToCreate) for i := 0; i < dummyCatchpointFilesToCreate; i++ { file := fmt.Sprintf("%s%c%d%c%d%cdummy_catchpoint_file-%d", - store.CatchpointDirName, os.PathSeparator, + trackerdb.CatchpointDirName, os.PathSeparator, i/10, os.PathSeparator, i/2, os.PathSeparator, i) @@ -209,11 +209,11 @@ func TestSchemaUpdateDeleteStoredCatchpoints(t *testing.T) { partitiontest.PartitionTest(t) // we don't want to run this test before the binary is compiled against the latest database upgrade schema. - if store.AccountDBVersion < 6 { + if trackerdb.AccountDBVersion < 6 { return } temporaryDirectroy := t.TempDir() - tempCatchpointDir := filepath.Join(temporaryDirectroy, store.CatchpointDirName) + tempCatchpointDir := filepath.Join(temporaryDirectroy, trackerdb.CatchpointDirName) // creating empty catchpoint directories emptyDirPath := path.Join(tempCatchpointDir, "2f", "e1") @@ -250,7 +250,7 @@ func TestSchemaUpdateDeleteStoredCatchpoints(t *testing.T) { _, err = trackerDBInitialize(ml, true, ct.dbDirectory) require.NoError(t, err) - emptyDirs, err := store.GetEmptyDirs(tempCatchpointDir) + emptyDirs, err := trackerdb.GetEmptyDirs(tempCatchpointDir) require.NoError(t, err) onlyTempDirEmpty := len(emptyDirs) == 0 require.Equal(t, onlyTempDirEmpty, true) @@ -302,7 +302,7 @@ func TestRecordCatchpointFile(t *testing.T) { context.Background(), accountsRound, time.Second) require.NoError(t, err) - err = ct.createCatchpoint(context.Background(), accountsRound, round, store.CatchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen}, crypto.Digest{}) + err = ct.createCatchpoint(context.Background(), accountsRound, round, trackerdb.CatchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen}, crypto.Digest{}) require.NoError(t, err) } @@ -310,7 +310,7 @@ func TestRecordCatchpointFile(t *testing.T) { require.NoError(t, err) require.Equal(t, conf.CatchpointFileHistoryLength, numberOfCatchpointFiles) - emptyDirs, err := store.GetEmptyDirs(temporaryDirectory) + emptyDirs, err := trackerdb.GetEmptyDirs(temporaryDirectory) require.NoError(t, err) onlyCatchpointDirEmpty := len(emptyDirs) == 0 || (len(emptyDirs) == 1 && emptyDirs[0] == temporaryDirectory) @@ -332,7 +332,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { ct.initialize(cfg, ".") temporaryDirectroy := b.TempDir() - catchpointsDirectory := filepath.Join(temporaryDirectroy, store.CatchpointDirName) + catchpointsDirectory := filepath.Join(temporaryDirectroy, trackerdb.CatchpointDirName) err := os.Mkdir(catchpointsDirectory, 0777) require.NoError(b, err) @@ -344,7 +344,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { // at this point, the database was created. 
We want to fill the accounts data accountsNumber := 6000000 * b.N - err = ml.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = ml.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err @@ -354,7 +354,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) { var updates compactAccountDeltas for k := 0; i < accountsNumber-5-2 && k < 1024; k++ { addr := ledgertesting.RandomAddress() - acctData := store.BaseAccountData{} + acctData := trackerdb.BaseAccountData{} acctData.MicroAlgos.Raw = 1 updates.upsert(addr, accountDelta{newAcct: acctData}) i++ @@ -566,7 +566,7 @@ func (bt *blockingTracker) prepareCommit(*deferredCommitContext) error { } // commitRound is not used by the blockingTracker -func (bt *blockingTracker) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { +func (bt *blockingTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error { return nil } @@ -853,7 +853,7 @@ func TestFirstStageInfoPruning(t *testing.T) { defer ct.close() temporaryDirectory := t.TempDir() - catchpointsDirectory := filepath.Join(temporaryDirectory, store.CatchpointDirName) + catchpointsDirectory := filepath.Join(temporaryDirectory, trackerdb.CatchpointDirName) err := os.Mkdir(catchpointsDirectory, 0777) require.NoError(t, err) @@ -941,7 +941,7 @@ func TestFirstStagePersistence(t *testing.T) { defer ml.Close() tempDirectory := t.TempDir() - catchpointsDirectory := filepath.Join(tempDirectory, store.CatchpointDirName) + catchpointsDirectory := filepath.Join(tempDirectory, trackerdb.CatchpointDirName) cfg := config.GetDefaultLocal() cfg.CatchpointInterval = 4 @@ -993,7 +993,7 @@ func TestFirstStagePersistence(t *testing.T) { // Insert unfinished first stage record. err = cps2.WriteCatchpointStateUint64( - context.Background(), store.CatchpointStateWritingFirstStageInfo, 1) + context.Background(), trackerdb.CatchpointStateWritingFirstStageInfo, 1) require.NoError(t, err) // Delete the database record. @@ -1017,7 +1017,7 @@ func TestFirstStagePersistence(t *testing.T) { // Check that the unfinished first stage record is deleted. v, err := ct2.catchpointStore.ReadCatchpointStateUint64( - context.Background(), store.CatchpointStateWritingFirstStageInfo) + context.Background(), trackerdb.CatchpointStateWritingFirstStageInfo) require.NoError(t, err) require.Zero(t, v) } @@ -1044,7 +1044,7 @@ func TestSecondStagePersistence(t *testing.T) { defer ml.Close() tempDirectory := t.TempDir() - catchpointsDirectory := filepath.Join(tempDirectory, store.CatchpointDirName) + catchpointsDirectory := filepath.Join(tempDirectory, trackerdb.CatchpointDirName) cfg := config.GetDefaultLocal() cfg.CatchpointInterval = 4 @@ -1058,7 +1058,7 @@ func TestSecondStagePersistence(t *testing.T) { firstStageRound := secondStageRound - basics.Round(protoParams.CatchpointLookback) catchpointDataFilePath := filepath.Join(catchpointsDirectory, makeCatchpointDataFilePath(firstStageRound)) - var firstStageInfo store.CatchpointFirstStageInfo + var firstStageInfo trackerdb.CatchpointFirstStageInfo var catchpointData []byte // Add blocks until the first catchpoint round. @@ -1099,7 +1099,7 @@ func TestSecondStagePersistence(t *testing.T) { // Check that the data file exists. 
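The test fixtures in these hunks follow the same mechanical substitution: account rows become trackerdb.BaseAccountData and first-stage records become trackerdb.CatchpointFirstStageInfo. For instance, the benchmark above seeds accounts roughly like this (a sketch assembled from calls in the diff; the helper name is illustrative):

package ledger

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/store/trackerdb"
	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
)

// makeFundedTestAccount reproduces the benchmark's fixture: a random address
// holding a single microalgo, expressed with the relocated row type.
func makeFundedTestAccount() (basics.Address, trackerdb.BaseAccountData) {
	acctData := trackerdb.BaseAccountData{}
	acctData.MicroAlgos.Raw = 1
	return ledgertesting.RandomAddress(), acctData
}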
catchpointFilePath := - filepath.Join(catchpointsDirectory, store.MakeCatchpointFilePath(secondStageRound)) + filepath.Join(catchpointsDirectory, trackerdb.MakeCatchpointFilePath(secondStageRound)) info, err := os.Stat(catchpointFilePath) require.NoError(t, err) @@ -1351,30 +1351,30 @@ func TestHashContract(t *testing.T) { type testCase struct { genHash func() []byte expectedHex string - expectedHashKind store.HashKind + expectedHashKind trackerdb.HashKind } accountCase := func(genHash func() []byte, expectedHex string) testCase { return testCase{ - genHash, expectedHex, store.AccountHK, + genHash, expectedHex, trackerdb.AccountHK, } } resourceAssetCase := func(genHash func() []byte, expectedHex string) testCase { return testCase{ - genHash, expectedHex, store.AssetHK, + genHash, expectedHex, trackerdb.AssetHK, } } resourceAppCase := func(genHash func() []byte, expectedHex string) testCase { return testCase{ - genHash, expectedHex, store.AppHK, + genHash, expectedHex, trackerdb.AppHK, } } kvCase := func(genHash func() []byte, expectedHex string) testCase { return testCase{ - genHash, expectedHex, store.KvHK, + genHash, expectedHex, trackerdb.KvHK, } } @@ -1383,19 +1383,19 @@ func TestHashContract(t *testing.T) { accounts := []testCase{ accountCase( func() []byte { - b := store.BaseAccountData{ + b := trackerdb.BaseAccountData{ UpdateRound: 1024, } - return store.AccountHashBuilderV6(a, &b, protocol.Encode(&b)) + return trackerdb.AccountHashBuilderV6(a, &b, protocol.Encode(&b)) }, "0000040000c3c39a72c146dc6bcb87b499b63ef730145a8fe4a187c96e9a52f74ef17f54", ), accountCase( func() []byte { - b := store.BaseAccountData{ + b := trackerdb.BaseAccountData{ RewardsBase: 10000, } - return store.AccountHashBuilderV6(a, &b, protocol.Encode(&b)) + return trackerdb.AccountHashBuilderV6(a, &b, protocol.Encode(&b)) }, "0000271000804b58bcc81190c3c7343c1db9c737621ff0438104bdd20a25d12aa4e9b6e5", ), @@ -1404,14 +1404,14 @@ func TestHashContract(t *testing.T) { resourceAssets := []testCase{ resourceAssetCase( func() []byte { - r := store.ResourcesData{ + r := trackerdb.ResourcesData{ Amount: 1000, Decimals: 3, AssetName: "test", Manager: a, } - bytes, err := store.ResourcesHashBuilderV6(&r, a, 7, 1024, protocol.Encode(&r)) + bytes, err := trackerdb.ResourcesHashBuilderV6(&r, a, 7, 1024, protocol.Encode(&r)) require.NoError(t, err) return bytes }, @@ -1422,14 +1422,14 @@ func TestHashContract(t *testing.T) { resourceApps := []testCase{ resourceAppCase( func() []byte { - r := store.ResourcesData{ + r := trackerdb.ResourcesData{ ApprovalProgram: []byte{1, 3, 10, 15}, ClearStateProgram: []byte{15, 10, 3, 1}, LocalStateSchemaNumUint: 2, GlobalStateSchemaNumUint: 2, } - bytes, err := store.ResourcesHashBuilderV6(&r, a, 7, 1024, protocol.Encode(&r)) + bytes, err := trackerdb.ResourcesHashBuilderV6(&r, a, 7, 1024, protocol.Encode(&r)) require.NoError(t, err) return bytes }, @@ -1440,7 +1440,7 @@ func TestHashContract(t *testing.T) { kvs := []testCase{ kvCase( func() []byte { - return store.KvHashBuilderV6("sample key", []byte("sample value")) + return trackerdb.KvHashBuilderV6("sample key", []byte("sample value")) }, "0000000003cca3d1a8d7d724daa445c795ad277a7a64b351b4b9407f738841282f9c348b", ), @@ -1450,12 +1450,12 @@ func TestHashContract(t *testing.T) { for i, tc := range allCases { t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) { h := tc.genHash() - require.Equal(t, byte(tc.expectedHashKind), h[store.HashKindEncodingIndex]) + require.Equal(t, byte(tc.expectedHashKind), h[trackerdb.HashKindEncodingIndex]) 
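// (The byte compared above is the kind tag: hashes produced by the trackerdb
// hash builders carry their HashKind at trackerdb.HashKindEncodingIndex, the
// same invariant countHashes in catchupaccessor.go relies on later in this
// patch.)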
require.Equal(t, tc.expectedHex, hex.EncodeToString(h))
 		})
 	}
 
-	hasTestCoverageForKind := func(hk store.HashKind) bool {
+	hasTestCoverageForKind := func(hk trackerdb.HashKind) bool {
 		for _, c := range allCases {
 			if c.expectedHashKind == hk {
 				return true
@@ -1464,10 +1464,10 @@ func TestHashContract(t *testing.T) {
 		return false
 	}
 
-	require.True(t, strings.HasPrefix(store.HashKind(255).String(), "HashKind("))
+	require.True(t, strings.HasPrefix(trackerdb.HashKind(255).String(), "HashKind("))
 	for i := byte(0); i < 255; i++ {
-		if !strings.HasPrefix(store.HashKind(i).String(), "HashKind(") {
-			require.True(t, hasTestCoverageForKind(store.HashKind(i)), fmt.Sprintf("Missing test coverage for HashKind ordinal value = %d", i))
+		if !strings.HasPrefix(trackerdb.HashKind(i).String(), "HashKind(") {
+			require.True(t, hasTestCoverageForKind(trackerdb.HashKind(i)), fmt.Sprintf("Missing test coverage for HashKind ordinal value = %d", i))
 		}
 	}
 }
@@ -1596,7 +1596,7 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) {
 	ct := newCatchpointTracker(t, ml, conf, ".")
 
 	temporaryDirectory := t.TempDir()
-	catchpointsDirectory := filepath.Join(temporaryDirectory, store.CatchpointDirName)
+	catchpointsDirectory := filepath.Join(temporaryDirectory, trackerdb.CatchpointDirName)
 	err := os.Mkdir(catchpointsDirectory, 0777)
 	require.NoError(t, err)
 	defer os.RemoveAll(catchpointsDirectory)
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index bbb58da34a..f912fbf6f9 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -25,7 +25,7 @@ import (
 	"path/filepath"
 
 	"github.com/algorand/go-algorand/ledger/encoded"
-	"github.com/algorand/go-algorand/ledger/store"
+	"github.com/algorand/go-algorand/ledger/store/trackerdb"
 	"github.com/algorand/go-algorand/protocol"
 )
 
@@ -46,7 +46,7 @@ const (
 // has the option of throttling the CPU utilization in between the calls.
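// (Relevant to this rename: the writer declared below now holds a
// trackerdb.TransactionScope rather than a store.TransactionScope, so its
// chunked reads appear to run against the same transaction that created the
// writer.)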
type catchpointWriter struct { ctx context.Context - tx store.TransactionScope + tx trackerdb.TransactionScope filePath string totalAccounts uint64 totalKVs uint64 @@ -91,7 +91,7 @@ func (chunk catchpointFileChunkV6) empty() bool { return len(chunk.Balances) == 0 && len(chunk.KVs) == 0 } -func makeCatchpointWriter(ctx context.Context, filePath string, tx store.TransactionScope, maxResourcesPerChunk int) (*catchpointWriter, error) { +func makeCatchpointWriter(ctx context.Context, filePath string, tx trackerdb.TransactionScope, maxResourcesPerChunk int) (*catchpointWriter, error) { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return nil, err diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go index fc689d5190..5fd0518bbc 100644 --- a/ledger/catchpointwriter_test.go +++ b/ledger/catchpointwriter_test.go @@ -41,7 +41,7 @@ import ( "github.com/algorand/go-algorand/data/txntest" "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -128,7 +128,7 @@ func TestBasicCatchpointWriter(t *testing.T) { au.close() fileName := filepath.Join(temporaryDirectory, "15.data") - err = ml.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = ml.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { writer, err := makeCatchpointWriter(context.Background(), fileName, tx, ResourcesPerCatchpointFileChunk) if err != nil { return err @@ -183,7 +183,7 @@ func TestBasicCatchpointWriter(t *testing.T) { require.Equal(t, io.EOF, err) } -func testWriteCatchpoint(t *testing.T, rdb store.TrackerStore, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader { +func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader { var totalAccounts uint64 var totalChunks uint64 var biggestChunkLen uint64 @@ -193,7 +193,7 @@ func testWriteCatchpoint(t *testing.T, rdb store.TrackerStore, datapath string, maxResourcesPerChunk = ResourcesPerCatchpointFileChunk } - err := rdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := rdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { writer, err := makeCatchpointWriter(context.Background(), datapath, tx, maxResourcesPerChunk) if err != nil { return err @@ -288,7 +288,7 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) { au.close() catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") - err = ml.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = ml.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { expectedTotalAccounts := uint64(1) totalAccountsWritten := uint64(0) totalResources := 0 @@ -381,7 +381,7 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) { au.close() catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data") - err = ml.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = ml.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { arw, err := 
tx.MakeAccountsReaderWriter() if err != nil { return err @@ -477,7 +477,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { // no errors on read, hashes match ctx := context.Background() - err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx store.TransactionScope) (err error) { + err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return nil @@ -486,7 +486,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { // save the existing hash committer, err := tx.MakeMerkleCommitter(false) require.NoError(t, err) - trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(committer, trackerdb.TrieMemoryConfig) require.NoError(t, err) h1, err := trie.RootHash() @@ -500,7 +500,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { // rebuild the MT committer, err = tx.MakeMerkleCommitter(false) require.NoError(t, err) - trie, err = merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + trie, err = merkletrie.MakeTrie(committer, trackerdb.TrieMemoryConfig) require.NoError(t, err) h, err := trie.RootHash() @@ -540,7 +540,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.NoError(t, err) } -func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess store.TrackerStore, filepath string) *Ledger { +func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess trackerdb.TrackerStore, filepath string) *Ledger { // create a ledger. var initState ledgercore.InitState initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion @@ -592,7 +592,7 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess store. err = accessor.BuildMerkleTrie(context.Background(), nil) require.NoError(t, err) - err = l.trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) error { + err = l.trackerDBs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error { cw, err := tx.MakeCatchpointWriter() if err != nil { return err @@ -602,14 +602,14 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess store. 
}) require.NoError(t, err) - balanceTrieStats := func(db store.TrackerStore) merkletrie.Stats { + balanceTrieStats := func(db trackerdb.TrackerStore) merkletrie.Stats { var stats merkletrie.Stats - err = db.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = db.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { committer, err := tx.MakeMerkleCommitter(false) if err != nil { return err } - trie, err := merkletrie.MakeTrie(committer, store.TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(committer, trackerdb.TrieMemoryConfig) if err != nil { return err } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index d24c3a158b..0a6dd4b5c4 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -33,8 +33,8 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/ledger/store/blockdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -95,19 +95,19 @@ type CatchpointCatchupAccessor interface { } type stagingWriter interface { - writeBalances(context.Context, []store.NormalizedAccountBalance) error - writeCreatables(context.Context, []store.NormalizedAccountBalance) error - writeHashes(context.Context, []store.NormalizedAccountBalance) error + writeBalances(context.Context, []trackerdb.NormalizedAccountBalance) error + writeCreatables(context.Context, []trackerdb.NormalizedAccountBalance) error + writeHashes(context.Context, []trackerdb.NormalizedAccountBalance) error writeKVs(context.Context, []encoded.KVRecordV6) error isShared() bool } type stagingWriterImpl struct { - wdb store.TrackerStore + wdb trackerdb.TrackerStore } -func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { +func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -117,7 +117,7 @@ func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store. 
} func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecordV6) error { - return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -129,15 +129,15 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor for i := 0; i < len(kvrs); i++ { keys[i] = kvrs[i].Key values[i] = kvrs[i].Value - hashes[i] = store.KvHashBuilderV6(string(keys[i]), values[i]) + hashes[i] = trackerdb.KvHashBuilderV6(string(keys[i]), values[i]) } return crw.WriteCatchpointStagingKVs(ctx, keys, values, hashes) }) } -func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error { +func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -147,8 +147,8 @@ func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []stor }) } -func (w *stagingWriterImpl) writeHashes(ctx context.Context, balances []store.NormalizedAccountBalance) error { - return w.wdb.Transaction(func(ctx context.Context, tx store.TransactionScope) error { +func (w *stagingWriterImpl) writeHashes(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -165,7 +165,7 @@ func (w *stagingWriterImpl) isShared() bool { // catchpointCatchupAccessorImpl is the concrete implementation of the CatchpointCatchupAccessor interface type catchpointCatchupAccessorImpl struct { ledger *Ledger - catchpointStore store.CatchpointReaderWriter + catchpointStore trackerdb.CatchpointReaderWriter stagingWriter stagingWriter @@ -229,9 +229,9 @@ func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) Catchpoin // GetState returns the current state of the catchpoint catchup func (c *catchpointCatchupAccessorImpl) GetState(ctx context.Context) (state CatchpointCatchupState, err error) { var istate uint64 - istate, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, store.CatchpointStateCatchupState) + istate, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupState) if err != nil { - return 0, fmt.Errorf("unable to read catchpoint catchup state '%s': %v", store.CatchpointStateCatchupState, err) + return 0, fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupState, err) } state = CatchpointCatchupState(istate) return @@ -242,18 +242,18 @@ func (c *catchpointCatchupAccessorImpl) SetState(ctx context.Context, state Catc if state < CatchpointCatchupStateInactive || state > catchpointCatchupStateLast { return fmt.Errorf("invalid catchpoint catchup state provided : %d", state) } - err = c.catchpointStore.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupState, uint64(state)) + err = c.catchpointStore.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupState, uint64(state)) if err != nil { - return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", 
store.CatchpointStateCatchupState, err) + return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupState, err) } return } // GetLabel returns the current catchpoint catchup label func (c *catchpointCatchupAccessorImpl) GetLabel(ctx context.Context) (label string, err error) { - label, err = c.catchpointStore.ReadCatchpointStateString(ctx, store.CatchpointStateCatchupLabel) + label, err = c.catchpointStore.ReadCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel) if err != nil { - return "", fmt.Errorf("unable to read catchpoint catchup state '%s': %v", store.CatchpointStateCatchupLabel, err) + return "", fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupLabel, err) } return } @@ -265,9 +265,9 @@ func (c *catchpointCatchupAccessorImpl) SetLabel(ctx context.Context, label stri if err != nil { return } - err = c.catchpointStore.WriteCatchpointStateString(ctx, store.CatchpointStateCatchupLabel, label) + err = c.catchpointStore.WriteCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel, label) if err != nil { - return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupLabel, err) + return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupLabel, err) } return } @@ -279,7 +279,7 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context } start := time.Now() ledgerResetstagingbalancesCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) { crw, err := tx.MakeCatchpointWriter() if err != nil { return err @@ -290,23 +290,23 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context return fmt.Errorf("unable to reset catchpoint catchup balances : %v", err) } if !newCatchup { - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBalancesRound, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound, 0) if err != nil { return err } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound, 0) if err != nil { return err } - err = crw.WriteCatchpointStateString(ctx, store.CatchpointStateCatchupLabel, "") + err = crw.WriteCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel, "") if err != nil { return err } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupState, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupState, 0) if err != nil { - return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupState, err) + return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupState, err) } } return @@ -372,7 +372,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex // TotalAccounts, TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound start := time.Now() ledgerProcessstagingcontentCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) { cw, err := tx.MakeCatchpointWriter() if err != nil { return err @@ -383,14 
+383,14 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
 		return err
 	}
 
-	err = cw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound, uint64(fileHeader.BlocksRound))
+	err = cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound, uint64(fileHeader.BlocksRound))
 	if err != nil {
-		return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupBlockRound, err)
+		return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupBlockRound, err)
 	}
 	if fileHeader.Version == CatchpointFileVersionV6 {
-		err = cw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound))
+		err = cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound))
 		if err != nil {
-			return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupHashRound, err)
+			return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupHashRound, err)
 		}
 	}
 	err = aw.AccountsPutTotals(fileHeader.Totals, true)
@@ -418,7 +418,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
 	start := time.Now()
 	ledgerProcessstagingbalancesCount.Inc(nil)
 
-	var normalizedAccountBalances []store.NormalizedAccountBalance
+	var normalizedAccountBalances []trackerdb.NormalizedAccountBalance
 	var expectingMoreEntries []bool
 	var chunkKVs []encoded.KVRecordV6
 
@@ -654,7 +654,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
 // The function is _not_ a general purpose way to count hashes by hash kind.
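// (countHashes below makes that contract concrete: the kind byte sits at
// trackerdb.HashKindEncodingIndex, kv hashes are tagged trackerdb.KvHK, and
// every other kind is counted as an account hash.)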
func countHashes(hashes [][]byte) (accountCount, kvCount uint64) { for _, hash := range hashes { - if hash[store.HashKindEncodingIndex] == byte(store.KvHK) { + if hash[trackerdb.HashKindEncodingIndex] == byte(trackerdb.KvHK) { kvCount++ } else { accountCount++ @@ -665,8 +665,8 @@ func countHashes(hashes [][]byte) (accountCount, kvCount uint64) { // BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error) { - trackerdb := c.ledger.trackerDB() - err = trackerdb.Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + dbs := c.ledger.trackerDB() + err = dbs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) { crw, err := tx.MakeCatchpointWriter() if err != nil { return err @@ -697,7 +697,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro defer wg.Done() defer close(writerQueue) - err := trackerdb.Snapshot(func(transactionCtx context.Context, tx store.SnapshotScope) (err error) { + err := dbs.Snapshot(func(transactionCtx context.Context, tx trackerdb.SnapshotScope) (err error) { it := tx.MakeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize) var hashes [][]byte for { @@ -733,16 +733,16 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro uncommitedHashesCount := 0 keepWriting := true accountHashesWritten, kvHashesWritten := uint64(0), uint64(0) - var mc store.MerkleCommitter + var mc trackerdb.MerkleCommitter - err := trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { + err := dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) { // create the merkle trie for the balances mc, err = tx.MakeMerkleCommitter(true) if err != nil { return } - trie, err = merkletrie.MakeTrie(mc, store.TrieMemoryConfig) + trie, err = merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) return err }) if err != nil { @@ -764,7 +764,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro continue } - err = trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { + err = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) { mc, err = tx.MakeMerkleCommitter(true) if err != nil { return @@ -774,7 +774,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro var added bool added, err = trie.Add(hash) if !added { - return fmt.Errorf("CatchpointCatchupAccessorImpl::BuildMerkleTrie: The provided catchpoint file contained the same account more than once. hash = '%s' hash kind = %s", hex.EncodeToString(hash), store.HashKind(hash[store.HashKindEncodingIndex])) + return fmt.Errorf("CatchpointCatchupAccessorImpl::BuildMerkleTrie: The provided catchpoint file contained the same account more than once. 
hash = '%s' hash kind = %s", hex.EncodeToString(hash), trackerdb.HashKind(hash[trackerdb.HashKindEncodingIndex])) } if err != nil { return @@ -794,7 +794,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro } if uncommitedHashesCount >= trieRebuildCommitFrequency { - err = trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { + err = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) { // set a long 30-second window for the evict before warning is generated. _, err = tx.ResetTransactionWarnDeadline(transactionCtx, time.Now().Add(30*time.Second)) if err != nil { @@ -827,7 +827,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro return } if uncommitedHashesCount > 0 { - err = trackerdb.Transaction(func(transactionCtx context.Context, tx store.TransactionScope) (err error) { + err = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) { // set a long 30-second window for the evict before warning is generated. _, err = tx.ResetTransactionWarnDeadline(transactionCtx, time.Now().Add(30*time.Second)) if err != nil { @@ -862,9 +862,9 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro // GetCatchupBlockRound returns the latest block round matching the current catchpoint func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) { var iRound uint64 - iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound) + iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound) if err != nil { - return 0, fmt.Errorf("unable to read catchpoint catchup state '%s': %v", store.CatchpointStateCatchpointLookback, err) + return 0, fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchpointLookback, err) } return basics.Round(iRound), nil } @@ -876,21 +876,21 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl var totals ledgercore.AccountTotals var catchpointLabel string - catchpointLabel, err = c.catchpointStore.ReadCatchpointStateString(ctx, store.CatchpointStateCatchupLabel) + catchpointLabel, err = c.catchpointStore.ReadCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel) if err != nil { - return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", store.CatchpointStateCatchupLabel, err) + return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupLabel, err) } var iRound uint64 - iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound) + iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound) if err != nil { - return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", store.CatchpointStateCatchupBlockRound, err) + return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupBlockRound, err) } blockRound = basics.Round(iRound) start := time.Now() ledgerVerifycatchpointCount.Inc(nil) - err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { arw, err := tx.MakeAccountsReaderWriter() if err != nil { 
return err @@ -902,7 +902,7 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl return fmt.Errorf("unable to make MerkleCommitter: %v", err0) } var trie *merkletrie.Trie - trie, err = merkletrie.MakeTrie(mc, store.TrieMemoryConfig) + trie, err = merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) if err != nil { return fmt.Errorf("unable to make trie: %v", err) } @@ -946,15 +946,15 @@ func (c *catchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, balancesRound := blk.Round() - basics.Round(catchpointLookback) start := time.Now() ledgerStorebalancesroundCount.Inc(nil) - err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx store.BatchScope) (err error) { + err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) { crw, err := tx.MakeCatchpointWriter() if err != nil { return err } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBalancesRound, uint64(balancesRound)) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound, uint64(balancesRound)) if err != nil { - return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupBalancesRound, err) + return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupBalancesRound, err) } return }) @@ -1046,7 +1046,7 @@ func (c *catchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (er func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err error) { start := time.Now() ledgerCatchpointFinishBalsCount.Inc(nil) - err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { crw, err := tx.MakeCatchpointReaderWriter() if err != nil { return err @@ -1060,12 +1060,12 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err var balancesRound, hashRound uint64 var totals ledgercore.AccountTotals - balancesRound, err = crw.ReadCatchpointStateUint64(ctx, store.CatchpointStateCatchupBalancesRound) + balancesRound, err = crw.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound) if err != nil { return err } - hashRound, err = crw.ReadCatchpointStateUint64(ctx, store.CatchpointStateCatchupHashRound) + hashRound, err = crw.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupHashRound) if err != nil { return err } @@ -1093,7 +1093,7 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } { - tp := store.TrackerDBParams{ + tp := trackerdb.Params{ InitAccounts: c.ledger.GenesisAccounts(), InitProto: c.ledger.GenesisProtoVersion(), GenesisHash: c.ledger.GenesisHash(), @@ -1123,31 +1123,31 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBalancesRound, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound, 0) if err != nil { return err } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupBlockRound, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound, 0) if err != nil { return err } - err = crw.WriteCatchpointStateString(ctx, 
store.CatchpointStateCatchupLabel, "") + err = crw.WriteCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel, "") if err != nil { return err } if hashRound != 0 { - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupHashRound, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupHashRound, 0) if err != nil { return err } } - err = crw.WriteCatchpointStateUint64(ctx, store.CatchpointStateCatchupState, 0) + err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupState, 0) if err != nil { - return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", store.CatchpointStateCatchupState, err) + return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupState, err) } return diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go index bb4d8aa163..5cbf169874 100644 --- a/ledger/catchupaccessor_test.go +++ b/ledger/catchupaccessor_test.go @@ -34,7 +34,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -61,7 +61,7 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][] chunk.Balances = make([]encoded.BalanceRecordV6, chunkSize) for i := uint64(0); i < chunkSize; i++ { var randomAccount encoded.BalanceRecordV6 - accountData := store.BaseAccountData{} + accountData := trackerdb.BaseAccountData{} accountData.MicroAlgos.Raw = crypto.RandUint63() randomAccount.AccountData = protocol.Encode(&accountData) // have the first account be the zero address @@ -409,7 +409,7 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) { var balances catchpointFileChunkV6 balances.Balances = make([]encoded.BalanceRecordV6, 1) var randomAccount encoded.BalanceRecordV6 - accountData := store.BaseAccountData{} + accountData := trackerdb.BaseAccountData{} accountData.MicroAlgos.Raw = crypto.RandUint63() accountData.TotalAppParams = 1 randomAccount.AccountData = protocol.Encode(&accountData) @@ -428,11 +428,11 @@ type testStagingWriter struct { hashes map[[4 + crypto.DigestSize]byte]int } -func (w *testStagingWriter) writeBalances(ctx context.Context, balances []store.NormalizedAccountBalance) error { +func (w *testStagingWriter) writeBalances(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { return nil } -func (w *testStagingWriter) writeCreatables(ctx context.Context, balances []store.NormalizedAccountBalance) error { +func (w *testStagingWriter) writeCreatables(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { return nil } @@ -440,7 +440,7 @@ func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encoded.KVRecor return nil } -func (w *testStagingWriter) writeHashes(ctx context.Context, balances []store.NormalizedAccountBalance) error { +func (w *testStagingWriter) writeHashes(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { for _, bal := range balances { for _, hash := range bal.AccountHashes { var key [4 + crypto.DigestSize]byte @@ -477,8 +477,8 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) { } catchpointAccessor := makeTestCatchpointCatchupAccessor(&l, log, writer) - 
randomSimpleBaseAcct := func() store.BaseAccountData { - accountData := store.BaseAccountData{ + randomSimpleBaseAcct := func() trackerdb.BaseAccountData { + accountData := trackerdb.BaseAccountData{ RewardsBase: crypto.RandUint63(), MicroAlgos: basics.MicroAlgos{Raw: crypto.RandUint63()}, AuthAddr: ledgertesting.RandomAddress(), @@ -486,7 +486,7 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) { return accountData } - encodedBalanceRecordFromBase := func(addr basics.Address, base store.BaseAccountData, resources map[uint64]msgp.Raw, more bool) encoded.BalanceRecordV6 { + encodedBalanceRecordFromBase := func(addr basics.Address, base trackerdb.BaseAccountData, resources map[uint64]msgp.Raw, more bool) encoded.BalanceRecordV6 { ebr := encoded.BalanceRecordV6{ Address: addr, AccountData: protocol.Encode(&base), @@ -518,7 +518,7 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) { acctX.TotalAssets = acctXNumRes acctXRes1 := make(map[uint64]msgp.Raw, acctXNumRes/2+1) acctXRes2 := make(map[uint64]msgp.Raw, acctXNumRes/2) - emptyRes := store.ResourcesData{ResourceFlags: store.ResourceFlagsEmptyAsset} + emptyRes := trackerdb.ResourcesData{ResourceFlags: trackerdb.ResourceFlagsEmptyAsset} emptyResEnc := protocol.Encode(&emptyRes) for i := 0; i < acctXNumRes; i++ { if i <= acctXNumRes/2 { diff --git a/ledger/ledger.go b/ledger/ledger.go index 1064ad54f3..50222d2f9e 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -35,8 +35,9 @@ import ( "github.com/algorand/go-algorand/ledger/apply" "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" "github.com/algorand/go-algorand/ledger/store/blockdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -49,7 +50,7 @@ type Ledger struct { // Database connections to the DBs storing blocks and tracker state. // We use potentially different databases to avoid SQLite contention // during catchup. - trackerDBs store.TrackerStore + trackerDBs trackerdb.TrackerStore blockDBs db.Pair // blockQ is the buffer of added blocks that will be flushed to @@ -273,7 +274,7 @@ func (l *Ledger) verifyMatchingGenesisHash() (err error) { return } -func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs store.TrackerStore, blockDBs db.Pair, err error) { +func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs trackerdb.TrackerStore, blockDBs db.Pair, err error) { // Backwards compatibility: we used to store both blocks and tracker // state in a single SQLite db file. 
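// With this split, the tracker state is opened through the trackerdb API and its
// sqlite driver (sqlitedriver.OpenTrackerSQLStore below), while blocks keep using
// the generic db.Pair, so the two stores avoid contending on a single SQLite file.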
 	var trackerDBFilename string
@@ -297,7 +298,7 @@ func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs store.TrackerStor
 	outErr := make(chan error, 2)
 	go func() {
 		var lerr error
-		trackerDBs, lerr = store.OpenTrackerSQLStore(trackerDBFilename, dbMem)
+		trackerDBs, lerr = sqlitedriver.OpenTrackerSQLStore(trackerDBFilename, dbMem)
 		outErr <- lerr
 	}()
@@ -765,7 +766,7 @@ func (l *Ledger) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error)
 }

 // ledgerForTracker methods
-func (l *Ledger) trackerDB() store.TrackerStore {
+func (l *Ledger) trackerDB() trackerdb.TrackerStore {
 	return l.trackerDBs
 }
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 694d159eba..54a2f60fa7 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -41,7 +41,7 @@ import (
 	"github.com/algorand/go-algorand/data/transactions/logic"
 	"github.com/algorand/go-algorand/data/transactions/verify"
 	"github.com/algorand/go-algorand/ledger/ledgercore"
-	"github.com/algorand/go-algorand/ledger/store"
+	"github.com/algorand/go-algorand/ledger/store/trackerdb"
 	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
 	"github.com/algorand/go-algorand/logging"
 	"github.com/algorand/go-algorand/protocol"
@@ -2285,7 +2285,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) {

 	// reset tables and re-init again, similarly to the catchpoint apply code
 	// since the ledger has only genesis accounts, this recreates them
-	err = l.trackerDBs.Batch(func(ctx context.Context, tx store.BatchScope) error {
+	err = l.trackerDBs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error {
 		arw, err := tx.MakeAccountsWriter()
 		if err != nil {
 			return err
@@ -2295,7 +2295,7 @@
 		if err0 != nil {
 			return err0
 		}
-		tp := store.TrackerDBParams{
+		tp := trackerdb.Params{
 			InitAccounts: l.GenesisAccounts(),
 			InitProto:    l.GenesisProtoVersion(),
 			GenesisHash:  l.GenesisHash(),
@@ -2428,10 +2428,10 @@ int %d // 10001000
 func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
 	partitiontest.PartitionTest(t)
-	prevAccountDBVersion := store.AccountDBVersion
-	store.AccountDBVersion = 6
+	prevAccountDBVersion := trackerdb.AccountDBVersion
+	trackerdb.AccountDBVersion = 6
 	defer func() {
-		store.AccountDBVersion = prevAccountDBVersion
+		trackerdb.AccountDBVersion = prevAccountDBVersion
 	}()
 	dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
 	testProtocolVersion := protocol.ConsensusVersion("test-protocol-migrate-shrink-deltas")
@@ -2454,7 +2454,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
 		blockDB.Close()
 	}()
 	// create tables so online accounts can still be written
-	err = trackerDB.Batch(func(ctx context.Context, tx store.BatchScope) error {
+	err = trackerDB.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error {
 		if err := tx.AccountsUpdateSchemaTest(ctx); err != nil {
 			return err
 		}
@@ -2629,7 +2629,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
 	l.Close()

 	cfg.MaxAcctLookback = shorterLookback
-	store.AccountDBVersion = 7
+	trackerdb.AccountDBVersion = 7
 	// delete tables since we want to check they can be made from other data
 	err = trackerDB.ResetToV6Test(context.Background())
 	require.NoError(t, err)
diff --git a/ledger/lruaccts.go b/ledger/lruaccts.go
index a6962cab81..9351604de2 100644
--- a/ledger/lruaccts.go
+++ b/ledger/lruaccts.go
@@ -18,7 +18,7 @@ package ledger

 import (
 	"github.com/algorand/go-algorand/data/basics"
-	"github.com/algorand/go-algorand/ledger/store"
+	"github.com/algorand/go-algorand/ledger/store/trackerdb"
"github.com/algorand/go-algorand/logging" ) @@ -35,7 +35,7 @@ type lruAccounts struct { // pendingAccounts are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these, // it would call flushPendingWrites and these would be merged into the accounts/accountsList // if lruAccounts is set with pendingWrites 0, then pendingAccounts is nil - pendingAccounts chan store.PersistedAccountData + pendingAccounts chan trackerdb.PersistedAccountData // log interface; used for logging the threshold event. log logging.Logger // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingAccounts entries @@ -52,7 +52,7 @@ func (m *lruAccounts) init(log logging.Logger, pendingWrites int, pendingWritesW if pendingWrites > 0 { m.accountsList = newPersistedAccountList().allocateFreeNodes(pendingWrites) m.accounts = make(map[basics.Address]*persistedAccountDataListNode, pendingWrites) - m.pendingAccounts = make(chan store.PersistedAccountData, pendingWrites) + m.pendingAccounts = make(chan trackerdb.PersistedAccountData, pendingWrites) m.notFound = make(map[basics.Address]struct{}, pendingWrites) m.pendingNotFound = make(chan basics.Address, pendingWrites) } @@ -62,11 +62,11 @@ func (m *lruAccounts) init(log logging.Logger, pendingWrites int, pendingWritesW // read the persistedAccountData object that the lruAccounts has for the given address. // thread locking semantics : read lock -func (m *lruAccounts) read(addr basics.Address) (data store.PersistedAccountData, has bool) { +func (m *lruAccounts) read(addr basics.Address) (data trackerdb.PersistedAccountData, has bool) { if el := m.accounts[addr]; el != nil { return *el.Value, true } - return store.PersistedAccountData{}, false + return trackerdb.PersistedAccountData{}, false } // readNotFound returns whether we have attempted to read this address but it did not exist in the db. @@ -109,7 +109,7 @@ outer2: // writePending write a single persistedAccountData entry to the pendingAccounts buffer. // the function doesn't block, and in case of a buffer overflow the entry would not be added. // thread locking semantics : no lock is required. -func (m *lruAccounts) writePending(acct store.PersistedAccountData) { +func (m *lruAccounts) writePending(acct trackerdb.PersistedAccountData) { select { case m.pendingAccounts <- acct: default: @@ -131,7 +131,7 @@ func (m *lruAccounts) writeNotFoundPending(addr basics.Address) { // version of what's already on the cache or not. In all cases, the entry is going // to be promoted to the front of the list. 
// thread locking semantics : write lock -func (m *lruAccounts) write(acctData store.PersistedAccountData) { +func (m *lruAccounts) write(acctData trackerdb.PersistedAccountData) { if m.accounts == nil { return } diff --git a/ledger/lruaccts_test.go b/ledger/lruaccts_test.go index d5f8fdcab9..e77a0c3bf9 100644 --- a/ledger/lruaccts_test.go +++ b/ledger/lruaccts_test.go @@ -25,7 +25,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -39,11 +39,11 @@ func TestLRUBasicAccounts(t *testing.T) { accountsNum := 50 // write 50 accounts for i := 0; i < accountsNum; i++ { - acct := store.PersistedAccountData{ + acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.write(acct) } @@ -64,7 +64,7 @@ func TestLRUBasicAccounts(t *testing.T) { addr := basics.Address(crypto.Hash([]byte{byte(i)})) acct, has := baseAcct.read(addr) require.False(t, has) - require.Equal(t, store.PersistedAccountData{}, acct) + require.Equal(t, trackerdb.PersistedAccountData{}, acct) } baseAcct.prune(accountsNum / 2) @@ -83,7 +83,7 @@ func TestLRUBasicAccounts(t *testing.T) { require.Equal(t, int64(i), acct.Rowid) } else { require.False(t, has) - require.Equal(t, store.PersistedAccountData{}, acct) + require.Equal(t, trackerdb.PersistedAccountData{}, acct) } } } @@ -99,11 +99,11 @@ func TestLRUAccountsDisable(t *testing.T) { for i := 0; i < accountsNum; i++ { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) - acct := store.PersistedAccountData{ + acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) }(i) @@ -113,11 +113,11 @@ func TestLRUAccountsDisable(t *testing.T) { require.Empty(t, baseAcct.accounts) for i := 0; i < accountsNum; i++ { - acct := store.PersistedAccountData{ + acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.write(acct) } @@ -134,11 +134,11 @@ func TestLRUAccountsPendingWrites(t *testing.T) { for i := 0; i < accountsNum; i++ { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) - acct := store.PersistedAccountData{ + acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) }(i) @@ -186,11 +186,11 @@ func TestLRUAccountsPendingWritesWarning(t *testing.T) { baseAcct.init(log, pendingWritesBuffer, 
pendingWritesThreshold) for j := 0; j < 50; j++ { for i := 0; i < j; i++ { - acct := store.PersistedAccountData{ + acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) } @@ -212,11 +212,11 @@ func TestLRUAccountsOmittedPendingWrites(t *testing.T) { baseAcct.init(log, pendingWritesBuffer, pendingWritesThreshold) for i := 0; i < pendingWritesBuffer*2; i++ { - acct := store.PersistedAccountData{ + acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) } @@ -239,7 +239,7 @@ func TestLRUAccountsOmittedPendingWrites(t *testing.T) { addr := basics.Address(crypto.Hash([]byte{byte(i)})) acct, has := baseAcct.read(addr) require.False(t, has) - require.Equal(t, store.PersistedAccountData{}, acct) + require.Equal(t, trackerdb.PersistedAccountData{}, acct) } } @@ -252,7 +252,7 @@ func BenchmarkLRUAccountsWrite(b *testing.B) { benchLruWrite(b, fillerAccounts, accounts) } -func benchLruWrite(b *testing.B, fillerAccounts []store.PersistedAccountData, accounts []store.PersistedAccountData) { +func benchLruWrite(b *testing.B, fillerAccounts []trackerdb.PersistedAccountData, accounts []trackerdb.PersistedAccountData) { b.ResetTimer() b.StopTimer() var baseAcct lruAccounts @@ -268,26 +268,26 @@ func benchLruWrite(b *testing.B, fillerAccounts []store.PersistedAccountData, ac } } -func fillLRUAccounts(baseAcct lruAccounts, fillerAccounts []store.PersistedAccountData) lruAccounts { +func fillLRUAccounts(baseAcct lruAccounts, fillerAccounts []trackerdb.PersistedAccountData) lruAccounts { for _, account := range fillerAccounts { baseAcct.write(account) } return baseAcct } -func generatePersistedAccountData(startRound, endRound int) []store.PersistedAccountData { - accounts := make([]store.PersistedAccountData, endRound-startRound) +func generatePersistedAccountData(startRound, endRound int) []trackerdb.PersistedAccountData { + accounts := make([]trackerdb.PersistedAccountData, endRound-startRound) buffer := make([]byte, 4) for i := startRound; i < endRound; i++ { binary.BigEndian.PutUint32(buffer, uint32(i)) digest := crypto.Hash(buffer) - accounts[i-startRound] = store.PersistedAccountData{ + accounts[i-startRound] = trackerdb.PersistedAccountData{ Addr: basics.Address(digest), Round: basics.Round(i + startRound), Rowid: int64(i), - AccountData: store.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } } return accounts diff --git a/ledger/lrukv.go b/ledger/lrukv.go index 420f87f5dc..8955d3d2bc 100644 --- a/ledger/lrukv.go +++ b/ledger/lrukv.go @@ -17,13 +17,13 @@ package ledger import ( - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" ) //msgp:ignore cachedKVData type cachedKVData struct { - store.PersistedKVData + trackerdb.PersistedKVData // kv key key string @@ -67,11 +67,11 @@ func (m *lruKV) init(log logging.Logger, pendingWrites int, 
pendingWritesWarnThr // read the persistedKVData object that the lruKV has for the given key. // thread locking semantics : read lock -func (m *lruKV) read(key string) (data store.PersistedKVData, has bool) { +func (m *lruKV) read(key string) (data trackerdb.PersistedKVData, has bool) { if el := m.kvs[key]; el != nil { return el.Value.PersistedKVData, true } - return store.PersistedKVData{}, false + return trackerdb.PersistedKVData{}, false } // flushPendingWrites flushes the pending writes to the main lruKV cache. @@ -94,7 +94,7 @@ func (m *lruKV) flushPendingWrites() { // writePending write a single persistedKVData entry to the pendingKVs buffer. // the function doesn't block, and in case of a buffer overflow the entry would not be added. // thread locking semantics : no lock is required. -func (m *lruKV) writePending(kv store.PersistedKVData, key string) { +func (m *lruKV) writePending(kv trackerdb.PersistedKVData, key string) { select { case m.pendingKVs <- cachedKVData{PersistedKVData: kv, key: key}: default: @@ -106,7 +106,7 @@ func (m *lruKV) writePending(kv store.PersistedKVData, key string) { // version of what's already on the cache or not. In all cases, the entry is going // to be promoted to the front of the list. // thread locking semantics : write lock -func (m *lruKV) write(kvData store.PersistedKVData, key string) { +func (m *lruKV) write(kvData trackerdb.PersistedKVData, key string) { if m.kvs == nil { return } diff --git a/ledger/lrukv_test.go b/ledger/lrukv_test.go index ce0eb02c03..0b0347e240 100644 --- a/ledger/lrukv_test.go +++ b/ledger/lrukv_test.go @@ -25,7 +25,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -40,7 +40,7 @@ func TestLRUBasicKV(t *testing.T) { // write 50 KVs for i := 0; i < kvNum; i++ { kvValue := fmt.Sprintf("kv %d value", i) - kv := store.PersistedKVData{ + kv := trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: basics.Round(i), } @@ -59,7 +59,7 @@ func TestLRUBasicKV(t *testing.T) { for i := kvNum; i < kvNum*2; i++ { kv, has := baseKV.read(fmt.Sprintf("key%d", i)) require.False(t, has) - require.Equal(t, store.PersistedKVData{}, kv) + require.Equal(t, trackerdb.PersistedKVData{}, kv) } baseKV.prune(kvNum / 2) @@ -75,7 +75,7 @@ func TestLRUBasicKV(t *testing.T) { require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.Value)) } else { require.False(t, has) - require.Equal(t, store.PersistedKVData{}, kv) + require.Equal(t, trackerdb.PersistedKVData{}, kv) } } } @@ -92,7 +92,7 @@ func TestLRUKVDisable(t *testing.T) { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) kvValue := fmt.Sprintf("kv %d value", i) - kv := store.PersistedKVData{ + kv := trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: basics.Round(i), } @@ -105,7 +105,7 @@ func TestLRUKVDisable(t *testing.T) { for i := 0; i < kvNum; i++ { kvValue := fmt.Sprintf("kv %d value", i) - kv := store.PersistedKVData{ + kv := trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: basics.Round(i), } @@ -126,7 +126,7 @@ func TestLRUKVPendingWrites(t *testing.T) { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) kvValue := fmt.Sprintf("kv %d value", i) - kv := store.PersistedKVData{ + kv := trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: 
basics.Round(i), } @@ -177,7 +177,7 @@ func TestLRUKVPendingWritesWarning(t *testing.T) { for j := 0; j < 50; j++ { for i := 0; i < j; i++ { kvValue := fmt.Sprintf("kv %d value", i) - kv := store.PersistedKVData{ + kv := trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: basics.Round(i), } @@ -202,7 +202,7 @@ func TestLRUKVOmittedPendingWrites(t *testing.T) { for i := 0; i < pendingWritesBuffer*2; i++ { kvValue := fmt.Sprintf("kv %d value", i) - kv := store.PersistedKVData{ + kv := trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: basics.Round(i), } @@ -223,7 +223,7 @@ func TestLRUKVOmittedPendingWrites(t *testing.T) { for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ { kv, has := baseKV.read(fmt.Sprintf("key%d", i)) require.False(t, has) - require.Equal(t, store.PersistedKVData{}, kv) + require.Equal(t, trackerdb.PersistedKVData{}, kv) } } @@ -265,7 +265,7 @@ func generatePersistedKVData(startRound, endRound int) []cachedKVData { kvValue := fmt.Sprintf("kv %d value", i) kvs[i-startRound] = cachedKVData{ - PersistedKVData: store.PersistedKVData{ + PersistedKVData: trackerdb.PersistedKVData{ Value: []byte(kvValue), Round: basics.Round(i + startRound), }, diff --git a/ledger/lruonlineaccts.go b/ledger/lruonlineaccts.go index cc8917bf57..40f08917b2 100644 --- a/ledger/lruonlineaccts.go +++ b/ledger/lruonlineaccts.go @@ -18,7 +18,7 @@ package ledger import ( "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" ) @@ -35,7 +35,7 @@ type lruOnlineAccounts struct { // pendingAccounts are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these, // it would call flushPendingWrites and these would be merged into the accounts/accountsList // if lruOnlineAccounts is set with pendingWrites 0, then pendingAccounts is nil - pendingAccounts chan store.PersistedOnlineAccountData + pendingAccounts chan trackerdb.PersistedOnlineAccountData // log interface; used for logging the threshold event. log logging.Logger // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingAccounts entries @@ -48,7 +48,7 @@ func (m *lruOnlineAccounts) init(log logging.Logger, pendingWrites int, pendingW if pendingWrites > 0 { m.accountsList = newPersistedOnlineAccountList().allocateFreeNodes(pendingWrites) m.accounts = make(map[basics.Address]*persistedOnlineAccountDataListNode, pendingWrites) - m.pendingAccounts = make(chan store.PersistedOnlineAccountData, pendingWrites) + m.pendingAccounts = make(chan trackerdb.PersistedOnlineAccountData, pendingWrites) } m.log = log m.pendingWritesWarnThreshold = pendingWritesWarnThreshold @@ -56,11 +56,11 @@ func (m *lruOnlineAccounts) init(log logging.Logger, pendingWrites int, pendingW // read the persistedAccountData object that the lruAccounts has for the given address. // thread locking semantics : read lock -func (m *lruOnlineAccounts) read(addr basics.Address) (data store.PersistedOnlineAccountData, has bool) { +func (m *lruOnlineAccounts) read(addr basics.Address) (data trackerdb.PersistedOnlineAccountData, has bool) { if el := m.accounts[addr]; el != nil { return *el.Value, true } - return store.PersistedOnlineAccountData{}, false + return trackerdb.PersistedOnlineAccountData{}, false } // flushPendingWrites flushes the pending writes to the main lruAccounts cache. 
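All of the lru caches touched by this rename (lruAccounts, lruKV, lruOnlineAccounts, lruResources) share one concurrency pattern, so it is worth spelling out once. The sketch below is illustrative only, not code from the patch: a bounded channel absorbs writes without taking a write-lock, a flush step merges them into the map, and overflow is dropped on purpose, since the database stays authoritative and a lost cache write only costs a future cache miss.

package main

import "fmt"

type entry struct {
	key string
	val int
}

// lruSketch models just the pending-writes machinery of the ledger's lru caches.
type lruSketch struct {
	data    map[string]int
	pending chan entry
}

// writePending mirrors the non-blocking writePending methods above: no lock is
// taken, and when the buffer is full the entry is silently skipped.
func (c *lruSketch) writePending(e entry) {
	select {
	case c.pending <- e:
	default: // buffer full; a later read will simply fall back to the db
	}
}

// flushPendingWrites drains whatever is queued into the cache map; in the real
// code this runs while the tracker holds its write lock.
func (c *lruSketch) flushPendingWrites() {
	for {
		select {
		case e := <-c.pending:
			c.data[e.key] = e.val
		default:
			return
		}
	}
}

func main() {
	c := &lruSketch{data: map[string]int{}, pending: make(chan entry, 2)}
	c.writePending(entry{"a", 1})
	c.writePending(entry{"b", 2})
	c.writePending(entry{"c", 3}) // dropped: the buffer holds only two entries
	c.flushPendingWrites()
	fmt.Println(len(c.data)) // prints 2
}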
@@ -83,7 +83,7 @@ func (m *lruOnlineAccounts) flushPendingWrites() { // writePending write a single persistedOnlineAccountData entry to the pendingAccounts buffer. // the function doesn't block, and in case of a buffer overflow the entry would not be added. // thread locking semantics : no lock is required. -func (m *lruOnlineAccounts) writePending(acct store.PersistedOnlineAccountData) { +func (m *lruOnlineAccounts) writePending(acct trackerdb.PersistedOnlineAccountData) { select { case m.pendingAccounts <- acct: default: @@ -95,7 +95,7 @@ func (m *lruOnlineAccounts) writePending(acct store.PersistedOnlineAccountData) // version of what's already on the cache or not. In all cases, the entry is going // to be promoted to the front of the list. // thread locking semantics : write lock -func (m *lruOnlineAccounts) write(acctData store.PersistedOnlineAccountData) { +func (m *lruOnlineAccounts) write(acctData trackerdb.PersistedOnlineAccountData) { if m.accounts == nil { return } diff --git a/ledger/lruonlineaccts_test.go b/ledger/lruonlineaccts_test.go index 84acdb684e..a433973368 100644 --- a/ledger/lruonlineaccts_test.go +++ b/ledger/lruonlineaccts_test.go @@ -24,7 +24,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -38,11 +38,11 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { accountsNum := 50 // write 50 accounts for i := 0; i < accountsNum; i++ { - acct := store.PersistedOnlineAccountData{ + acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.write(acct) } @@ -63,7 +63,7 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { addr := basics.Address(crypto.Hash([]byte{byte(i)})) acct, has := baseOnlineAcct.read(addr) require.False(t, has) - require.Equal(t, store.PersistedOnlineAccountData{}, acct) + require.Equal(t, trackerdb.PersistedOnlineAccountData{}, acct) } baseOnlineAcct.prune(accountsNum / 2) @@ -82,7 +82,7 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { require.Equal(t, int64(i), acct.Rowid) } else { require.False(t, has) - require.Equal(t, store.PersistedOnlineAccountData{}, acct) + require.Equal(t, trackerdb.PersistedOnlineAccountData{}, acct) } } } @@ -98,11 +98,11 @@ func TestLRUOnlineAccountsDisable(t *testing.T) { for i := 0; i < accountsNum; i++ { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) - acct := store.PersistedOnlineAccountData{ + acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) }(i) @@ -112,11 +112,11 @@ func TestLRUOnlineAccountsDisable(t *testing.T) { require.Empty(t, baseOnlineAcct.accounts) for i := 0; i < accountsNum; i++ { - acct := store.PersistedOnlineAccountData{ + acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), 
Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.write(acct) } @@ -133,11 +133,11 @@ func TestLRUOnlineAccountsPendingWrites(t *testing.T) { for i := 0; i < accountsNum; i++ { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) - acct := store.PersistedOnlineAccountData{ + acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) }(i) @@ -175,11 +175,11 @@ func TestLRUOnlineAccountsPendingWritesWarning(t *testing.T) { baseOnlineAcct.init(log, pendingWritesBuffer, pendingWritesThreshold) for j := 0; j < 50; j++ { for i := 0; i < j; i++ { - acct := store.PersistedOnlineAccountData{ + acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) } @@ -201,11 +201,11 @@ func TestLRUOnlineAccountsOmittedPendingWrites(t *testing.T) { baseOnlineAcct.init(log, pendingWritesBuffer, pendingWritesThreshold) for i := 0; i < pendingWritesBuffer*2; i++ { - acct := store.PersistedOnlineAccountData{ + acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), Rowid: int64(i), - AccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, + AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) } @@ -228,6 +228,6 @@ func TestLRUOnlineAccountsOmittedPendingWrites(t *testing.T) { addr := basics.Address(crypto.Hash([]byte{byte(i)})) acct, has := baseOnlineAcct.read(addr) require.False(t, has) - require.Equal(t, store.PersistedOnlineAccountData{}, acct) + require.Equal(t, trackerdb.PersistedOnlineAccountData{}, acct) } } diff --git a/ledger/lruresources.go b/ledger/lruresources.go index 779ca3cb95..9886b29b12 100644 --- a/ledger/lruresources.go +++ b/ledger/lruresources.go @@ -18,13 +18,13 @@ package ledger import ( "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" ) //msgp:ignore cachedResourceData type cachedResourceData struct { - store.PersistedResourcesData + trackerdb.PersistedResourcesData address basics.Address } @@ -73,11 +73,11 @@ func (m *lruResources) init(log logging.Logger, pendingWrites int, pendingWrites // read the persistedResourcesData object that the lruResources has for the given address and creatable index. 
// thread locking semantics : read lock -func (m *lruResources) read(addr basics.Address, aidx basics.CreatableIndex) (data store.PersistedResourcesData, has bool) { +func (m *lruResources) read(addr basics.Address, aidx basics.CreatableIndex) (data trackerdb.PersistedResourcesData, has bool) { if el := m.resources[accountCreatable{address: addr, index: aidx}]; el != nil { return el.Value.PersistedResourcesData, true } - return store.PersistedResourcesData{}, false + return trackerdb.PersistedResourcesData{}, false } // readNotFound returns whether we have attempted to read this address but it did not exist in the db. @@ -89,7 +89,7 @@ func (m *lruResources) readNotFound(addr basics.Address, idx basics.CreatableInd // read the persistedResourcesData object that the lruResources has for the given address. // thread locking semantics : read lock -func (m *lruResources) readAll(addr basics.Address) (ret []store.PersistedResourcesData) { +func (m *lruResources) readAll(addr basics.Address) (ret []trackerdb.PersistedResourcesData) { for ac, pd := range m.resources { if ac.address == addr { ret = append(ret, pd.Value.PersistedResourcesData) @@ -131,7 +131,7 @@ outer2: // writePending write a single persistedAccountData entry to the pendingResources buffer. // the function doesn't block, and in case of a buffer overflow the entry would not be added. // thread locking semantics : no lock is required. -func (m *lruResources) writePending(acct store.PersistedResourcesData, addr basics.Address) { +func (m *lruResources) writePending(acct trackerdb.PersistedResourcesData, addr basics.Address) { select { case m.pendingResources <- cachedResourceData{PersistedResourcesData: acct, address: addr}: default: @@ -153,7 +153,7 @@ func (m *lruResources) writeNotFoundPending(addr basics.Address, idx basics.Crea // version of what's already on the cache or not. In all cases, the entry is going // to be promoted to the front of the list. 
// thread locking semantics : write lock -func (m *lruResources) write(resData store.PersistedResourcesData, addr basics.Address) { +func (m *lruResources) write(resData trackerdb.PersistedResourcesData, addr basics.Address) { if m.resources == nil { return } diff --git a/ledger/lruresources_test.go b/ledger/lruresources_test.go index 8424fe2fe3..a000adffec 100644 --- a/ledger/lruresources_test.go +++ b/ledger/lruresources_test.go @@ -25,7 +25,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -40,11 +40,11 @@ func TestLRUBasicResources(t *testing.T) { // write 50 resources for i := 0; i < resourcesNum; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) - res := store.PersistedResourcesData{ + res := trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.write(res, addr) } @@ -65,7 +65,7 @@ func TestLRUBasicResources(t *testing.T) { addr := basics.Address(crypto.Hash([]byte{byte(i)})) res, has := baseRes.read(addr, basics.CreatableIndex(i%resourcesNum)) require.False(t, has) - require.Equal(t, store.PersistedResourcesData{}, res) + require.Equal(t, trackerdb.PersistedResourcesData{}, res) } baseRes.prune(resourcesNum / 2) @@ -84,7 +84,7 @@ func TestLRUBasicResources(t *testing.T) { require.Equal(t, basics.CreatableIndex(i), res.Aidx) } else { require.False(t, has) - require.Equal(t, store.PersistedResourcesData{}, res) + require.Equal(t, trackerdb.PersistedResourcesData{}, res) } } } @@ -101,11 +101,11 @@ func TestLRUResourcesDisable(t *testing.T) { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) addr := basics.Address(crypto.Hash([]byte{byte(i)})) - res := store.PersistedResourcesData{ + res := trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) baseRes.writeNotFoundPending(addr, basics.CreatableIndex(i)) @@ -119,11 +119,11 @@ func TestLRUResourcesDisable(t *testing.T) { for i := 0; i < resourceNum; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) - res := store.PersistedResourcesData{ + res := trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.write(res, addr) } @@ -142,11 +142,11 @@ func TestLRUResourcesPendingWrites(t *testing.T) { go func(i int) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) addr := basics.Address(crypto.Hash([]byte{byte(i)})) - res := store.PersistedResourcesData{ + res := trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) }(i) @@ -195,11 +195,11 @@ func TestLRUResourcesPendingWritesWarning(t *testing.T) { for j := 0; j < 50; j++ { for i := 0; i < j; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) - res := 
store.PersistedResourcesData{ + res := trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) } @@ -222,11 +222,11 @@ func TestLRUResourcesOmittedPendingWrites(t *testing.T) { for i := 0; i < pendingWritesBuffer*2; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) - res := store.PersistedResourcesData{ + res := trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) } @@ -249,7 +249,7 @@ func TestLRUResourcesOmittedPendingWrites(t *testing.T) { addr := basics.Address(crypto.Hash([]byte{byte(i)})) res, has := baseRes.read(addr, basics.CreatableIndex(i)) require.False(t, has) - require.Equal(t, store.PersistedResourcesData{}, res) + require.Equal(t, trackerdb.PersistedResourcesData{}, res) } } @@ -294,11 +294,11 @@ func generatePersistedResourcesData(startRound, endRound int) []cachedResourceDa digest := crypto.Hash(buffer) accounts[i-startRound] = cachedResourceData{ - PersistedResourcesData: store.PersistedResourcesData{ + PersistedResourcesData: trackerdb.PersistedResourcesData{ Addrid: int64(i), Aidx: basics.CreatableIndex(i), Round: basics.Round(i + startRound), - Data: store.ResourcesData{Total: uint64(i)}, + Data: trackerdb.ResourcesData{Total: uint64(i)}, }, address: basics.Address(digest), } diff --git a/ledger/metrics.go b/ledger/metrics.go index f4561edf5c..16030c95f0 100644 --- a/ledger/metrics.go +++ b/ledger/metrics.go @@ -22,7 +22,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/util/metrics" ) @@ -70,7 +70,7 @@ func (mt *metricsTracker) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (mt *metricsTracker) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { +func (mt *metricsTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/notifier.go b/ledger/notifier.go index 6154683ae5..9a2a08f959 100644 --- a/ledger/notifier.go +++ b/ledger/notifier.go @@ -25,7 +25,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ) type blockDeltaPair struct { @@ -113,7 +113,7 @@ func (bn *blockNotifier) prepareCommit(dcc *deferredCommitContext) error { return nil } -func (bn *blockNotifier) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { +func (bn *blockNotifier) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/onlineaccountscache.go b/ledger/onlineaccountscache.go index e983ff024f..8340d2d4d5 100644 --- a/ledger/onlineaccountscache.go +++ b/ledger/onlineaccountscache.go @@ -20,7 +20,7 @@ import ( "container/list" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + 
"github.com/algorand/go-algorand/ledger/store/trackerdb" ) // Worst case memory usage = 2500 * 320 * 150B = 120MB @@ -35,7 +35,7 @@ type onlineAccountsCache struct { // init initializes the onlineAccountsCache for use. // thread locking semantics : write lock -func (o *onlineAccountsCache) init(accts []store.PersistedOnlineAccountData, maxCacheSize int) { +func (o *onlineAccountsCache) init(accts []trackerdb.PersistedOnlineAccountData, maxCacheSize int) { o.accounts = make(map[basics.Address]*list.List) o.maxCacheSize = maxCacheSize diff --git a/ledger/onlineaccountscache_test.go b/ledger/onlineaccountscache_test.go index 6cb97f6101..45dde5278b 100644 --- a/ledger/onlineaccountscache_test.go +++ b/ledger/onlineaccountscache_test.go @@ -24,7 +24,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -45,7 +45,7 @@ func TestOnlineAccountsCacheBasic(t *testing.T) { for i := 0; i < roundsNum; i++ { acct := cachedOnlineAccount{ updRound: basics.Round(i), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, BaseVotingData: store.BaseVotingData{VoteLastValid: 1000}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, BaseVotingData: trackerdb.BaseVotingData{VoteLastValid: 1000}}, } written := oac.writeFront(addr, acct) require.True(t, written) @@ -62,7 +62,7 @@ func TestOnlineAccountsCacheBasic(t *testing.T) { for i := proto.MaxBalLookback; i < uint64(roundsNum)+proto.MaxBalLookback; i++ { acct := cachedOnlineAccount{ updRound: basics.Round(i), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: i}, BaseVotingData: store.BaseVotingData{VoteLastValid: 1000}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: i}, BaseVotingData: trackerdb.BaseVotingData{VoteLastValid: 1000}}, } written := oac.writeFront(addr, acct) require.True(t, written) @@ -89,7 +89,7 @@ func TestOnlineAccountsCacheBasic(t *testing.T) { // attempt to insert a value with the updRound less than latest, expect it to have ignored acct = cachedOnlineAccount{ updRound: basics.Round(uint64(roundsNum) + proto.MaxBalLookback - 1), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: 100}, BaseVotingData: store.BaseVotingData{VoteLastValid: 1000}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: 100}, BaseVotingData: trackerdb.BaseVotingData{VoteLastValid: 1000}}, } written := oac.writeFront(addr, acct) require.False(t, written) @@ -110,13 +110,13 @@ func TestOnlineAccountsCachePruneOffline(t *testing.T) { for i := 0; i < roundsNum; i++ { acct := cachedOnlineAccount{ updRound: basics.Round(i), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, BaseVotingData: store.BaseVotingData{VoteLastValid: 1000}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, BaseVotingData: trackerdb.BaseVotingData{VoteLastValid: 1000}}, } oac.writeFront(addr, acct) } acct := cachedOnlineAccount{ updRound: basics.Round(roundsNum), - BaseOnlineAccountData: 
store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(roundsNum)}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(roundsNum)}}, } oac.writeFront(addr, acct) @@ -140,7 +140,7 @@ func TestOnlineAccountsCacheMaxEntries(t *testing.T) { lastAddr = ledgertesting.RandomAddress() acct := cachedOnlineAccount{ updRound: basics.Round(i), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, BaseVotingData: store.BaseVotingData{VoteLastValid: 1000}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, BaseVotingData: trackerdb.BaseVotingData{VoteLastValid: 1000}}, } written := oac.writeFront(lastAddr, acct) require.True(t, written) @@ -148,7 +148,7 @@ func TestOnlineAccountsCacheMaxEntries(t *testing.T) { acct := cachedOnlineAccount{ updRound: basics.Round(100), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, BaseVotingData: store.BaseVotingData{VoteLastValid: 1000}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, BaseVotingData: trackerdb.BaseVotingData{VoteLastValid: 1000}}, } written := oac.writeFront(ledgertesting.RandomAddress(), acct) require.False(t, written) @@ -159,7 +159,7 @@ func TestOnlineAccountsCacheMaxEntries(t *testing.T) { // set one to be expired acct = cachedOnlineAccount{ updRound: basics.Round(maxCacheSize), - BaseOnlineAccountData: store.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, BaseVotingData: store.BaseVotingData{}}, + BaseOnlineAccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, BaseVotingData: trackerdb.BaseVotingData{}}, } written = oac.writeFront(lastAddr, acct) require.True(t, written) diff --git a/ledger/persistedaccts_list.go b/ledger/persistedaccts_list.go index f29f2857d0..cd2a46b94a 100644 --- a/ledger/persistedaccts_list.go +++ b/ledger/persistedaccts_list.go @@ -16,7 +16,7 @@ package ledger -import "github.com/algorand/go-algorand/ledger/store" +import "github.com/algorand/go-algorand/ledger/store/trackerdb" // persistedAccountDataList represents a doubly linked list. // must initiate with newPersistedAccountList. @@ -33,7 +33,7 @@ type persistedAccountDataListNode struct { // element (l.Front()). next, prev *persistedAccountDataListNode - Value *store.PersistedAccountData + Value *trackerdb.PersistedAccountData } func newPersistedAccountList() *persistedAccountDataList { @@ -101,7 +101,7 @@ func (l *persistedAccountDataList) remove(e *persistedAccountDataListNode) { } // pushFront inserts a new element e with value v at the front of list l and returns e. 
-func (l *persistedAccountDataList) pushFront(v *store.PersistedAccountData) *persistedAccountDataListNode { +func (l *persistedAccountDataList) pushFront(v *trackerdb.PersistedAccountData) *persistedAccountDataListNode { newNode := l.getNewNode() newNode.Value = v return l.insertValue(newNode, &l.root) diff --git a/ledger/persistedaccts_list_test.go b/ledger/persistedaccts_list_test.go index 41881520cc..d4c8599444 100644 --- a/ledger/persistedaccts_list_test.go +++ b/ledger/persistedaccts_list_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -78,9 +78,9 @@ func checkListLen(t *testing.T, l dataList, len int) bool { func TestRemoveFromListAD(t *testing.T) { partitiontest.PartitionTest(t) l := newPersistedAccountList() - e1 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{1}}) - e2 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{2}}) - e3 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{3}}) + e1 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{1}}) + e2 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{2}}) + e3 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{3}}) checkListPointersAD(t, l, []*persistedAccountDataListNode{e3, e2, e1}) l.remove(e2) @@ -98,7 +98,7 @@ func TestAddingNewNodeWithAllocatedFreeListAD(t *testing.T) { return } // test elements - e1 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{1}}) + e1 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{1}}) checkListPointersAD(t, l, []*persistedAccountDataListNode{e1}) if countListSize(l.freeList) != 9 { @@ -168,11 +168,11 @@ func TestMultielementListPositioningAD(t *testing.T) { l := newPersistedAccountList() checkListPointersAD(t, l, []*persistedAccountDataListNode{}) // test elements - e2 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{2}}) - e1 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{1}}) - e3 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{3}}) - e4 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{4}}) - e5 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{5}}) + e2 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{2}}) + e1 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{1}}) + e3 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{3}}) + e4 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{4}}) + e5 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{5}}) checkListPointersAD(t, l, []*persistedAccountDataListNode{e5, e4, e3, e1, e2}) @@ -200,7 +200,7 @@ func TestMultielementListPositioningAD(t *testing.T) { l.moveToFront(e1) // no movement checkListPointersAD(t, l, []*persistedAccountDataListNode{e1, e3, e4}) - e2 = l.pushFront(&store.PersistedAccountData{Addr: basics.Address{2}}) + e2 = l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{2}}) checkListPointersAD(t, l, []*persistedAccountDataListNode{e2, e1, e3, e4}) l.remove(e3) // removing from middle @@ -226,7 +226,7 @@ func TestSingleElementListPositioningAD(t *testing.T) { partitiontest.PartitionTest(t) l := newPersistedAccountList() checkListPointersAD(t, l, []*persistedAccountDataListNode{}) - e := l.pushFront(&store.PersistedAccountData{Addr: 
basics.Address{1}}) + e := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{1}}) checkListPointersAD(t, l, []*persistedAccountDataListNode{e}) l.moveToFront(e) checkListPointersAD(t, l, []*persistedAccountDataListNode{e}) @@ -237,8 +237,8 @@ func TestSingleElementListPositioningAD(t *testing.T) { func TestRemovedNodeShouldBeMovedToFreeListAD(t *testing.T) { partitiontest.PartitionTest(t) l := newPersistedAccountList() - e1 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{1}}) - e2 := l.pushFront(&store.PersistedAccountData{Addr: basics.Address{2}}) + e1 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{1}}) + e2 := l.pushFront(&trackerdb.PersistedAccountData{Addr: basics.Address{2}}) checkListPointersAD(t, l, []*persistedAccountDataListNode{e2, e1}) diff --git a/ledger/persistedonlineaccts_list.go b/ledger/persistedonlineaccts_list.go index 9a00695abe..0f080e5916 100644 --- a/ledger/persistedonlineaccts_list.go +++ b/ledger/persistedonlineaccts_list.go @@ -16,7 +16,7 @@ package ledger -import "github.com/algorand/go-algorand/ledger/store" +import "github.com/algorand/go-algorand/ledger/store/trackerdb" // persistedOnlineAccountDataList represents a doubly linked list. // must initiate with newPersistedAccountList. @@ -33,7 +33,7 @@ type persistedOnlineAccountDataListNode struct { // element (l.Front()). next, prev *persistedOnlineAccountDataListNode - Value *store.PersistedOnlineAccountData + Value *trackerdb.PersistedOnlineAccountData } func newPersistedOnlineAccountList() *persistedOnlineAccountDataList { @@ -101,7 +101,7 @@ func (l *persistedOnlineAccountDataList) remove(e *persistedOnlineAccountDataLis } // pushFront inserts a new element e with value v at the front of list l and returns e. -func (l *persistedOnlineAccountDataList) pushFront(v *store.PersistedOnlineAccountData) *persistedOnlineAccountDataListNode { +func (l *persistedOnlineAccountDataList) pushFront(v *trackerdb.PersistedOnlineAccountData) *persistedOnlineAccountDataListNode { newNode := l.getNewNode() newNode.Value = v return l.insertValue(newNode, &l.root) diff --git a/ledger/persistedonlineaccts_list_test.go b/ledger/persistedonlineaccts_list_test.go index 56184fd2e7..afbfbe9a8a 100644 --- a/ledger/persistedonlineaccts_list_test.go +++ b/ledger/persistedonlineaccts_list_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -46,9 +46,9 @@ func (l *persistedOnlineAccountDataListNode) getPrev() dataListNode { func TestRemoveFromListOAD(t *testing.T) { partitiontest.PartitionTest(t) l := newPersistedOnlineAccountList() - e1 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{1}}) - e2 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{2}}) - e3 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{3}}) + e1 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{1}}) + e2 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{2}}) + e3 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{3}}) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e3, e2, e1}) l.remove(e2) @@ -66,7 +66,7 @@ func TestAddingNewNodeWithAllocatedFreeListOAD(t *testing.T) { return } // test elements - e1 := l.pushFront(&store.PersistedOnlineAccountData{Addr: 
basics.Address{1}}) + e1 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{1}}) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1}) if countListSize(l.freeList) != 9 { @@ -90,11 +90,11 @@ func TestMultielementListPositioningOAD(t *testing.T) { l := newPersistedOnlineAccountList() checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{}) // test elements - e2 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{2}}) - e1 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{1}}) - e3 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{3}}) - e4 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{4}}) - e5 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{5}}) + e2 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{2}}) + e1 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{1}}) + e3 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{3}}) + e4 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{4}}) + e5 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{5}}) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e5, e4, e3, e1, e2}) @@ -122,7 +122,7 @@ func TestMultielementListPositioningOAD(t *testing.T) { l.moveToFront(e1) // no movement checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1, e3, e4}) - e2 = l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{2}}) + e2 = l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{2}}) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2, e1, e3, e4}) l.remove(e3) // removing from middle @@ -148,7 +148,7 @@ func TestSingleElementListPositioningOD(t *testing.T) { partitiontest.PartitionTest(t) l := newPersistedOnlineAccountList() checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{}) - e := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{1}}) + e := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{1}}) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e}) l.moveToFront(e) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e}) @@ -159,8 +159,8 @@ func TestSingleElementListPositioningOD(t *testing.T) { func TestRemovedNodeShouldBeMovedToFreeListOAD(t *testing.T) { partitiontest.PartitionTest(t) l := newPersistedOnlineAccountList() - e1 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{1}}) - e2 := l.pushFront(&store.PersistedOnlineAccountData{Addr: basics.Address{2}}) + e1 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{1}}) + e2 := l.pushFront(&trackerdb.PersistedOnlineAccountData{Addr: basics.Address{2}}) checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2, e1}) diff --git a/ledger/store/trackerdb/catchpoint.go b/ledger/store/trackerdb/catchpoint.go new file mode 100644 index 0000000000..b371e0d347 --- /dev/null +++ b/ledger/store/trackerdb/catchpoint.go @@ -0,0 +1,190 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package trackerdb + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/merkletrie" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/ledgercore" +) + +// TrieMemoryConfig is the memory configuration setup used for the merkle trie. +var TrieMemoryConfig = merkletrie.MemoryConfig{ + NodesCountPerPage: MerkleCommitterNodesPerPage, + CachedNodesCount: TrieCachedNodesCount, + PageFillFactor: 0.95, + MaxChildrenPagesThreshold: 64, +} + +// MerkleCommitterNodesPerPage controls how many nodes will be stored in a single page +// value was calibrated using BenchmarkCalibrateNodesPerPage +var MerkleCommitterNodesPerPage = int64(116) + +// TrieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory. +// value was calibrated using BenchmarkCalibrateCacheNodeSize +var TrieCachedNodesCount = 9000 + +// CatchpointDirName represents the directory name in which all the catchpoints files are stored +var CatchpointDirName = "catchpoints" + +// CatchpointState is used to store catchpoint related variables into the catchpointstate table. +// +//msgp:ignore CatchpointState +type CatchpointState string + +const ( + // CatchpointStateLastCatchpoint is written by a node once a catchpoint label is created for a round + CatchpointStateLastCatchpoint = CatchpointState("lastCatchpoint") + // CatchpointStateWritingFirstStageInfo state variable is set to 1 if catchpoint's first stage is unfinished, + // and is 0 otherwise. Used to clear / restart the first stage after a crash. + // This key is set in the same db transaction as the account updates, so the + // unfinished first stage corresponds to the current db round. + CatchpointStateWritingFirstStageInfo = CatchpointState("writingFirstStageInfo") + // CatchpointStateWritingCatchpoint if there is an unfinished catchpoint, this state variable is set to + // the catchpoint's round. Otherwise, it is set to 0. + // DEPRECATED. + CatchpointStateWritingCatchpoint = CatchpointState("writingCatchpoint") + // CatchpointStateCatchupState is the state of the catchup process. The variable is stored only during the catchpoint catchup process, and removed afterward. + CatchpointStateCatchupState = CatchpointState("catchpointCatchupState") + // CatchpointStateCatchupLabel is the label to which the currently catchpoint catchup process is trying to catchup to. + CatchpointStateCatchupLabel = CatchpointState("catchpointCatchupLabel") + // CatchpointStateCatchupBlockRound is the block round that is associated with the current running catchpoint catchup. + CatchpointStateCatchupBlockRound = CatchpointState("catchpointCatchupBlockRound") + // CatchpointStateCatchupBalancesRound is the balance round that is associated with the current running catchpoint catchup. Typically it would be + // equal to CatchpointStateCatchupBlockRound - 320. + CatchpointStateCatchupBalancesRound = CatchpointState("catchpointCatchupBalancesRound") + // CatchpointStateCatchupHashRound is the round that is associated with the hash of the merkle trie. 
Normally, it's identical to CatchpointStateCatchupBalancesRound, + // however, it could differ when we catch up from a catchpoint that was created using a different version: in this case, + // we set it to zero in order to reset the merkle trie. This would force the merkle trie to be rebuilt on startup (if needed). + CatchpointStateCatchupHashRound = CatchpointState("catchpointCatchupHashRound") + // CatchpointStateCatchpointLookback is the number of rounds we keep catchpoints for. + CatchpointStateCatchpointLookback = CatchpointState("catchpointLookback") +) + +// UnfinishedCatchpointRecord represents a stored record of an unfinished catchpoint. +type UnfinishedCatchpointRecord struct { + Round basics.Round + BlockHash crypto.Digest +} + +// NormalizedAccountBalance is a staging area for catchpoint file account information before it is added to the catchpoint staging tables. +type NormalizedAccountBalance struct { + // Address is the public key address to which the account belongs. + Address basics.Address + // AccountData contains the baseAccountData for that account. + AccountData BaseAccountData + // Resources is a map, where the key is the creatable index, and the value is the resource data. + Resources map[basics.CreatableIndex]ResourcesData + // EncodedAccountData contains the baseAccountData encoded bytes that are going to be written to the accountbase table. + EncodedAccountData []byte + // AccountHashes contains a list of all the hashes that would need to be added to the merkle trie for that account. + // on V6, we could have multiple hashes, since we have separate account/resource hashes. + AccountHashes [][]byte + // NormalizedBalance contains the normalized balance for the account. + NormalizedBalance uint64 + // EncodedResources provides the encoded form of the resources. + EncodedResources map[basics.CreatableIndex][]byte + // PartialBalance indicates that the original account balance was split into multiple parts at catchpoint creation time. + PartialBalance bool +} + +// CatchpointFirstStageInfo is the record stored in the `catchpointfirststageinfo` table. +type CatchpointFirstStageInfo struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + Totals ledgercore.AccountTotals `codec:"accountTotals"` + TrieBalancesHash crypto.Digest `codec:"trieBalancesHash"` + // TotalAccounts is the total number of accounts in the catchpoint data file. Only set when catchpoint + // data files are generated. + TotalAccounts uint64 `codec:"accountsCount"` + + // TotalKVs is the total number of key/value pairs in the catchpoint data file. Only set when catchpoint + // data files are generated. + TotalKVs uint64 `codec:"kvsCount"` + + // TotalChunks is the total number of chunks in the catchpoint data file. Only set when catchpoint + // data files are generated. + TotalChunks uint64 `codec:"chunksCount"` + // BiggestChunkLen is the size in bytes of the largest chunk, used when re-packing. + BiggestChunkLen uint64 `codec:"biggestChunk"` +} + +// MakeCatchpointFilePath builds the path of a catchpoint file. +func MakeCatchpointFilePath(round basics.Round) string { + irnd := int64(round) / 256 + outStr := "" + for irnd > 0 { + outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256)) + irnd = irnd / 256 + } + outStr = filepath.Join(outStr, strconv.FormatInt(int64(round), 10)+".catchpoint") + return outStr +} + +// RemoveSingleCatchpointFileFromDisk removes a single catchpoint file from the disk.
this function does not leave empty directories +func RemoveSingleCatchpointFileFromDisk(dbDirectory, fileToDelete string) (err error) { + absCatchpointFileName := filepath.Join(dbDirectory, fileToDelete) + err = os.Remove(absCatchpointFileName) + if err == nil || os.IsNotExist(err) { + // it's ok if the file doesn't exist. + err = nil + } else { + // we can't delete the file, abort - + return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err) + } + splitedDirName := strings.Split(fileToDelete, string(os.PathSeparator)) + + var subDirectoriesToScan []string + //build a list of all the subdirs + currentSubDir := "" + for _, element := range splitedDirName { + currentSubDir = filepath.Join(currentSubDir, element) + subDirectoriesToScan = append(subDirectoriesToScan, currentSubDir) + } + + // iterating over the list of directories. starting from the sub dirs and moving up. + // skipping the file itself. + for i := len(subDirectoriesToScan) - 2; i >= 0; i-- { + absSubdir := filepath.Join(dbDirectory, subDirectoriesToScan[i]) + if _, err := os.Stat(absSubdir); os.IsNotExist(err) { + continue + } + + isEmpty, err := isDirEmpty(absSubdir) + if err != nil { + return fmt.Errorf("unable to read old catchpoint directory '%s' : %v", subDirectoriesToScan[i], err) + } + if isEmpty { + err = os.Remove(absSubdir) + if err != nil { + if os.IsNotExist(err) { + continue + } + return fmt.Errorf("unable to delete old catchpoint directory '%s' : %v", subDirectoriesToScan[i], err) + } + } + } + + return nil +} diff --git a/ledger/store/data.go b/ledger/store/trackerdb/data.go similarity index 99% rename from ledger/store/data.go rename to ledger/store/trackerdb/data.go index 58edc7d456..5ab3faad1f 100644 --- a/ledger/store/data.go +++ b/ledger/store/trackerdb/data.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package trackerdb import ( "context" diff --git a/ledger/store/data_test.go b/ledger/store/trackerdb/data_test.go similarity index 99% rename from ledger/store/data_test.go rename to ledger/store/trackerdb/data_test.go index b6afe29a07..5bcd321aa3 100644 --- a/ledger/store/data_test.go +++ b/ledger/store/trackerdb/data_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package trackerdb import ( "encoding/binary" diff --git a/ledger/store/hashing.go b/ledger/store/trackerdb/hashing.go similarity index 99% rename from ledger/store/hashing.go rename to ledger/store/trackerdb/hashing.go index fa409a2c60..7a866087cf 100644 --- a/ledger/store/hashing.go +++ b/ledger/store/trackerdb/hashing.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package trackerdb import ( "encoding/binary" diff --git a/ledger/store/hashkind_string.go b/ledger/store/trackerdb/hashkind_string.go similarity index 97% rename from ledger/store/hashkind_string.go rename to ledger/store/trackerdb/hashkind_string.go index 3b60c2f1e9..1a8c0f196b 100644 --- a/ledger/store/hashkind_string.go +++ b/ledger/store/trackerdb/hashkind_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type=HashKind"; DO NOT EDIT. 
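// For illustration, a minimal sketch of the path layout produced by MakeCatchpointFilePath above
// (assumes only the trackerdb import path introduced by this patch; hypothetical standalone caller):

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/store/trackerdb"
)

func main() {
	// MakeCatchpointFilePath spreads files across nested two-hex-digit
	// directories, emitting the low byte of round/256 first:
	// 25000000/256 = 97656 = 0x017d78, so the file lands under 78/7d/01/.
	fmt.Println(trackerdb.MakeCatchpointFilePath(basics.Round(25000000)))
	// On a unix-style path this prints: 78/7d/01/25000000.catchpoint
}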
-package store +package trackerdb import "strconv" diff --git a/ledger/store/interface.go b/ledger/store/trackerdb/interface.go similarity index 87% rename from ledger/store/interface.go rename to ledger/store/trackerdb/interface.go index 6a45de647f..e03b6b5c3a 100644 --- a/ledger/store/interface.go +++ b/ledger/store/trackerdb/interface.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package trackerdb import ( "context" @@ -23,6 +23,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/ledgercore" ) @@ -171,3 +172,40 @@ type CatchpointReaderWriter interface { CatchpointReader CatchpointWriter } + +// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database. +type MerkleCommitter interface { + StorePage(page uint64, content []byte) error + LoadPage(page uint64) (content []byte, err error) +} + +// OrderedAccountsIter is an iterator over ordered accounts. +type OrderedAccountsIter interface { + Next(ctx context.Context) (acct []AccountAddressHash, processedRecords int, err error) + Close(ctx context.Context) (err error) +} + +// AccountAddressHash is used by Next to return a single account address and the associated hash. +type AccountAddressHash struct { + Addrid int64 + Digest []byte +} + +// KVsIter is an iterator over application key/value pairs. +type KVsIter interface { + Next() bool + KeyValue() (k []byte, v []byte, err error) + Close() +} + +// EncodedAccountsBatchIter is a batch iterator for accounts. +type EncodedAccountsBatchIter interface { + Next(ctx context.Context, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) + Close() +} + +// CatchpointPendingHashesIter is an iterator for pending hashes. +type CatchpointPendingHashesIter interface { + Next(ctx context.Context) (hashes [][]byte, err error) + Close() +} diff --git a/ledger/store/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go similarity index 99% rename from ledger/store/msgp_gen.go rename to ledger/store/trackerdb/msgp_gen.go index 58d544780e..ad4df82ef0 100644 --- a/ledger/store/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -1,4 +1,4 @@ -package store +package trackerdb // Code generated by github.com/algorand/msgp DO NOT EDIT. diff --git a/ledger/store/msgp_gen_test.go b/ledger/store/trackerdb/msgp_gen_test.go similarity index 99% rename from ledger/store/msgp_gen_test.go rename to ledger/store/trackerdb/msgp_gen_test.go index c903eec33d..be8a232c32 100644 --- a/ledger/store/msgp_gen_test.go +++ b/ledger/store/trackerdb/msgp_gen_test.go @@ -1,7 +1,7 @@ //go:build !skip_msgp_testing // +build !skip_msgp_testing -package store +package trackerdb // Code generated by github.com/algorand/msgp DO NOT EDIT. diff --git a/ledger/store/trackerdb/params.go b/ledger/store/trackerdb/params.go new file mode 100644 index 0000000000..64d5be9313 --- /dev/null +++ b/ledger/store/trackerdb/params.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version.
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package trackerdb + +import ( + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/db" +) + +// Params contains parameters for initializing trackerDB +type Params struct { + InitAccounts map[basics.Address]basics.AccountData + InitProto protocol.ConsensusVersion + GenesisHash crypto.Digest + FromCatchpoint bool + CatchpointEnabled bool + DbPathPrefix string + BlockDb db.Pair +} + +// InitParams params used during db init +type InitParams struct { + SchemaVersion int32 + VacuumOnStartup bool +} diff --git a/ledger/store/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go similarity index 96% rename from ledger/store/accountsV2.go rename to ledger/store/trackerdb/sqlitedriver/accountsV2.go index 5f20c23c63..7776409346 100644 --- a/ledger/store/accountsV2.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "bytes" @@ -28,6 +28,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" "github.com/stretchr/testify/require" @@ -105,7 +106,7 @@ func (r *accountsV2Reader) AccountsAllTest() (bals map[basics.Address]basics.Acc return } - var data BaseAccountData + var data trackerdb.BaseAccountData err = protocol.Decode(buf, &data) if err != nil { return @@ -216,7 +217,7 @@ ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?`, rnd, n, o return nil, err } - var data BaseOnlineAccountData + var data trackerdb.BaseOnlineAccountData err = protocol.Decode(buf, &data) if err != nil { return nil, err @@ -246,20 +247,20 @@ ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? 
OFFSET ?`, rnd, n, o } // OnlineAccountsAll returns all online accounts -func (r *accountsV2Reader) OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error) { +func (r *accountsV2Reader) OnlineAccountsAll(maxAccounts uint64) ([]trackerdb.PersistedOnlineAccountData, error) { rows, err := r.q.Query("SELECT rowid, address, updround, data FROM onlineaccounts ORDER BY address, updround ASC") if err != nil { return nil, err } defer rows.Close() - result := make([]PersistedOnlineAccountData, 0, maxAccounts) + result := make([]trackerdb.PersistedOnlineAccountData, 0, maxAccounts) var numAccounts uint64 seenAddr := make([]byte, len(basics.Address{})) for rows.Next() { var addrbuf []byte var buf []byte - data := PersistedOnlineAccountData{} + data := trackerdb.PersistedOnlineAccountData{} err := rows.Scan(&data.Rowid, &addrbuf, &data.UpdRound, &buf) if err != nil { return nil, err @@ -321,7 +322,7 @@ func (r *accountsV2Reader) TotalKVs(ctx context.Context) (total uint64, err erro } // LoadTxTail returns the tx tails -func (r *accountsV2Reader) LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) { +func (r *accountsV2Reader) LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*trackerdb.TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) { rows, err := r.q.QueryContext(ctx, "SELECT rnd, data FROM txtail ORDER BY rnd DESC") if err != nil { return nil, nil, 0, err @@ -339,7 +340,7 @@ func (r *accountsV2Reader) LoadTxTail(ctx context.Context, dbRound basics.Round) if round != expectedRound { return nil, nil, 0, fmt.Errorf("txtail table contain unexpected round %d; round %d was expected", round, expectedRound) } - tail := &TxTailRound{} + tail := &trackerdb.TxTailRound{} err = protocol.Decode(data, tail) if err != nil { return nil, nil, 0, err @@ -459,7 +460,7 @@ func (r *accountsV2Reader) LoadAllFullAccounts( return } - var data BaseAccountData + var data trackerdb.BaseAccountData err = protocol.Decode(buf, &data) if err != nil { return @@ -486,7 +487,7 @@ func (r *accountsV2Reader) LoadAllFullAccounts( } // LoadFullAccount converts BaseAccountData into basics.AccountData and loads all resources as needed -func (r *accountsV2Reader) LoadFullAccount(ctx context.Context, resourcesTable string, addr basics.Address, addrid int64, data BaseAccountData) (ad basics.AccountData, err error) { +func (r *accountsV2Reader) LoadFullAccount(ctx context.Context, resourcesTable string, addr basics.Address, addrid int64, data trackerdb.BaseAccountData) (ad basics.AccountData, err error) { ad = data.GetAccountData() hasResources := false @@ -526,12 +527,12 @@ func (r *accountsV2Reader) LoadFullAccount(ctx context.Context, resourcesTable s if err != nil { return } - var resData ResourcesData + var resData trackerdb.ResourcesData err = protocol.Decode(buf, &resData) if err != nil { return } - if resData.ResourceFlags == ResourceFlagsNotHolding { + if resData.ResourceFlags == trackerdb.ResourceFlagsNotHolding { err = fmt.Errorf("addr %s (%d) aidx = %d resourceFlagsNotHolding should not be persisted", addr.String(), addrid, aidx) return } @@ -667,7 +668,7 @@ func (w *accountsV2Writer) OnlineAccountsDelete(forgetBefore basics.Round) (err // reset the state prevAddr = addrbuf - var oad BaseOnlineAccountData + var oad trackerdb.BaseOnlineAccountData err = protocol.Decode(buf, &oad) if err != nil { return diff --git a/ledger/store/accountsV2_test.go 
b/ledger/store/trackerdb/sqlitedriver/accountsV2_test.go similarity index 99% rename from ledger/store/accountsV2_test.go rename to ledger/store/trackerdb/sqlitedriver/accountsV2_test.go index c06d982007..1e4a2217f5 100644 --- a/ledger/store/accountsV2_test.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "testing" diff --git a/ledger/store/catchpoint.go b/ledger/store/trackerdb/sqlitedriver/catchpoint.go similarity index 66% rename from ledger/store/catchpoint.go rename to ledger/store/trackerdb/sqlitedriver/catchpoint.go index 7b8915de02..9bf15eb47f 100644 --- a/ledger/store/catchpoint.go +++ b/ledger/store/trackerdb/sqlitedriver/catchpoint.go @@ -14,107 +14,23 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" "database/sql" "errors" "fmt" - "os" - "path/filepath" - "strings" "time" "github.com/algorand/go-algorand/crypto" - "github.com/algorand/go-algorand/crypto/merkletrie" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" "github.com/mattn/go-sqlite3" ) -// TrieMemoryConfig is the memory configuration setup used for the merkle trie. -var TrieMemoryConfig = merkletrie.MemoryConfig{ - NodesCountPerPage: MerkleCommitterNodesPerPage, - CachedNodesCount: TrieCachedNodesCount, - PageFillFactor: 0.95, - MaxChildrenPagesThreshold: 64, -} - -// MerkleCommitterNodesPerPage controls how many nodes will be stored in a single page -// value was calibrated using BenchmarkCalibrateNodesPerPage -var MerkleCommitterNodesPerPage = int64(116) - -// TrieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory. -// value was calibrated using BenchmarkCalibrateCacheNodeSize -var TrieCachedNodesCount = 9000 - -// CatchpointDirName represents the directory name in which all the catchpoints files are stored -var CatchpointDirName = "catchpoints" - -// CatchpointState is used to store catchpoint related variables into the catchpointstate table. -// -//msgp:ignore CatchpointState -type CatchpointState string - -const ( - // CatchpointStateLastCatchpoint is written by a node once a catchpoint label is created for a round - CatchpointStateLastCatchpoint = CatchpointState("lastCatchpoint") - // CatchpointStateWritingFirstStageInfo state variable is set to 1 if catchpoint's first stage is unfinished, - // and is 0 otherwise. Used to clear / restart the first stage after a crash. - // This key is set in the same db transaction as the account updates, so the - // unfinished first stage corresponds to the current db round. - CatchpointStateWritingFirstStageInfo = CatchpointState("writingFirstStageInfo") - // catchpointStateWritingCatchpoint if there is an unfinished catchpoint, this state variable is set to - // the catchpoint's round. Otherwise, it is set to 0. - // DEPRECATED. - catchpointStateWritingCatchpoint = CatchpointState("writingCatchpoint") - // CatchpointStateCatchupState is the state of the catchup process. The variable is stored only during the catchpoint catchup process, and removed afterward. 
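// For illustration, a minimal sketch of the relocated state keys in use through the
// sqlite driver (hypothetical caller, not part of this patch; e is an open db.Executable):
func storeLookback(ctx context.Context, e db.Executable) (uint64, error) {
	crw := NewCatchpointSQLReaderWriter(e)
	// Writing zero deletes the row, so a non-zero lookback both stores and enables the value.
	if err := crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchpointLookback, 320); err != nil {
		return 0, err
	}
	return crw.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchpointLookback)
}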
- CatchpointStateCatchupState = CatchpointState("catchpointCatchupState") - // CatchpointStateCatchupLabel is the label to which the currently catchpoint catchup process is trying to catchup to. - CatchpointStateCatchupLabel = CatchpointState("catchpointCatchupLabel") - // CatchpointStateCatchupBlockRound is the block round that is associated with the current running catchpoint catchup. - CatchpointStateCatchupBlockRound = CatchpointState("catchpointCatchupBlockRound") - // CatchpointStateCatchupBalancesRound is the balance round that is associated with the current running catchpoint catchup. Typically it would be - // equal to CatchpointStateCatchupBlockRound - 320. - CatchpointStateCatchupBalancesRound = CatchpointState("catchpointCatchupBalancesRound") - // CatchpointStateCatchupHashRound is the round that is associated with the hash of the merkle trie. Normally, it's identical to CatchpointStateCatchupBalancesRound, - // however, it could differ when we catchup from a catchpoint that was created using a different version : in this case, - // we set it to zero in order to reset the merkle trie. This would force the merkle trie to be re-build on startup ( if needed ). - CatchpointStateCatchupHashRound = CatchpointState("catchpointCatchupHashRound") - // CatchpointStateCatchpointLookback is the number of rounds we keep catchpoints for - CatchpointStateCatchpointLookback = CatchpointState("catchpointLookback") -) - -// UnfinishedCatchpointRecord represents a stored record of an unfinished catchpoint. -type UnfinishedCatchpointRecord struct { - Round basics.Round - BlockHash crypto.Digest -} - -// NormalizedAccountBalance is a staging area for a catchpoint file account information before it's being added to the catchpoint staging tables. -type NormalizedAccountBalance struct { - // The public key address to which the account belongs. - Address basics.Address - // accountData contains the baseAccountData for that account. - AccountData BaseAccountData - // resources is a map, where the key is the creatable index, and the value is the resource data. - Resources map[basics.CreatableIndex]ResourcesData - // encodedAccountData contains the baseAccountData encoded bytes that are going to be written to the accountbase table. - EncodedAccountData []byte - // accountHashes contains a list of all the hashes that would need to be added to the merkle trie for that account. - // on V6, we could have multiple hashes, since we have separate account/resource hashes. - AccountHashes [][]byte - // normalizedBalance contains the normalized balance for the account. - NormalizedBalance uint64 - // encodedResources provides the encoded form of the resources - EncodedResources map[basics.CreatableIndex][]byte - // partial balance indicates that the original account balance was split into multiple parts in catchpoint creation time - PartialBalance bool -} - type catchpointReader struct { q db.Queryable } @@ -128,27 +44,6 @@ type catchpointReaderWriter struct { catchpointWriter } -// CatchpointFirstStageInfo For the `catchpointfirststageinfo` table. -type CatchpointFirstStageInfo struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - - Totals ledgercore.AccountTotals `codec:"accountTotals"` - TrieBalancesHash crypto.Digest `codec:"trieBalancesHash"` - // Total number of accounts in the catchpoint data file. Only set when catchpoint - // data files are generated. - TotalAccounts uint64 `codec:"accountsCount"` - - // Total number of accounts in the catchpoint data file. 
Only set when catchpoint - // data files are generated. - TotalKVs uint64 `codec:"kvsCount"` - - // Total number of chunks in the catchpoint data file. Only set when catchpoint - // data files are generated. - TotalChunks uint64 `codec:"chunksCount"` - // BiggestChunkLen is the size in the bytes of the largest chunk, used when re-packing. - BiggestChunkLen uint64 `codec:"biggestChunk"` -} - // NewCatchpointSQLReaderWriter creates a Catchpoint SQL reader+writer func NewCatchpointSQLReaderWriter(e db.Executable) *catchpointReaderWriter { return &catchpointReaderWriter{ @@ -190,7 +85,7 @@ func (cr *catchpointReader) GetOldestCatchpointFiles(ctx context.Context, fileCo return } -func (cr *catchpointReader) ReadCatchpointStateUint64(ctx context.Context, stateName CatchpointState) (val uint64, err error) { +func (cr *catchpointReader) ReadCatchpointStateUint64(ctx context.Context, stateName trackerdb.CatchpointState) (val uint64, err error) { err = db.Retry(func() (err error) { query := "SELECT intval FROM catchpointstate WHERE id=?" var v sql.NullInt64 @@ -209,7 +104,7 @@ func (cr *catchpointReader) ReadCatchpointStateUint64(ctx context.Context, state return val, err } -func (cr *catchpointReader) ReadCatchpointStateString(ctx context.Context, stateName CatchpointState) (val string, err error) { +func (cr *catchpointReader) ReadCatchpointStateString(ctx context.Context, stateName trackerdb.CatchpointState) (val string, err error) { err = db.Retry(func() (err error) { query := "SELECT strval FROM catchpointstate WHERE id=?" var v sql.NullString @@ -229,8 +124,8 @@ func (cr *catchpointReader) ReadCatchpointStateString(ctx context.Context, state return val, err } -func (cr *catchpointReader) SelectUnfinishedCatchpoints(ctx context.Context) ([]UnfinishedCatchpointRecord, error) { - var res []UnfinishedCatchpointRecord +func (cr *catchpointReader) SelectUnfinishedCatchpoints(ctx context.Context) ([]trackerdb.UnfinishedCatchpointRecord, error) { + var res []trackerdb.UnfinishedCatchpointRecord f := func() error { query := "SELECT round, blockhash FROM unfinishedcatchpoints ORDER BY round" @@ -242,7 +137,7 @@ func (cr *catchpointReader) SelectUnfinishedCatchpoints(ctx context.Context) ([] // Clear `res` in case this function is repeated. res = res[:0] for rows.Next() { - var record UnfinishedCatchpointRecord + var record trackerdb.UnfinishedCatchpointRecord var blockHash []byte err = rows.Scan(&record.Round, &blockHash) if err != nil { @@ -262,7 +157,7 @@ func (cr *catchpointReader) SelectUnfinishedCatchpoints(ctx context.Context) ([] return res, nil } -func (cr *catchpointReader) SelectCatchpointFirstStageInfo(ctx context.Context, round basics.Round) (CatchpointFirstStageInfo, bool /*exists*/, error) { +func (cr *catchpointReader) SelectCatchpointFirstStageInfo(ctx context.Context, round basics.Round) (trackerdb.CatchpointFirstStageInfo, bool /*exists*/, error) { var data []byte f := func() error { query := "SELECT info FROM catchpointfirststageinfo WHERE round=?" 
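// The readers above all share one shape: capture results in a closure and let db.Retry
// re-run it on transient sqlite errors. A minimal sketch of the same pattern
// (hypothetical query; assumes db.Queryable exposes QueryRowContext, as the reads above suggest):
func (cr *catchpointReader) countStateRows(ctx context.Context) (count uint64, err error) {
	err = db.Retry(func() error {
		// The closure may run more than once, so it only mutates the captured result.
		return cr.q.QueryRowContext(ctx, "SELECT COUNT(1) FROM catchpointstate").Scan(&count)
	})
	return
}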
@@ -275,17 +170,17 @@ func (cr *catchpointReader) SelectCatchpointFirstStageInfo(ctx context.Context, } err := db.Retry(f) if err != nil { - return CatchpointFirstStageInfo{}, false, err + return trackerdb.CatchpointFirstStageInfo{}, false, err } if data == nil { - return CatchpointFirstStageInfo{}, false, nil + return trackerdb.CatchpointFirstStageInfo{}, false, nil } - var res CatchpointFirstStageInfo + var res trackerdb.CatchpointFirstStageInfo err = protocol.Decode(data, &res) if err != nil { - return CatchpointFirstStageInfo{}, false, err + return trackerdb.CatchpointFirstStageInfo{}, false, err } return res, true, nil @@ -337,7 +232,7 @@ func (cw *catchpointWriter) StoreCatchpoint(ctx context.Context, round basics.Ro return } -func (cw *catchpointWriter) WriteCatchpointStateUint64(ctx context.Context, stateName CatchpointState, setValue uint64) (err error) { +func (cw *catchpointWriter) WriteCatchpointStateUint64(ctx context.Context, stateName trackerdb.CatchpointState, setValue uint64) (err error) { err = db.Retry(func() (err error) { if setValue == 0 { return deleteCatchpointStateImpl(ctx, cw.e, stateName) @@ -351,7 +246,7 @@ func (cw *catchpointWriter) WriteCatchpointStateUint64(ctx context.Context, stat return err } -func (cw *catchpointWriter) WriteCatchpointStateString(ctx context.Context, stateName CatchpointState, setValue string) (err error) { +func (cw *catchpointWriter) WriteCatchpointStateString(ctx context.Context, stateName trackerdb.CatchpointState, setValue string) (err error) { err = db.Retry(func() (err error) { if setValue == "" { return deleteCatchpointStateImpl(ctx, cw.e, stateName) @@ -383,13 +278,13 @@ func (cw *catchpointWriter) DeleteUnfinishedCatchpoint(ctx context.Context, roun return db.Retry(f) } -func deleteCatchpointStateImpl(ctx context.Context, e db.Executable, stateName CatchpointState) error { +func deleteCatchpointStateImpl(ctx context.Context, e db.Executable, stateName trackerdb.CatchpointState) error { query := "DELETE FROM catchpointstate WHERE id=?" _, err := e.ExecContext(ctx, query, stateName) return err } -func (cw *catchpointWriter) InsertOrReplaceCatchpointFirstStageInfo(ctx context.Context, round basics.Round, info *CatchpointFirstStageInfo) error { +func (cw *catchpointWriter) InsertOrReplaceCatchpointFirstStageInfo(ctx context.Context, round basics.Round, info *trackerdb.CatchpointFirstStageInfo) error { infoSerialized := protocol.Encode(info) f := func() error { query := "INSERT OR REPLACE INTO catchpointfirststageinfo(round, info) VALUES(?, ?)" @@ -409,7 +304,7 @@ func (cw *catchpointWriter) DeleteOldCatchpointFirstStageInfo(ctx context.Contex } // WriteCatchpointStagingBalances inserts all the account balances in the provided array into the catchpoint balance staging table catchpointbalances. -func (cw *catchpointWriter) WriteCatchpointStagingBalances(ctx context.Context, bals []NormalizedAccountBalance) error { +func (cw *catchpointWriter) WriteCatchpointStagingBalances(ctx context.Context, bals []trackerdb.NormalizedAccountBalance) error { selectAcctStmt, err := cw.e.PrepareContext(ctx, "SELECT rowid FROM catchpointbalances WHERE address = ?") if err != nil { return err @@ -476,7 +371,7 @@ func (cw *catchpointWriter) WriteCatchpointStagingBalances(ctx context.Context, } // WriteCatchpointStagingHashes inserts all the account hashes in the provided array into the catchpoint pending hashes table catchpointpendinghashes. 
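// A minimal sketch of staging one account through the writer above (hypothetical
// values; addr, acctData and normBalance are assumed to be prepared by the
// catchpoint restore path):
func stageOneAccount(ctx context.Context, cw *catchpointWriter, addr basics.Address, acctData trackerdb.BaseAccountData, normBalance uint64) error {
	bal := trackerdb.NormalizedAccountBalance{
		Address:            addr,
		AccountData:        acctData,
		EncodedAccountData: protocol.Encode(&acctData),
		NormalizedBalance:  normBalance,
	}
	return cw.WriteCatchpointStagingBalances(ctx, []trackerdb.NormalizedAccountBalance{bal})
}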
-func (cw *catchpointWriter) WriteCatchpointStagingHashes(ctx context.Context, bals []NormalizedAccountBalance) error { +func (cw *catchpointWriter) WriteCatchpointStagingHashes(ctx context.Context, bals []trackerdb.NormalizedAccountBalance) error { insertStmt, err := cw.e.PrepareContext(ctx, "INSERT INTO catchpointpendinghashes(data) VALUES(?)") if err != nil { return err @@ -504,7 +399,7 @@ func (cw *catchpointWriter) WriteCatchpointStagingHashes(ctx context.Context, ba // WriteCatchpointStagingCreatable inserts all the creatables in the provided array into the catchpoint asset creator staging table catchpointassetcreators. // note that we cannot insert the resources here : in order to insert the resources, we need the rowid of the accountbase entry. This is being inserted by // writeCatchpointStagingBalances via a separate go-routine. -func (cw *catchpointWriter) WriteCatchpointStagingCreatable(ctx context.Context, bals []NormalizedAccountBalance) error { +func (cw *catchpointWriter) WriteCatchpointStagingCreatable(ctx context.Context, bals []trackerdb.NormalizedAccountBalance) error { var insertCreatorsStmt *sql.Stmt var err error insertCreatorsStmt, err = cw.e.PrepareContext(ctx, "INSERT INTO catchpointassetcreators(asset, creator, ctype) VALUES(?, ?, ?)") @@ -670,7 +565,7 @@ func (crw *catchpointReaderWriter) DeleteStoredCatchpoints(ctx context.Context, } for round, fileName := range fileNames { - err = RemoveSingleCatchpointFileFromDisk(dbDirectory, fileName) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(dbDirectory, fileName) if err != nil { return err } @@ -683,50 +578,3 @@ func (crw *catchpointReaderWriter) DeleteStoredCatchpoints(ctx context.Context, } return nil } - -// RemoveSingleCatchpointFileFromDisk removes a single catchpoint file from the disk. this function does not leave empty directories -func RemoveSingleCatchpointFileFromDisk(dbDirectory, fileToDelete string) (err error) { - absCatchpointFileName := filepath.Join(dbDirectory, fileToDelete) - err = os.Remove(absCatchpointFileName) - if err == nil || os.IsNotExist(err) { - // it's ok if the file doesn't exist. - err = nil - } else { - // we can't delete the file, abort - - return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err) - } - splitedDirName := strings.Split(fileToDelete, string(os.PathSeparator)) - - var subDirectoriesToScan []string - //build a list of all the subdirs - currentSubDir := "" - for _, element := range splitedDirName { - currentSubDir = filepath.Join(currentSubDir, element) - subDirectoriesToScan = append(subDirectoriesToScan, currentSubDir) - } - - // iterating over the list of directories. starting from the sub dirs and moving up. - // skipping the file itself. 
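// Worked example of the pruning walk being moved out of this file: deleting
// "12/34/999.catchpoint" builds ["12", "12/34", "12/34/999.catchpoint"], then the
// loop below scans indexes len-2..0 ("12/34", then "12"), removing each directory
// only when it is empty; the file component itself is never revisited.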
- for i := len(subDirectoriesToScan) - 2; i >= 0; i-- { - absSubdir := filepath.Join(dbDirectory, subDirectoriesToScan[i]) - if _, err := os.Stat(absSubdir); os.IsNotExist(err) { - continue - } - - isEmpty, err := isDirEmpty(absSubdir) - if err != nil { - return fmt.Errorf("unable to read old catchpoint directory '%s' : %v", subDirectoriesToScan[i], err) - } - if isEmpty { - err = os.Remove(absSubdir) - if err != nil { - if os.IsNotExist(err) { - continue - } - return fmt.Errorf("unable to delete old catchpoint directory '%s' : %v", subDirectoriesToScan[i], err) - } - } - } - - return nil -} diff --git a/ledger/store/catchpointPendingHashesIter.go b/ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go similarity index 99% rename from ledger/store/catchpointPendingHashesIter.go rename to ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go index 69b616789c..32d2614e1d 100644 --- a/ledger/store/catchpointPendingHashesIter.go +++ b/ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" diff --git a/ledger/store/catchpoint_test.go b/ledger/store/trackerdb/sqlitedriver/catchpoint_test.go similarity index 92% rename from ledger/store/catchpoint_test.go rename to ledger/store/trackerdb/sqlitedriver/catchpoint_test.go index ad69ecbaf6..e2c57c0f01 100644 --- a/ledger/store/catchpoint_test.go +++ b/ledger/store/trackerdb/sqlitedriver/catchpoint_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" @@ -24,6 +24,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" storetesting "github.com/algorand/go-algorand/ledger/store/testing" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) @@ -43,7 +44,7 @@ func TestCatchpointFirstStageInfoTable(t *testing.T) { crw := NewCatchpointSQLReaderWriter(dbs.Wdb.Handle) for _, round := range []basics.Round{4, 6, 8} { - info := CatchpointFirstStageInfo{ + info := trackerdb.CatchpointFirstStageInfo{ TotalAccounts: uint64(round) * 10, } err = crw.InsertOrReplaceCatchpointFirstStageInfo(ctx, round, &info) @@ -55,7 +56,7 @@ func TestCatchpointFirstStageInfoTable(t *testing.T) { require.NoError(t, err) require.True(t, exists) - infoExpected := CatchpointFirstStageInfo{ + infoExpected := trackerdb.CatchpointFirstStageInfo{ TotalAccounts: uint64(round) * 10, } require.Equal(t, infoExpected, info) @@ -101,7 +102,7 @@ func TestUnfinishedCatchpointsTable(t *testing.T) { ret, err := cts.SelectUnfinishedCatchpoints(context.Background()) require.NoError(t, err) - expected := []UnfinishedCatchpointRecord{ + expected := []trackerdb.UnfinishedCatchpointRecord{ { Round: 3, BlockHash: d3, @@ -118,7 +119,7 @@ func TestUnfinishedCatchpointsTable(t *testing.T) { ret, err = cts.SelectUnfinishedCatchpoints(context.Background()) require.NoError(t, err) - expected = []UnfinishedCatchpointRecord{ + expected = []trackerdb.UnfinishedCatchpointRecord{ { Round: 5, BlockHash: d5, diff --git a/ledger/store/encodedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go similarity index 94% rename from ledger/store/encodedAccountsIter.go rename to 
ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go index 60ce3a0ec6..f48a4f82c7 100644 --- a/ledger/store/encodedAccountsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" @@ -22,6 +22,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/encoded" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/msgp/msgp" ) @@ -67,9 +68,9 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, accountCount // gather up to accountCount encoded accounts. bals = make([]encoded.BalanceRecordV6, 0, accountCount) var encodedRecord encoded.BalanceRecordV6 - var baseAcct BaseAccountData + var baseAcct trackerdb.BaseAccountData var numAcct int - baseCb := func(addr basics.Address, rowid int64, accountData *BaseAccountData, encodedAccountData []byte) (err error) { + baseCb := func(addr basics.Address, rowid int64, accountData *trackerdb.BaseAccountData, encodedAccountData []byte) (err error) { encodedRecord = encoded.BalanceRecordV6{Address: addr, AccountData: encodedAccountData} baseAcct = *accountData numAcct++ @@ -79,7 +80,7 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, accountCount var totalResources int // emptyCount := 0 - resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error { + resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *trackerdb.ResourcesData, encodedResourceData []byte, lastResource bool) error { emptyBaseAcct := baseAcct.TotalAppParams == 0 && baseAcct.TotalAppLocalStates == 0 && baseAcct.TotalAssetParams == 0 && baseAcct.TotalAssets == 0 if !emptyBaseAcct && resData != nil { diff --git a/ledger/store/kvsIter.go b/ledger/store/trackerdb/sqlitedriver/kvsIter.go similarity index 98% rename from ledger/store/kvsIter.go rename to ledger/store/trackerdb/sqlitedriver/kvsIter.go index f831fa203a..9cd1c9a7a1 100644 --- a/ledger/store/kvsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/kvsIter.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" diff --git a/ledger/store/merkle_commiter.go b/ledger/store/trackerdb/sqlitedriver/merkle_commiter.go similarity index 91% rename from ledger/store/merkle_commiter.go rename to ledger/store/trackerdb/sqlitedriver/merkle_commiter.go index f908c24d02..41589345fe 100644 --- a/ledger/store/merkle_commiter.go +++ b/ledger/store/trackerdb/sqlitedriver/merkle_commiter.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import "database/sql" @@ -26,12 +26,6 @@ type merkleCommitter struct { selectStmt *sql.Stmt } -// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database. -type MerkleCommitter interface { - StorePage(page uint64, content []byte) error - LoadPage(page uint64) (content []byte, err error) -} - // MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading // merkletrie pages from a sqlite database. 
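// For illustration, the split shows up at call sites: the sqlite-backed committer
// stays in this driver package while the trie configuration now lives in trackerdb.
// A minimal sketch mirroring the pairing used in the schema tests (assumes an open *sql.Tx):
func buildTrie(tx *sql.Tx) (*merkletrie.Trie, error) {
	mc, err := MakeMerkleCommitter(tx, false)
	if err != nil {
		return nil, err
	}
	return merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig)
}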
func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err error) { diff --git a/ledger/store/orderedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go similarity index 91% rename from ledger/store/orderedAccountsIter.go rename to ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go index 017de0cafc..3175010c7e 100644 --- a/ledger/store/orderedAccountsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" @@ -24,6 +24,7 @@ import ( "math" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" ) @@ -71,7 +72,7 @@ const ( type pendingBaseRow struct { addr basics.Address rowid int64 - accountData *BaseAccountData + accountData *trackerdb.BaseAccountData encodedAccountData []byte } @@ -91,12 +92,6 @@ func MakeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter } } -// accountAddressHash is used by Next to return a single account address and the associated hash. -type accountAddressHash struct { - Addrid int64 - Digest []byte -} - // Next returns an array containing the account address and hash // the Next function works in multiple processing stages, where it first processes the current accounts and order them // followed by returning the ordered accounts. In the first phase, it would return empty accountAddressHash array @@ -105,7 +100,7 @@ type accountAddressHash struct { // the processedRecords would be zero. If err is sql.ErrNoRows it means that the iterator have completed it's work and no further // accounts exists. Otherwise, the caller is expected to keep calling "Next" to retrieve the next set of accounts // ( or let the Next function make some progress toward that goal ) -func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAddressHash, processedRecords int, err error) { +func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb.AccountAddressHash, processedRecords int, err error) { if iterator.step == oaiStepDeleteOldOrderingTable { // although we're going to delete this table anyway when completing the iterator execution, we'll try to // clean up any intermediate table. 
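// Per the contract described in the comment above, a minimal consumption loop for
// this iterator (hypothetical caller; process is a stand-in for real handling):
func drainOrderedAccounts(ctx context.Context, tx *sql.Tx, process func(addrid int64, digest []byte)) error {
	iter := MakeOrderedAccountsIter(tx, 1024)
	defer iter.Close(ctx)
	for {
		accts, _, err := iter.Next(ctx)
		if err == sql.ErrNoRows {
			return nil // iteration complete
		}
		if err != nil {
			return err
		}
		// Early calls may return no accounts while the ordering table is still being built.
		for _, a := range accts {
			process(a.Addrid, a.Digest)
		}
	}
}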
@@ -146,8 +141,8 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd } if iterator.step == oaiStepInsertAccountData { var lastAddrID int64 - baseCb := func(addr basics.Address, rowid int64, accountData *BaseAccountData, encodedAccountData []byte) (err error) { - hash := AccountHashBuilderV6(addr, accountData, encodedAccountData) + baseCb := func(addr basics.Address, rowid int64, accountData *trackerdb.BaseAccountData, encodedAccountData []byte) (err error) { + hash := trackerdb.AccountHashBuilderV6(addr, accountData, encodedAccountData) _, err = iterator.insertStmt.ExecContext(ctx, rowid, hash) if err != nil { return @@ -156,9 +151,9 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd return nil } - resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error { + resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *trackerdb.ResourcesData, encodedResourceData []byte, lastResource bool) error { if resData != nil { - hash, err := ResourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData) + hash, err := trackerdb.ResourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData) if err != nil { return err } @@ -226,7 +221,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd } if iterator.step == oaiStepIterateOverOrderedTable { - acct = make([]accountAddressHash, iterator.accountCount) + acct = make([]trackerdb.AccountAddressHash, iterator.accountCount) acctIdx := 0 for iterator.hashesRows.Next() { err = iterator.hashesRows.Scan(&(acct[acctIdx].Addrid), &(acct[acctIdx].Digest)) @@ -282,8 +277,8 @@ func (iterator *orderedAccountsIter) Close(ctx context.Context) (err error) { func processAllBaseAccountRecords( baseRows *sql.Rows, resRows *sql.Rows, - baseCb func(addr basics.Address, rowid int64, accountData *BaseAccountData, encodedAccountData []byte) error, - resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error, + baseCb func(addr basics.Address, rowid int64, accountData *trackerdb.BaseAccountData, encodedAccountData []byte) error, + resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *trackerdb.ResourcesData, encodedResourceData []byte, lastResource bool) error, pendingBase pendingBaseRow, pendingResource pendingResourceRow, accountCount int, resourceCount int, ) (int, pendingBaseRow, pendingResourceRow, error) { var addr basics.Address @@ -291,7 +286,7 @@ func processAllBaseAccountRecords( var err error count := 0 - var accountData BaseAccountData + var accountData trackerdb.BaseAccountData var addrbuf []byte var buf []byte var rowid int64 @@ -319,7 +314,7 @@ func processAllBaseAccountRecords( copy(addr[:], addrbuf) - accountData = BaseAccountData{} + accountData = trackerdb.BaseAccountData{} err = protocol.Decode(buf, &accountData) if err != nil { return 0, pendingBaseRow{}, pendingResourceRow{}, err @@ -363,8 +358,8 @@ func processAllBaseAccountRecords( func processAllResources( resRows *sql.Rows, - addr basics.Address, accountData *BaseAccountData, acctRowid int64, pr pendingResourceRow, resourceCount int, - callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error, + addr basics.Address, accountData *trackerdb.BaseAccountData, acctRowid int64, pr 
pendingResourceRow, resourceCount int, + callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *trackerdb.ResourcesData, encodedResourceData []byte, lastResource bool) error, ) (pendingResourceRow, int, error) { var err error count := 0 @@ -374,7 +369,7 @@ func processAllResources( var buf []byte var addrid int64 var aidx basics.CreatableIndex - var resData ResourcesData + var resData trackerdb.ResourcesData for { if pr.addrid != 0 { // some accounts may not have resources, consider the following case: @@ -413,7 +408,7 @@ func processAllResources( return pendingResourceRow{addrid, aidx, buf}, count, err } } - resData = ResourcesData{} + resData = trackerdb.ResourcesData{} err = protocol.Decode(buf, &resData) if err != nil { return pendingResourceRow{}, count, err diff --git a/ledger/store/schema.go b/ledger/store/trackerdb/sqlitedriver/schema.go similarity index 97% rename from ledger/store/schema.go rename to ledger/store/trackerdb/sqlitedriver/schema.go index 339c660a11..5ca781e009 100644 --- a/ledger/store/schema.go +++ b/ledger/store/trackerdb/sqlitedriver/schema.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "bytes" @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/ledger/store/blockdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -165,11 +166,6 @@ var accountsResetExprs = []string{ `DROP TABLE IF EXISTS unfinishedcatchpoints`, } -// AccountDBVersion is the database version that this binary would know how to support and how to upgrade to. -// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX -// and their descriptions. -var AccountDBVersion = int32(9) - // accountsInit fills the database using tx with initAccounts if the // database has not been initialized yet. 
// @@ -459,7 +455,7 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro if err != nil { return err } - var newAccountData BaseAccountData + var newAccountData trackerdb.BaseAccountData newAccountData.SetAccountData(&accountData) encodedAcctData = protocol.Encode(&newAccountData) @@ -483,7 +479,7 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro if err != nil { return err } - insertResourceCallback := func(ctx context.Context, rowID int64, cidx basics.CreatableIndex, rd *ResourcesData) error { + insertResourceCallback := func(ctx context.Context, rowID int64, cidx basics.CreatableIndex, rd *trackerdb.ResourcesData) error { var err error if rd != nil { encodedData := protocol.Encode(rd) @@ -491,7 +487,7 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro } return err } - err = AccountDataResources(ctx, &accountData, rowID, insertResourceCallback) + err = trackerdb.AccountDataResources(ctx, &accountData, rowID, insertResourceCallback) if err != nil { return err } @@ -564,7 +560,7 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc return fmt.Errorf("block for round %d ( %d - %d ) cannot be retrieved : %w", rnd, firstRound, dbRound, err) } - tail, err := TxTailRoundFromBlock(blk) + tail, err := trackerdb.TxTailRoundFromBlock(blk) if err != nil { return err } @@ -672,9 +668,9 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre } type acctState struct { - old BaseAccountData + old trackerdb.BaseAccountData oldEnc []byte - new BaseAccountData + new trackerdb.BaseAccountData newEnc []byte } acctRehash := make(map[basics.Address]acctState) @@ -693,7 +689,7 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr)) return err } - var ba BaseAccountData + var ba trackerdb.BaseAccountData err = protocol.Decode(encodedAcctData, &ba) if err != nil { return err @@ -705,7 +701,7 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre copy(addr[:], addrbuf) return fmt.Errorf("non valid norm balance for online account %s", addr.String()) } - var baseOnlineAD BaseOnlineAccountData + var baseOnlineAD trackerdb.BaseOnlineAccountData baseOnlineAD.BaseVotingData = ba.BaseVotingData baseOnlineAD.MicroAlgos = ba.MicroAlgos baseOnlineAD.RewardsBase = ba.RewardsBase @@ -760,12 +756,12 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre return nil } - trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) if err != nil { return fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err) } for addr, state := range acctRehash { - deleteHash := AccountHashBuilderV6(addr, &state.old, state.oldEnc) + deleteHash := trackerdb.AccountHashBuilderV6(addr, &state.old, state.oldEnc) deleted, err := trie.Delete(deleteHash) if err != nil { return fmt.Errorf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err) @@ -774,7 +770,7 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre log.Warnf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr) } - addHash := AccountHashBuilderV6(addr, &state.new, state.newEnc) + addHash := 
trackerdb.AccountHashBuilderV6(addr, &state.new, state.newEnc) added, err := trie.Add(addHash) if err != nil { return fmt.Errorf("performOnlineAccountsTableMigration attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err) diff --git a/ledger/store/schema_test.go b/ledger/store/trackerdb/sqlitedriver/schema_test.go similarity index 96% rename from ledger/store/schema_test.go rename to ledger/store/trackerdb/sqlitedriver/schema_test.go index cab8c9b71e..581082447b 100644 --- a/ledger/store/schema_test.go +++ b/ledger/store/trackerdb/sqlitedriver/schema_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" storetesting "github.com/algorand/go-algorand/ledger/store/testing" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -148,7 +149,7 @@ func TestAccountDBTxTailLoad(t *testing.T) { roundData := make([][]byte, 1500) const retainSize = 1001 for i := startRound; i <= endRound; i++ { - data := TxTailRound{Hdr: bookkeeping.BlockHeader{TimeStamp: int64(i)}} + data := trackerdb.TxTailRound{Hdr: bookkeeping.BlockHeader{TimeStamp: int64(i)}} roundData[i-1] = protocol.Encode(&data) } forgetBefore := (endRound + 1).SubSaturate(retainSize) @@ -223,7 +224,7 @@ func TestRemoveOfflineStateProofID(t *testing.T) { mc, err := MakeMerkleCommitter(tx, false) require.NoError(t, err) - trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) require.NoError(t, err) var addr basics.Address @@ -233,13 +234,13 @@ func TestRemoveOfflineStateProofID(t *testing.T) { err = rows.Scan(&addrbuf, &encodedAcctData) require.NoError(t, err) copy(addr[:], addrbuf) - var ba BaseAccountData + var ba trackerdb.BaseAccountData err = protocol.Decode(encodedAcctData, &ba) require.NoError(t, err) if expected && ba.Status != basics.Online { require.Equal(t, merklesignature.Commitment{}, ba.StateProofID) } - addHash := AccountHashBuilderV6(addr, &ba, encodedAcctData) + addHash := trackerdb.AccountHashBuilderV6(addr, &ba, encodedAcctData) added, err := trie.Add(addHash) require.NoError(t, err) require.True(t, added) @@ -264,7 +265,7 @@ func TestRemoveOfflineStateProofID(t *testing.T) { // get the new hash and ensure it does not match to the old one (data migrated) mc, err := MakeMerkleCommitter(tx, false) require.NoError(t, err) - trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) require.NoError(t, err) newRoot, err := trie.RootHash() @@ -283,7 +284,7 @@ func TestRemoveOfflineStateProofID(t *testing.T) { var encodedAcctData []byte err = rows.Scan(&addrid, &encodedAcctData) require.NoError(t, err) - var ba BaseAccountData + var ba trackerdb.BaseAccountData err = protocol.Decode(encodedAcctData, &ba) require.NoError(t, err) if ba.Status != basics.Online { diff --git a/ledger/store/sql.go b/ledger/store/trackerdb/sqlitedriver/sql.go similarity index 95% rename from ledger/store/sql.go rename to ledger/store/trackerdb/sqlitedriver/sql.go index 42c1ebf8fe..52e11e6a3c 100644 --- a/ledger/store/sql.go +++ 
b/ledger/store/trackerdb/sqlitedriver/sql.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "database/sql" @@ -22,6 +22,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) @@ -251,7 +252,7 @@ func (qs *accountsDbQueries) ListCreatables(maxIdx basics.CreatableIndex, maxRes // After check source code, a []byte slice destination is definitely cloned. // LookupKeyValue returns the application boxed value associated with the key. -func (qs *accountsDbQueries) LookupKeyValue(key string) (pv PersistedKVData, err error) { +func (qs *accountsDbQueries) LookupKeyValue(key string) (pv trackerdb.PersistedKVData, err error) { err = db.Retry(func() error { var rawkey []byte var val []byte @@ -370,7 +371,7 @@ func (qs *accountsDbQueries) LookupCreator(cidx basics.CreatableIndex, ctype bas } // LookupResources returns the requested resource. -func (qs *accountsDbQueries) LookupResources(addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (data PersistedResourcesData, err error) { +func (qs *accountsDbQueries) LookupResources(addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (data trackerdb.PersistedResourcesData, err error) { err = db.Retry(func() error { var buf []byte var rowid sql.NullInt64 @@ -391,7 +392,7 @@ func (qs *accountsDbQueries) LookupResources(addr basics.Address, aidx basics.Cr } return nil } - data.Data = MakeResourcesData(0) + data.Data = trackerdb.MakeResourcesData(0) // we don't have that account, just return the database round. return nil } @@ -407,7 +408,7 @@ func (qs *accountsDbQueries) LookupResources(addr basics.Address, aidx basics.Cr } // LookupAllResources returns all resources associated with the given address. -func (qs *accountsDbQueries) LookupAllResources(addr basics.Address) (data []PersistedResourcesData, rnd basics.Round, err error) { +func (qs *accountsDbQueries) LookupAllResources(addr basics.Address) (data []trackerdb.PersistedResourcesData, rnd basics.Round, err error) { err = db.Retry(func() error { // Query for all resources rows, err := qs.lookupAllResourcesStmt.Query(addr[:]) @@ -434,12 +435,12 @@ func (qs *accountsDbQueries) LookupAllResources(addr basics.Address) (data []Per rnd = dbRound break } - var resData ResourcesData + var resData trackerdb.ResourcesData err = protocol.Decode(buf, &resData) if err != nil { return err } - data = append(data, PersistedResourcesData{ + data = append(data, trackerdb.PersistedResourcesData{ Addrid: addrid.Int64, Aidx: basics.CreatableIndex(aidx.Int64), Data: resData, @@ -455,7 +456,7 @@ func (qs *accountsDbQueries) LookupAllResources(addr basics.Address) (data []Per // LookupAccount looks up for a the account data given it's address. It returns the persistedAccountData, which includes the current database round and the matching // account data, if such was found. If no matching account data could be found for the given address, an empty account data would // be retrieved. 
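// A minimal sketch of the renamed lookup in use (hypothetical; the exact fields of
// trackerdb.PersistedAccountData are defined in data.go rather than in this hunk):
func fetchAccount(qs *accountsDbQueries, addr basics.Address) (trackerdb.PersistedAccountData, error) {
	// Per the comment above, a missing address still yields the current db round,
	// paired with empty account data.
	return qs.LookupAccount(addr)
}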
-func (qs *accountsDbQueries) LookupAccount(addr basics.Address) (data PersistedAccountData, err error) { +func (qs *accountsDbQueries) LookupAccount(addr basics.Address) (data trackerdb.PersistedAccountData, err error) { err = db.Retry(func() error { var buf []byte var rowid sql.NullInt64 @@ -483,7 +484,7 @@ func (qs *accountsDbQueries) LookupAccount(addr basics.Address) (data PersistedA } // LookupOnline returns the online account data for the given address. -func (qs *onlineAccountsDbQueries) LookupOnline(addr basics.Address, rnd basics.Round) (data PersistedOnlineAccountData, err error) { +func (qs *onlineAccountsDbQueries) LookupOnline(addr basics.Address, rnd basics.Round) (data trackerdb.PersistedOnlineAccountData, err error) { err = db.Retry(func() error { var buf []byte var rowid sql.NullInt64 @@ -530,7 +531,7 @@ func (qs *onlineAccountsDbQueries) LookupOnlineTotalsHistory(round basics.Round) return basics.MicroAlgos{Raw: data.OnlineSupply}, err } -func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (result []PersistedOnlineAccountData, rnd basics.Round, err error) { +func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (result []trackerdb.PersistedOnlineAccountData, rnd basics.Round, err error) { err = db.Retry(func() error { rows, err := qs.lookupOnlineHistoryStmt.Query(addr[:]) if err != nil { @@ -540,7 +541,7 @@ func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (res for rows.Next() { var buf []byte - data := PersistedOnlineAccountData{} + data := trackerdb.PersistedOnlineAccountData{} err := rows.Scan(&data.Rowid, &data.UpdRound, &rnd, &buf) if err != nil { return err @@ -613,7 +614,7 @@ func (w *onlineAccountsSQLWriter) Close() { } } -func (w accountsSQLWriter) InsertAccount(addr basics.Address, normBalance uint64, data BaseAccountData) (rowid int64, err error) { +func (w accountsSQLWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (rowid int64, err error) { result, err := w.insertStmt.Exec(addr[:], normBalance, protocol.Encode(&data)) if err != nil { return @@ -631,7 +632,7 @@ func (w accountsSQLWriter) DeleteAccount(rowid int64) (rowsAffected int64, err e return } -func (w accountsSQLWriter) UpdateAccount(rowid int64, normBalance uint64, data BaseAccountData) (rowsAffected int64, err error) { +func (w accountsSQLWriter) UpdateAccount(rowid int64, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) { result, err := w.updateStmt.Exec(normBalance, protocol.Encode(&data), rowid) if err != nil { return @@ -640,7 +641,7 @@ func (w accountsSQLWriter) UpdateAccount(rowid int64, normBalance uint64, data B return } -func (w accountsSQLWriter) InsertResource(addrid int64, aidx basics.CreatableIndex, data ResourcesData) (rowid int64, err error) { +func (w accountsSQLWriter) InsertResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowid int64, err error) { result, err := w.insertResourceStmt.Exec(addrid, aidx, protocol.Encode(&data)) if err != nil { return @@ -658,7 +659,7 @@ func (w accountsSQLWriter) DeleteResource(addrid int64, aidx basics.CreatableInd return } -func (w accountsSQLWriter) UpdateResource(addrid int64, aidx basics.CreatableIndex, data ResourcesData) (rowsAffected int64, err error) { +func (w accountsSQLWriter) UpdateResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) { result, err := 
w.updateResourceStmt.Exec(protocol.Encode(&data), addrid, aidx) if err != nil { return @@ -710,7 +711,7 @@ func (w accountsSQLWriter) DeleteCreatable(cidx basics.CreatableIndex, ctype bas return } -func (w onlineAccountsSQLWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) { +func (w onlineAccountsSQLWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) { result, err := w.insertStmt.Exec(addr[:], normBalance, protocol.Encode(&data), updRound, voteLastValid) if err != nil { return diff --git a/ledger/store/sql_test.go b/ledger/store/trackerdb/sqlitedriver/sql_test.go similarity index 99% rename from ledger/store/sql_test.go rename to ledger/store/trackerdb/sqlitedriver/sql_test.go index 3bacf343ac..3fe71be4f9 100644 --- a/ledger/store/sql_test.go +++ b/ledger/store/trackerdb/sqlitedriver/sql_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "testing" diff --git a/ledger/store/store.go b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go similarity index 57% rename from ledger/store/store.go rename to ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go index 5b1e580df5..cf222c31d0 100644 --- a/ledger/store/store.go +++ b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -35,91 +36,18 @@ type trackerSQLStore struct { pair db.Pair } -type batchFn func(ctx context.Context, tx BatchScope) error - -// BatchScope is the write scope to the store. -type BatchScope interface { - MakeCatchpointWriter() (CatchpointWriter, error) - MakeAccountsWriter() (AccountsWriterExt, error) - MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) - - RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) - ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) - - AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) - AccountsUpdateSchemaTest(ctx context.Context) (err error) -} type sqlBatchScope struct { tx *sql.Tx } -type snapshotFn func(ctx context.Context, tx SnapshotScope) error - -// SnapshotScope is the read scope to the store. -type SnapshotScope interface { - MakeAccountsReader() (AccountsReaderExt, error) - MakeCatchpointReader() (CatchpointReader, error) - - MakeCatchpointPendingHashesIterator(hashCount int) *catchpointPendingHashesIterator -} type sqlSnapshotScope struct { tx *sql.Tx } -type transactionFn func(ctx context.Context, tx TransactionScope) error - -// TransactionScope is the read/write scope to the store. 
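// Illustrative sketch, not part of this patch: opening the sqlite-backed
// store under its new package name. It assumes trackerSQLStore satisfies the
// trackerdb.TrackerStore interface introduced later in this series; the file
// name is hypothetical and imports are elided.
func openExample() (trackerdb.TrackerStore, error) {
	// false requests an on-disk database rather than an in-memory one.
	store, err := sqlitedriver.OpenTrackerSQLStore("tracker.sqlite", false)
	if err != nil {
		return nil, err
	}
	return store, nil
}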
-type TransactionScope interface { - MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) - MakeAccountsReaderWriter() (AccountsReaderWriter, error) - MakeAccountsOptimizedReader() (AccountsReader, error) - MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) - MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error) - MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error) - - MakeMerkleCommitter(staging bool) (MerkleCommitter, error) - - MakeOrderedAccountsIter(accountCount int) *orderedAccountsIter - MakeKVsIter(ctx context.Context) (*kvsIter, error) - MakeEncodedAccoutsBatchIter() *encodedAccountsBatchIter - - RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) - ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) - - AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) - AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) -} type sqlTransactionScope struct { tx *sql.Tx } -// TrackerStore is the interface for the tracker db. -type TrackerStore interface { - SetLogger(log logging.Logger) - SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error) - IsSharedCacheConnection() bool - - Batch(fn batchFn) (err error) - BatchContext(ctx context.Context, fn batchFn) (err error) - - Snapshot(fn snapshotFn) (err error) - SnapshotContext(ctx context.Context, fn snapshotFn) (err error) - - Transaction(fn transactionFn) (err error) - TransactionContext(ctx context.Context, fn transactionFn) (err error) - - MakeAccountsReader() (AccountsReader, error) - MakeOnlineAccountsReader() (OnlineAccountsReader, error) - - MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) - - Vacuum(ctx context.Context) (stats db.VacuumStats, err error) - Close() - CleanupTest(dbName string, inMemory bool) - - ResetToV6Test(ctx context.Context) error -} - // OpenTrackerSQLStore opens the sqlite database store func OpenTrackerSQLStore(dbFilename string, dbMem bool) (store *trackerSQLStore, err error) { db, err := db.OpenPair(dbFilename, dbMem) @@ -149,45 +77,45 @@ func (s *trackerSQLStore) IsSharedCacheConnection() bool { return s.pair.Wdb.IsSharedCacheConnection() } -func (s *trackerSQLStore) Batch(fn batchFn) (err error) { +func (s *trackerSQLStore) Batch(fn trackerdb.BatchFn) (err error) { return s.BatchContext(context.Background(), fn) } -func (s *trackerSQLStore) BatchContext(ctx context.Context, fn batchFn) (err error) { +func (s *trackerSQLStore) BatchContext(ctx context.Context, fn trackerdb.BatchFn) (err error) { return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { return fn(ctx, sqlBatchScope{tx}) }) } -func (s *trackerSQLStore) Snapshot(fn snapshotFn) (err error) { +func (s *trackerSQLStore) Snapshot(fn trackerdb.SnapshotFn) (err error) { return s.SnapshotContext(context.Background(), fn) } -func (s *trackerSQLStore) SnapshotContext(ctx context.Context, fn snapshotFn) (err error) { +func (s *trackerSQLStore) SnapshotContext(ctx context.Context, fn trackerdb.SnapshotFn) (err error) { return s.pair.Rdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { return fn(ctx, sqlSnapshotScope{tx}) }) } -func (s 
*trackerSQLStore) Transaction(fn transactionFn) (err error) { +func (s *trackerSQLStore) Transaction(fn trackerdb.TransactionFn) (err error) { return s.TransactionContext(context.Background(), fn) } -func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn transactionFn) (err error) { +func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn trackerdb.TransactionFn) (err error) { return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { return fn(ctx, sqlTransactionScope{tx}) }) } -func (s *trackerSQLStore) MakeAccountsReader() (AccountsReader, error) { +func (s *trackerSQLStore) MakeAccountsOptimizedReader() (trackerdb.AccountsReader, error) { return AccountsInitDbQueries(s.pair.Rdb.Handle) } -func (s *trackerSQLStore) MakeOnlineAccountsReader() (OnlineAccountsReader, error) { +func (s *trackerSQLStore) MakeOnlineAccountsOptimizedReader() (trackerdb.OnlineAccountsReader, error) { return OnlineAccountsInitDbQueries(s.pair.Rdb.Handle) } -func (s *trackerSQLStore) MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) { +func (s *trackerSQLStore) MakeCatchpointReaderWriter() (trackerdb.CatchpointReaderWriter, error) { w := NewCatchpointSQLReaderWriter(s.pair.Wdb.Handle) return w, nil } @@ -229,47 +157,47 @@ func (s *trackerSQLStore) Close() { s.pair.Close() } -func (txs sqlTransactionScope) MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) { +func (txs sqlTransactionScope) MakeCatchpointReaderWriter() (trackerdb.CatchpointReaderWriter, error) { return NewCatchpointSQLReaderWriter(txs.tx), nil } -func (txs sqlTransactionScope) MakeAccountsReaderWriter() (AccountsReaderWriter, error) { +func (txs sqlTransactionScope) MakeAccountsReaderWriter() (trackerdb.AccountsReaderWriter, error) { return NewAccountsSQLReaderWriter(txs.tx), nil } -func (txs sqlTransactionScope) MakeAccountsOptimizedReader() (AccountsReader, error) { +func (txs sqlTransactionScope) MakeAccountsOptimizedReader() (trackerdb.AccountsReader, error) { return AccountsInitDbQueries(txs.tx) } -func (txs sqlTransactionScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) { +func (txs sqlTransactionScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (trackerdb.AccountsWriter, error) { return MakeAccountsSQLWriter(txs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables) } -func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error) { +func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w trackerdb.OnlineAccountsWriter, err error) { return MakeOnlineAccountsSQLWriter(txs.tx, hasAccounts) } -func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedReader() (r OnlineAccountsReader, err error) { +func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedReader() (r trackerdb.OnlineAccountsReader, err error) { return OnlineAccountsInitDbQueries(txs.tx) } -func (txs sqlTransactionScope) MakeMerkleCommitter(staging bool) (MerkleCommitter, error) { +func (txs sqlTransactionScope) MakeMerkleCommitter(staging bool) (trackerdb.MerkleCommitter, error) { return MakeMerkleCommitter(txs.tx, staging) } -func (txs sqlTransactionScope) MakeOrderedAccountsIter(accountCount int) *orderedAccountsIter { +func (txs sqlTransactionScope) MakeOrderedAccountsIter(accountCount int) trackerdb.OrderedAccountsIter { return MakeOrderedAccountsIter(txs.tx, accountCount) } -func (txs sqlTransactionScope) 
MakeKVsIter(ctx context.Context) (*kvsIter, error) { +func (txs sqlTransactionScope) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { return MakeKVsIter(ctx, txs.tx) } -func (txs sqlTransactionScope) MakeEncodedAccoutsBatchIter() *encodedAccountsBatchIter { +func (txs sqlTransactionScope) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { return MakeEncodedAccoutsBatchIter(txs.tx) } -func (txs sqlTransactionScope) RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) { +func (txs sqlTransactionScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) { return RunMigrations(ctx, txs.tx, params, log, targetVersion) } @@ -285,19 +213,19 @@ func (txs sqlTransactionScope) AccountsInitLightTest(tb testing.TB, initAccounts return AccountsInitLightTest(tb, txs.tx, initAccounts, proto) } -func (bs sqlBatchScope) MakeCatchpointWriter() (CatchpointWriter, error) { +func (bs sqlBatchScope) MakeCatchpointWriter() (trackerdb.CatchpointWriter, error) { return NewCatchpointSQLReaderWriter(bs.tx), nil } -func (bs sqlBatchScope) MakeAccountsWriter() (AccountsWriterExt, error) { +func (bs sqlBatchScope) MakeAccountsWriter() (trackerdb.AccountsWriterExt, error) { return NewAccountsSQLReaderWriter(bs.tx), nil } -func (bs sqlBatchScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) { +func (bs sqlBatchScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (trackerdb.AccountsWriter, error) { return MakeAccountsSQLWriter(bs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables) } -func (bs sqlBatchScope) RunMigrations(ctx context.Context, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) { +func (bs sqlBatchScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) { return RunMigrations(ctx, bs.tx, params, log, targetVersion) } @@ -313,14 +241,14 @@ func (bs sqlBatchScope) AccountsUpdateSchemaTest(ctx context.Context) (err error return AccountsUpdateSchemaTest(ctx, bs.tx) } -func (ss sqlSnapshotScope) MakeAccountsReader() (AccountsReaderExt, error) { +func (ss sqlSnapshotScope) MakeAccountsReader() (trackerdb.AccountsReaderExt, error) { return NewAccountsSQLReaderWriter(ss.tx), nil } -func (ss sqlSnapshotScope) MakeCatchpointReader() (CatchpointReader, error) { +func (ss sqlSnapshotScope) MakeCatchpointReader() (trackerdb.CatchpointReader, error) { return NewCatchpointSQLReaderWriter(ss.tx), nil } -func (ss sqlSnapshotScope) MakeCatchpointPendingHashesIterator(hashCount int) *catchpointPendingHashesIterator { +func (ss sqlSnapshotScope) MakeCatchpointPendingHashesIterator(hashCount int) trackerdb.CatchpointPendingHashesIter { return MakeCatchpointPendingHashesIterator(hashCount, ss.tx) } diff --git a/ledger/store/testing.go b/ledger/store/trackerdb/sqlitedriver/testing.go similarity index 94% rename from ledger/store/testing.go rename to ledger/store/trackerdb/sqlitedriver/testing.go index 507bb48c35..a1965fac7d 100644 --- a/ledger/store/testing.go +++ b/ledger/store/trackerdb/sqlitedriver/testing.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . 
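// Illustrative sketch, not part of this patch: driving schema migrations
// through the batch scope shown above. The empty trackerdb.Params literal is
// a placeholder; real callers populate it the way trackerDBInitialize does
// later in this series. Imports are elided.
func migrateExample(store trackerdb.TrackerStore, log logging.Logger) error {
	return store.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error {
		// Upgrade the on-disk schema step by step up to the supported version.
		_, err := tx.RunMigrations(ctx, trackerdb.Params{}, log, trackerdb.AccountDBVersion)
		return err
	})
}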
-package store +package sqlitedriver import ( "context" @@ -26,6 +26,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -33,7 +34,7 @@ import ( ) // DbOpenTrackerTest opens a sqlite db file for testing purposes. -func DbOpenTrackerTest(t testing.TB, inMemory bool) (TrackerStore, string) { +func DbOpenTrackerTest(t testing.TB, inMemory bool) (trackerdb.TrackerStore, string) { fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64()) dbs, err := db.OpenPair(fn, inMemory) @@ -43,7 +44,7 @@ func DbOpenTrackerTest(t testing.TB, inMemory bool) (TrackerStore, string) { } // SetDbTrackerTestLogging sets a testing logger on a database. -func SetDbTrackerTestLogging(t testing.TB, dbs TrackerStore) { +func SetDbTrackerTestLogging(t testing.TB, dbs trackerdb.TrackerStore) { dblogger := logging.TestingLog(t) dbs.SetLogger(dblogger) } diff --git a/ledger/store/trackerdbV2.go b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go similarity index 84% rename from ledger/store/trackerdbV2.go rename to ledger/store/trackerdb/sqlitedriver/trackerdbV2.go index 19a8fbcaa9..358d33d2d9 100644 --- a/ledger/store/trackerdbV2.go +++ b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go @@ -14,41 +14,28 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package store +package sqlitedriver import ( "context" "database/sql" "encoding/hex" "fmt" - "io" "os" "path/filepath" - "strconv" "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merkletrie" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" - "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) -// TrackerDBParams contains parameters for initializing trackerDB -type TrackerDBParams struct { - InitAccounts map[basics.Address]basics.AccountData - InitProto protocol.ConsensusVersion - GenesisHash crypto.Digest - FromCatchpoint bool - CatchpointEnabled bool - DbPathPrefix string - BlockDb db.Pair -} - type trackerDBSchemaInitializer struct { - TrackerDBParams + trackerdb.Params // schemaVersion contains current db version schemaVersion int32 @@ -60,26 +47,20 @@ type trackerDBSchemaInitializer struct { log logging.Logger } -// TrackerDBInitParams params used during db init -type TrackerDBInitParams struct { - SchemaVersion int32 - VacuumOnStartup bool -} - // RunMigrations initializes the accounts DB if needed and return current account round. // as part of the initialization, it tests the current database schema version, and perform upgrade // procedures to bring it up to the database schema supported by the binary. -func RunMigrations(ctx context.Context, tx *sql.Tx, params TrackerDBParams, log logging.Logger, targetVersion int32) (mgr TrackerDBInitParams, err error) { +func RunMigrations(ctx context.Context, tx *sql.Tx, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) { // check current database version. 
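	// Note (editor's assumption): db.GetUserVersion reads sqlite's
	// "PRAGMA user_version", so the schema version travels with the file
	// itself; each upgradeDatabaseSchemaN step below then advances it one
	// version at a time via setVersion until targetVersion is reached.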
dbVersion, err := db.GetUserVersion(ctx, tx) if err != nil { - return TrackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to read database schema version : %v", err) + return trackerdb.InitParams{}, fmt.Errorf("trackerDBInitialize unable to read database schema version : %v", err) } tu := trackerDBSchemaInitializer{ - TrackerDBParams: params, - schemaVersion: dbVersion, - log: log, + Params: params, + schemaVersion: dbVersion, + log: log, } // if database version is greater than supported by current binary, write a warning. This would keep the existing @@ -151,13 +132,13 @@ func RunMigrations(ctx context.Context, tx *sql.Tx, params TrackerDBParams, log return } default: - return TrackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion) + return trackerdb.InitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion) } } tu.log.Infof("trackerDBInitialize database schema upgrade complete") } - return TrackerDBInitParams{tu.schemaVersion, tu.vacuumOnStartup}, nil + return trackerdb.InitParams{SchemaVersion: tu.schemaVersion, VacuumOnStartup: tu.vacuumOnStartup}, nil } func (tu *trackerDBSchemaInitializer) setVersion(ctx context.Context, tx *sql.Tx, version int32) (err error) { @@ -246,14 +227,14 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context tu.log.Infof("upgradeDatabaseSchema1 preparing queries") tu.log.Infof("upgradeDatabaseSchema1 resetting prior catchpoints") // delete the last catchpoint label if we have any. - err = crw.WriteCatchpointStateString(ctx, CatchpointStateLastCatchpoint, "") + err = crw.WriteCatchpointStateString(ctx, trackerdb.CatchpointStateLastCatchpoint, "") if err != nil { return fmt.Errorf("upgradeDatabaseSchema1 unable to clear prior catchpoint : %v", err) } tu.log.Infof("upgradeDatabaseSchema1 deleting stored catchpoints") // delete catchpoints. 
- err = crw.DeleteStoredCatchpoints(ctx, tu.TrackerDBParams.DbPathPrefix) + err = crw.DeleteStoredCatchpoints(ctx, tu.Params.DbPathPrefix) if err != nil { return fmt.Errorf("upgradeDatabaseSchema1 unable to delete stored catchpoints : %v", err) } @@ -314,7 +295,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context tu.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err) goto done } - trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig) + trie, err := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig) if err != nil { tu.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err) goto done @@ -322,7 +303,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context var totalHashesDeleted int for _, addr := range addresses { - hash := AccountHashBuilder(addr, basics.AccountData{}, []byte{0x80}) + hash := trackerdb.AccountHashBuilder(addr, basics.AccountData{}, []byte{0x80}) deleted, err := trie.Delete(hash) if err != nil { tu.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err) @@ -358,7 +339,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context return fmt.Errorf("upgradeDatabaseSchema5 unable to create resources table : %v", err) } - err = removeEmptyDirsOnSchemaUpgrade(tu.TrackerDBParams.DbPathPrefix) + err = removeEmptyDirsOnSchemaUpgrade(tu.Params.DbPathPrefix) if err != nil { return fmt.Errorf("upgradeDatabaseSchema5 unable to clear empty catchpoint directories : %v", err) } @@ -391,7 +372,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context func (tu *trackerDBSchemaInitializer) deleteUnfinishedCatchpoint(ctx context.Context, tx *sql.Tx) error { cts := NewCatchpointSQLReaderWriter(tx) // Delete an unfinished catchpoint if there is one. - round, err := cts.ReadCatchpointStateUint64(ctx, catchpointStateWritingCatchpoint) + round, err := cts.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingCatchpoint) if err != nil { return err } @@ -400,26 +381,14 @@ func (tu *trackerDBSchemaInitializer) deleteUnfinishedCatchpoint(ctx context.Con } relCatchpointFilePath := filepath.Join( - CatchpointDirName, - MakeCatchpointFilePath(basics.Round(round))) - err = RemoveSingleCatchpointFileFromDisk(tu.DbPathPrefix, relCatchpointFilePath) + trackerdb.CatchpointDirName, + trackerdb.MakeCatchpointFilePath(basics.Round(round))) + err = trackerdb.RemoveSingleCatchpointFileFromDisk(tu.DbPathPrefix, relCatchpointFilePath) if err != nil { return err } - return cts.WriteCatchpointStateUint64(ctx, catchpointStateWritingCatchpoint, 0) -} - -// MakeCatchpointFilePath builds the path of a catchpoint file. -func MakeCatchpointFilePath(round basics.Round) string { - irnd := int64(round) / 256 - outStr := "" - for irnd > 0 { - outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256)) - irnd = irnd / 256 - } - outStr = filepath.Join(outStr, strconv.FormatInt(int64(round), 10)+".catchpoint") - return outStr + return cts.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingCatchpoint, 0) } // upgradeDatabaseSchema6 upgrades the database schema from version 6 to version 7, @@ -510,54 +479,13 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema8(ctx context.Context return tu.setVersion(ctx, tx, 9) } -// Review: this is an odd method to have here - -// isDirEmpty returns if a given directory is empty or not. 
-func isDirEmpty(path string) (bool, error) { - dir, err := os.Open(path) - if err != nil { - return false, err - } - defer dir.Close() - _, err = dir.Readdirnames(1) - if err != io.EOF { - return false, err - } - return true, nil -} - -// GetEmptyDirs returns a slice of paths for empty directories which are located in PathToScan arg -func GetEmptyDirs(PathToScan string) ([]string, error) { - var emptyDir []string - err := filepath.Walk(PathToScan, func(path string, f os.FileInfo, errIn error) error { - if errIn != nil { - return errIn - } - if !f.IsDir() { - return nil - } - isEmpty, err := isDirEmpty(path) - if err != nil { - if os.IsNotExist(err) { - return filepath.SkipDir - } - return err - } - if isEmpty { - emptyDir = append(emptyDir, path) - } - return nil - }) - return emptyDir, err -} - func removeEmptyDirsOnSchemaUpgrade(dbDirectory string) (err error) { - catchpointRootDir := filepath.Join(dbDirectory, CatchpointDirName) + catchpointRootDir := filepath.Join(dbDirectory, trackerdb.CatchpointDirName) if _, err := os.Stat(catchpointRootDir); os.IsNotExist(err) { return nil } for { - emptyDirs, err := GetEmptyDirs(catchpointRootDir) + emptyDirs, err := trackerdb.GetEmptyDirs(catchpointRootDir) if err != nil { return err } diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go new file mode 100644 index 0000000000..c339736b2b --- /dev/null +++ b/ledger/store/trackerdb/store.go @@ -0,0 +1,108 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package trackerdb + +import ( + "context" + "testing" + "time" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/db" +) + +// BatchScope is the write scope to the store. +type BatchScope interface { + MakeCatchpointWriter() (CatchpointWriter, error) + MakeAccountsWriter() (AccountsWriterExt, error) + MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) + + RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error) + ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) + + AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) + AccountsUpdateSchemaTest(ctx context.Context) (err error) +} + +// SnapshotScope is the read scope to the store. +type SnapshotScope interface { + MakeAccountsReader() (AccountsReaderExt, error) + MakeCatchpointReader() (CatchpointReader, error) + + MakeCatchpointPendingHashesIterator(hashCount int) CatchpointPendingHashesIter +} + +// TransactionScope is the read/write scope to the store. 
+type TransactionScope interface { + MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) + MakeAccountsReaderWriter() (AccountsReaderWriter, error) + MakeAccountsOptimizedReader() (AccountsReader, error) + MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error) + MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error) + MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error) + + MakeMerkleCommitter(staging bool) (MerkleCommitter, error) + + MakeOrderedAccountsIter(accountCount int) OrderedAccountsIter + MakeKVsIter(ctx context.Context) (KVsIter, error) + MakeEncodedAccoutsBatchIter() EncodedAccountsBatchIter + + RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error) + ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) + + AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) + AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) +} + +// BatchFn is the callback lambda used in `Batch`. +type BatchFn func(ctx context.Context, tx BatchScope) error + +// SnapshotFn is the callback lambda used in `Snapshot`. +type SnapshotFn func(ctx context.Context, tx SnapshotScope) error + +// TransactionFn is the callback lambda used in `Transaction`. +type TransactionFn func(ctx context.Context, tx TransactionScope) error + +// TrackerStore is the interface for the tracker db. +type TrackerStore interface { + SetLogger(log logging.Logger) + SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error) + IsSharedCacheConnection() bool + + Batch(fn BatchFn) (err error) + BatchContext(ctx context.Context, fn BatchFn) (err error) + + Snapshot(fn SnapshotFn) (err error) + SnapshotContext(ctx context.Context, fn SnapshotFn) (err error) + + Transaction(fn TransactionFn) (err error) + TransactionContext(ctx context.Context, fn TransactionFn) (err error) + + MakeAccountsOptimizedReader() (AccountsReader, error) + MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error) + + MakeCatchpointReaderWriter() (CatchpointReaderWriter, error) + + Vacuum(ctx context.Context) (stats db.VacuumStats, err error) + Close() + CleanupTest(dbName string, inMemory bool) + + ResetToV6Test(ctx context.Context) error +} diff --git a/ledger/store/trackerdb/utils.go b/ledger/store/trackerdb/utils.go new file mode 100644 index 0000000000..0d9e8b4d8a --- /dev/null +++ b/ledger/store/trackerdb/utils.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
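// Illustrative sketch, not part of this patch: a single cleanup pass with the
// helpers defined in this new file. removeEmptyDirsOnSchemaUpgrade, shown
// above, wraps the same calls in a retry loop; imports ("os" and the
// trackerdb package) are elided.
func pruneEmptyDirsOnce(root string) error {
	emptyDirs, err := trackerdb.GetEmptyDirs(root)
	if err != nil {
		return err
	}
	for _, dir := range emptyDirs {
		// Removing an empty directory can expose a newly empty parent,
		// which is why the real caller loops until nothing is left.
		if err := os.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}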
+ +package trackerdb + +import ( + "io" + "os" + "path/filepath" +) + +// isDirEmpty returns if a given directory is empty or not. +func isDirEmpty(path string) (bool, error) { + dir, err := os.Open(path) + if err != nil { + return false, err + } + defer dir.Close() + _, err = dir.Readdirnames(1) + if err != io.EOF { + return false, err + } + return true, nil +} + +// GetEmptyDirs returns a slice of paths for empty directories which are located in PathToScan arg +func GetEmptyDirs(PathToScan string) ([]string, error) { + var emptyDir []string + err := filepath.Walk(PathToScan, func(path string, f os.FileInfo, errIn error) error { + if errIn != nil { + return errIn + } + if !f.IsDir() { + return nil + } + isEmpty, err := isDirEmpty(path) + if err != nil { + if os.IsNotExist(err) { + return filepath.SkipDir + } + return err + } + if isEmpty { + emptyDir = append(emptyDir, path) + } + return nil + }) + return emptyDir, err +} diff --git a/ledger/store/trackerdb/version.go b/ledger/store/trackerdb/version.go new file mode 100644 index 0000000000..42917f2620 --- /dev/null +++ b/ledger/store/trackerdb/version.go @@ -0,0 +1,22 @@ +// Copyright (C) 2019-2023 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package trackerdb + +// AccountDBVersion is the database version that this binary would know how to support and how to upgrade to. +// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX +// and their descriptions. +var AccountDBVersion = int32(9) diff --git a/ledger/tracker.go b/ledger/tracker.go index 45e14a63e8..26f7f25b2d 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -30,7 +30,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/internal" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" @@ -107,7 +107,7 @@ type ledgerTracker interface { // commitRound is called for each of the trackers after a deferredCommitContext was agreed upon // by all the prepareCommit calls. The commitRound is being executed within a single transactional // context, and so, if any of the tracker's commitRound calls fails, the transaction is rolled back. - commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error + commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error // postCommit is called only on a successful commitRound. In that case, each of the trackers have // the chance to update it's internal data structures, knowing that the given deferredCommitContext // has completed. An optional context is provided for long-running operations. 
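// Illustrative sketch, not part of this patch: the shape of a commit under
// the new interfaces. trackerRegistry.commitRound, further below, uses the
// same pattern, so a failure in any one tracker rolls back the whole
// transaction. Imports are elided.
func commitExample(dbs trackerdb.TrackerStore) error {
	return dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
		arw, err := tx.MakeAccountsReaderWriter()
		if err != nil {
			return err
		}
		_ = arw // each tracker's commitRound writes through this shared scope
		return nil
	})
}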
@@ -133,7 +133,7 @@ type ledgerTracker interface { // ledgerForTracker defines the part of the ledger that a tracker can // access. This is particularly useful for testing trackers in isolation. type ledgerForTracker interface { - trackerDB() store.TrackerStore + trackerDB() trackerdb.TrackerStore blockDB() db.Pair trackerLog() logging.Logger trackerEvalVerified(bookkeeping.Block, internal.LedgerForEvaluator) (ledgercore.StateDelta, error) @@ -173,7 +173,7 @@ type trackerRegistry struct { // cached to avoid SQL queries. dbRound basics.Round - dbs store.TrackerStore + dbs trackerdb.TrackerStore log logging.Logger // the synchronous mode that would be used for the account database. @@ -245,12 +245,12 @@ type deferredCommitContext struct { compactKvDeltas map[string]modifiedKvValue compactCreatableDeltas map[basics.CreatableIndex]ledgercore.ModifiedCreatable - updatedPersistedAccounts []store.PersistedAccountData - updatedPersistedResources map[basics.Address][]store.PersistedResourcesData - updatedPersistedKVs map[string]store.PersistedKVData + updatedPersistedAccounts []trackerdb.PersistedAccountData + updatedPersistedResources map[basics.Address][]trackerdb.PersistedResourcesData + updatedPersistedKVs map[string]trackerdb.PersistedKVData compactOnlineAccountDeltas compactOnlineAccountDeltas - updatedPersistedOnlineAccounts []store.PersistedOnlineAccountData + updatedPersistedOnlineAccounts []trackerdb.PersistedOnlineAccountData updatingBalancesDuration time.Duration @@ -281,7 +281,7 @@ func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTrack tr.dbs = l.trackerDB() tr.log = l.trackerLog() - err = tr.dbs.Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err = tr.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -515,7 +515,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { start := time.Now() ledgerCommitroundCount.Inc(nil) - err := tr.dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := tr.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 908510893e..1671e03024 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -31,7 +31,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -180,7 +180,7 @@ func (bt *producePrepareBlockingTracker) prepareCommit(*deferredCommitContext) e } // commitRound is not used by the blockingTracker -func (bt *producePrepareBlockingTracker) commitRound(context.Context, store.TransactionScope, *deferredCommitContext) error { +func (bt *producePrepareBlockingTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error { return nil } diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go index a6809e0d71..8955a27c0f 100644 --- a/ledger/trackerdb.go +++ b/ledger/trackerdb.go @@ -20,13 +20,13 @@ import ( "context" "fmt" - "github.com/algorand/go-algorand/ledger/store" + 
"github.com/algorand/go-algorand/ledger/store/trackerdb" ) // trackerDBInitialize initializes the accounts DB if needed and return current account round. // as part of the initialization, it tests the current database schema version, and perform upgrade // procedures to bring it up to the database schema supported by the binary. -func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefix string) (mgr store.TrackerDBInitParams, err error) { +func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefix string) (mgr trackerdb.InitParams, err error) { dbs := l.trackerDB() bdbs := l.blockDB() log := l.trackerLog() @@ -38,13 +38,13 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi return } - err = dbs.Transaction(func(ctx context.Context, tx store.TransactionScope) error { + err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err } - tp := store.TrackerDBParams{ + tp := trackerdb.Params{ InitAccounts: l.GenesisAccounts(), InitProto: l.GenesisProtoVersion(), GenesisHash: l.GenesisHash(), @@ -54,7 +54,7 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi BlockDb: bdbs, } var err0 error - mgr, err0 = tx.RunMigrations(ctx, tp, log, store.AccountDBVersion) + mgr, err0 = tx.RunMigrations(ctx, tp, log, trackerdb.AccountDBVersion) if err0 != nil { return err0 } @@ -69,7 +69,7 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi if err0 != nil { return err0 } - mgr, err0 = tx.RunMigrations(ctx, tp, log, store.AccountDBVersion) + mgr, err0 = tx.RunMigrations(ctx, tp, log, trackerdb.AccountDBVersion) if err0 != nil { return err0 } diff --git a/ledger/txtail.go b/ledger/txtail.go index 5262632245..a4095c6449 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -28,7 +28,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" + "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" ) @@ -92,11 +92,11 @@ type txTail struct { func (t *txTail) loadFromDisk(l ledgerForTracker, dbRound basics.Round) error { t.log = l.trackerLog() - var roundData []*store.TxTailRound + var roundData []*trackerdb.TxTailRound var roundTailHashes []crypto.Digest var baseRound basics.Round if dbRound > 0 { - err := l.trackerDB().Snapshot(func(ctx context.Context, tx store.SnapshotScope) (err error) { + err := l.trackerDB().Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -196,7 +196,7 @@ func (t *txTail) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) { return } - var tail store.TxTailRound + var tail trackerdb.TxTailRound tail.TxnIDs = make([]transactions.Txid, len(delta.Txids)) tail.LastValid = make([]basics.Round, len(delta.Txids)) tail.Hdr = blk.BlockHeader @@ -206,7 +206,7 @@ func (t *txTail) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) { tail.TxnIDs[txnInc.Intra] = txid tail.LastValid[txnInc.Intra] = txnInc.LastValid if blk.Payset[txnInc.Intra].Txn.Lease != [32]byte{} { - tail.Leases = append(tail.Leases, store.TxTailRoundLease{ + tail.Leases = append(tail.Leases, trackerdb.TxTailRoundLease{ Sender: blk.Payset[txnInc.Intra].Txn.Sender, Lease: 
blk.Payset[txnInc.Intra].Txn.Lease, TxnIdx: txnInc.Intra, @@ -272,7 +272,7 @@ func (t *txTail) prepareCommit(dcc *deferredCommitContext) (err error) { return } -func (t *txTail) commitRound(ctx context.Context, tx store.TransactionScope, dcc *deferredCommitContext) error { +func (t *txTail) commitRound(ctx context.Context, tx trackerdb.TransactionScope, dcc *deferredCommitContext) error { arw, err := tx.MakeAccountsReaderWriter() if err != nil { return err diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index 954139e54f..4a232ff054 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -30,8 +30,9 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/ledger/store" storetesting "github.com/algorand/go-algorand/ledger/store/testing" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver" ledgertesting "github.com/algorand/go-algorand/ledger/testing" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -150,10 +151,10 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse // create a corresponding blockdb. inMemory := true t.blockDBs, _ = storetesting.DbOpenTest(ts, inMemory) - t.trackerDBs, _ = store.DbOpenTrackerTest(ts, inMemory) + t.trackerDBs, _ = sqlitedriver.DbOpenTrackerTest(ts, inMemory) t.protoVersion = protoVersion - err := t.trackerDBs.Batch(func(transactionCtx context.Context, tx store.BatchScope) (err error) { + err := t.trackerDBs.Batch(func(transactionCtx context.Context, tx trackerdb.BatchScope) (err error) { arw, err := tx.MakeAccountsWriter() if err != nil { return err @@ -169,7 +170,7 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse for i := startRound; i <= t.Latest(); i++ { blk, err := t.Block(i) require.NoError(ts, err) - tail, err := store.TxTailRoundFromBlock(blk) + tail, err := trackerdb.TxTailRoundFromBlock(blk) require.NoError(ts, err) encoded, _ := tail.Encode() roundData = append(roundData, encoded) @@ -300,7 +301,7 @@ func TestTxTailDeltaTracking(t *testing.T) { err = txtail.prepareCommit(dcc) require.NoError(t, err) - err := ledger.trackerDBs.Transaction(func(ctx context.Context, tx store.TransactionScope) (err error) { + err := ledger.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { err = txtail.commitRound(context.Background(), tx, dcc) require.NoError(t, err) return nil From b22f3b5da6c19bff916c4f67e95e128a52be7110 Mon Sep 17 00:00:00 2001 From: Will Winder Date: Mon, 27 Feb 2023 20:21:49 -0500 Subject: [PATCH 58/81] fix(follower): update test that defines a follower relay. 
(#5162) --- test/testdata/nettemplates/TwoNodesFollower100Second.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/testdata/nettemplates/TwoNodesFollower100Second.json b/test/testdata/nettemplates/TwoNodesFollower100Second.json index 921066b48b..2a9733c5cf 100644 --- a/test/testdata/nettemplates/TwoNodesFollower100Second.json +++ b/test/testdata/nettemplates/TwoNodesFollower100Second.json @@ -13,12 +13,12 @@ "Nodes": [ { "Name": "Follower", - "IsRelay": true, "Wallets": [], "ConfigJSONOverride": "{\"EnableFollowMode\":true}" }, { "Name": "Primary", + "IsRelay": true, "Wallets": [ { "Name": "Wallet1", "ParticipationOnly": false } From 9220f7b48e93f79959365b26e7939ccda90f6f52 Mon Sep 17 00:00:00 2001 From: Will Winder Date: Tue, 28 Feb 2023 13:51:03 -0500 Subject: [PATCH 59/81] fix(follower): add experimental tag to deltas endpoint. (#5169) --- daemon/algod/api/algod.oas2.json | 3 ++- daemon/algod/api/algod.oas3.yml | 3 ++- .../api/server/v2/generated/data/data_routes.yml | 6 ++++-- .../algod/api/server/v2/generated/data/routes.go | 16 ++++++++-------- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index 70b9981a8b..9eda15df6a 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -1439,7 +1439,8 @@ "description": "Get ledger deltas for a round.", "tags": [ "public", - "data" + "data", + "experimental" ], "produces": [ "application/json", diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index 57774cc1e0..f144be5fbb 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -3802,7 +3802,8 @@ "summary": "Get a LedgerStateDelta object for a given round", "tags": [ "public", - "data" + "data", + "experimental" ] } }, diff --git a/daemon/algod/api/server/v2/generated/data/data_routes.yml b/daemon/algod/api/server/v2/generated/data/data_routes.yml index 06a6306492..3055eda1e0 100644 --- a/daemon/algod/api/server/v2/generated/data/data_routes.yml +++ b/daemon/algod/api/server/v2/generated/data/data_routes.yml @@ -5,13 +5,15 @@ generate: output-options: include-tags: - data - - public +# do not include endpoints tagged public/experimental +# - public exclude-tags: - common - private - participating - nonparticipating - - experimental +# do not exclude endpoints tagged data/experimental +# - experimental type-mappings: integer: uint64 skip-prune: true diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go index 4d178d1317..2a32e1cd96 100644 --- a/daemon/algod/api/server/v2/generated/data/routes.go +++ b/daemon/algod/api/server/v2/generated/data/routes.go @@ -305,14 +305,14 @@ var swaggerSpec = []string{ "F/x3iT6FWpXtqPhaFf+AlRMQURQSj4+O7uzRrd7KXtkI9Rqcx+s2EHsy9YU/KVtsaGTj06NHdza3drDt", "rSfWBdeb1SnHWBcj+Yk92XBCT/+yE3qBN2jD8UvGM/tigqa4p+0Gxfl9+Zedn2aF96BxfHwGFCoAz+5w", "g316JjR6OM0JtrSzefKXnc0ZyA1LgZxDUQpJJct35CdeZ5cGFaX6x9xP/IKLS+4JYRTtqiio3LkjkJKu", - "qAofrm+9/2c0PLpS6ODDGt6zuY23/3Dljl4ryA7te9HNiex/3nGX25VDLFLqJ67AWlR9UveOp0PnMTY+", - "2/H0bX1I9g4H3Jh/HBP316nGF8UFhtL8wdJ8mvh99imp8Gn33ifbLG+hEBtQ9RPJDXMaBcpcSuxryVIU", - "AQ8fjGya+aBi6hwN/ZG8k6UB3tNS9+yJmz43PBIoNQnPPZGNFvyU11nr1087mRR2qHuxBZr9SxD8SxDc", - "oSDQleSDWzQ4vzDaF0obyERSmq7hYPohuuNpeIktRayUytmIsHAFJIZkxVlbVvypr7If/hTn+wvK/X5u", - "rbgNL6MyZyBrLqC8X9PjX1Lgf4wUsMWJnLloTjTkuQr3vha4922UgEvi4DZ6Y6Ic6L7dGfv58GP77ZiW", - 
"3U6tK52Jy6Av+nptoELfnFe/ptj6+/CSMp0shXQJHFjbt99ZA80PXWmSzq9NNnDvC6Y4Bz+G0VfRXw/r", - "0unRj12bauyrsykONPKFpfznxqcS+ihQQtbeiXcfjHzCwpxOeDYm9+PDQwyKXgulD2dX848dc3z48UPN", - "Er5i26yUbIMJ4B+u/n8AAAD//y/FazkqxQAA", + "qAofrm+9/2c0PLpS6ODDGt6z+cyljsG2BMkK4FiV4sqdxFauHdrno5sD2v+84y7VK4dY4NRPXIE1sPoc", + "7x1Ph45nbHy24+nb+szsnRW4T/84nu4vW40vSg+MrPmDhfs0afzsU1Lh027FT7Z33kIhNqDqF5Mb5jT6", + "lLmj2MeTpSgCHj4Y3EMf8IIf11Od36E/kve5NMB7SuuePXHT14dH4qYm4bkn0NGCn/JYa/0Yaiexwg51", + "L7ZAs38Jgn8JgjsUBLqSfHCLBucXBv9CaeOaSErTNYzJg/5pGd5pSxGrrHI2IixcPYkhWXHWlhV/6pvt", + "hz/F+f6Ccr+fWytuo82ozBnImgso75f4+JcU+B8jBWytImc9mhMNea7Cva8F7n0bNOByOrgN5pgoB7pP", + "ecZ+PvzYfkqmZcZT60pn4jLoi65fG7fQt+7Vjyu2/j68pEwnSyFdPgeW+u131kDzQ1eppPNrkxzc+4IZ", + "z8GPYTBW9NfDupJ69GPXxBr76kyMA418nSn/uXGxhC4LlJC1s+LdByOfsE6nE56NBf748BBjpNdC6cPZ", + "1fxjxzoffvxQs4Qv4DYrJdtgPviHq/8fAAD///Ozgdc5xQAA", } // GetSwagger returns the content of the embedded swagger specification file From a7bb1ee32d8add4a067a316ec2c5806585cb47e2 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 1 Mar 2023 11:41:01 -0500 Subject: [PATCH 60/81] goal: catchup without args prompts to continue (#5165) --- cmd/goal/messages.go | 4 +++- cmd/goal/node.go | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go index 8e4dcebf89..6bf976f6a9 100644 --- a/cmd/goal/messages.go +++ b/cmd/goal/messages.go @@ -69,6 +69,8 @@ const ( infoNodeCatchpointCatchupAccounts = "Catchpoint total accounts: %d\nCatchpoint accounts processed: %d\nCatchpoint accounts verified: %d\nCatchpoint total KVs: %d\nCatchpoint KVs processed: %d\nCatchpoint KVs verified: %d" infoNodeCatchpointCatchupBlocks = "Catchpoint total blocks: %d\nCatchpoint downloaded blocks: %d" nodeLastCatchpoint = "Last Catchpoint: %s" + nodeConfirmImplicitCatchpoint = "Fast catchup to %s is about to start.\nUsing external catchpoints is not a secure practice and should not be done for consensus participating nodes.\nType 'yes' to accept the risk and continue: " + errorAbortedPerUserRequest = "Aborted" errorNodeCreationIPFailure = "Parsing passed IP %v failed: need a valid IPv4 or IPv6 address with a specified port number" errorNodeNotDetected = "Algorand node does not appear to be running: %s" errorNodeStatus = "Cannot contact Algorand node: %s" @@ -87,7 +89,7 @@ const ( errLoadingConfig = "Error loading Config file from '%s': %v" errorNodeFailedToShutdown = "Unable to shut down node: %v" errorCatchpointLabelParsingFailed = "The provided catchpoint is not a valid one" - errorCatchpointLabelMissing = "A catchpoint argument is needed: %s" + errorCatchpointLabelMissing = "A catchpoint argument is needed: %s: %s" errorUnableToLookupCatchpointLabel = "Unable to fetch catchpoint label" errorTooManyCatchpointLabels = "The catchup command expect a single catchpoint" diff --git a/cmd/goal/node.go b/cmd/goal/node.go index 803b305992..b8b5f2efa4 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -19,6 +19,7 @@ package main //go:generate ./bundle_genesis_json.sh import ( + "bufio" "encoding/base64" "encoding/json" "errors" @@ -60,6 +61,7 @@ var newNodeRelay string var newNodeFullConfig bool var watchMillisecond uint64 var abortCatchup bool +var fastCatchupForce bool const catchpointURL = "https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/%s/latest.catchpoint" @@ -108,6 +110,7 @@ func init() { statusCmd.Flags().Uint64VarP(&watchMillisecond, "watch", "w", 0, "Time (in milliseconds) between 
two successive status updates") catchupCmd.Flags().BoolVarP(&abortCatchup, "abort", "x", false, "Aborts the current catchup process") + catchupCmd.Flags().BoolVar(&fastCatchupForce, "force", false, "Forces fast catchup with implicit catchpoint to start without a consent prompt") } @@ -149,7 +152,7 @@ func getMissingCatchpointLabel(URL string) (label string, err error) { var catchupCmd = &cobra.Command{ Use: "catchup", Short: "Catchup the Algorand node to a specific catchpoint", - Long: "Catchup allows making large jumps over round ranges without the need to incrementally validate each individual round. If no catchpoint is provided, this command attempts to lookup the latest catchpoint from algorand-catchpoints.s3.us-east-2.amazonaws.com.", + Long: "Catchup allows making large jumps over round ranges without the need to incrementally validate each individual round. Using external catchpoints is not a secure practice and should not be done for consensus participating nodes.\nIf no catchpoint is provided, this command attempts to lookup the latest catchpoint from algorand-catchpoints.s3.us-east-2.amazonaws.com.", Example: "goal node catchup 6500000#1234567890ABCDEF01234567890ABCDEF0\tStart catching up to round 6500000 with the provided catchpoint\ngoal node catchup --abort\t\t\t\t\tAbort the current catchup", Args: catchpointCmdArgument, Run: func(cmd *cobra.Command, args []string) { @@ -164,9 +167,18 @@ var catchupCmd = &cobra.Command{ URL := fmt.Sprintf(catchpointURL, genesis) label, err := getMissingCatchpointLabel(URL) if err != nil { - reportErrorf(errorCatchpointLabelMissing, errorUnableToLookupCatchpointLabel) + reportErrorf(errorCatchpointLabelMissing, errorUnableToLookupCatchpointLabel, err.Error()) } args = append(args, label) + if !fastCatchupForce { + fmt.Printf(nodeConfirmImplicitCatchpoint, label) + reader := bufio.NewReader(os.Stdin) + text, _ := reader.ReadString('\n') + text = strings.Replace(text, "\n", "", -1) + if text != "yes" { + reportErrorf(errorAbortedPerUserRequest) + } + } } catchup(dataDir, args) }) From f2b7f8581ebeba855a8717201d86983bec4bc6b4 Mon Sep 17 00:00:00 2001 From: Will Winder Date: Wed, 1 Mar 2023 14:35:21 -0500 Subject: [PATCH 61/81] api: lower default for MaxHeaderBytes. 
(#5171) --- daemon/algod/server.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 97ede14578..e116f206b3 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -49,6 +49,9 @@ import ( var server http.Server +// maxHeaderBytes must have enough room to hold an api token +const maxHeaderBytes = 4096 + // ServerNode is the required methods for any node the server fronts type ServerNode interface { apiServer.APINodeInterface @@ -297,9 +300,10 @@ func (s *Server) Start() { addr = listener.Addr().String() server = http.Server{ - Addr: addr, - ReadTimeout: time.Duration(cfg.RestReadTimeoutSeconds) * time.Second, - WriteTimeout: time.Duration(cfg.RestWriteTimeoutSeconds) * time.Second, + Addr: addr, + ReadTimeout: time.Duration(cfg.RestReadTimeoutSeconds) * time.Second, + WriteTimeout: time.Duration(cfg.RestWriteTimeoutSeconds) * time.Second, + MaxHeaderBytes: maxHeaderBytes, } e := apiServer.NewRouter( From c573d2b3f643298b9e748313167145e4862944b3 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 1 Mar 2023 14:51:39 -0500 Subject: [PATCH 62/81] AVM: Go19 curve check (#4917) --- config/consensus.go | 5 +++++ data/transactions/logic/eval.go | 19 +++++++++++-------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/config/consensus.go b/config/consensus.go index 4f1ac86736..f6e831493a 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -483,6 +483,10 @@ type ConsensusParams struct { // the rewardsLevel, but the rewardsLevel has no meaning because the account // has fewer than RewardUnit algos. UnfundedSenders bool + + // EnablePrecheckECDSACurve means that ecdsa_verify opcode will bail early, + // returning false, if pubkey is not on the curve. 
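// Illustrative, self-contained sketch of the check this flag gates; the
// function name and sample point are hypothetical, and opEcdsaVerify below
// applies the same idea to stack-supplied coordinates.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/sha512"
	"math/big"
)

// verifySecp256r1 mirrors the gated path: with the pre-check enabled, a
// public key that is not on P-256 is rejected before ecdsa.Verify runs; with
// it disabled, the old path of calling Verify directly is kept.
func verifySecp256r1(precheck bool, x, y, r, s *big.Int, msgHash []byte) bool {
	curve := elliptic.P256()
	if precheck && !curve.IsOnCurve(x, y) {
		return false
	}
	pub := ecdsa.PublicKey{Curve: curve, X: x, Y: y}
	return ecdsa.Verify(&pub, msgHash, r, s)
}

func main() {
	msgHash := sha512.Sum512_256([]byte("msg"))
	// (1, 1) is not on P-256, so this returns false without reaching Verify.
	_ = verifySecp256r1(true, big.NewInt(1), big.NewInt(1), big.NewInt(1), big.NewInt(1), msgHash[:])
}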
+ EnablePrecheckECDSACurve bool } // PaysetCommitType enumerates possible ways for the block header to commit to @@ -1249,6 +1253,7 @@ func initConsensusProtocols() { vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} vFuture.LogicSigVersion = 9 // When moving this to a release, put a new higher LogicSigVersion here + vFuture.EnablePrecheckECDSACurve = true Consensus[protocol.ConsensusFuture] = vFuture diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index c0e94e71c9..42f5669c3b 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -3432,6 +3432,8 @@ var ecdsaVerifyCosts = []int{ Secp256r1: 2500, } +var secp256r1 = elliptic.P256() + func opEcdsaVerify(cx *EvalContext) error { ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1]) fs, ok := ecdsaCurveSpecByField(ecdsaCurve) @@ -3471,15 +3473,16 @@ func opEcdsaVerify(cx *EvalContext) error { pubkey := secp256k1.S256().Marshal(x, y) result = secp256k1.VerifySignature(pubkey, msg, signature) } else if fs.field == Secp256r1 { - r := new(big.Int).SetBytes(sigR) - s := new(big.Int).SetBytes(sigS) - - pubkey := ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: x, - Y: y, + if !cx.Proto.EnablePrecheckECDSACurve || secp256r1.IsOnCurve(x, y) { + pubkey := ecdsa.PublicKey{ + Curve: secp256r1, + X: x, + Y: y, + } + r := new(big.Int).SetBytes(sigR) + s := new(big.Int).SetBytes(sigS) + result = ecdsa.Verify(&pubkey, msg, r, s) } - result = ecdsa.Verify(&pubkey, msg, r, s) } cx.stack[fifth] = boolToSV(result) From 07ecf74fd81778e61af5b52045ce3f96fcfc23bb Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Wed, 1 Mar 2023 11:59:37 -0800 Subject: [PATCH 63/81] Algod: Make simulation endpoint non-experimental (#5159) Co-authored-by: algochoi <86622919+algochoi@users.noreply.github.com> --- daemon/algod/api/algod.oas2.json | 37 +- daemon/algod/api/algod.oas3.yml | 32 +- .../api/server/v2/generated/data/routes.go | 16 +- .../v2/generated/experimental/routes.go | 351 ++++++------- .../nonparticipating/private/routes.go | 10 +- .../nonparticipating/public/routes.go | 475 ++++++++++-------- .../generated/participating/private/routes.go | 26 +- .../generated/participating/public/routes.go | 24 +- daemon/algod/api/server/v2/handlers.go | 11 +- .../algod/api/server/v2/test/handlers_test.go | 82 ++- 10 files changed, 561 insertions(+), 503 deletions(-) diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index 9eda15df6a..ce40f9ab63 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -1209,7 +1209,7 @@ "post": { "tags": [ "public", - "experimental" + "nonparticipating" ], "consumes": [ "application/x-binary" @@ -1221,7 +1221,7 @@ "schemes": [ "http" ], - "summary": "Simulates a raw transaction or transaction group as it would be evaluated on the network. WARNING: This endpoint is experimental and under active development. There are no guarantees in terms of functionality or future support.", + "summary": "Simulates a raw transaction or transaction group as it would be evaluated on the network. 
The simulation will use blockchain state from the latest committed round.", "operationId": "SimulateTransaction", "parameters": [ { @@ -1254,6 +1254,9 @@ "$ref": "#/definitions/ErrorResponse" } }, + "404": { + "description": "Transaction simulator not enabled" + }, "500": { "description": "Internal Error", "schema": { @@ -2372,6 +2375,33 @@ } } } + }, + "/v2/experimental": { + "get": { + "tags": [ + "public", + "experimental" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "http" + ], + "summary": "Returns OK if experimental API is enabled.", + "operationId": "ExperimentalCheck", + "responses": { + "200": { + "description": "Experimental API enabled" + }, + "404": { + "description": "Experimental API not enabled" + }, + "default": { + "description": "Unknown Error" + } + } + } } }, "definitions": { @@ -4014,9 +4044,6 @@ }, "SimulateResponse": { "description": "Result of a transaction group simulation.", - "tags": [ - "experimental" - ], "schema": { "type": "object", "required": [ diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index f144be5fbb..11435901dd 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -3807,6 +3807,30 @@ ] } }, + "/v2/experimental": { + "get": { + "operationId": "ExperimentalCheck", + "responses": { + "200": { + "content": {}, + "description": "Experimental API enabled" + }, + "404": { + "content": {}, + "description": "Experimental API not enabled" + }, + "default": { + "content": {}, + "description": "Unknown Error" + } + }, + "summary": "Returns OK if experimental API is enabled.", + "tags": [ + "public", + "experimental" + ] + } + }, "/v2/ledger/supply": { "get": { "operationId": "GetSupply", @@ -5779,6 +5803,10 @@ }, "description": "Invalid API Token" }, + "404": { + "content": {}, + "description": "Transaction simulator not enabled" + }, "500": { "content": { "application/json": { @@ -5814,10 +5842,10 @@ "description": "Unknown Error" } }, - "summary": "Simulates a raw transaction or transaction group as it would be evaluated on the network. WARNING: This endpoint is experimental and under active development. There are no guarantees in terms of functionality or future support.", + "summary": "Simulates a raw transaction or transaction group as it would be evaluated on the network. 
The simulation will use blockchain state from the latest committed round.", "tags": [ "public", - "experimental" + "nonparticipating" ], "x-codegen-request-body-name": "rawtxn" } diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go index 2a32e1cd96..4556670809 100644 --- a/daemon/algod/api/server/v2/generated/data/routes.go +++ b/daemon/algod/api/server/v2/generated/data/routes.go @@ -305,14 +305,14 @@ var swaggerSpec = []string{ "F/x3iT6FWpXtqPhaFf+AlRMQURQSj4+O7uzRrd7KXtkI9Rqcx+s2EHsy9YU/KVtsaGTj06NHdza3drDt", "rSfWBdeb1SnHWBcj+Yk92XBCT/+yE3qBN2jD8UvGM/tigqa4p+0Gxfl9+Zedn2aF96BxfHwGFCoAz+5w", "g316JjR6OM0JtrSzefKXnc0ZyA1LgZxDUQpJJct35CdeZ5cGFaX6x9xP/IKLS+4JYRTtqiio3LkjkJKu", - "qAofrm+9/2c0PLpS6ODDGt6z+cyljsG2BMkK4FiV4sqdxFauHdrno5sD2v+84y7VK4dY4NRPXIE1sPoc", - "7x1Ph45nbHy24+nb+szsnRW4T/84nu4vW40vSg+MrPmDhfs0afzsU1Lh027FT7Z33kIhNqDqF5Mb5jT6", - "lLmj2MeTpSgCHj4Y3EMf8IIf11Od36E/kve5NMB7SuuePXHT14dH4qYm4bkn0NGCn/JYa/0Yaiexwg51", - "L7ZAs38Jgn8JgjsUBLqSfHCLBucXBv9CaeOaSErTNYzJg/5pGd5pSxGrrHI2IixcPYkhWXHWlhV/6pvt", - "hz/F+f6Ccr+fWytuo82ozBnImgso75f4+JcU+B8jBWytImc9mhMNea7Cva8F7n0bNOByOrgN5pgoB7pP", - "ecZ+PvzYfkqmZcZT60pn4jLoi65fG7fQt+7Vjyu2/j68pEwnSyFdPgeW+u131kDzQ1eppPNrkxzc+4IZ", - "z8GPYTBW9NfDupJ69GPXxBr76kyMA418nSn/uXGxhC4LlJC1s+LdByOfsE6nE56NBf748BBjpNdC6cPZ", - "1fxjxzoffvxQs4Qv4DYrJdtgPviHq/8fAAD///Ozgdc5xQAA", + "qAofrm+9/2c0PLpS6ODDGt6z+cyljsG2BMkK4FiV4sqdxK1f6/PZSrtD+6h0/+cddwlgOcTCqX7iCqzZ", + "1Wd+73g6dGhj47MdT9/WJ2nvBMHd+8dxen8xa3xRpmC8zR8s8qfJ6GefkgqfdoN+sh31FgqxAVW/o9ww", + "p9GyzM3FPqksRRHw8MHgzvqA1/649uq8Ef2RvCemAd5TZffsiZu+STwSTTUJzz3hjxb8lCdc6ydSO+kW", + "dqh7sQWa/UsQ/EsQ3KEg0JXkg1s0OL8wJBhKG+1EUpquYUwe9E/L8KZbili9lbMRYeGqTAzJirO2rPhT", + "33c//CnO9xeU+/3cWnEbg0ZlzkDWXEB5v/DHv6TA/xgpYCsYOZvSnGjIcxXufS1w79tQApfpwW2Ix0Q5", + "0H3gM/bz4cf2AzMt455aVzoTl0FfdAjbaIa+za9+crH19+ElZTpZCumyPLAAcL+zBpofuvolnV+blOHe", + "F8yDDn4MQ7Sivx7W9dWjH7uG19hXZ3gcaOSrT/nPjeMldGSghKxdGO8+GPmE1Tud8Gzs8seHhxg5vRZK", + "H86u5h87Nvvw44eaJXxZt1kp2QazxD9c/f8AAAD//8pjb+JPxQAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index eb147bd397..3e8edb58bd 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -8,22 +8,20 @@ import ( "compress/gzip" "encoding/base64" "fmt" - "net/http" "net/url" "path" "strings" . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" - "github.com/algorand/oapi-codegen/pkg/runtime" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" ) // ServerInterface represents all server handlers. type ServerInterface interface { - // Simulates a raw transaction or transaction group as it would be evaluated on the network. WARNING: This endpoint is experimental and under active development. There are no guarantees in terms of functionality or future support. - // (POST /v2/transactions/simulate) - SimulateTransaction(ctx echo.Context, params SimulateTransactionParams) error + // Returns OK if experimental API is enabled. + // (GET /v2/experimental) + ExperimentalCheck(ctx echo.Context) error } // ServerInterfaceWrapper converts echo contexts to parameters. @@ -31,23 +29,14 @@ type ServerInterfaceWrapper struct { Handler ServerInterface } -// SimulateTransaction converts echo context to params. 
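Per the spec changes above, simulation now sits with the nonparticipating handlers and accepts a msgpack-encoded signed transaction group as application/x-binary. A hedged client sketch of the call (the node address, API token, and group.stxn path are placeholders, not values from this patch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	const node = "http://localhost:8080" // placeholder node address
	const token = "your-api-token"       // placeholder API token

	// Placeholder file: a msgpack-encoded signed transaction group.
	raw, err := os.ReadFile("group.stxn")
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", node+"/v2/transactions/simulate?format=json", bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-binary")
	req.Header.Set("X-Algo-API-Token", token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}

A 404 response maps to the new "Transaction simulator not enabled" case in the spec, so callers can distinguish a disabled simulator from a malformed request.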
-func (w *ServerInterfaceWrapper) SimulateTransaction(ctx echo.Context) error { +// ExperimentalCheck converts echo context to params. +func (w *ServerInterfaceWrapper) ExperimentalCheck(ctx echo.Context) error { var err error ctx.Set(Api_keyScopes, []string{""}) - // Parameter object where we will unmarshal all parameters from the context - var params SimulateTransactionParams - // ------------- Optional query parameter "format" ------------- - - err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), ¶ms.Format) - if err != nil { - return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) - } - // Invoke the callback with all the unmarshalled arguments - err = w.Handler.SimulateTransaction(ctx, params) + err = w.Handler.ExperimentalCheck(ctx) return err } @@ -79,179 +68,173 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL Handler: si, } - router.POST(baseURL+"/v2/transactions/simulate", wrapper.SimulateTransaction, m...) + router.GET(baseURL+"/v2/experimental", wrapper.ExperimentalCheck, m...) } // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9aXPctrLoX0HNvVVe3lCS13OsqtR9sp346MZ2XJaS8+6x/BIM2TODiAQYAJwlfv7v", - "r9AASJAEZ6gl9klVPtkaYmk0Go3e8WmSiqIUHLhWk+NPk5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPBJ8f+", - "G1FaMr6YTCfM/FpSvZxMJ5wW0LQx/acTCb9VTEI2OdaygulEpUsoqBlYb0vTuh5pkyxE4oY4sUOcvpx8", - "3vGBZpkEpfpQ/sDzLWE8zasMiJaUK5qaT4qsmV4SvWSKuM6EcSI4EDEnetlqTOYM8kwd+EX+VoHcBqt0", - "kw8v6XMDYiJFDn04X4hixjh4qKAGqt4QogXJYI6NllQTM4OB1TfUgiigMl2SuZB7QLVAhPACr4rJ8YeJ", - "Ap6BxN1Kga3wv3MJ8DskmsoF6MnHaWxxcw0y0ayILO3UYV+CqnKtCLbFNS7YCjgxvQ7Im0ppMgNCOXn/", - "3Qvy6NGjZ2YhBdUaMkdkg6tqZg/XZLtPjicZ1eA/92mN5gshKc+Suv37717g/GdugWNbUaUgflhOzBdy", - "+nJoAb5jhIQY17DAfWhRv+kRORTNzzOYCwkj98Q2vtVNCef/qruSUp0uS8G4juwLwa/Efo7ysKD7Lh5W", - "A9BqXxpMSTPoh6Pk2cdPD6YPjj7/x4eT5F/uzyePPo9c/ot63D0YiDZMKymBp9tkIYHiaVlS3sfHe0cP", - "aimqPCNLusLNpwWyeteXmL6Wda5oXhk6YakUJ/lCKEIdGWUwp1WuiZ+YVDw3bMqM5qidMEVKKVYsg2xq", - "uO96ydIlSamyQ2A7smZ5bmiwUpAN0Vp8dTsO0+cQJQaua+EDF/Tvi4xmXXswARvkBkmaCwWJFnuuJ3/j", - "UJ6R8EJp7ip1tcuKnC+B4OTmg71sEXfc0HSeb4nGfc0IVYQSfzVNCZuTrajIGjcnZ5fY363GYK0gBmm4", - "Oa171BzeIfT1kBFB3kyIHChH5Plz10cZn7NFJUGR9RL00t15ElQpuAIiZr9Cqs22//fZD2+JkOQNKEUX", - "8I6mlwR4KjLIDsjpnHChA9JwtIQ4ND2H1uHgil3yvyphaKJQi5Kml/EbPWcFi6zqDd2woioIr4oZSLOl", - "/grRgkjQleRDANkR95BiQTf9Sc9lxVPc/2balixnqI2pMqdbRFhBN98cTR04itA8JyXwjPEF0Rs+KMeZ", - "ufeDl0hR8WyEmKPNngYXqyohZXMGGalH2QGJm2YfPIxfDZ5G+ArA8YMMglPPsgccDpsIzZjTbb6Qki4g", - "IJkD8qNjbvhVi0vgNaGT2RY/lRJWTFSq7jQAI069WwLnQkNSSpizCI2dOXQYBmPbOA5cOBkoFVxTxiEz", - "zBmBFhossxqEKZhwt77Tv8VnVMHTx0N3fPN15O7PRXfXd+74qN3GRok9kpGr03x1BzYuWbX6j9APw7kV", - "WyT2595GssW5uW3mLMeb6Fezfx4NlUIm0EKEv5sUW3CqKwnHF/y++Ysk5ExTnlGZmV8K+9ObKtfsjC3M", - "T7n96bVYsPSMLQaQWcMaVbiwW2H/MePF2bHeRPWK10JcVmW4oLSluM625PTl0CbbMa9KmCe1thsqHucb", - "r4xctYfe1Bs5AOQg7kpqGl7CVoKBlqZz/GczR3qic/m7+acsc9Nbl/MYag0duysZzQfOrHBSljlLqUHi", - "e/fZfDVMAKwiQZsWh3ihHn8KQCylKEFqZgelZZnkIqV5ojTVONJ/SphPjif/cdjYXw5td3UYTP7a9DrD", - "TkZktWJQQsvyCmO8M6KP2sEsDIPGT8gmLNtDoYlxu4mGlJhhwTmsKNcHjcrS4gf1Af7gZmrwbaUdi++O", - "CjaIcGIbzkBZCdg2vKNIgHqCaCWIVhRIF7mY1T/cPSnLBoP4/aQsLT5QegSGghlsmNLqHi6fNicpnOf0", - "5QF5FY6Norjg+dZcDlbUMHfD3N1a7harbUtuDc2IdxTB7RTywGyNR4MR82+D4lCtWIrcSD17acU0/odr", - "G5KZ+X1U5z8HiYW4HSYuVLQc5qyOg78Eys3dDuX0CceZew7ISbfv9cjGjBInmGvRys79tOPuwGONwrWk", - "pQXQfbF3KeOopNlGFtYbctORjC4Kc3CGA1pDqK591vaehygkSAodGJ7nIr38B1XLWzjzMz9W//jhNGQJ", - 
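After this change the experimental group exposes only ExperimentalCheck, a GET /v2/experimental probe that answers 200 when the experimental API is enabled and 404 when it is not. A small sketch of using it as a feature gate (address and token are placeholders):

package main

import (
	"fmt"
	"net/http"
)

// experimentalEnabled reports whether the node serves the experimental API,
// using the GET /v2/experimental probe added by this patch.
func experimentalEnabled(node, token string) (bool, error) {
	req, err := http.NewRequest("GET", node+"/v2/experimental", nil)
	if err != nil {
		return false, err
	}
	req.Header.Set("X-Algo-API-Token", token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK, nil
}

func main() {
	ok, err := experimentalEnabled("http://localhost:8080", "your-api-token")
	if err != nil {
		panic(err)
	}
	fmt.Println("experimental API enabled:", ok)
}

Clients can run this probe once at startup instead of handling 404s from individual experimental endpoints.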
"NANJllQtDyYxKSM8Xs1oY46YaYgKPpkFUx3US7yt5e1ZWkY1DZbm4I2LJRb12A+ZHsiI7vID/ofmxHw2", - "Z9uwfjvsATlHBqbscXZOhsxo+1ZBsDOZBmiFEKSwCj4xWveVoHzRTB7fp1F79K21KbgdcovAHRKbWz8G", - "z8UmBsNzsekdAbEBdRv0YcZBMVJDoUbA99JBJnD/HfqolHTbRzKOPQbJZoFGdFV4Gnh445tZGuPsyUzI", - "63GfDlvhpDE5E2pGDZjvtIMkbFqViSPFiNnKNugM1Hj5djON7vAxjLWwcKbpH4AFZUa9DSy0B7ptLIii", - "ZDncAukvo0x/RhU8ekjO/nHy5MHDnx8+eWpIspRiIWlBZlsNitx1uhlRepvDvf7KUDuqch0f/eljb6hs", - "jxsbR4lKplDQsj+UNYBaEcg2I6ZdH2ttNOOqawDHHM5zMJzcop1Y274B7SVTRsIqZreyGUMIy5pZMuIg", - "yWAvMV11ec0023CJciur21BlQUohI/Y1PGJapCJPViAVExFvyjvXgrgWXrwtu79baMmaKmLmRtNvxVGg", - "iFCW3vDxfN8Ofb7hDW52cn673sjq3Lxj9qWNfG9JVKQEmegNJxnMqkVLE5pLURBKMuyId/Qr0GdbnqJV", - "7TaIdFhNKxhHE7/a8jTQ2cxG5ZAtWptwc92sixVvn7NT3VERcAw6XuNnVOtfQq7prcsv3QlisL/wG2mB", - "JZlpiFrwa7ZY6kDAfCeFmN8+jLFZYoDiByue56ZPX0h/KzIwi63ULVzGzWANrZs9DSmczkSlCSVcZIAW", - "lUrFr+kBzz26DNHTqcObXy+txD0DQ0gprcxqq5KgH6/HOZqOCU0t9SaIGjXgxajdT7aVnc56hXMJNDNa", - "PXAiZs5V4JwYuEiKTkjtLzonJETOUguuUooUlIIscSaKvaD5dpaJ6B14QsAR4HoWogSZU3ljYC9Xe+G8", - "hG2CLnNF7n7/k7r3FeDVQtN8D2KxTQy9tcLn/EF9qMdNv4vgupOHZEclEM9zjXZpGEQOGoZQeCWcDO5f", - "F6LeLt4cLSuQ6Jn5QyneT3IzAqpB/YPp/abQVuVAIJhTdM5ZgXY7TrlQkAqeqehgOVU62ceWTaOWNmZW", - "EHDCGCfGgQeEktdUaetNZDxDI4i9TnAeK6CYKYYBHhRIzcg/eVm0P3Zq7kGuKlULpqoqSyE1ZLE1cNjs", - "mOstbOq5xDwYu5Z+tSCVgn0jD2EpGN8hy67EIojq2uju3O39xaFp2tzz2ygqW0A0iNgFyJlvFWA3DIYZ", - "AISpBtGWcJjqUE4dgTOdKC3K0nALnVS87jeEpjPb+kT/2LTtExfVzb2dCVAYg+PaO8jXFrM2DGpJjQqN", - "I5OCXhrZAxVi6/bsw2wOY6IYTyHZRfnmWJ6ZVuER2HtIq3IhaQZJBjnd9gf90X4m9vOuAXDHG8VHaEhs", - "PEt80xtK9uEDO4YWOJ6KCY8Ev5DUHEGjeTQE4nrvGTkDHDvGnBwd3amHwrmiW+THw2XbrY6MiLfhSmiz", - "45YcEGLH0MfAO4CGeuTrYwI7J41a1p3if0C5CWox4uqTbEENLaEZ/0oLGDCmuUjh4Lh0uHuHAUe55iAX", - "28NGhk7sgGXvHZWapaxEVed72N665tedIOpvIhloynLISPDBaoFl2J/YQIzumNfTBEcZYfrg96wwkeXk", - "TKHE0wb+Eraocr+zEX7nQVzgLaiykVHN9UQ5QUB93JCRwMMmsKGpzrdGTtNL2JI1SCCqmhVMaxu529Z0", - "tSiTcICogXvHjM6bY6Pj/A6McS+d4VDB8vpbMZ1YlWA3fOcdvaCFDqcKlELkI4xHPWREIRjl+CelMLvO", - "XBCxDyP1lNQC0jFtdOXVt/8d1UIzroD8j6hISjlqXJWGWqQREuUElB/NDEYCq+d0Lv4GQ5BDAVaRxC/3", - "73cXfv++23OmyBzWPvLeNOyi4/59NOO8E0q3DtctmArNcTuNXB9o+cd7zwUvdHjKfhezG3nMTr7rDF67", - "C8yZUsoRrln+jRlA52Ruxqw9pJFx7nUcd5RRPxg6tm7c9zNWVDnVt+G+2CmP1voEKwrIGNWQb0kpIQUb", - "XW0ELGVhMaARG3eVLilfoFwtRbVwgT92HGSMlbIWDFnx3hBR4UNveLKQoipjjNIFe/oAeyN2ADWaT4BI", - "7Gzl/DWt53M5FWNuMI/wYHdemTGHvArTyaBiaJC6ahRDi5x2lkAcC5j2kKgqTQGiIcAxlateaicbsslv", - "cQMasaGSNgaK0FRXNA+pjpzOCeXbdpokZbkyXJApgu1M5yaudmrX5nNY5jS3vtlIUkV4UloSX7DzDUq7", - "qBjpd0AiMdJQnzJCAjTHy5DxH2PDb4aOQdmfOAi6aj4OxV0Z/Tvf3oIYZAciEkoJCi+t0G6l7FcxD3Of", - "3K2mtkpD0Tft264/DzCa94MKpOA545AUgsM2mu7LOLzBj1HGgRfnQGcUYYb6drWSFvwdsNrzjKHGm+IX", - "dzvgRe/qgMNb2PzuuB2vTpj1hVZLyEtCSZoztGkKrrSsUn3BKVpNgsMWCczw+uGwHe2FbxI33EXsam6o", - "C04xKKe2pUSdyXOIGA6+A/DmNFUtFqA6/JPMAS64a8U4qTjTOFdh9iuxG1aCxOiIA9uyoFvDAtHs9ztI", - "QWaVbvNkzDxR2rBL62Iy0xAxv+BUkxyMTv2G8fMNDuddtJ5mOOi1kJc1FuJXyAI4KKaSeADJK/sVY/vc", - "8pcuzg8zhe1n65Qw4zfpKVs0qjTZr//37n8dfzhJ/kWT34+SZ//r8OOnx5/v3e/9+PDzN9/8v/ZPjz5/", - "c++//jO2Ux72WF6Eg/z0pVPWTl+iRN54JXqwfzGLdMF4EiWy0PfeoS1yF3MAHQHda9tr9BIuuN5wQ0gr", - "mrPMiFzXIYcui+udRXs6OlTT2oiOfcav9Ypy7g24DIkwmQ5rvPY13o+5imcgoZvMJRXheZlX3G6lF3Rt", - "gL2PfRHzaZ1lZgtQHBNMQVpSH7jl/nz45Olk2qQO1d8n04n7+jFCySzbRKVD2MTUF3dA8GDcUaSkWwUD", - "AijCHg3zsdEG4bAFGL1XLVn55TmF0mwW53A+bNmZQTb8lNt4YnN+0Om2dbZ8Mf/ycGtp5PBSL2OJ6S1J", - "AVs1uwnQCYQopVgBnxJ2AAddM0RmVDMXcJQDnWOCNCp6YkwaRn0OLKF5qgiwHi5klK4fox8Ubh23/jyd", - "uMtf3bo87gaOwdWds/aw+b+1IHdefXtODh3DVHdsrqIdOsgui2itLoGiFSJjuJktx2GTNS/4BX8Jc8aZ", - 
"+X58wTOq6eGMKpaqw0qBfE5zylM4WAhy7HMyXlJNL3hP0hqsmBNkw5CymuUsJZehRNyQp62C0B/h4uID", - "zRfi4uJjL1qgL7+6qaL8xU6QrJleikonLoc7kbCmMuaNUXUOL45sizTsmnVK3NiWFbsccTd+nOfRslTd", - "XL7+8ssyN8sPyFC5TDWzZURpIb0sYgQUCw3u71vhLgZJ196EUSlQ5JeClh8Y1x9JclEdHT0C0kpu+8Vd", - "+YYmtyWMNmQM5hp27Re4cKvXwEZLmpR0EfP6XFx80EBL3H2UlwtUsvOcYLdWUp0PGsahmgV4fAxvgIXj", - "yglCuLgz28vX64kvAT/hFmIbI240rujr7leQZnft7eqk6vV2qdLLxJzt6KqUIXG/M3UZj4URsnx8gGIL", - "jMF0FU9mQNIlpJeuFAUUpd5OW919CIoTND3rYMoWKbFJMpgmjzbzGZCqzKgTxbsWpNmWKNDaB4G+h0vY", - "nosmy/4qCcrtfFk1dFCRUgPp0hBreGzdGN3Nd3FOaOIqS592ivlHniyOa7rwfYYPshV5b+EQx4iilc85", - "hAgqI4iwxD+Agmss1Ix3I9KPLc9oGTN780UKlnjeT1yTRnlyIUnhatDAbb8XgBWPxFqRGTVyu3DFemxO", - "aMDFKkUXMCAhh26LkZmXLVcHDrLv3ovedGLevdB6900UZNs4MWuOUgqYL4ZUUJnpBKL5maxnzDkBsAaf", - "Q9gsRzGpjtizTIfKlvvIFhUbAi1OwCB5I3B4MNoYCSWbJVW+jhCWW/JneZQM8AfmOO+qbBEa9IOaSrV9", - "3fPc7jntaZeuvoUvauErWYSq5YiqFEbCx7Dt2HYIjgJQBjks7MJtY08oTb51s0EGjh/m85xxIEksHIsq", - "JVJmC0E114ybA4x8fJ8QawImo0eIkXEANnp8cWDyVoRnky+uAiR3+eLUj42+4uBviKe22ABlI/KI0rBw", - "NuBASj0HoC6Gr76/OpGkOAxhfEoMm1vR3LA5p/E1g/QKLKDY2imn4GIO7g2Jszss8PZiudKa7FV0ndWE", - "MpMHOi7Q7YB4JjaJzW2LSryzzczQezRmGzPtYgfTlrK4o8hMbDCOBa8WGyO8B5ZhODwYgYa/YQrpFfsN", - "3eYWmF3T7pamYlSokGScOa8mlyFxYszUAxLMELncDapTXAuAjrGjKfXqlN+9SmpbPOlf5s2tNm2qLvl0", - "mNjxHzpC0V0awF/fClPXk3jXlViidop2OEa7lEYgQsaI3rCJvpOm7wpSkAMqBUlLiEouY647o9sA3jhn", - "vltgvMCCHZRv7wUxPhIWTGlojOg+JOFrmCcp1gkTYj68Ol3KuVnfeyHqa8oWosGOrWV+8RVgjOycSaUT", - "9EBEl2AafadQqf7ONI3LSu0oIltVk2Vx3oDTXsI2yVhexenVzfv9SzPt25olqmqG/JZxGxsywyqw0djC", - "HVPb8NOdC35tF/ya3tp6x50G09RMLA25tOf4k5yLDufdxQ4iBBgjjv6uDaJ0B4MMUkL73DGQm+zhxJTQ", - "g13W195hyvzYe8NGfGLq0B1lR4quJTAY7FwFQzeREUuYDoqo9nM1B84ALUuWbTq2UDvqoMZMr2Tw8KWn", - "OljA3XWD7cFAYPeMpYtIUO0qY42Ab8vhtop8HIzCzHm7FljIEMKpmPLF3PuIqtPJ9uHqHGj+PWx/Mm1x", - "OZPP08nNTKcxXLsR9+D6Xb29UTyja96a0lqekCuinJalFCuaJ87APESaUqwcaWJzb4/+wqwubsY8//bk", - "9TsH/ufpJM2ByqQWFQZXhe3KP82qbEGzgQPii0Ubnc/L7FaUDDa/rsIUGqXXS3BVdwNptFcesHE4BEfR", - "Gann8QihvSZn5xuxS9zhI4GydpE05jvrIWl7ReiKstzbzTy0A9E8uLhxNSajXCEc4MbelcBJltwqu+md", - "7vjpaKhrD08K59pRF7iwpa8VEbzrQsfw4m3pvO4FxeJ+1irSZ068KtCSkKicpXEbK58pQxzc+s5MY4KN", - "B4RRM2LFBlyxvGLBWKaZGqHodoAM5ogi0xeKHMLdTLhnTSrOfquAsAy4Np8knsrOQcVqis7a3r9OjezQ", - "n8sNbC30zfA3kTHCwpbdGw+B2C1ghJ66Hrgva5XZL7S2SGG4deOSuILDP5yxdyXucNY7+nDUbIMXl22P", - "W/gKSZ//GcKw5aj3P4HilVdXYXNgjuiTJkwlcyl+h7ieh+pxJBXHl/JkGOXyO/ARMeeNdad5maWZfXC7", - "h6Sb0ArVDlIYoHrc+cAthzUFvYWacrvV9oWBVqxbnGDCqNJDO35DMA7mXiRuTtczGiu4aIQMA9NJ4wBu", - "2dK1IL6zx72qExvs7CTwJddtmc2yLkE2WXL9ii3XFBjstKNFhUYyQKoNZYKp9f/lSkSGqfiacvtQheln", - "j5LrrcAav0yvtZBYI0HFzf4ZpKygeVxyyNK+iTdjC2bfYKgUBEX+3UD2fRtLRe6hhDpdx6HmdE6OpsFL", - "I243MrZiis1ywBYPbIsZVcjJa0NU3cUsD7heKmz+cETzZcUzCZleKotYJUgt1KF6UzuvZqDXAJwcYbsH", - "z8hddNsptoJ7Bovufp4cP3iGRlf7x1HsAnBvaOziJhmyk386dhKnY/Rb2jEM43ajHkTTye0jWsOMa8dp", - "sl3HnCVs6Xjd/rNUUE4XEI8UKfbAZPvibqIhrYMXntkXYJSWYkuYjs8Pmhr+NBB9btifBYOkoiiYLpxz", - "R4nC0FNTwd9O6oezz8m44qseLv8RfaSldxF1lMgvazS191ts1ejJfksLaKN1SqgtjJGzJnrBl4Qmp77u", - "DlajrYvQWtyYuczSUczBYIY5KSXjGhWLSs+Tv5N0SSVNDfs7GAI3mT19HKnA264Eya8G+BfHuwQFchVH", - "vRwgey9DuL7kLhc8KQxHye412R7BqRx05sbddkO+w91DjxXKzCjJILlVLXKjAae+EeHxHQPekBTr9VyJ", - "Hq+8si9OmZWMkwetzA79+P61kzIKIWPF9Jrj7iQOCVoyWGHsXnyTzJg33AuZj9qFm0D/dT0PXuQMxDJ/", - "lmOKwHMR0U59Vejaku5i1SPWgaFjaj4YMpi5oaakXYH3yzv9vPG573wyXzys+EcX2K+8pYhkv4KBTQyq", - "g0e3M6u/B/5vSp6LzdhN7ZwQv7H/BqiJoqRiefZTk5XZKb4uKU+XUX/WzHT8uXkmql6cvZ+iNeuWlHPI", - "o8NZWfBnLzNGpNpfxdh5CsZHtu3Wg7fL7SyuAbwNpgfKT2jQy3RuJgix2k54qwOq84XICM7TFEhruGf/", - 
"HYGg2vNvFSgdSx7CDzaoC+2WRt+1xYYJ8Ay1xQPyyr4EuwTSKn+DWlpdRcCVvrUG9arMBc2mWMjh/NuT", - "18TOavvYx05sseMFKintVXTsVUHtx3Hhwf7dknjqwvhxdsdSm1UrjdWolKZFGUsONS3OfQPMQA1t+Ki+", - "hNg5IC+DNx1tHqkZwtDDnMnCaFz1aFZ2QZow/9GapktUyVosdZjkx1fp9lSpgpfx6hdu6oKIeO4M3K5Q", - "t63TPSXC6M1rpuwDoLCCdj5qnZztTAI+P7W9PFlxbiklKnvsKh5wHbR74GyghjfzRyHrIP6KArktcn/V", - "ouVn2CtaoKlbAb33JJ7NbqxfLvEPO6eUC85SLI8Uu5rdS6FjfGAjKkl1jaz+iLsTGjlc0brrdZicw+Jg", - "JXbPCB3i+kb44KvZVEsd9k+NT1IuqSYL0MpxNsim/vkAZwdkXIErcInvygZ8UsiWXxE5ZNRVndQujSuS", - "EabFDCh235lvb53aj/Hil4yjgO/Q5kLTraUOHzLURitgmiwEKLeedm6w+mD6HGCabAabjwf+4UNbDQbd", - "cmbZ1gfdH+rEe6SdB9i0fWHaujpB9c+tCGQ76UlZukmHH5eIygN6wwcRHPEsJt61EyC3Hj8cbQe57Qwl", - "wfvUEBqs0BENJd7DPcKoH1roPOJjhFZLUdiC2BCuaAUDxiNgvGYcmmc5IxdEGr0ScGPwvA70U6mk2oqA", - "o3jaOdAcvc8xhqa0cz3cdKhuLSGDElyjn2N4G5s3IgYYR92gEdwo39avgRrqDoSJF/gMsUNk/8UHlKqc", - "EJVhRkHnDYgY4zCM278y074A+segLxPZ7lpSe3KuchMNJYnOqmwBOqFZFqtI9Ry/Evzqi0vBBtKqLkxZ", - "liTFmijtIjF9anMTpYKrqtgxl29ww+mCR1Ui1BA+7OJ3GJNQZlv8N1aVcXhnXBDGlcMAfcSFe4XiinJz", - "e6Se1GtoOlFskYzHBN4pN0dHM/X1CL3pf6uUnotFG5AvXBpiF5cL9yjG3741F0dYOaFXatReLXVhAwy6", - "E/4pPFQb65TcNlfCq6xXexSdPfVTW7sNEMOPZk3x8hsIvQ0KYlB7v1rv4VAAbjoYL061y1zTlOxkQYPZ", - "QDZ6x+b9IBRxy+lQxI4N2DGfe73HSYY9ORvH3olQHwrWB+h7H2dKSsqca7xhFn3Muoj0YXPhrkPXbHB3", - "ES7Oe9Bi9/1qKCabKMYXORD83n1m6BJcOnv9zrxdq49K8iqh/dU982rHq6Pio+vvRyfgVF/XDDpotD13", - "Je3tMp1O/v1PNoaNANdy+29gwu1teu+Rpr60a81TTRNSl0MeVR65dSvG31sarn/U1DxCeiqFYk0J7thD", - "TCNj3c7xLaWgflN/LB9osoJUY931xoEuAa5SzclMFjzy91cdpAHdsQ4JdOWPdtU86hdb33Oh9dKSgtQ6", - "W6j6YHyFn5M6TAqZElbAXQB37+y1Ew5Ghz3P55BqttqTBvbPJfAgxWjqjRD2vdwgK4zVYbRYReTqJrYG", - "oF1ZWjvhCar53RicoSSQS9jeUaRFDdHK2VN/r1yngARiALlDYkhEqFgYgrWaOs8wUzVlIBZ82I/tDk0p", - "rsE3d4KkxmvO5UnS3LhNouOOKeOPfoyay3S9UvovRoQOZYr1Hw0YFrZf4hsNqn4PzxegCFVSctov07d2", - "BSwwaa92FPhSFqD8bz5D186Ss0sIXwVCt8yaysy3iNoZvAkj2XEf9dK7fMH7LtDzembWBGn2E3oihZ8w", - "FDfNhZG/kqF45nZcZPh4PkZ/2JLfGPFp4JqDdK+nobCXCwWJFj6ocxccu1DhHnq/DhLUYLFFC9xgCZT3", - "TY0XLDpLseQJdZEt4QKJhIIa6GRQiWV4zl3IfmG/+wwWX3R0rzmlptf9heZ9eC5TPSSGVD8n7rbcnxlz", - "HcsK49y+1apiZVm4QWVo+i+lyKrUXtDhwaitT6OLHu1gJVGjRNpfZU+/zLEE2Osgz/AStodW9Pel+v1W", - "htBbEcquIcjr7+z2rRqd4vp1vrALWNwKnF/TcDOdlELkyYCt/7RfXaZ7Bi5ZegkZMXeHD2wbeLaE3EUT", - "c+3MXS+3vppKWQKH7N4BISfchhJ7v267vHFncn5H75p/g7NmlS345GxKBxc8HpOJpZjkDfmbH2Y3V1Ng", - "mN8Np7KD7KldshmobCPpOvKIz8FYpbTvae0+rNIQlYUiJqXsecIi4kX2byL4FzZ8xooWBUv7ryj0RIk5", - "vkaV0MjgpzUDn7beCmSdhzt8jSH7TENKrQBnlAfK8kqCyxywz+Z0yumXVC/99pnmfTHLXNmgMKzflmSn", - "yioFXjlxb/Z0z4UokxxW0HIkuHSGKk1BKbaC8L0f25lkACWq6t0LJGYhD+mqw0Pc2pPAxjoGu1GmYhFr", - "d4rs4RgDj7EnljzUWBIyEK1YVtEW/tQNnmIZ+bZ7COvIE3LlwxFfXO9ouOdSkrqYW8yQ6dJJ/BYa+m2e", - "dukISMETLPWYA29R1li4iTAyiNo4Zq9XRGMUPfRt2pEjEzy7stvyEtbYaYJ3pXWNoKbmT113S980p3Hc", - "AzC+wx7wQoNc8ASMl4QcOF85wvZNjZRgKYOU0Fr+PhufW2DDvoItsrzbLNNWPLPRWe19CQy46kVtFx16", - "l6lrPsWCOoJjkbG+2VWhqwxrlYeEY3i3XNH8y5tOsdLSCeLDvXMbX2hoewuRbFGprhfm9pqOmjuws93e", - "1Pwdmnr/CWaPoj5ON5TzedSygvcMIcukOclF88IdDknWOKZ1ij54SmYuRaeUkDLFOtmLa19GuTY14asC", - "zfPHu21b+9b5k9A3IOO5F1/I26YkqxZ4YzQQNkf0KzOVgZMbpfIY9fXIIoK/GI8Ka2XsuS4uW95SW+K6", - "EwYoJNyy1zSIf7qi17RfBWTs8qxn0Fw6lYL+Okff1i3cRi7qZm1jXf595O6q2znGUx8vx2u6Y6iARQjW", - "siYIKvnlwS9EwhwfqxHk/n2c4P79qWv6y8P2Z3Oc79+PP7P8pYIELI7cGG7eGMX8NBQ2bkOjBzIUOvtR", - "sTzbRxitfJPmuSfMqPjZZZx9lQenfra+nP5RdY9+XCU8qbsJiJjIWluTB1MFmSQjkkhct0jKCFpF0koy", - "vcVCON70z36OhjO8qr2Fzttcl05wd58Wl1CXUmp8i5Xyt+srQXO8j4xMjcFhGp/W/XZDizIHd1C+uTP7", - "Gzz6++Ps6NGDv83+fvTkKIXHT54dHdFnj+mDZ48ewMO/P3l8BA/mT5/NHmYPHz+cPX74+OmTZ+mjxw9m", - 
"j58++9sdw4cMyBbQiU+7nvwffJUtOXl3mpwbYBuc0JLVL2obMvZPy9AUTyIUlOWTY//T//Yn7CAVRTO8", - "/3XisjonS61LdXx4uF6vD8Iuhwt0JiRaVOny0M/Tf8n43WmdmWNVS9xRm3ThTQaeFE7w2/tvz87JybvT", - "g+ClzOPJ0cHRwQN8SLEETks2OZ48wp/w9Cxx3w8dsU2OP32eTg6XQHP0vZs/CtCSpf6TWtPFAuSBe2PH", - "/LR6eOhFicNPzpHyede3w7Bc9eGnlr8p29MTy9kefvJVWna3bpVBcX62oMNIKHY1O5xh8ufYpqCCxsNL", - "QQVDHX5CEXnw90OXERf/iKqKPQOH3ikbb9nC0ie9MbB2ergn+Q8/4X+QJgOwbPxpH1ybKXZoH7Ps/7zl", - "afTH/kDdRxViPx9+ahf1bCFULSudiXXQF4Vwq0H256vL3Lf+PlxTps216jzrWHSl31kDzQ9dzkjn1yZM", - "s/cFY0+DH9tPr0d+PaxrWkU/dok99tVt9kAjn/GHqZ/CZhXW3Oc0a0xgobXMV8iyJUOPP0Te8pyzRSU7", - "bxR3Xj8mTJH/PvvhLRGSOCPBO5pe1oFo5HRuq51IsWKYo5MFiV2m54G/BX6rQG4bLu3kh7Acpn8BwWU+", - "FWpRttMEat3ko727QennItvueIBsk8wYp3LbfoSskV3sx/4E/ScSl2CLwXnzT2jwQ7XN7VEoVWhZga16", - "gThFBv/w6OivF8L/eiH8rxfCWxJweAb8uf/rGPx1DP56KH/0Q/mPr3ix7PTjtLJURh3QqwzXW+hzmhFf", - "GSEhb2hubmjIyInT1lr+O1zrgz/tWk85BsQa9ZBY9ffzdPLkT7x5p1yD5DQn2NKu5tGfdjVnIFcsBXIO", - "RSkklSzfkh95Xc4gKGHY52Y/8ksu1twj4vN0oqqiQPGzFtEVoRiDEp5nISPHmyrCdOPDAJt8DN3iCAfk", - "nyfv356+fXVszT+1pcL8f1OCZAVwTXP0oFYucEazFZAMVpCL0nzGun0S0IPHBVlUVFKuAVxVSVmgl8W/", - "pU1zprcG6HmFb58ZtVJIewHQhcIoGHzoYjKdhCAYnrdJjPi8AJ44AT6ZiWzrC85KutYby10PA5teaCND", - "Vaa2jn34aHQBLAzntJzG5HN8eIhBeUuh9OHk8/RTxxwUfvxYg+4rBk1KyVaYgPjx8/8PAAD//xT50lyq", - "vwAA", + "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka76vy44aS/Ih3rarUd4qdZHVxHJelZO9by5dgyJ4ZrEiAAUDNTHz6", + "36/QAEiQBDnUI/Zu1f1kawg0Go1Go9EvfJqloigFB67V7PjTrKSSFqBB4l80TUXFdcIy81cGKpWs1Ezw", + "2bH/RpSWjK9m8xkzv5ZUr2fzGacFNG1M//lMwu8Vk5DNjrWsYD5T6RoKagDrXWla15C2yUokDsSJBXH6", + "enY98oFmmQSl+lj+xPMdYTzNqwyIlpQrmppPimyYXhO9Zoq4zoRxIjgQsSR63WpMlgzyTB34Sf5egdwF", + "s3SDD0/pukExkSKHPp6vRLFgHDxWUCNVLwjRgmSwxEZrqokZweDqG2pBFFCZrslSyD2oWiRCfIFXxez4", + "w0wBz0DiaqXArvC/SwnwBySayhXo2cd5bHJLDTLRrIhM7dRRX4Kqcq0ItsU5rtgVcGJ6HZAfK6XJAgjl", + "5P13r8izZ89emokUVGvIHJMNzqoZPZyT7T47nmVUg//c5zWar4SkPEvq9u+/e4Xjn7kJTm1FlYL4Zjkx", + "X8jp66EJ+I4RFmJcwwrXocX9pkdkUzQ/L2ApJExcE9v4XhclHP+LrkpKdbouBeM6si4EvxL7OSrDgu5j", + "MqxGoNW+NJSSBuiHo+Tlx09P5k+Orv/jw0nyD/fnV8+uJ07/VQ13DwWiDdNKSuDpLllJoLhb1pT36fHe", + "8YNaiyrPyJpe4eLTAkW960tMXys6r2heGT5hqRQn+UooQh0bZbCkVa6JH5hUPDdiykBz3E6YIqUUVyyD", + "bG6k72bN0jVJqbIgsB3ZsDw3PFgpyIZ4LT67kc10HZLE4HUreuCE/nWJ0cxrDyVgi9IgSXOhINFiz/Hk", + "TxzKMxIeKM1ZpW52WJHzNRAc3Hywhy3SjhuezvMd0biuGaGKUOKPpjlhS7ITFdng4uTsEvu72RiqFcQQ", + "DRendY6azTtEvh4xIsRbCJED5Ug8v+/6JONLtqokKLJZg167M0+CKgVXQMTin5Bqs+z/6+ynt0RI8iMo", + "RVfwjqaXBHgqMsgOyOmScKED1nC8hDQ0PYfm4fCKHfL/VMLwRKFWJU0v4yd6zgoWmdWPdMuKqiC8KhYg", + "zZL6I0QLIkFXkg8hZCHuYcWCbvuDnsuKp7j+zbAtXc5wG1NlTndIsIJuvz6aO3QUoXlOSuAZ4yuit3xQ", + "jzNj70cvkaLi2QQ1R5s1DQ5WVULKlgwyUkMZwcQNsw8fxm+GT6N8Beh4IIPo1KPsQYfDNsIzZnebL6Sk", + "KwhY5oD87IQbftXiEnjN6GSxw0+lhCsmKlV3GsARhx7XwLnQkJQSlizCY2eOHEbA2DZOAhdOB0oF15Rx", + "yIxwRqSFBiusBnEKBhy/7/RP8QVV8OL50BnffJ24+kvRXfXRFZ+02tgosVsycnSar27DxjWrVv8J98Nw", + "bMVWif25t5BsdW5OmyXL8ST6p1k/T4ZKoRBoEcKfTYqtONWVhOML/tj8RRJypinPqMzML4X96ccq1+yM", + "rcxPuf3pjVix9IytBohZ4xq9cGG3wv5j4MXFsd5G7xVvhLisynBCaeviutiR09dDi2xh3pQxT+rbbnjx", + "ON/6y8hNe+htvZADSA7SrqSm4SXsJBhsabrEf7ZL5Ce6lH+Yf8oyN711uYyR1vCxO5LRfODMCidlmbOU", + "GiK+d5/NVyMEwF4kaNPiEA/U408BiqUUJUjNLFBalkkuUponSlONkP5TwnJ2PPuPw8b+cmi7q8Ng8Dem", + "1xl2MiqrVYMSWpY3gPHOqD5qRFgYAY2fUExYsYdKE+N2EQ0rMSOCc7iiXB80V5aWPKg38Ac3UkNvq+1Y", + "eneuYIMEJ7bhApTVgG3DB4oEpCdIVoJkRYV0lYtF/cPDk7JsKIjfT8rS0gO1R2ComMGWKa0e4fRps5PC", + "cU5fH5DvQ9ioigue78zhYFUNczYs3anlTrHatuTm0EB8oAgup5AHZmk8GYyafx8ch9eKtciN1rOXV0zj", + "v7m2IZuZ3yd1/vdgsZC2w8yFFy1HOXvHwV+Cy83DDuf0GceZew7ISbfv7djGQIkzzK14ZXQ9LdwROtYk", + 
"3EhaWgTdF3uWMo6XNNvI4npHaTpR0EVxDvZwwGuI1a332t79EMUEWaGDwze5SC//RtX6Hvb8wsPqbz8c", + "hqyBZiDJmqr1wSymZYTbq4E2ZYuZhnjBJ4tgqIN6ivc1vT1Ty6imwdQcvnG1xJIe+6HQAxm5u/yE/6E5", + "MZ/N3jai34I9IOcowJTdzs7JkJnbvr0g2JFMA7RCCFLYCz4xt+4bYfmqGTy+TpPW6FtrU3Ar5CaBKyS2", + "974NvhHbGA7fiG1vC4gtqPvgDwMH1UgNhZqA32uHmcD1d+SjUtJdn8gIewqRzQSN6qpwN/DwxDejNMbZ", + "k4WQt5M+HbHCSWNyJtRADYTvvEMkbFqViWPFiNnKNugAarx840KjCz5GsRYVzjT9E6igDNT7oEIb0H1T", + "QRQly+EeWH8dFfoLquDZU3L2t5Ovnjz99elXLwxLllKsJC3IYqdBkYfubkaU3uXwqD8zvB1VuY5Df/Hc", + "GyrbcGNwlKhkCgUt+6CsAdSqQLYZMe36VGuTGWddIzhlc56DkeSW7MTa9g1qr5kyGlaxuJfFGCJY1oyS", + "EYdJBnuZ6abTa4bZhVOUO1ndx1UWpBQyYl/DLaZFKvLkCqRiIuJNeedaENfCq7dl93eLLdlQRczYaPqt", + "OCoUEc7SWz5d7lvQ51ve0GZU8tv5Rmbnxp2yLm3ie0uiIiXIRG85yWBRrVo3oaUUBaEkw454Rn8P+mzH", + "U7Sq3QeTDl/TCsbRxK92PA3ubGahcshWrUW4+92sSxVvn7NDPVARdAw53uBnvNa/hlzTe9dfugPEcH/l", + "F9IiSzLTEG/Bb9hqrQMF850UYnn/OMZGiSGKH6x6nps+fSX9rcjATLZS93AYN8AaXjdrGnI4XYhKE0q4", + "yAAtKpWKH9MDnnt0GaKnU4cnv15bjXsBhpFSWpnZViVBP15PcjQdE5pa7k2QNGrAi1G7n2wrO5z1CucS", + "aGZu9cCJWDhXgXNi4CQpOiG1P+ickhDZSy28SilSUAqyxJko9qLm21khokfohIgjwvUoRAmypPLOyF5e", + "7cXzEnYJuswVefjDL+rRF8BXC03zPYTFNjHy1hc+5w/qYz1t+DGG6w4esh2VQLzMNbdLIyBy0DBEwhvR", + "ZHD9uhj1VvHuZLkCiZ6ZP5Xj/SB3Y6Aa1T+Z3++KbVUOBIK5i845K9BuxykXClLBMxUFllOlk31i2TRq", + "3cbMDAJJGJPECHhAKXlDlbbeRMYzNILY4wTHsQqKGWIY4UGF1ED+xeuifdipOQe5qlStmKqqLIXUkMXm", + "wGE7MtZb2NZjiWUAu9Z+tSCVgn2Qh6gUwHfEsjOxBKK6Nro7d3t/cmiaNuf8LkrKFhINIcYQOfOtAuqG", + "wTADiDDVENoyDlMdzqkjcOYzpUVZGmmhk4rX/YbIdGZbn+ifm7Z95qK6ObczAQpjcFx7h/nGUtaGQa2p", + "uUIjZFLQS6N74IXYuj37OJvNmCjGU0jGON9syzPTKtwCezdpVa4kzSDJIKe7PtCf7WdiP48BwBVvLj5C", + "Q2LjWeKL3nCyDx8YAS0QnoopjwS/kNRsQXPzaBjE9d4DOQOEHRNOjo8e1KBwrOgSeXg4bbvUEYh4Gl4J", + "bVbcsgNi7AT6FHwHyFBDvj0lsHPSXMu6Q/w3KDdArUbcfJAdqKEpNPBvNIEBY5qLFA62S0e6dwRwVGoO", + "SrE9YmRoxw5Y9t5RqVnKSrzq/AC7e7/5dQeI+ptIBpqyHDISfLC3wDLsT2wgRhfm7W6Ck4wwffR7VpjI", + "dHKmUONpI38JO7xyv7MRfudBXOA9XGUjUM3xRDlBRH3ckNHAwyawpanOd0ZP02vYkQ1IIKpaFExrG7nb", + "vulqUSYhgKiBe2RE582x0XF+Baa4l84QVDC9/lLMZ/ZKMI7feede0CKHuwqUQuQTjEc9YkQxmOT4J6Uw", + "q85cELEPI/Wc1ELSCW105dWn/wPVIjPOgPy3qEhKOd64Kg21SiMk6gmoP5oRjAZWj+lc/A2FIIcC7EUS", + "vzx+3J3448duzZkiS9j4yHvTsEuOx4/RjPNOKN3aXPdgKjTb7TRyfKDlH889F7zQkSn7XcwO8pSVfNcB", + "XrsLzJ5SyjGumf6dBUBnZ26nzD3kkWnudYQ7yagfgI7NG9f9jBVVTvV9uC9G9dH6PsGKAjJGNeQ7UkpI", + "wUZXGwVLWVwMasTGXaVryleoV0tRrVzgj4WDgrFS1oIhK94DEVU+9JYnKymqMiYoXbCnD7A3agdQc/MJ", + "CImdrZ6/ofV4LqdiygnmCR6szvcG5pBXYT4bvBgaol41F0NLnHaWQJwKmPaQqCpNAaIhwLErVz3VTjZk", + "k9/iABq1oZI2BorQVFc0D7mOnC4J5bt2miRluTJSkCmC7UznJq52bufmc1iWNLe+2UhSRbhTWhpfsPIN", + "SbukmOh3QCYx2lCfM0IGNNvLsPGfY8NvQMew7A8cBF01H4firsz9O9/dgxpkAREJpQSFh1Zot1L2q1iG", + "uU/uVFM7paHom/Zt118HBM37wQuk4DnjkBSCwy6a7ss4/Igfo4IDD86BzqjCDPXt3kpa+HfQao8zhRvv", + "Sl9c7UAWvasDDu9h8btwO16dMOsLrZaQl4SSNGdo0xRcaVml+oJTtJoEmy0SmOHvh8N2tFe+SdxwF7Gr", + "OVAXnGJQTm1LiTqTlxAxHHwH4M1pqlqtQHXkJ1kCXHDXinFScaZxrMKsV2IXrASJ0REHtmVBd0YEotnv", + "D5CCLCrdlsmYeaK0EZfWxWSGIWJ5wakmOZg79Y+Mn28RnHfRep7hoDdCXtZUiB8hK+CgmEriASTf268Y", + "2+emv3ZxfpgpbD9bp4SB36Sn7NCo0mS//p+H/3X84ST5B03+OEpe/o/Dj5+eXz963Pvx6fXXX//f9k/P", + "rr9+9F//GVspj3ssL8JhfvraXdZOX6NG3nglerh/Not0wXgSZbLQ997hLfIQcwAdAz1q22v0Gi643nLD", + "SFc0Z5lRuW7DDl0R19uLdnd0uKa1EB37jJ/rDfXcO0gZEhEyHdF462O8H3MVz0BCN5lLKsL9sqy4XUqv", + "6NoAex/7IpbzOsvMFqA4JpiCtKY+cMv9+fSrF7N5kzpUf5/NZ+7rxwgns2wb1Q5hG7u+uA2CG+OBIiXd", + "KRhQQBH3aJiPjTYIwRZg7r1qzcrPLymUZou4hPNhy84MsuWn3MYTm/2DTreds+WL5efHW0ujh5d6HUtM", + "b2kK2KpZTYBOIEQpxRXwOWEHcNA1Q2TmauYCjnKgS0yQxouemJKGUe8Dy2ieKwKqhxOZdNeP8Q8qt05a", + 
"X89n7vBX966PO8AxvLpj1h42/7cW5MH3356TQycw1QObq2hBB9llkVurS6BohcgYaWbLcdhkzQt+wV/D", + "knFmvh9f8IxqerigiqXqsFIgv6E55SkcrAQ59jkZr6mmF7ynaQ1WzAmyYUhZLXKWkstQI27Y01ZB6EO4", + "uPhA85W4uPjYixbo669uqKh8sQMkG6bXotKJy+FOJGyojHljVJ3Di5BtkYaxUefEwbai2OWIO/hxmUfL", + "UnVz+frTL8vcTD9gQ+Uy1cySEaWF9LqIUVAsNri+b4U7GCTdeBNGpUCR3wpafmBcfyTJRXV09AxIK7nt", + "N3fkG57clTDZkDGYa9i1X+DE7b0GtlrSpKSrmNfn4uKDBlri6qO+XOAlO88Jdmsl1fmgYQTVTMDTY3gB", + "LB43ThDCyZ3ZXr5eT3wK+AmXENsYdaNxRd92vYI0u1svVydVr7dKlV4nZm9HZ6UMi/uVqct4rIyS5eMD", + "FFthDKareLIAkq4hvXSlKKAo9W7e6u5DUJyi6UUHU7ZIiU2SwTR5tJkvgFRlRp0q3rUgLXZEgdY+CPQ9", + "XMLuXDRZ9jdJUG7ny6qhjYqcGmiXhlnDbetgdBffxTmhiassfdop5h95tjiu+cL3Gd7IVuW9h00cY4pW", + "PucQIaiMEMIy/wAJbjFRA+9OrB+bnrllLOzJFylY4mU/cU2ay5MLSQpngwZu+70ArHgkNoosqNHbhSvW", + "Y3NCAylWKbqCAQ05dFtMzLxsuToQyL5zL3rSiWX3QOudN1GUbePEzDnKKWC+GFbBy0wnEM2PZD1jzgmA", + "NfgcwRY5qkl1xJ4VOlS23Ee2qNgQanEGBskbhcOj0aZIqNmsqfJ1hLDckt/Lk3SAPzHHeayyRWjQD2oq", + "1fZ1L3O7+7R3u3T1LXxRC1/JIrxaTqhKYTR8DNuOLYfgqABlkMPKTtw29ozS5Fs3C2Tw+Gm5zBkHksTC", + "sahSImW2EFRzzLgxwOjHjwmxJmAyGUKMjQO00eOLgMlbEe5NvroJktzli1MPG33Fwd8QT22xAcpG5RGl", + "EeFswIGUeglAXQxffX51IkkRDGF8ToyYu6K5EXPuxtcA6RVYQLW1U07BxRw8GlJnRyzw9mC50ZzsUXSb", + "2YQ6k0c6rtCNYLwQ28TmtkU13sV2Yfg9GrONmXaxjWlLWTxQZCG2GMeCR4uNEd6DyzAeHo3ghr9lCvkV", + "+w2d5haZsWHHtakYFypkGWfOq9llSJ2YMvSABjPELg+D6hS3QqBj7GhKvbrL795Lals96R/mzak2b6ou", + "+XSY2PYf2kLRVRqgX98KU9eTeNfVWKJ2inY4RruURqBCxpjeiIm+k6bvClKQA14KkpYSlVzGXHfmbgN4", + "4pz5boHxAgt2UL57FMT4SFgxpaExovuQhC9hnqRYJ0yI5fDsdCmXZn7vhaiPKVuIBju2pvnZZ4Axsksm", + "lU7QAxGdgmn0ncJL9XemaVxXakcR2aqaLIvLBhz2EnZJxvIqzq9u3B9em2Hf1iJRVQuUt4zb2JAFVoGN", + "xhaODG3DT0cn/MZO+A29t/lO2w2mqRlYGnZpj/Fvsi86kndMHEQYMMYc/VUbJOmIgAxSQvvSMdCb7ObE", + "lNCDMetrbzNlHvbesBGfmDp0RllI0bkEBoPRWTB0Exm1hOmgiGo/V3NgD9CyZNm2Ywu1UAdvzPRGBg9f", + "eqpDBVxdB2wPBQK7ZyxdRIJqVxlrFHxbDrdV5ONgEmXO27XAQoEQDsWUL+beJ1SdTraPVudA8x9g94tp", + "i9OZXc9ndzOdxmjtIO6h9bt6eaN0Rte8NaW1PCE3JDktSymuaJ44A/MQa0px5VgTm3t79GcWdXEz5vm3", + "J2/eOfSv57M0ByqTWlUYnBW2K/9tZmULmg1sEF8s2tz5vM5uVclg8esqTKFRerMGV3U30EZ75QEbh0Ow", + "FZ2RehmPENprcna+ETvFER8JlLWLpDHfWQ9J2ytCryjLvd3MYzsQzYOTm1ZjMioVQgB39q4ETrLkXsVN", + "b3fHd0fDXXtkUjjWSF3gwpa+VkTwrgsdw4t3pfO6FxSL+1mrSF848apAS0KicpbGbax8oQxzcOs7M40J", + "Nh5QRg3Eig24YnnFAlimmZpw0e0gGYwRJaYvFDlEu4Vwz5pUnP1eAWEZcG0+SdyVnY2K1RSdtb1/nBrd", + "oT+WA2wt9A34u+gYYWHL7omHSIwrGKGnrofu6/rK7CdaW6Qw3LpxSdzA4R+O2DsSR5z1jj8cN9vgxXXb", + "4xa+QtKXf4YxbDnq/U+g+Murq7A5MEb0SROmkqUUf0D8nofX40gqji/lyTDK5Q/gE2LOG+tO8zJLM/rg", + "cg9pN6EVqh2kMMD1uPKBWw5rCnoLNeV2qe0LA61YtzjDhFGlhxZ+wzAO514kbk43CxoruGiUDIPTSeMA", + "btnStSC+s6e9qhMb7Ogk8CXXbZnNsi5BNlly/Yott1QY7LCTVYVGM0CuDXWCufX/5UpEwFR8Q7l9qML0", + "s1vJ9VZgjV+m10ZIrJGg4mb/DFJW0DyuOWRp38SbsRWzbzBUCoIi/w6Qfd/GcpF7KKFO13GkOV2So3nw", + "0ohbjYxdMcUWOWCLJ7bFgiqU5LUhqu5ipgdcrxU2fzqh+brimYRMr5UlrBKkVurwelM7rxagNwCcHGG7", + "Jy/JQ3TbKXYFjwwV3fk8O37yEo2u9o+j2AHg3tAYkyYZipO/O3ES52P0W1oYRnA7qAfRdHL7iNaw4BrZ", + "TbbrlL2ELZ2s27+XCsrpCuKRIsUenGxfXE00pHXowjP7AozSUuwI0/HxQVMjnwaiz434s2iQVBQF04Vz", + "7ihRGH5qKvjbQT04+5yMK77q8fIf0UdaehdR5xL5eY2m9nyLzRo92W9pAW2yzgm1hTFy1kQv+JLQ5NTX", + "3cFqtHURWksbM5aZOqo5GMywJKVkXOPFotLL5K8kXVNJUyP+DobQTRYvnkcq8LYrQfKbIf7Z6S5BgbyK", + "k14OsL3XIVxf8pALnhRGomSPmmyPYFcOOnPjbrsh3+E46KlKmYGSDLJb1WI3GkjqOzEeHwF4R1as53Mj", + "frzxzD47Z1Yyzh60Miv08/s3TssohIwV02u2u9M4JGjJ4Apj9+KLZGDecS1kPmkV7oL9l/U8eJUzUMv8", + "Xo5dBL4RkduprwpdW9JdrHrEOjC0Tc0HwwYLB2pO2hV4P7/Tzxuf+84n88Xjin90kf3CS4pE9jMYWMSg", + "Onh0ObP6e+D/puQbsZ26qJ0d4hf2X4A0UZJULM9+abIyO8XXJeXpOurPWpiOvzbPRNWTs+dTtGbdmnIO", + 
"eRSc1QV/9TpjRKv9p5g6TsH4xLbdevB2up3JNYi30fRI+QENeZnOzQAhVdsJb3VAdb4SGcFxmgJpjfTs", + "vyMQVHv+vQKlY8lD+MEGdaHd0tx3bbFhAjzD2+IB+d6+BLsG0ip/g7e0uoqAK31rDepVmQuazbGQw/m3", + "J2+IHdX2sY+d2GLHK7yktGfRsVcFtR+nhQf7d0viqQvT4YzHUptZK43VqJSmRRlLDjUtzn0DzEANbfh4", + "fQmpc0BeB2862jxSA8Lww5LJwty4amhWd0GeMP/RmqZrvJK1ROowy0+v0u25UgUv49Uv3NQFEXHfGbxd", + "oW5bp3tOhLk3b5iyD4DCFbTzUevkbGcS8Pmp7enJinPLKVHdY6x4wG3I7pGzgRrezB/FrEP4Gyrktsj9", + "TYuWn2GvaIGmbgX03pN4NruxfrnEP+ycUi44S7E8Uuxodi+FTvGBTagk1TWy+i3udmhkc0Xrrtdhco6K", + "g5XYvSB0hOsb4YOvZlEtd9g/NT5JuaaarEArJ9kgm/vnA5wdkHEFrsAlvisbyEkhW35FlJBRV3VSuzRu", + "yEaYFjNwsfvOfHvrrv0YL37JOCr4jmwuNN1a6vAhQ21uBUyTlQDl5tPODVYfTJ8DTJPNYPvxwD98aKvB", + "oFvOTNv6oPugTrxH2nmATdtXpq2rE1T/3IpAtoOelKUbdPhxiag+oLd8kMARz2LiXTsBcWv4IbQRdhsN", + "JcHz1DAaXKEjGko8h3uMUT+00HnExyitlqOwBbEhXNEKBoxH0HjDODTPckYOiDR6JODC4H4d6KdSSbVV", + "ASfJtHOgOXqfYwJNaed6uCuobi0hQxKcox9jeBmbNyIGBEfdoFHcKN/Vr4Ea7g6UiVf4DLEjZP/FB9Sq", + "nBKVYUZB5w2ImOAwgtu/MtM+APrboK8T2e5aUrtzbnISDSWJLqpsBTqhWRarSPUNfiX41ReXgi2kVV2Y", + "sixJijVR2kVi+tzmBkoFV1UxMpZvcMfhgkdVItwQPuziVxiTUBY7/DdWlXF4ZVwQxo3DAH3EhXuF4oZ6", + "cxtST+s1PJ0otkqmUwLPlLuToxn6doze9L9XTs/Fqo3IZy4NMSblwjWKybdvzcERVk7olRq1R0td2ACD", + "7oR/Cg+vjXVKblsq4VHWqz2Kzp76qa1xA8Two1lzPPwGQm+DghjUnq/WezgUgJsOxotT7TLXNCWjImgw", + "G8hG79i8H8QibjkditixATvmc6/3NM2wp2cj7FGC+lCwPkI/+DhTUlLmXOONsOhT1kWkD5sLxzZds8Dd", + "Sbg470GL3Q9XQzHZRDG+yoHg9+4zQ5fg0tnrd+btXH1Ukr8S2l/dM68WXh0VH51/PzoBh/qyZtBBo+25", + "K2lvp+nu5D/8YmPYCHAtd/8CJtzeovceaepru9Y81TQhdTnkSeWRW6di/L2l4fpHTc0j5KdSKNaU4I49", + "xDQx1u0c31IK6jf1YflAkytINdZdbxzoEuAm1ZzMYMEjf/+/DtLA3bEOCXTlj8ZqHvWLre850HppSUFq", + "nS1UfTC9ws9JHSaFQgkr4K6Au3f22gkHk8Oel0tINbvakwb29zXwIMVo7o0Q9r3cICuM1WG0WEXk5ia2", + "BqGxLK1RfIJqfndGZygJ5BJ2DxRpcUO0cvbcnyu3KSCBFEDpkBgWESoWhmCtps4zzFTNGUgFH/Zju0NT", + "imvwzZ0gqfGWY3mWNCduk+g4MmT80Y9JY5muN0r/xYjQoUyx/qMBw8r2a3yjQdXv4fkCFOGVlJz2y/Rt", + "XAELTNqrHQW+lAUo/5vP0LWj5OwSwleB0C2zoTLzLaJ2Bm/CSEbOo156ly9430V6WY/MmiDNfkJPpPAT", + "huKmuTD6VzIUz9yOiwwfz8foD1vyGyM+DV5LkO71NFT2cqEg0cIHdY7hMUYK99D7bYigBostWuQGS6C8", + "b2q8YNFZiiVPqItsCSdIJBTUYCeDSizDY44R+5X97jNYfNHRveaUml/3F5r34blM9YgYcv2SuNNyf2bM", + "bSwrjHP7VquKlWXhhpSh6b+UIqtSe0CHG6O2Pk0uejQiSqJGibQ/y979MscSYG+CPMNL2B1a1d+X6vdL", + "GWJvVSg7hyCvv7Pa92p0it+v85WdwOpe8PyShpv5rBQiTwZs/af96jLdPXDJ0kvIiDk7fGDbwLMl5CGa", + "mGtn7ma989VUyhI4ZI8OCDnhNpTY+3Xb5Y07g/MHemz8LY6aVbbgk7MpHVzweEwmlmKSd5RvHsy4VFNg", + "hN8dh7JA9tQu2Q5UtpF0E3nE52DqpbTvae0+rNIwlcUipqXsecIi4kX2byL4FzZ8xooWBUv7ryj0VIkl", + "vkaV0Ajw01qAz1tvBbLOwx2+xpB9piGlVoEzlwfK8kqCyxywz+Z0yumXVK/98pnmfTXLHNmgMKzflmSn", + "yl4K/OXEvdnT3ReiTHK4gpYjwaUzVGkKSrErCN/7sZ1JBlDiVb17gMQs5CFfdWSIm3sS2FinUDcqVCxh", + "7UqRPRJj4DH2xLKHmspCBqMrllW0RT91h6dYJr7tHuI6cYfceHPEJ9fbGu65lKQu5hYzZLp0Er+Ehn+b", + "p106ClLwBEsNc+AtypoKd1FGBkkbp+ztimhM4oe+TTuyZYJnV8YtL2GNnSZ4V1rXCN7U/K7rLumPzW6c", + "9gCM77AHvdAgFzwB4zUhh84XjrD9sSZKMJVBTmhNf5+Nz02wEV/BElnZbaZpK57Z6Kz2ugQGXPWqtosO", + "vcvUNZ9iQR3BschY3+yq0FWGtcpDxjGyW17R/PObTrHS0gnSw71zG59oaHsLiWxJqW4X5vaGTho7sLPd", + "39D8HZp6/w5mjaI+TgfK+TxqXcF7hlBk0pzkonnhDkGSDcK0TtEnL8jCpeiUElKmWCd7cePLKNemJnxV", + "oHn+eNy2tW+evwh9BzZeevWFvG1KsmqBJ0aDYbNFv7BQGdi5US6PcV+PLSL0i8mosFbGnuPisuUttSWu", + "O2GAQsI9e02D+Kcbek37VUCmTs96Bs2hUynoz3Pyad2ibeSgbuY21eXfJ+5Y3c4pnvp4OV7THUMFLEGw", + "ljVBVMlvT34jEpb4WI0gjx/jAI8fz13T3562P5vt/Phx/JnlzxUkYGnkYLhxYxzzy1DYuA2NHshQ6KxH", + "xfJsH2O08k2a554wo+JXl3H2RR6c+tX6cvpb1T36cZPwpO4iIGEic20NHgwVZJJMSCJx3SIpI2gVSSvJ", + "9A4L4XjTP/s1Gs7wfe0tdN7munSCO/u0uIS6lFLjW6yUP12/FzTH88jo1BgcpvFp3W+3tChzcBvl6weL", + 
"v8Czvz7Pjp49+cvir0dfHaXw/KuXR0f05XP65OWzJ/D0r189P4InyxcvF0+zp8+fLp4/ff7iq5fps+dP", + "Fs9fvPzLAyOHDMoW0ZlPu579b3yVLTl5d5qcG2QbmtCS1S9qGzb2T8vQFHciFJTls2P/0//0O+wgFUUD", + "3v86c1mds7XWpTo+PNxsNgdhl8MVOhMSLap0fejH6b9k/O60zsyxV0tcUZt04U0GnhVO8Nv7b8/Oycm7", + "04Pgpczj2dHB0cETfEixBE5LNjuePcOfcPescd0PHbPNjj9dz2eHa6A5+t7NHwVoyVL/SW3oagXywL2x", + "Y366enroVYnDT86Rcj327TAsV334qeVvyvb0xHK2h598lZbx1q0yKM7PFnSYiMVYs8MFJn9ObQoqaDw8", + "FbxgqMNPqCIP/n7oMuLiH/GqYvfAoXfKxlu2qPRJbw2unR7uSf7DT/gf5MkALRt/2kcXtiVIZpQs635e", + "2ZJRNQufZrPj2bdBo1drSC+xQLI1MiBvPj06igTFB72I3Sp0kUNm+Pz50fMJHbjQYSdXyqPf8Wd+ycWG", + "EwyhtHKzKgoqd6iP6EpyRX76gbAlge4QTPkRcK/SlUJTLVZjnc1nLfJ8vHZEs+l1h/YF0IaW/ucdT6M/", + "9qnffYki9vPhp3Yl1BYXqnWlM7EJ+uLNxV67++PVbwO0/j7cUKaNLuLCEbBSTb+zBpofukSbzq9NbGvv", + "CwbsBj+236uP/HpYFwKLfuxKiNhXt0MGGvk0Sf+50RDCE3d2/CE4az98vP5ovknTGj81B8jx4SG6+NZC", + "6cPZ9fxT53AJP36secznH89Kya4wnPnj9f8LAAD//+4FyDz4swAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go index b339716955..5bf532b490 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go @@ -290,11 +290,11 @@ var swaggerSpec = []string{ "RQpele3VCaKVGLY9uqXqAGI1MXYkX3fAx94Pu55Onu454632o1Z0bOQdsK95xnxuJY796PONfSoxksXK", "dUbn1vV08uxzzv5UWpbnOcOWQTWd/tL/JC+kupK+pVUyqtWKlxu/jXVLKDC32HiU8YVGT0YpLjnqdlLJ", "1hMhk4/oPYvltw7IG234DeTNme31b3nzueQNLtJdyJs2oDuWN4/33PN//hn/W8L+2STsGYm7W0lYp/BR", - "SlFfA6Xk/0N6n7z/80am0R/7gLrvZMV+PvzUrtPe0pH1sjKZuqLKFNFDAYuR8txVLkMjaH2hMop5AE1A", - "LfvRJbzkG7T8igwYx0x8VZnmxms71x7d2idhITRv6C2ExAHQuIyjUIk+HoSqaUiVpBenOgeQw+yNyqB/", - "AOER81sF5aY5YxyOk2lLAjkWihTEu7VA7wuM6/0YDI3g5MHpM0f9zFTr78MrLow9plxkK1K039kAzw9d", - "znbn1yZNqvcFc7+CH0O3dPTXw7qmbPRj97IZ++ouWwONfMUN/7kxNoXGG2SJ2mzz/qNdWaxY5rilsUUc", - "Hx5itNhSaXM4uZ5+6tgpwo8f68X0pWzqRb3+eP0/AQAA//9NWINiQ74AAA==", + "SlFfA4V1AaVYgaTCRu5XKglwSK+W93/eyDT6Yx989/Ws2M+Hn9rV21uas15WJlNXVK8ielRgiVKeu3pm", + "aBqtr1lGMQ+gCbNlP7o0mHyD9mCRAeOYn68q09yDbefaz1t7KiyE5mW9hZA4AJqccRQq3MeDADYNqZL0", + "DlXnWHKYvVEZ9I8lPHh+q6DcNCePw3Eybcklx1iRMnm3FvN9MXK9H9uhaZz8On3mqB+fav19eMWFsYeX", + "i3dFivY7G+D5ocvk7vzaJE/1vmBGWPBj6KyO/npYV5qNfuxeQWNf3RVsoJGvw+E/Nyao0KSDLFEbc95/", + "tCuLdcwctzQWiuPDQ4whWyptDifX008d60X48WO9mL7ATb2o1x+v/ycAAP//m8e6KFm+AAA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index 89c8e57209..7c89fbd8b4 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -78,6 +78,9 @@ type ServerInterface interface { // Get parameters for constructing a new transaction // (GET /v2/transactions/params) TransactionParams(ctx echo.Context) error + // Simulates a raw transaction or transaction group as it would be evaluated on the network. The simulation will use blockchain state from the latest committed round. + // (POST /v2/transactions/simulate) + SimulateTransaction(ctx echo.Context, params SimulateTransactionParams) error } // ServerInterfaceWrapper converts echo contexts to parameters. 
@@ -495,6 +498,26 @@ func (w *ServerInterfaceWrapper) TransactionParams(ctx echo.Context) error { return err } +// SimulateTransaction converts echo context to params. +func (w *ServerInterfaceWrapper) SimulateTransaction(ctx echo.Context) error { + var err error + + ctx.Set(Api_keyScopes, []string{""}) + + // Parameter object where we will unmarshal all parameters from the context + var params SimulateTransactionParams + // ------------- Optional query parameter "format" ------------- + + err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), ¶ms.Format) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err)) + } + + // Invoke the callback with all the unmarshalled arguments + err = w.Handler.SimulateTransaction(ctx, params) + return err +} + // This is a simple interface which specifies echo.Route addition functions which // are present on both echo.Echo and echo.Group, since we want to allow using // either of them for path registration @@ -542,234 +565,242 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL router.POST(baseURL+"/v2/teal/disassemble", wrapper.TealDisassemble, m...) router.POST(baseURL+"/v2/teal/dryrun", wrapper.TealDryrun, m...) router.GET(baseURL+"/v2/transactions/params", wrapper.TransactionParams, m...) + router.POST(baseURL+"/v2/transactions/simulate", wrapper.SimulateTransaction, m...) } // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a3PbuJLoX0FptyqPFeW898RVU3udZB7eSTKp2DO754xzZyCyJeGYAngA0JYmN//9", - "FhoACZKgRNmOncz4U2IRj0aj0Wj08+MoFctCcOBajfY/jgoq6RI0SPyLpqkouU5YZv7KQKWSFZoJPtr3", - "34jSkvH5aDxi5teC6sVoPOJ0CXUb0388kvCvkknIRvtaljAeqXQBS2oG1uvCtK5GWiVzkbghDuwQh69G", - "nzZ8oFkmQakulD/xfE0YT/MyA6Il5Yqm5pMi50wviF4wRVxnwjgRHIiYEb1oNCYzBnmmJn6R/ypBroNV", - "usn7l/SpBjGRIocunC/Fcso4eKigAqraEKIFyWCGjRZUEzODgdU31IIooDJdkJmQW0C1QITwAi+Xo/1f", - "Rwp4BhJ3KwV2hv+dSYA/INFUzkGPPoxji5tpkIlmy8jSDh32Jagy14pgW1zjnJ0BJ6bXhLwplSZTIJST", - "99+9JI8fP35uFrKkWkPmiKx3VfXs4Zps99H+KKMa/OcurdF8LiTlWVK1f//dS5z/yC1waCuqFMQPy4H5", - "Qg5f9S3Ad4yQEOMa5rgPDeo3PSKHov55CjMhYeCe2MZXuinh/De6KynV6aIQjOvIvhD8SuznKA8Lum/i", - "YRUAjfaFwZQ0g/76IHn+4ePD8cMHn/7t14PkH+7Pp48/DVz+y2rcLRiINkxLKYGn62QugeJpWVDexcd7", - "Rw9qIco8Iwt6hptPl8jqXV9i+lrWeUbz0tAJS6U4yOdCEerIKIMZLXNN/MSk5LlhU2Y0R+2EKVJIccYy", - "yMaG+54vWLogKVV2CGxHzlmeGxosFWR9tBZf3YbD9ClEiYHrQvjABX25yKjXtQUTsEJukKS5UJBoseV6", - "8jcO5RkJL5T6rlK7XVbkeAEEJzcf7GWLuOOGpvN8TTTua0aoIpT4q2lM2IysRUnOcXNydor93WoM1pbE", - "IA03p3GPmsPbh74OMiLImwqRA+WIPH/uuijjMzYvJShyvgC9cHeeBFUIroCI6T8h1Wbb//vop7dESPIG", - "lKJzeEfTUwI8FRlkE3I4I1zogDQcLSEOTc++dTi4Ypf8P5UwNLFU84Kmp/EbPWdLFlnVG7piy3JJeLmc", - "gjRb6q8QLYgEXUreB5AdcQspLumqO+mxLHmK+19P25DlDLUxVeR0jQhb0tU3D8YOHEVonpMCeMb4nOgV", - "75XjzNzbwUukKHk2QMzRZk+Di1UVkLIZg4xUo2yAxE2zDR7Gd4OnFr4CcPwgveBUs2wBh8MqQjPmdJsv", - "pKBzCEhmQn52zA2/anEKvCJ0Ml3jp0LCGROlqjr1wIhTb5bAudCQFBJmLEJjRw4dhsHYNo4DL50MlAqu", - "KeOQGeaMQAsNlln1whRMuPm9073Fp1TBsyd9d3z9deDuz0R71zfu+KDdxkaJPZKRq9N8dQc2Llk1+g94", - "H4ZzKzZP7M+djWTzY3PbzFiON9E/zf55NJQKmUADEf5uUmzOqS4l7J/w++YvkpAjTXlGZWZ+Wdqf3pS5", - "Zkdsbn7K7U+vxZylR2zeg8wK1uiDC7st7T9mvDg71qvou+K1EKdlES4obTxcp2ty+Kpvk+2YuxLmQfXa", - "DR8exyv/GNm1h15VG9kDZC/uCmoansJagoGWpjP8ZzVDeqIz+Yf5pyhy01sXsxhqDR27KxnVB06tcFAU", - "OUupQeJ799l8NUwA7EOC1i328ELd/xiAWEhRgNTMDkqLIslFSvNEaapxpH+XMBvtj/5tr9a/7Nnuai+Y", - "/LXpdYSdjMhqxaCEFsUOY7wzoo/awCwMg8ZPyCYs20OhiXG7iYaUmGHBOZxRrif1k6XBD6oD/Kubqca3", - 
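The wrapper added in the hunk above does nothing beyond binding the optional format query parameter and delegating to the ServerInterface implementation. A minimal echo sketch of that shape, with assumed names (simulateStub, port :1323) rather than the generated ones:

package main

import (
	"net/http"

	"github.com/labstack/echo/v4"
)

// simulateStub mirrors the generated wrapper's shape: read the optional
// "format" query parameter, then respond. The json default is an
// illustrative assumption, not generated code.
func simulateStub(ctx echo.Context) error {
	format := ctx.QueryParam("format")
	if format == "" {
		format = "json"
	}
	return ctx.JSON(http.StatusOK, map[string]string{"format": format})
}

func main() {
	e := echo.New()
	e.POST("/v2/transactions/simulate", simulateStub)
	e.Logger.Fatal(e.Start(":1323"))
}

The generated version routes the parameter through runtime.BindQueryParameter, which also rejects a malformed value with a 400; the stub above skips that validation.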
"lXYsvltPsF6EE9twCspKwLbhHUUC1BNEK0G0okA6z8W0+uHuQVHUGMTvB0Vh8YHSIzAUzGDFlFb3cPm0", - "PknhPIevJuT7cGwUxQXP1+ZysKKGuRtm7tZyt1ilW3JrqEe8owhup5ATszUeDUbMvwqKw2fFQuRG6tlK", - "K6bxD65tSGbm90Gdvw4SC3HbT1z40HKYs28c/CV43NxtUU6XcJy6Z0IO2n0vRjZmlDjBXIhWNu6nHXcD", - "HisUnktaWADdF3uXMo6PNNvIwnpJbjqQ0UVhDs5wQGsI1YXP2tbzEIUESaEFw4tcpKc/ULW4gjM/9WN1", - "jx9OQxZAM5BkQdViMopJGeHxqkcbcsRMQ3zgk2kw1aRa4lUtb8vSMqppsDQHb1wssajHfsj0QEbeLj/h", - "f2hOzGdztg3rt8NOyDEyMGWPszMyZOa1bx8IdibTALUQgiztA5+YV/dOUL6sJ4/v06A9+tbqFNwOuUXg", - "DonVlR+DF2IVg+GFWHWOgFiBugr6MOOgGKlhqQbA98pBJnD/HfqolHTdRTKOPQTJZoFGdFV4Gnh445tZ", - "auXswVTIi3GfFlvhpFY5E2pGDZjvuIUkbFoWiSPFiNrKNmgNVFv5NjON9vAxjDWwcKTpZ8CCMqNeBRaa", - "A101FsSyYDlcAekvokx/ShU8fkSOfjh4+vDRb4+ePjMkWUgxl3RJpmsNitx1bzOi9DqHe92V4euozHV8", - "9GdPvKKyOW5sHCVKmcKSFt2hrALUikC2GTHtulhrohlXXQE45HAeg+HkFu3E6vYNaK+YMhLWcnolm9GH", - "sKyeJSMOkgy2EtOuy6unWYdLlGtZXsVTFqQUMqJfwyOmRSry5AykYiJiTXnnWhDXwou3Rft3Cy05p4qY", - "uVH1W3IUKCKUpVd8ON+3Qx+veI2bjZzfrjeyOjfvkH1pIt9rEhUpQCZ6xUkG03LeeAnNpFgSSjLsiHf0", - "96CP1jxFrdpVEGn/M23JOKr41ZqnwZvNbFQO2byxCZd/m7Wx4vVzdqo7KgKOQcdr/IzP+leQa3rl8kt7", - "ghjsL/1GWmBJZhriK/g1my90IGC+k0LMrh7G2CwxQPGDFc9z06crpL8VGZjFluoKLuN6sJrWzZ6GFE6n", - "otSEEi4yQI1KqeLXdI/lHk2GaOnU4c2vF1binoIhpJSWZrVlQdCO1+EcdceEppZ6E0SN6rFiVOYn28pO", - "Z63CuQSamVc9cCKmzlTgjBi4SIpGSO0vOickRM5SA65CihSUgixxKoqtoPl2lonoDXhCwBHgahaiBJlR", - "eWlgT8+2wnkK6wRN5orc/fEXde8G4NVC03wLYrFNDL3Vg8/Zg7pQD5t+E8G1Jw/Jjkognuea16VhEDlo", - "6EPhTjjp3b82RJ1dvDxazkCiZeazUryf5HIEVIH6men9stCWRY8jmHvoHLMl6u045UJBKnimooPlVOlk", - "G1s2jRqvMbOCgBPGODEO3COUvKZKW2si4xkqQex1gvNYAcVM0Q9wr0BqRv7Fy6LdsVNzD3JVqkowVWVR", - "CKkhi62Bw2rDXG9hVc0lZsHYlfSrBSkVbBu5D0vB+A5ZdiUWQVRXSndnbu8uDlXT5p5fR1HZAKJGxCZA", - "jnyrALuhM0wPIEzViLaEw1SLcioPnPFIaVEUhlvopORVvz40HdnWB/rnum2XuKiu7+1MgEIfHNfeQX5u", - "MWvdoBbUPKFxZLKkp0b2wAexNXt2YTaHMVGMp5BsonxzLI9Mq/AIbD2kZTGXNIMkg5yuu4P+bD8T+3nT", - "ALjj9cNHaEisP0t802tK9u4DG4YWOJ6KCY8Ev5DUHEHz8qgJxPXeMnIGOHaMOTk6ulMNhXNFt8iPh8u2", - "Wx0ZEW/DM6HNjltyQIgdQx8Cbw8aqpEvjgnsnNTPsvYUfwflJqjEiN0nWYPqW0I9/k4L6FGmOU/h4Li0", - "uHuLAUe5Zi8X28JG+k5sj2bvHZWapazAp86PsL7yl197gqi9iWSgKcshI8EH+woswv7EOmK0x7zYS3CQ", - "EqYLfkcLE1lOzhRKPE3gT2GNT+531sPvOPALvIKnbGRUcz1RThBQ7zdkJPCwCaxoqvO1kdP0AtbkHCQQ", - "VU6XTGvrudt86WpRJOEAUQX3hhmdNcd6x/kdGGJeOsKhguV1t2I8sk+CzfAdt94FDXS4p0AhRD5AedRB", - "RhSCQYZ/Ugiz68w5EXs3Uk9JDSAd00ZTXnX731ENNOMKyN9FSVLK8cVVaqhEGiFRTkD50cxgJLBqTmfi", - "rzEEOSzBPiTxy/377YXfv+/2nCkyg3PveW8attFx/z6qcd4JpRuH6wpUhea4HUauD9T8473nnBdaPGW7", - "idmNPGQn37UGr8wF5kwp5QjXLP/SDKB1MldD1h7SyDDzOo47SKkfDB1bN+77EVuWOdVXYb7YKI9W7wm2", - "XELGqIZ8TQoJKVjvaiNgKQuLAY1Yv6t0Qfkc5Wopyrlz/LHjIGMsldVgyJJ3hogKH3rFk7kUZRFjlM7Z", - "0zvYG7EDqHn5BIjEzlbOP6fVfC6mYsgN5hEe7M73Zsw+q8J41PswNEg9qx+GFjnNKIE4FjDsIVFlmgJE", - "XYBjT65qqa1oyDq+xQ1oxIZSWh8oQlNd0jykOnI4I5Svm2GSlOXKcEGmCLYznWu/2rFdm49hmdHc2mYj", - "QRXhSWlIfMHO1yhto2Kg3QGJxEhDXcoICdAcL0PGn0eHXw8dg7I7ceB0VX/s87sy7+98fQVikB2ISCgk", - "KLy0Qr2Vsl/FLIx9creaWisNy65q33b9rYfRvO99QAqeMw7JUnBYR8N9GYc3+DHKOPDi7OmMIkxf3/ar", - "pAF/C6zmPEOo8bL4xd0OeNG7yuHwCja/PW7LqhNGfaHWEvKCUJLmDHWagisty1SfcIpak+CwRRwz/Puw", - "X4/20jeJK+4iejU31Amn6JRT6VKixuQZRBQH3wF4dZoq53NQLf5JZgAn3LVinJScaZxrafYrsRtWgETv", - "iIltuaRrwwJR7fcHSEGmpW7yZIw8UdqwS2tiMtMQMTvhVJMczJv6DePHKxzOm2g9zXDQ50KeVliIXyFz", - "4KCYSuIOJN/br+jb55a/cH5+GClsP1ujhBm/Dk9Zo1Kljn79v3f/a//Xg+QfNPnjQfL8P/Y+fHzy6d79", - "zo+PPn3zzf9r/vT40zf3/uvfYzvlYY/FRTjID1+5x9rhK5TIa6tEB/Zr00gvGU+iRBba3lu0Re5iDKAj", - "oHtNfY1ewAnXK24I6YzmLDMi10XIoc3iOmfRno4W1TQ2oqWf8WvdUc69BJchESbTYo0Xvsa7PlfxCCQ0", - 
"k7mgIjwvs5LbrfSCrnWw974vYjauosxsAop9giFIC+odt9yfj54+G43r0KHq+2g8cl8/RCiZZauodAir", - "2PPFHRA8GHcUKehaQY8AirBH3Xyst0E47BLMu1ctWHH9nEJpNo1zOO+27NQgK37IrT+xOT9odFs7Xb6Y", - "XT/cWho5vNCLWGB6Q1LAVvVuArQcIQopzoCPCZvApK2GyMzTzDkc5UBnGCCNDz0xJAyjOgeW0DxVBFgP", - "FzLorR+jHxRuHbf+NB65y19duTzuBo7B1Z6zsrD5v7Ugd77/9pjsOYap7thYRTt0EF0WebW6AIqGi4zh", - "ZjYdhw3WPOEn/BXMGGfm+/4Jz6ime1OqWKr2SgXyBc0pT2EyF2Tfx2S8opqe8I6k1ZsxJ4iGIUU5zVlK", - "TkOJuCZPmwWhO8LJya80n4uTkw8db4Gu/OqmivIXO0FyzvRClDpxMdyJhHMqY9YYVcXw4sg2ScOmWcfE", - "jW1ZsYsRd+PHeR4tCtWO5esuvyhys/yADJWLVDNbRpQW0ssiRkCx0OD+vhXuYpD03KswSgWK/L6kxa+M", - "6w8kOSkfPHgMpBHc9ru78g1NrgsYrMjojTVs6y9w4fZdAystaVLQeczqc3LyqwZa4O6jvLzER3aeE+zW", - "CKrzTsM4VL0Aj4/+DbBw7BwghIs7sr18vp74EvATbiG2MeJGbYq+6H4FYXYX3q5WqF5nl0q9SMzZjq5K", - "GRL3O1Ol8ZgbIcv7Byg2Rx9Ml/FkCiRdQHrqUlHAstDrcaO7d0FxgqZnHUzZJCU2SAbD5FFnPgVSFhl1", - "onhbgzRdEwVaeyfQ93AK62NRR9nvEqDcjJdVfQcVKTWQLg2xhsfWjdHefOfnhCquovBhpxh/5Mliv6IL", - "36f/IFuR9woOcYwoGvGcfYigMoIIS/w9KLjAQs14lyL92PLMK2Nqb75IwhLP+4lrUj+enEtSuBpUcNvv", - "S8CMR+JckSk1crtwyXpsTGjAxUpF59AjIYdmi4GRlw1TBw6y7d6L3nRi1r7QOvdNFGTbODFrjlIKmC+G", - "VPAx03JE8zNZy5gzAmAOPoewaY5iUuWxZ5kOlQ3zkU0q1gdanIBB8lrg8GA0MRJKNguqfB4hTLfkz/Ig", - "GeAzxjhvymwRKvSDnEqVft3z3PY57bwuXX4Ln9TCZ7IIn5YDslIYCR/dtmPbITgKQBnkMLcLt409odTx", - "1vUGGTh+ms1yxoEkMXcsqpRImU0EVV8zbg4w8vF9QqwKmAweIUbGAdho8cWByVsRnk0+3wVI7uLFqR8b", - "bcXB3xAPbbEOykbkEYVh4azHgJR6DkCdD191f7U8SXEYwviYGDZ3RnPD5tyLrx6kk2ABxdZWOgXnc3Cv", - "T5zdoIG3F8tOa7JX0UVWE8pMHui4QLcB4qlYJTa2LSrxTldTQ+9Rn22MtIsdTJvK4o4iU7FCPxa8WqyP", - "8BZY+uHwYAQv/BVTSK/Yr+82t8BsmnazNBWjQoUk49R5Fbn0iRNDpu6RYPrI5W6QneJCALSUHXWqV/f4", - "3fpIbYon3cu8vtXGddYlHw4TO/59Ryi6Sz3462phqnwS79oSS1RP0XTHaKbSCETIGNEbNtE10nRNQQpy", - "wEdB0hCiktOY6c68bQBvnCPfLVBeYMIOytf3Ah8fCXOmNNRKdO+ScBPqSYp5woSY9a9OF3Jm1vdeiOqa", - "solosGNjmde+AvSRnTGpdIIWiOgSTKPvFD6qvzNN47JS04vIZtVkWZw34LSnsE4ylpdxenXz/vjKTPu2", - "YomqnCK/Zdz6hkwxC2zUt3DD1Nb9dOOCX9sFv6ZXtt5hp8E0NRNLQy7NOb6Sc9HivJvYQYQAY8TR3bVe", - "lG5gkEFIaJc7BnKTPZwYEjrZpH3tHKbMj73VbcQHpvbdUXak6FoChcHGVTA0ExmxhOkgiWo3VrPnDNCi", - "YNmqpQu1o/a+mOlOCg+feqqFBdxdN9gWDAR6z1i4iATVzDJWC/g2HW4jycdkEGaOm7nAQoYQTsWUT+be", - "RVQVTrYNV8dA8x9h/Ytpi8sZfRqPLqc6jeHajbgF1++q7Y3iGU3zVpXWsITsiHJaFFKc0TxxCuY+0pTi", - "zJEmNvf66GtmdXE15vG3B6/fOfA/jUdpDlQmlajQuypsV3w1q7IJzXoOiE8Wbd58Xma3omSw+VUWplAp", - "fb4Al3U3kEY76QFrg0NwFJ2Sehb3ENqqcna2EbvEDTYSKCoTSa2+sxaSplWEnlGWe72Zh7bHmwcXNyzH", - "ZJQrhANc2roSGMmSK2U3ndMdPx01dW3hSeFcG/ICL23qa0UEb5vQ0b14XTir+5Jicj+rFekyJ14uUZOQ", - "qJylcR0rnypDHNzazkxjgo17hFEzYsl6TLG8ZMFYppka8NBtARnMEUWmTxTZh7upcGVNSs7+VQJhGXBt", - "Pkk8la2DitkUnba9e50a2aE7lxvYaujr4S8jY4SJLds3HgKxWcAILXUdcF9VT2a/0Eojhe7WtUliB4N/", - "OGPnStxgrHf04ajZOi8umha3sApJl/8ZwrDpqLeXQPGPV5dhs2eOaEkTppKZFH9A/J2Hz+NIKI5P5cnQ", - "y+UP4AN8zmvtTl2ZpZ69d7v7pJtQC9V0Uuihetz5wCyHOQW9hppyu9W2wkDD1y1OMKFX6Z4dvyYYB3PH", - "Ezen51MaS7hohAwD00FtAG7o0rUgvrPHvaoCG+zsJLAlV22ZjbIuQNZRct2MLRcUGOy0g0WFWjJAqg1l", - "grG1/+VKRIYp+TnltlCF6WePkuutwCq/TK9zITFHgoqr/TNI2ZLmcckhS7sq3ozNma3BUCoIkvy7gWx9", - "G0tFrlBCFa7jUHM4Iw/GQaURtxsZO2OKTXPAFg9tiylVyMkrRVTVxSwPuF4obP5oQPNFyTMJmV4oi1gl", - "SCXU4fOmMl5NQZ8DcPIA2z18Tu6i2U6xM7hnsOju59H+w+eodLV/PIhdAK6GxiZukiE7+R/HTuJ0jHZL", - "O4Zh3G7USTSc3BbR6mdcG06T7TrkLGFLx+u2n6Ul5XQOcU+R5RaYbF/cTVSktfDCM1sBRmkp1oTp+Pyg", - "qeFPPd7nhv1ZMEgqlkuml864o8TS0FOdwd9O6oez5WRc8lUPl/+INtLCm4haj8jrVZra+y22arRkv6VL", - "aKJ1TKhNjJGz2nvBp4Qmhz7vDmajrZLQWtyYuczSUcxBZ4YZKSTjGh8WpZ4lfyPpgkqaGvY36QM3mT57", - "EsnA28wEyXcD/NrxLkGBPIujXvaQvZchXF9ylwueLA1Hye7V0R7Bqew15sbNdn22w81DDxXKzChJL7mV", - 
"DXKjAae+FOHxDQNekhSr9exEjzuv7Nops5Rx8qCl2aGf3792UsZSyFgyvfq4O4lDgpYMztB3L75JZsxL", - "7oXMB+3CZaC/WcuDFzkDscyf5dhD4IWIvE59VuhKk+581SPagb5jaj4YMpi6ocakmYH3+o1+XvncNT6Z", - "Lx5W/KMN7A1vKSLZr6BnE4Ps4NHtzKrvgf2bkhdiNXRTWyfEb+wXgJooSkqWZ7/UUZmt5OuS8nQRtWdN", - "Tcff6jJR1eLs/RTNWbegnEMeHc7Kgr95mTEi1f5TDJ1nyfjAtu188Ha5rcXVgDfB9ED5CQ16mc7NBCFW", - "mwFvlUN1PhcZwXnqBGk19+zWEQiyPf+rBKVjwUP4wTp1od7SvHdtsmECPMPX4oR8byvBLoA00t/gK63K", - "IuBS31qFelnkgmZjTORw/O3Ba2JntX1ssROb7HiOj5TmKlr6qiD34zD3YF+3JB66MHyczb7UZtVKYzYq", - "pemyiAWHmhbHvgFGoIY6fHy+hNiZkFdBTUcbR2qGMPQwY3JpXlzVaFZ2QZow/9Gapgt8kjVYaj/JD8/S", - "7alSBZXxqgo3VUJEPHcGbpeo2+bpHhNh3s3nTNkCoHAGzXjUKjjbqQR8fGpzebLk3FJKVPbYlDzgImj3", - "wFlHDa/mj0LWQvyOArlNcr9r0vIj7BVN0NTOgN4piWejG6vKJb6wc0q54CzF9Eixq9lVCh1iAxuQSaqt", - "ZPVH3J3QyOGK5l2v3OQcFnszsXtG6BDXVcIHX82mWuqwf2osSbmgmsxBK8fZIBv78gFOD8i4ApfgEuvK", - "BnxSyIZdETlk1FSdVCaNHckIw2J6HnbfmW9v3bMf/cVPGUcB36HNuaZbTR0WMtTmVcA0mQtQbj3N2GD1", - "q+kzwTDZDFYfJr7woc0Gg2Y5s2xrg+4OdeAt0s4CbNq+NG1dnqDq54YHsp30oCjcpP3FJaLygF7xXgRH", - "LIuJN+0EyK3GD0fbQG4bXUnwPjWEBmdoiIYC7+EOYVSFFlpFfIzQaikKWxDrwhXNYMB4BIzXjENdljNy", - "QaTRKwE3Bs9rTz+VSqqtCDiIpx0DzdH6HGNoSjvTw2WHaucSMijBNfo5+rexrhHRwziqBrXgRvm6qgZq", - "qDsQJl5iGWKHyG7FB5SqnBCVYURBqwZEjHEYxu2rzDQvgO4x6MpEtruW1J6cXW6iviDRaZnNQSc0y2IZ", - "qV7gV4JffXIpWEFaVokpi4KkmBOlmSSmS21uolRwVS43zOUbXHK6oKhKhBrCwi5+hzEIZbrGf2NZGft3", - "xjlh7OwG6D0uXBWKHeXm5kgdqdfQdKLYPBmOCbxTLo+OeuqLEXrd/0opPRfzJiDXnBpiE5cL9yjG3741", - "F0eYOaGTatReLVViA3S6E74UHj4bq5DcJlfCq6yTexSNPVWprc0KiP6iWWO8/Hpcb4OEGNTer9Z62OeA", - "m/b6i1PtItc0JRtZUG80kPXesXE/CEVcc9rnsWMddsznTu9hkmFHzsaxNyLUu4J1AfrR+5mSgjJnGq+Z", - "RRezziO9X1246dDVG9xehPPz7tXY/XjW55NNFOPzHAh+b5cZOgUXzl7Vmbdr9V5J/klof3VlXu14lVd8", - "dP1d7wSc6mbVoL1K22OX0t4u073Jf/zF+rAR4FquvwAVbmfTO0WautKuVU/VTUiVDnlQeuTGrRivt9Sf", - "/6jOeYT0VAjF6hTcsUJMA33djrGWUpC/qTuWdzQ5g1Rj3vXagC4BdsnmZCYLivzd5kHqeTtWLoEu/dGm", - "nEfdZOtbLrROWFIQWmcTVU+GZ/g5qNykkClhBtw5cFdnrxlwMNjteTaDVLOzLWFg/7MAHoQYjb0SwtbL", - "DaLCWOVGi1lEdlex1QBtitLaCE+Qze/S4PQFgZzC+o4iDWqIZs4e+3vlIgkkEAPIHRJDIkLF3BCs1tRZ", - "hpmqKAOx4N1+bHeoU3H11twJghovOJcnSXPj1oGOG6aMF/0YNJfpulP4L3qE9kWKdYsG9Avbr7BGg6rq", - "4fkEFOGTlBx20/SduwQWGLRXGQp8KgtQ/jcfoWtnydkphFWB0CxzTmXmW0T1DF6FkWy4jzrhXT7hfRvo", - "WTUzq500uwE9kcRP6Iqb5sLIX0mfP3PTLzIsno/eHzblN3p8GrhmIF31NBT2cqEg0cI7dW6CYxMqXKH3", - "iyBB9SZbtMD1pkB5X+d4waSzFFOeUOfZEi6QSFhSA50MMrH0z7kJ2S/tdx/B4pOOblWnVPS6PdG8d89l", - "qoPEkOpnxN2W2yNjLqJZYZzbWq0qlpaFG1SGqv9CiqxM7QUdHoxK+zQ46dEGVhJVSqTdVXbelzmmAHsd", - "xBmewnrPiv4+Vb/fyhB6K0LZNQRx/a3dvlKlU/x9nc/tAuZXAudNKm7Go0KIPOnR9R92s8u0z8ApS08h", - "I+bu8I5tPWVLyF1UMVfG3PPF2mdTKQrgkN2bEHLArSuxt+s20xu3Jud39Kb5VzhrVtqET06nNDnhcZ9M", - "TMUkL8nf/DCbuZoCw/wuOZUdZEvuklVPZhtJzyNFfCZDH6VdS2u7sEpNVBaKmJSypYRFxIrsayL4Chs+", - "YkWLJUu7VRQ6osQMq1ElNDL4YcXAx41agaxVuMPnGLJlGlJqBTjzeKAsLyW4yAFbNqeVTr+geuG3zzTv", - "ilnmygaFbv02JTtV9lHgHyeuZk/7XIgiyeEMGoYEF85Qpikoxc4grPdjO5MMoMCnevsCiWnIQ7pq8RC3", - "9iTQsQ7BbpSpWMTanSJbOEZPMfbEkocaSkIGojOWlbSBP3WJUiwDa7uHsA48ITsfjvjiOkfDlUtJqmRu", - "MUWmCyfxW2joty7t0hKQghIs1Zg9tSgrLFxGGOlFbRyzF0uiMYgeujrtyJEJyq5s1ryEOXZq511pTSP4", - "UvOnrr2lb+rTOKwAjO+wBbxQIReUgPGSkAPnhj1s31RICZbSSwmN5W/T8bkF1uwr2CLLu80ybcYz653V", - "3JdAgateVnrRvrpMbfUpJtQRHJOMddWuCk1lmKs8JBzDu+UZza9fdYqZlg4QH67ObXyhoe4tRLJFpbqY", - "m9trOmjuQM92dVPzd6jq/R8wexS1cbqhnM2jkhW8ZQhZJs1JLuoKdzgkOccxrVH04TMydSE6hYSUKdaK", - "Xjz3aZQrVRNWFajLH2/WbW1b5y9CX4KMZ158IW/rlKxa4I1RQ1gf0RtmKj0nN0rlMerrkEUEfzEeFebK", - "2HJdnDaspTbFdcsNUEi4Yqtp4P+0o9W0mwVk6PKsZdBcOqWC7joH39YN3EYu6nptQ03+XeRuyts5xFIf", - 
"T8druqOrgEUI5rImCCr5/eHvRMIMi9UIcv8+TnD//tg1/f1R87M5zvfvx8ssX5eTgMWRG8PNG6OYX/rc", - "xq1rdE+EQms/SpZn2wijEW9Sl3vCiIrfXMTZjRSc+s3acrpH1RX92MU9qb0JiJjIWhuTB1MFkSQDgkhc", - "t0jICGpF0lIyvcZEOF71z36LujN8X1kLnbW5Sp3g7j4tTqFKpVTbFkvlb9fvBc3xPjIyNTqHaSyt++2K", - "Losc3EH55s70P+Hx355kDx4//M/p3x48fZDCk6fPHzygz5/Qh88fP4RHf3v65AE8nD17Pn2UPXryaPrk", - "0ZNnT5+nj588nD559vw/7xg+ZEC2gI582PXof7EqW3Lw7jA5NsDWOKEFqypqGzL2pWVoiicRlpTlo33/", - "0//xJ2ySimU9vP915KI6RwutC7W/t3d+fj4Ju+zN0ZiQaFGmiz0/T7eS8bvDKjLHPi1xR23QhVcZeFI4", - "wG/vvz06JgfvDidBpcz90YPJg8lDLKRYAKcFG+2PHuNPeHoWuO97jthG+x8/jUd7C6A52t7NH0vQkqX+", - "kzqn8znIiauxY346e7TnRYm9j86Q8smMOo+lCLIxRkFgSbf0jDPKoqOmjSFqpHJXLrP4uErw77QWPMPQ", - "D2ubMKytQtZhVmeyPawZlc/nYxMc7v8aqTw4Y3Pzjm5UVG3VajUv7/8++uktEZK4J807mp5WbjPkcGZz", - "M0hxxjCiIAvCUEzPiafZf5Ug1zVNOW4XJu/z+dpdnMZSzYumU3MtScUKisfK/ODMhhQCYq7MnjWz0rKE", - "EJKa9Rp2+iB5/uHj0799Gg0ABG3wCjC1w+80z3+3FWJhhYZMnxzJJb8YR3KTowQ9rs1o2KHeyTF6ZVdf", - "w/IzVZtmLNDvXHD4vW8bHGDRfaB5bhoKDrE9+IDJB5BY8Jw9evDgyupWVeFv1re7GsWTxAUG6jIh+ylS", - "itaXr+qpQ/vkChfadF699HLbw3UW/YJmWBIElLZLefjVLuWQoxuMuRSIvfQ+jUdPv+K9OeSG59CcYMsg", - "s0/3ovmZn3Jxzn1LI/CUyyWVaxRngrpFrdBaOldoUUEWac92o1LJ6MOn3ltvLyzEsPex4UmRXepO7NSg", - "OXy15Zq8o/o4ZzcvZqvOg/lepfFHW7srZoGFBdS9Cfk+7I3cG9NM2CQOpeSQeUcIf+tVebN8Nq4atjsq", - "zMARvbQDFfHt/X3T9/dBU8HRyL0YA6ZxCjbC1PHGuuwF2o2galXyu1ClvKDiwgXyVn/WckKt96Wd6UPs", - "+beVUd/irgd3fWJSAG8lMTUrZXx+1uwjFqqbpHFlfEbG/ZULfW9obugkWG4rmtsmJL0VBv8ywmDloGsL", - "8voc3JcTD7Eaz95Hn2T2CkRCl2R3gDAYPquDvkES1LstdnJvYjPGhm0uxjOcR+5WMQ9T/94KeF+AgNdN", - "qx0Do06WfHNCHcKwqPNu71JNt1Ema6f84F+pFPcXRlav2GYg3S6wXYB9doQxx6w/G1v9UwphDmm34tdf", - "Wvyq4mQuJYA1EuO7yKvAjHUp7V1bO8d0JYk1Y6UCzlaVIXZHeFwX8TEsBvMq+ZQaauxfhmhCtY9Gu1nj", - "zruxK2J9D+ED9cX68NU26eor0vMMzu8XuQXie/O5eWnU7PD+eswOw3jTkwdPrg+CcBfeCk2+w1v8M3PI", - "z8rS4mS1KwvbxJH2pjbz8CauxFtsCRlFnVE44FFYuCHMWmw9Mu66cplhJoh7E+LzG6uqWoPLPzAXNK/z", - "LFE5t50MjzNIIHf8n/s4/p0J+Q4jArQao2OZdqn8yR3G9f7DR4+fuCaSnlu/rXa76bMn+wfffOOa1dms", - "7fum01xpub+APBeug7sbuuOaD/v/+/d/TCaTO1vZqVi9WL+1qeO+FJ46jsUTVBvft1tf+SbFXukupd9W", - "1F2L2f6FWEW5v1jd3j43dvsY7P8pbp1pk4zcA7TSYDbi7a/wFrLHZJd7aOyzQxu+U10mE/JWuNQnZU4l", - "ETID6crbzEsqKdcA2cRTKkatKZvqIc0ZcG0ejFiwQyaKZWAjxuelhIzkbIkVbSWcoT88To9v+QYE2xk9", - "es1+sUz+DV0F6RCm1TWthVsyqjuXdOVLBmFRDCHxp2++IQ/G9aslz80ASYWYGHNd0tXoGrV9FbEN8jVv", - "ZvXf6oyLYw/RHNXSj63ORpspxP/anPurldgtubuNvSLOubPBpzbohPoDl2Bko+bACna2oBBWuFnX4bxG", - "yvMiVJzFmRmGKgW+YNvAVpV09PHZRu/tIb59/F+KlbQJake2gRGmau8j2jJCntE5txgh99cykwY2IymW", - "3mgkyAx0unDBuS3UR9iTrynQz5s2VZS8aqkGd7Gb0jrM74iVDgcmDAniJtFwBzJCxD/59L7mM5vZDBW+", - "XoQvnIomKeZriVVlxFyxRaa8H7+P4TW7uBOUL+vJuwIZouUq7J63CN4NwR3m+K2vWYUYc4v4M3j6+6dk", - "Qt6KOkTclUv4M5ocP+fN/rkX9FZwsLZ1I/laWrw1o1ZiB6rwESk+N4h9v1SJqy8sguz56msb5ZAfbO2z", - "jbLIkNvbTPZVXuE/ROsaN24Zs7bJ1sQH9WhDmLNpaHNMN7NL3+Ar5kb46Rf4tLkJjnU9LAYPqeczTizg", - "V8t0MN2OJea9KrFwHweK52ofzI20qNzPounVp5ALPldfJivaRB1xvESopMpiH09V/9c7uy8xk4958lrP", - "R5fbSTGegq0u6AvMu8RrFsK/XR+Emi19Lk4exqzeMHd5+uDx9U1/BPKMpUCOYVkISSXL1+RnXlWCvAy3", - "w0T8Va41rw2O1l5Aa1MzB1gaJiy6OBNsuKx91CuWfdrODIOMfTvyQcYDPhjmF6RFAVRenAFuN10dt2Y8", - "fBV6BTfyw1fZsyKgGBTt6Bj/H6OBeicMdxczd/mV3ALqM305NuFcdsVsXDnHGClAzPbJCb9P1II+ffjo", - "t0dPn/k/Hz191qM5M/O4BD1d3Vk9kPlshxmiQPuq1YFXK7VX+N2/7t3ebRPHI5atovmi65ownay5Tiy7", - "o0hB171p5ostNW3CYev6Ntef2FBpNl1E31f++VNVvT3kL6pXsM2+50rB3Nay6QmaCPiMIbS6qE2F9c31", - "bTZIky2yrAqJXPfjtA4usBedR55s3Tk3Kujqm3qkJvhGBe4FmyZabk6mxJzm48DcXdURR9+VsiiE1NXp", - "VpNB4h70me0a0l4f4e4kzKVUp4uy2PuI/8FsXp/qgANbuTOw87nfbY39PWvF3yTnHdkWl7wTWwK19R1o", - 
"5VL3ieWcZ4GYkTcsleIAU+W760atlYZlJ/uf6/rbpurt0atJ8JxxSJaCx3LS/YRf3+DHaDJ+oWne1/nY", - "fOzr22KOTfhbYDXnGcIZL4vfL+QpfikVUmu1EswxruubWfrf8aj5Q7PmafckrXnaPWaNIms9P+99bPzp", - "fHhcS7UodSbOg774ALS8aIj5PsiVPVxvXr2JWjmnFclAGaL9+pRUAR5iJ6b6GslEFmRE701G9hdVW80Y", - "z1pEghJlKs5AqkqhIb27za3u6s+juxq87zvxWJt5cxtHK9XVSiRvRQZ23Gay21i4KBcZuAShXUGkksHi", - "731/K9XtWi+wlJbzhSZlQbSIvfXqjglNLZO1ZRjVtrp1tpWvtnIGhOYSaLYmUwBOxNQsuln/k1CFrvL+", - "wegkzXj5tRquQooUlIIs8eGx20Cr0q7i81JvwBMCjgBXsxAlyIzKSwN7erYVzipVuSJ3f/xF3bsBeK0o", - "uBmx1kE3gt7KCchJe12oh02/ieDak4dkRyUQLxqgfkssixychiuCwp1w0rt/bYg6u3h5tKAKiH1miveT", - "XI6AKlA/M71fFtqySMz9HSkQab8esyVKYpxyoSAVPFP9ZVy3sWUsFxKsRZkVBJwwxolx4J4H52uq9Htn", - "7AiregVlScwUG+rO9qXENyP/UiXE74ydmvuQq1JVWfOdAiNeWYvDasNcb2FVzYXWJj92pSHRgpQKto3c", - "h6VgfIcsFRaS1YGZCIuGdBeHOU2oU1B0UdkAokbEJkCOfKtGybjahNEDCFM1oqvqkE3KCcpjKS2KAqvW", - "JSWv+vWh6ci2PtA/1227xOUKF+G9nQlQofbKQX5uMaswaGNBFXFwkCU9dQquucv5FCnpxZaQoGE62UT5", - "5lgemVbhEdh6SMtiLmmGBUZpRJXys/1M7OdNA+COe/LE6s3JFGbRIiRm02tKlr0qompogeOpmPCIxZ4V", - "Sc0RnGFVHE8grveWkTPoqTR9HFS/dM1xrugW+fFw2Xare9RSZgyz45YcEGLH0IfA24OGauSLYwI7J7X2", - "oD3F30G5CSoxYvdJ1qD6llCPv9MC2tq88P5qXBQt7t5iwFGu2cvFtrCRvhMb0x9+lUF9bbPtZ/RJa+pP", - "g/ff5CJv271zynQyE9LVz6czDTKiymuVNKBM+5hBa0DRwnlMEBzBXZtuHFeHvU684ZiIBYH4Kp1sGcnj", - "Y6b6TshBgT9N9zbKNCm5ZnkQ/Fy9lL88feGtDuBWB3CrA7jVAdzqAG51ALc6gFsdwK0O4FYHcKsDuNUB", - "/GV1ADcVyZd4gcP7N3PBEw5zqtkZVCF+t8mH/lSRL9VV5XUSqMU4p0y7VJ6EejEAv1wu8E8DzREHLEce", - "WwjVmyMJCz8rUcoUSGogZJwUOTVPA1jpKrFcM2WpT6LsSj9jFlSq4PEjcvTDgXfQXzhH8mbbuwcuGbnS", - "6xzuudQNVW1Wn8MBuEG6S+FA/ZXgE9C5dHwsB6IMer/F1q/gDHJRgLS+v0TLMqLxOQaav3S42aLwaZTW", - "NKP9Pm7omRzalrQIStzjWqki1EZtNCtjzmiu+ktj2vGWtIjlgKsuPqsKQm7yQmTr1gkxu7aHG9g8G7Wb", - "PuNUriMhOp0T0SENLQy/coTV1WV9uvJgki7RdslsG4XFpHUJKnqON1F5NIqi2rDOUDbYZ9aik2jp6Hbo", - "wKgCcIgDrKFnvyfkve13s6HqCJE7YjUz/2L8BpstK6aBbc0jwrGerzWu3CM+enrx7I8NYWdlCoRpRXw8", - "yvbrZTxaJWakOfDEMaBkKrJ10mBfo8YtlDFFlYLldPtNFPJPl/XYXT7my+Z76maukVfB4jbx5JBoVolj", - "wD3cea1hMG+usIUjOvYcYPxzs+g+NhqCQBx/iimV2rVmdmR69TTrW8Z3y/iC09iSCBh38XttJjL5jIxP", - "rmXJ+3netytISwNceJLvonYeTXKw0g27ZgbTcj7H7M0dG51ZGuB4TPAbYoV2uUO54G4UZAevMnpeNolU", - "e7gudwli1e4KSeZSlMU9W6aKr9GYsSwoX3uTLySKLcvc4tAmvrtaRmtD7LqOAGiOdbq/Pq32O6/yC3S3", - "7qpt/m7RQs6pInZ/ISMlz1zkUCcQd8WHZ462Qx+veM2mN+aOtuuNrM7NO+SK8LvsQlwqM3cBMtErbg9U", - "M727Dfi1J3dym7X2r3FtvLMZF3oYbDd4tWYIV3R7yICv4fURpCipQ+GatbZsJcC+wJEwX4lteaXOI53h", - "mz4kQR0+ayOFvCDUlxRIBVdalqk+4RRtNMHCJl3/Eq+N7udvL32TuJkwYsVzQ51wihnnK8tNlM/NIGKm", - "+A7As1FVzuegDK8MiWQGcMJdK8ZJyc1LS8zIkqVSJDYM1ZwhI59MbMslXZMZzdHI+AdIQabmZg923SqM", - "lWZ57hxazDREzE441SQHqjR5wwyXNcP5XGKVJxfocyFPKyzE01fMgYNiKokrX763XzFDhFu+V/KhwtJ+", - "riO7rzc1hIedZb2QH74ycFNMhpMzpWsfiA7s12b/XjKeRInseAHEuYS1aYvcxVwxjoDuNa1DegEn3Nxw", - "WhDk6lRfjBzaZp7OWbSno0U1jY1oWYP8Wgc98a6Ey5AIk7k1rfyJAjMDOvDmS9x4rEXT3vsdzSgby1vG", - "vrqMYj2N3CMB/Gd7ivCON8uCtJRMr9EOQQv22ymY/3/49MF8k2feRFHKfLQ/Wmhd7O/tYd3KhVB6b/Rp", - "HH5TrY8fqpV/9NaGQrIzTGP94dP/DwAA//8ICcmexzwBAA==", + "H4sIAAAAAAAC/+x9a3Mbt5LoX0Fxt8qP5VB+Zk9Uldor23loYzsuS8nuObFvAs40SRwNgTkARiLj6/9+", + "Cw1gBjODIYcSLdmJPtni4NFoNBqNfn4YpWJZCA5cq9Hhh1FBJV2CBol/0TQVJdcJy8xfGahUskIzwUeH", + "/htRWjI+H41HzPxaUL0YjUecLqFuY/qPRxL+VTIJ2ehQyxLGI5UuYEnNwHpdmNbVSKtkLhI3xJEd4vjF", + "6OOGDzTLJCjVhfInnq8J42leZkC0pFzR1HxS5ILpBdELpojrTBgnggMRM6IXjcZkxiDP1MQv8l8lyHWw", + "Sjd5/5I+1iAmUuTQhfO5WE4ZBw8VVEBVG0K0IBnMsNGCamJmMLD6hloQBVSmCzITcguoFogQXuDlcnT4", + "60gBz0DibqXAzvG/MwnwBySayjno0ftxbHEzDTLRbBlZ2rHDvgRV5loRbItrnLNz4MT0mpBXpdJkCoRy", + 
"8va75+Tx48dfm4UsqdaQOSLrXVU9e7gm2310OMqoBv+5S2s0nwtJeZZU7d9+9xznP3ELHNqKKgXxw3Jk", + "vpDjF30L8B0jJMS4hjnuQ4P6TY/Ioah/nsJMSBi4J7bxXjclnP9GdyWlOl0UgnEd2ReCX4n9HOVhQfdN", + "PKwCoNG+MJiSZtBfHyRfv//wcPzwwcd/+/Uo+Yf78+njjwOX/7wadwsGog3TUkrg6TqZS6B4WhaUd/Hx", + "1tGDWogyz8iCnuPm0yWyeteXmL6WdZ7TvDR0wlIpjvK5UIQ6MspgRstcEz8xKXlu2JQZzVE7YYoUUpyz", + "DLKx4b4XC5YuSEqVHQLbkQuW54YGSwVZH63FV7fhMH0MUWLguhQ+cEGfLzLqdW3BBKyQGyRpLhQkWmy5", + "nvyNQ3lGwgulvqvUbpcVOV0AwcnNB3vZIu64oek8XxON+5oRqggl/moaEzYja1GSC9ycnJ1hf7cag7Ul", + "MUjDzWnco+bw9qGvg4wI8qZC5EA5Is+fuy7K+IzNSwmKXCxAL9ydJ0EVgisgYvpPSLXZ9v8++ek1EZK8", + "AqXoHN7Q9IwAT0UG2YQczwgXOiANR0uIQ9Ozbx0Ortgl/08lDE0s1byg6Vn8Rs/ZkkVW9Yqu2LJcEl4u", + "pyDNlvorRAsiQZeS9wFkR9xCiku66k56Kkue4v7X0zZkOUNtTBU5XSPClnT1zYOxA0cRmuekAJ4xPid6", + "xXvlODP3dvASKUqeDRBztNnT4GJVBaRsxiAj1SgbIHHTbIOH8d3gqYWvABw/SC841SxbwOGwitCMOd3m", + "CynoHAKSmZCfHXPDr1qcAa8InUzX+KmQcM5EqapOPTDi1JslcC40JIWEGYvQ2IlDh2Ewto3jwEsnA6WC", + "a8o4ZIY5I9BCg2VWvTAFE25+73Rv8SlV8NWTvju+/jpw92eivesbd3zQbmOjxB7JyNVpvroDG5esGv0H", + "vA/DuRWbJ/bnzkay+am5bWYsx5von2b/PBpKhUyggQh/Nyk251SXEg7f8fvmL5KQE015RmVmflnan16V", + "uWYnbG5+yu1PL8WcpSds3oPMCtbogwu7Le0/Zrw4O9ar6LvipRBnZREuKG08XKdrcvyib5PtmLsS5lH1", + "2g0fHqcr/xjZtYdeVRvZA2Qv7gpqGp7BWoKBlqYz/Gc1Q3qiM/mH+acoctNbF7MYag0duysZ1QdOrXBU", + "FDlLqUHiW/fZfDVMAOxDgtYtDvBCPfwQgFhIUYDUzA5KiyLJRUrzRGmqcaR/lzAbHY7+7aDWvxzY7uog", + "mPyl6XWCnYzIasWghBbFDmO8MaKP2sAsDIPGT8gmLNtDoYlxu4mGlJhhwTmcU64n9ZOlwQ+qA/yrm6nG", + "t5V2LL5bT7BehBPbcArKSsC24R1FAtQTRCtBtKJAOs/FtPrh7lFR1BjE70dFYfGB0iMwFMxgxZRW93D5", + "tD5J4TzHLybk+3BsFMUFz9fmcrCihrkbZu7WcrdYpVtya6hHvKMIbqeQE7M1Hg1GzN8HxeGzYiFyI/Vs", + "pRXT+AfXNiQz8/ugzl8GiYW47ScufGg5zNk3Dv4SPG7utiinSzhO3TMhR+2+lyMbM0qcYC5FKxv30467", + "AY8VCi8kLSyA7ou9SxnHR5ptZGG9IjcdyOiiMAdnOKA1hOrSZ23reYhCgqTQguFZLtKzH6ha7OHMT/1Y", + "3eOH05AF0AwkWVC1mIxiUkZ4vOrRhhwx0xAf+GQaTDWplriv5W1ZWkY1DZbm4I2LJRb12A+ZHsjI2+Un", + "/A/NiflszrZh/XbYCTlFBqbscXZGhsy89u0Dwc5kGqAWQpClfeAT8+reCcrn9eTxfRq0R99anYLbIbcI", + "3CGx2vsxeCZWMRieiVXnCIgVqH3QhxkHxUgNSzUAvhcOMoH779BHpaTrLpJx7CFINgs0oqvC08DDG9/M", + "Uitnj6ZCXo77tNgKJ7XKmVAzasB8xy0kYdOySBwpRtRWtkFroNrKt5lptIePYayBhRNNPwEWlBl1H1ho", + "DrRvLIhlwXLYA+kvokx/ShU8fkROfjh6+vDRb4+efmVIspBiLumSTNcaFLnr3mZE6XUO97orw9dRmev4", + "6F898YrK5rixcZQoZQpLWnSHsgpQKwLZZsS062KtiWZcdQXgkMN5CoaTW7QTq9s3oL1gykhYy+leNqMP", + "YVk9S0YcJBlsJaZdl1dPsw6XKNey3MdTFqQUMqJfwyOmRSry5BykYiJiTXnjWhDXwou3Rft3Cy25oIqY", + "uVH1W3IUKCKUpVd8ON+3Q5+ueI2bjZzfrjeyOjfvkH1pIt9rEhUpQCZ6xUkG03LeeAnNpFgSSjLsiHf0", + "96BP1jxFrdo+iLT/mbZkHFX8as3T4M1mNiqHbN7YhKu/zdpY8fo5O9UdFQHHoOMlfsZn/QvINd27/NKe", + "IAb7c7+RFliSmYb4Cn7J5gsdCJhvpBCz/cMYmyUGKH6w4nlu+nSF9NciA7PYUu3hMq4Hq2nd7GlI4XQq", + "Sk0o4SID1KiUKn5N91ju0WSIlk4d3vx6YSXuKRhCSmlpVlsWBO14Hc5Rd0xoaqk3QdSoHitGZX6yrex0", + "1iqcS6CZedUDJ2LqTAXOiIGLpGiE1P6ic0JC5Cw14CqkSEEpyBKnotgKmm9nmYjegCcEHAGuZiFKkBmV", + "Vwb27HwrnGewTtBkrsjdH39R924AXi00zbcgFtvE0Fs9+Jw9qAv1sOk3EVx78pDsqATiea55XRoGkYOG", + "PhTuhJPe/WtD1NnFq6PlHCRaZj4pxftJrkZAFaifmN6vCm1Z9DiCuYfOKVui3o5TLhSkgmcqOlhOlU62", + "sWXTqPEaMysIOGGME+PAPULJS6q0tSYynqESxF4nOI8VUMwU/QD3CqRm5F+8LNodOzX3IFelqgRTVRaF", + "kBqy2Bo4rDbM9RpW1VxiFoxdSb9akFLBtpH7sBSM75BlV2IRRHWldHfm9u7iUDVt7vl1FJUNIGpEbALk", + "xLcKsBs6w/QAwlSNaEs4TLUop/LAGY+UFkVhuIVOSl7160PTiW19pH+u23aJi+r63s4EKPTBce0d5BcW", + "s9YNakHNExpHJkt6ZmQPfBBbs2cXZnMYE8V4CskmyjfH8sS0Co/A1kNaFnNJM0gyyOm6O+jP9jOxnzcN", + "gDteP3yEhsT6s8Q3vaZk7z6wYWiB46mY8EjwC0nNETQvj5pAXO8tI2eAY8eYk6OjO9VQOFd0i/x4uGy7", + "1ZER8TY8F9rsuCUHhNgx9CHw9qChGvnymMDOSf0sa0/xd1BugkqM2H2SNai+JdTj77SAHmWa8xQOjkuL", + 
"u7cYcJRr9nKxLWyk78T2aPbeUKlZygp86vwI672//NoTRO1NJANNWQ4ZCT7YV2AR9ifWEaM95uVegoOU", + "MF3wO1qYyHJyplDiaQJ/Bmt8cr+xHn6ngV/gHp6ykVHN9UQ5QUC935CRwMMmsKKpztdGTtMLWJMLkEBU", + "OV0yra3nbvOlq0WRhANEFdwbZnTWHOsd53dgiHnpBIcKltfdivHIPgk2w3faehc00OGeAoUQ+QDlUQcZ", + "UQgGGf5JIcyuM+dE7N1IPSU1gHRMG0151e1/RzXQjCsgfxclSSnHF1epoRJphEQ5AeVHM4ORwKo5nYm/", + "xhDksAT7kMQv9++3F37/vttzpsgMLrznvWnYRsf9+6jGeSOUbhyuPagKzXE7jlwfqPnHe885L7R4ynYT", + "sxt5yE6+aQ1emQvMmVLKEa5Z/pUZQOtkroasPaSRYeZ1HHeQUj8YOrZu3PcTtixzqvdhvtgoj1bvCbZc", + "QsaohnxNCgkpWO9qI2ApC4sBjVi/q3RB+RzlainKuXP8seMgYyyV1WDIkneGiAofesWTuRRlEWOUztnT", + "O9gbsQOoefkEiMTOVs6/oNV8LqZiyA3mER7szvdmzD6rwnjU+zA0SD2vH4YWOc0ogTgWMOwhUWWaAkRd", + "gGNPrmqprWjIOr7FDWjEhlJaHyhCU13SPKQ6cjwjlK+bYZKU5cpwQaYItjOda7/asV2bj2GZ0dzaZiNB", + "FeFJaUh8wc7XKG2jYqDdAYnESENdyggJ0BwvQ8afRodfDx2Dsjtx4HRVf+zzuzLv73y9BzHIDkQkFBIU", + "Xlqh3krZr2IWxj65W02tlYZlV7Vvu/7Ww2je9j4gBc8Zh2QpOKyj4b6Mwyv8GGUceHH2dEYRpq9v+1XS", + "gL8FVnOeIdR4Vfzibge86E3lcLiHzW+P27LqhFFfqLWEvCCUpDlDnabgSssy1e84Ra1JcNgijhn+fdiv", + "R3vum8QVdxG9mhvqHafolFPpUqLG5BlEFAffAXh1mirnc1At/klmAO+4a8U4KTnTONfS7FdiN6wAid4R", + "E9tySdeGBaLa7w+QgkxL3eTJGHmitGGX1sRkpiFi9o5TTXIwb+pXjJ+ucDhvovU0w0FfCHlWYSF+hcyB", + "g2IqiTuQfG+/om+fW/7C+flhpLD9bI0SZvw6PGWNSpU6+vX/3v2vw1+Pkn/Q5I8Hydf/cfD+w5OP9+53", + "fnz08Ztv/l/zp8cfv7n3X/8e2ykPeywuwkF+/MI91o5foEReWyU6sF+bRnrJeBIlstD23qItchdjAB0B", + "3Wvqa/QC3nG94oaQzmnOMiNyXYYc2iyucxbt6WhRTWMjWvoZv9Yd5dwrcBkSYTIt1njpa7zrcxWPQEIz", + "mQsqwvMyK7ndSi/oWgd77/siZuMqyswmoDgkGIK0oN5xy/356OlXo3EdOlR9H41H7uv7CCWzbBWVDmEV", + "e764A4IH444iBV0r6BFAEfaom4/1NgiHXYJ596oFK66fUyjNpnEO592WnRpkxY+59Sc25weNbmunyxez", + "64dbSyOHF3oRC0xvSArYqt5NgJYjRCHFOfAxYROYtNUQmXmaOYejHOgMA6TxoSeGhGFU58ASmqeKAOvh", + "Qga99WP0g8Kt49YfxyN3+au9y+Nu4Bhc7TkrC5v/Wwty5/tvT8mBY5jqjo1VtEMH0WWRV6sLoGi4yBhu", + "ZtNx2GDNd/wdfwEzxpn5fviOZ1TTgylVLFUHpQL5jOaUpzCZC3LoYzJeUE3f8Y6k1ZsxJ4iGIUU5zVlK", + "zkKJuCZPmwWhO8K7d7/SfC7evXvf8Rboyq9uqih/sRMkF0wvRKkTF8OdSLigMmaNUVUML45skzRsmnVM", + "3NiWFbsYcTd+nOfRolDtWL7u8osiN8sPyFC5SDWzZURpIb0sYgQUCw3u72vhLgZJL7wKo1SgyO9LWvzK", + "uH5PknflgwePgTSC2353V76hyXUBgxUZvbGGbf0FLty+a2ClJU0KOo9Zfd69+1UDLXD3UV5e4iM7zwl2", + "awTVeadhHKpegMdH/wZYOHYOEMLFndhePl9PfAn4CbcQ2xhxozZFX3a/gjC7S29XK1Svs0ulXiTmbEdX", + "pQyJ+52p0njMjZDl/QMUm6MPpst4MgWSLiA9c6koYFno9bjR3bugOEHTsw6mbJISGySDYfKoM58CKYuM", + "OlG8rUGarokCrb0T6Fs4g/WpqKPsdwlQbsbLqr6DipQaSJeGWMNj68Zob77zc0IVV1H4sFOMP/JkcVjR", + "he/Tf5CtyLuHQxwjikY8Zx8iqIwgwhJ/DwousVAz3pVIP7Y888qY2psvkrDE837imtSPJ+eSFK4GFdz2", + "+xIw45G4UGRKjdwuXLIeGxMacLFS0Tn0SMih2WJg5GXD1IGDbLv3ojedmLUvtM59EwXZNk7MmqOUAuaL", + "IRV8zLQc0fxM1jLmjACYg88hbJqjmFR57FmmQ2XDfGSTivWBFidgkLwWODwYTYyEks2CKp9HCNMt+bM8", + "SAb4hDHOmzJbhAr9IKdSpV/3PLd9TjuvS5ffwie18JkswqflgKwURsJHt+3YdgiOAlAGOcztwm1jTyh1", + "vHW9QQaOn2aznHEgScwdiyolUmYTQdXXjJsDjHx8nxCrAiaDR4iRcQA2WnxxYPJahGeTz3cBkrt4cerH", + "Rltx8DfEQ1usg7IReURhWDjrMSClngNQ58NX3V8tT1IchjA+JobNndPcsDn34qsH6SRYQLG1lU7B+Rzc", + "6xNnN2jg7cWy05rsVXSZ1YQykwc6LtBtgHgqVomNbYtKvNPV1NB71GcbI+1iB9OmsrijyFSs0I8Frxbr", + "I7wFln44PBjBC3/FFNIr9uu7zS0wm6bdLE3FqFAhyTh1XkUufeLEkKl7JJg+crkbZKe4FAAtZUed6tU9", + "frc+UpviSfcyr2+1cZ11yYfDxI5/3xGK7lIP/rpamCqfxJu2xBLVUzTdMZqpNAIRMkb0hk10jTRdU5CC", + "HPBRkDSEqOQsZrozbxvAG+fEdwuUF5iwg/L1vcDHR8KcKQ21Et27JNyEepJinjAhZv2r04WcmfW9FaK6", + "pmwiGuzYWOa1rwB9ZGdMKp2gBSK6BNPoO4WP6u9M07is1PQislk1WRbnDTjtGayTjOVlnF7dvD++MNO+", + "rliiKqfIbxm3viFTzAIb9S3cMLV1P9244Jd2wS/p3tY77DSYpmZiacilOccXci5anHcTO4gQYIw4urvW", + "i9INDDIICe1yx0BusocTQ0Inm7SvncOU+bG3uo34wNS+O8qOFF1LoDDYuAqGZiIjljAdJFHtxmr2nAFa", + 
"FCxbtXShdtTeFzPdSeHhU0+1sIC76wbbgoFA7xkLF5GgmlnGagHfpsNtJPmYDMLMaTMXWMgQwqmY8snc", + "u4iqwsm24eoUaP4jrH8xbXE5o4/j0dVUpzFcuxG34PpNtb1RPKNp3qrSGpaQHVFOi0KKc5onTsHcR5pS", + "nDvSxOZeH33NrC6uxjz99ujlGwf+x/EozYHKpBIVeleF7YovZlU2oVnPAfHJos2bz8vsVpQMNr/KwhQq", + "pS8W4LLuBtJoJz1gbXAIjqJTUs/iHkJbVc7ONmKXuMFGAkVlIqnVd9ZC0rSK0HPKcq8389D2ePPg4obl", + "mIxyhXCAK1tXAiNZsld20znd8dNRU9cWnhTOtSEv8NKmvlZE8LYJHd2L14Wzui8pJvezWpEuc+LlEjUJ", + "icpZGtex8qkyxMGt7cw0Jti4Rxg1I5asxxTLSxaMZZqpAQ/dFpDBHFFk+kSRfbibClfWpOTsXyUQlgHX", + "5pPEU9k6qJhN0Wnbu9epkR26c7mBrYa+Hv4qMkaY2LJ94yEQmwWM0FLXAfdF9WT2C600UuhuXZskdjD4", + "hzN2rsQNxnpHH46arfPiomlxC6uQdPmfIQybjnp7CRT/eHUZNnvmiJY0YSqZSfEHxN95+DyOhOL4VJ4M", + "vVz+AD7A57zW7tSVWerZe7e7T7oJtVBNJ4UeqsedD8xymFPQa6gpt1ttKww0fN3iBBN6lR7Y8WuCcTB3", + "PHFzejGlsYSLRsgwMB3VBuCGLl0L4jt73KsqsMHOTgJbctWW2SjrAmQdJdfN2HJJgcFOO1hUqCUDpNpQ", + "Jhhb+1+uRGSYkl9QbgtVmH72KLneCqzyy/S6EBJzJKi42j+DlC1pHpccsrSr4s3YnNkaDKWCIMm/G8jW", + "t7FU5AolVOE6DjXHM/JgHFQacbuRsXOm2DQHbPHQtphShZy8UkRVXczygOuFwuaPBjRflDyTkOmFsohV", + "glRCHT5vKuPVFPQFACcPsN3Dr8ldNNspdg73DBbd/Tw6fPg1Kl3tHw9iF4CrobGJm2TITv7HsZM4HaPd", + "0o5hGLcbdRINJ7dFtPoZ14bTZLsOOUvY0vG67WdpSTmdQ9xTZLkFJtsXdxMVaS288MxWgFFaijVhOj4/", + "aGr4U4/3uWF/FgySiuWS6aUz7iixNPRUZ/C3k/rhbDkZl3zVw+U/oo208Cai1iPyepWm9n6LrRot2a/p", + "EppoHRNqE2PkrPZe8CmhybHPu4PZaKsktBY3Zi6zdBRz0JlhRgrJuMaHRalnyd9IuqCSpob9TfrATaZf", + "PYlk4G1mguS7AX7teJegQJ7HUS97yN7LEK4vucsFT5aGo2T36miP4FT2GnPjZrs+2+HmoYcKZWaUpJfc", + "yga50YBTX4nw+IYBr0iK1Xp2osedV3btlFnKOHnQ0uzQz29fOiljKWQsmV593J3EIUFLBufouxffJDPm", + "FfdC5oN24SrQ36zlwYucgVjmz3LsIfBMRF6nPit0pUl3vuoR7UDfMTUfDBlM3VBj0szAe/1GP6987hqf", + "zBcPK/7RBvaGtxSR7FfQs4lBdvDodmbV98D+TckzsRq6qa0T4jf2M0BNFCUly7Nf6qjMVvJ1SXm6iNqz", + "pqbjb3WZqGpx9n6K5qxbUM4hjw5nZcHfvMwYkWr/KYbOs2R8YNt2Pni73NbiasCbYHqg/IQGvUznZoIQ", + "q82At8qhOp+LjOA8dYK0mnt26wgE2Z7/VYLSseAh/GCdulBvad67NtkwAZ7ha3FCvreVYBdAGulv8JVW", + "ZRFwqW+tQr0sckGzMSZyOP326CWxs9o+ttiJTXY8x0dKcxUtfVWQ+3GYe7CvWxIPXRg+zmZfarNqpTEb", + "ldJ0WcSCQ02LU98AI1BDHT4+X0LsTMiLoKajjSM1Qxh6mDG5NC+uajQruyBNmP9oTdMFPskaLLWf5Idn", + "6fZUqYLKeFWFmyohIp47A7dL1G3zdI+JMO/mC6ZsAVA4h2Y8ahWc7VQCPj61uTxZcm4pJSp7bEoecBm0", + "e+Cso4ZX80chayF+R4HcJrnfNWn5CfaKJmhqZ0DvlMSz0Y1V5RJf2DmlXHCWYnqk2NXsKoUOsYENyCTV", + "VrL6I+5OaORwRfOuV25yDou9mdg9I3SI6yrhg69mUy112D81lqRcUE3moJXjbJCNffkApwdkXIFLcIl1", + "ZQM+KWTDrogcMmqqTiqTxo5khGExPQ+778y31+7Zj/7iZ4yjgO/Q5lzTraYOCxlq8ypgmswFKLeeZmyw", + "+tX0mWCYbAar9xNf+NBmg0GznFm2tUF3hzryFmlnATZtn5u2Lk9Q9XPDA9lOelQUbtL+4hJReUCveC+C", + "I5bFxJt2AuRW44ejbSC3ja4keJ8aQoNzNERDgfdwhzCqQgutIj5GaLUUhS2IdeGKZjBgPALGS8ahLssZ", + "uSDS6JWAG4PntaefSiXVVgQcxNNOgeZofY4xNKWd6eGqQ7VzCRmU4Br9HP3bWNeI6GEcVYNacKN8XVUD", + "NdQdCBPPsQyxQ2S34gNKVU6IyjCioFUDIsY4DOP2VWaaF0D3GHRlIttdS2pPzi43UV+Q6LTM5qATmmWx", + "jFTP8CvBrz65FKwgLavElEVBUsyJ0kwS06U2N1EquCqXG+byDa44XVBUJUINYWEXv8MYhDJd47+xrIz9", + "O+OcMHZ2A/QeF64KxY5yc3OkjtRraDpRbJ4MxwTeKVdHRz315Qi97r9XSs/FvAnINaeG2MTlwj2K8bdv", + "zcURZk7opBq1V0uV2ACd7oQvhYfPxiokt8mV8Crr5B5FY09VamuzAqK/aNYYL78e19sgIQa196u1HvY5", + "4Ka9/uJUu8g1TclGFtQbDWS9d2zcD0IR15z2eexYhx3zudN7mGTYkbNx7I0I9a5gXYB+9H6mpKDMmcZr", + "ZtHFrPNI71cXbjp09Qa3F+H8vHs1dj+e9/lkE8X4PAeC39tlhs7AhbNXdebtWr1Xkn8S2l9dmVc7XuUV", + "H11/1zsBp7pZNWiv0vbUpbS3y3Rv8h9/sT5sBLiW689AhdvZ9E6Rpq60a9VTdRNSpUMelB65cSvG6y31", + "5z+qcx4hPRVCsToFd6wQ00Bft1OspRTkb+qO5R1NziHVmHe9NqBLgF2yOZnJgiJ/t3mQet6OlUugS3+0", + "KedRN9n6lgutE5YUhNbZRNWT4Rl+jio3KWRKmAF3DtzV2WsGHAx2e57NINXsfEsY2P8sgAchRmOvhLD1", + "coOoMFa50WIWkd1VbDVAm6K0NsITZPO7Mjh9QSBnsL6jSIMaopmzx/5euUwCCcQAcofEkIhQMTcEqzV1", + 
"lmGmKspALHi3H9sd6lRcvTV3gqDGS87lSdLcuHWg44Yp40U/Bs1luu4U/oseoX2RYt2iAf3C9gus0aCq", + "eng+AUX4JCXH3TR9Fy6BBQbtVYYCn8oClP/NR+jaWXJ2BmFVIDTLXFCZ+RZRPYNXYSQb7qNOeJdPeN8G", + "elbNzGonzW5ATyTxE7riprkw8lfS58/c9IsMi+ej94dN+Y0enwauGUhXPQ2FvVwoSLTwTp2b4NiEClfo", + "/TJIUL3JFi1wvSlQ3tY5XjDpLMWUJ9R5toQLJBKW1EAng0ws/XNuQvZz+91HsPiko1vVKRW9bk80791z", + "meogMaT6GXG35fbImMtoVhjntlariqVl4QaVoeq/kCIrU3tBhwej0j4NTnq0gZVElRJpd5Wd92WOKcBe", + "BnGGZ7A+sKK/T9XvtzKE3opQdg1BXH9rt/eqdIq/r/O5XcB8L3DepOJmPCqEyJMeXf9xN7tM+wycsfQM", + "MmLuDu/Y1lO2hNxFFXNlzL1YrH02laIADtm9CSFH3LoSe7tuM71xa3J+R2+af4WzZqVN+OR0SpN3PO6T", + "iamY5BX5mx9mM1dTYJjfFaeyg2zJXbLqyWwj6UWkiM9k6KO0a2ltF1apicpCEZNStpSwiFiRfU0EX2HD", + "R6xosWRpt4pCR5SYYTWqhEYGP64Y+LhRK5C1Cnf4HEO2TENKrQBnHg+U5aUEFzlgy+a00ukXVC/89pnm", + "XTHLXNmg0K3fpmSnyj4K/OPE1expnwtRJDmcQ8OQ4MIZyjQFpdg5hPV+bGeSART4VG9fIDENeUhXLR7i", + "1p4EOtYh2I0yFYtYu1NkC8foKcaeWPJQQ0nIQHTOspI28KeuUIplYG33ENaBJ2TnwxFfXOdouHIpSZXM", + "LabIdOEkfgsN/dalXVoCUlCCpRqzpxZlhYWrCCO9qI1j9nJJNAbRQ1enHTkyQdmVzZqXMMdO7bwrrWkE", + "X2r+1LW39FV9GocVgPEdtoAXKuSCEjBeEnLg3LCH7asKKcFSeimhsfxtOj63wJp9BVtkebdZps14Zr2z", + "mvsSKHDV80ov2leXqa0+xYQ6gmOSsa7aVaGpDHOVh4RjeLc8p/n1q04x09IR4sPVuY0vNNS9hUi2qFSX", + "c3N7SQfNHejZ9jc1f4Oq3v8Bs0dRG6cbytk8KlnBW4aQZdKc5KKucIdDkgsc0xpFH35Fpi5Ep5CQMsVa", + "0YsXPo1ypWrCqgJ1+ePNuq1t6/xF6CuQ8cyLL+R1nZJVC7wxagjrI3rDTKXn5EapPEZ9HbKI4C/Go8Jc", + "GVuui7OGtdSmuG65AQoJe7aaBv5PO1pNu1lAhi7PWgbNpVMq6K5z8G3dwG3koq7XNtTk30XuprydQyz1", + "8XS8pju6CliEYC5rgqCS3x/+TiTMsFiNIPfv4wT3749d098fNT+b43z/frzM8nU5CVgcuTHcvDGK+aXP", + "bdy6RvdEKLT2o2R5to0wGvEmdbknjKj4zUWc3UjBqd+sLad7VF3Rj13ck9qbgIiJrLUxeTBVEEkyIIjE", + "dYuEjKBWJC0l02tMhONV/+y3qDvD95W10Fmbq9QJ7u7T4gyqVEq1bbFU/nb9XtAc7yMjU6NzmMbSut+u", + "6LLIwR2Ub+5M/xMe/+1J9uDxw/+c/u3B0wcpPHn69YMH9Osn9OHXjx/Co789ffIAHs6++nr6KHv05NH0", + "yaMnXz39On385OH0yVdf/+cdw4cMyBbQkQ+7Hv0vVmVLjt4cJ6cG2BontGBVRW1Dxr60DE3xJMKSsnx0", + "6H/6P/6ETVKxrIf3v45cVOdooXWhDg8OLi4uJmGXgzkaExItynRx4OfpVjJ+c1xF5tinJe6oDbrwKgNP", + "Ckf47e23J6fk6M3xJKiUeTh6MHkweYiFFAvgtGCjw9Fj/AlPzwL3/cAR2+jww8fx6GABNEfbu/ljCVqy", + "1H9SF3Q+BzlxNXbMT+ePDrwocfDBGVI+mlHnsRRBNsYoCCzplp5xRll01LQxRI1U7splFh9XCf6d1oJn", + "GPphbROGtVXIOs7qTLbHNaPy+XxsgsPDXyOVB2dsbt7RjYqqrVqt5uX93yc/vSZCEvekeUPTs8pthhzP", + "bG4GKc4ZRhRkQRiK6TnxNPuvEuS6pinH7cLkfT5fu4vTWKp50XRqriWpWEHxWJkfnNmQQkDMldmzZlZa", + "lhBCUrNew04fJF+///D0bx9HAwBBG7wCTO3wO83z322FWFihIdMnR3LJL8aR3OQoQY9rMxp2qHdyjF7Z", + "1dew/EzVphkL9DsXHH7v2wYHWHQfaJ6bhoJDbA/eY/IBJBY8Z48ePNhb3aoq/M36dlejeJK4xEBdJmQ/", + "RUrR+vJVPXVon+xxoU3n1Ssvtz1cZ9HPaIYlQUBpu5SHX+xSjjm6wZhLgdhL7+N49PQL3ptjbngOzQm2", + "DDL7dC+an/kZFxfctzQCT7lcUrlGcSaoW9QKraVzhRYVZJH2bDcqlYzef+y99Q7CQgwHHxqeFNmV7sRO", + "DZrjF1uuyTuqj3N282K26jyY71Uaf7S1u2IWWFhA3ZuQ78PeyL0xzYRN4lBKDpl3hPC3XpU3y2fjqmG7", + "o8IMHNFLO1AR397fN31/HzUVHI3cizFgGqdgI0wdb6yrXqDdCKpWJb9LVcoLKi5cIm/1Jy0n1Hpf2pne", + "x55/Wxn1Le56cNcnJgXwVhJTs1LGp2fNPmKhukkaV8YnZNxfuND3iuaGToLltqK5bULSW2HwLyMMVg66", + "tiCvz8F9NfEQq/EcfPBJZvcgErokuwOEwfBZHfQNkqDebbGTexObMTZsczme4Txyt4p5mPr3VsD7DAS8", + "blrtGBh1suSbE+oQhkWdd3uXarqNMlk75Qf/QqW4vzCyesU2A+l2ge0S7LMjjDlm/cnY6p9SCHNIuxW/", + "/tLiVxUncyUBrJEY30VeBWasK2nv2to5pitJrBkrFXC2qgyxO8LjuoiPYTGYV8mn1FBj/zJEE6p9NNrN", + "GnfejV0R63sIH6jP1scvtklXX5CeZ3B+v8gtEN+bT81Lo2aHt9djdhjGm548eHJ9EIS78Fpo8h3e4p+Y", + "Q35SlhYnq11Z2CaOdDC1mYc3cSXeYkvIKOqMwgGPwsINYdZi65Fx15XLDDNB3JsQn99YVdUaXP6BuaB5", + "nWeJyrntZHicQQK54/88xPHvTMh3GBGg1Rgdy7RL5U/uMK4PHz56/MQ1kfTC+m21202/enJ49M03rlmd", + "zdq+bzrNlZaHC8hz4Tq4u6E7rvlw+L9//8dkMrmzlZ2K1bP1a5s67nPhqeNYPEG18X279YVvUuyV7lL6", + 
"bUXdtZjtn4lVlPuL1e3tc2O3j8H+n+LWmTbJyD1AKw1mI95+j7eQPSa73ENjnx3a8J3qMpmQ18KlPilz", + "KomQGUhX3mZeUkm5BsgmnlIxak3ZVA9pzoBr82DEgh0yUSwDGzE+LyVkJGdLrGgr4Rz94XF6fMs3INjO", + "6NFr9rNl8q/oKkiHMK2uaS3cklHduaQrXzIIi2IIiT998w15MK5fLXluBkgqxMSY65KuRteo7auIbZCv", + "eTOr/1ZnXBx7iOaoln5sdTbaTCH+1+bcX6zEbsndbeyeOOfOBp/aoBPqD1yCkY2aAyvY2YJCWOFmXYfz", + "GinPi1BxFmdmGKoU+IxtA1tV0tHHZxu9t4f49vF/JVbSJqgd2QZGmKqDD2jLCHlG59xihNxfy0wa2Iyk", + "WHqjkSAz0OnCBee2UB9hT76mQD9v2lRRct9SDe5iN6V1mN8RKx0OTBgSxE2i4Q5khIh/8ul9zWc2sxkq", + "fL0IXzgVTVLM1xKryoi5YotMeT9+H8NrdnEnKJ/Xk3cFMkTLPuyetwjeDcEd5vitr1mFGHOL+DN4+vun", + "ZEJeizpE3JVL+DOaHD/lzf6pF/RacLC2dSP5Wlq8NaNWYgeq8BEpPjeIfb9UiasvLYIc+OprG+WQH2zt", + "s42yyJDb20z2RV7hP0TrGjduGbO2ydbEB/VoQ5izaWhzTDezS9/gK+ZG+Oln+LS5CY51PSwGD6nnM04s", + "4PtlOphuxxLzQZVYuI8DxXO1D+ZGWlTuZ9H06lPIBZ+rz5MVbaKOOF4iVFJlsY+nqv/rnd3nmMnHPHmt", + "56PL7aQYT8FWF/QF5l3iNQvh364PQs2WPhcnD2NWb5i7PH3w+PqmPwF5zlIgp7AshKSS5WvyM68qQV6F", + "22Ei/irXmtcGR2svoLWpmQMsDRMWXZ4JNlzWPugVyz5uZ4ZBxr4d+SDjAR8M8wvSogAqL88At5uuTlsz", + "Hr8IvYIb+eGr7FkRUAyKdnSM/4/RQL0ThruLmbv8Sm4B9Zm+HJtwLrtiNq6cY4wUIGaH5B2/T9SCPn34", + "6LdHT7/yfz56+lWP5szM4xL0dHVn9UDmsx1miALti1YH7ldqr/B7eN27vdsmjkcsW0XzRdc1YTpZc51Y", + "dkeRgq5708wXW2rahMPW9W2uP7Gh0my6iL6v/POnqnp7zJ9Vr2Cbfc+VgrmtZdMTNBHwGUNodVGbCuub", + "69tskCZbZFkVErnux2kdXGAvOo882bpzblTQ1Tf1SE3wjQrcCzZNtNycTIk5zceBubuqI46+K2VRCKmr", + "060mg8Q96DPbNaS9PsLdSZhLqU4XZXHwAf+D2bw+1gEHtnJnYOdzv8OqAMmM6IiFe9yvtvL+gbXtb5L+", + "TmyLK96ULTHbehS0Mqz7dHPO30DMyCuWSnGECfTdJaTWSsOykxPQdf1tU0336IUleM44JEvBY5nqfsKv", + "r/BjNEW/0DTv63xqPvb1bbHMJvwtsJrzDOGXV8XvZ/JAv5JiqbVaCeZw11XPLP3veAD9oVnztHuS1jzt", + "Hr5G6bWenw8+NP50nj2upVqUOhMXQV98FloONcSoH2TQHq5Nr15KrUzUimSgDNF+eaqrAA+xE1N9jeQn", + "C/Kk96Yo+4sqs2aMZy0iQTkzFecgVaXmkN4J51aj9efRaA3e9514rM3HuY2jlWq/EslrkYEdt5kCNxZE", + "ykUGLm1oVxCpJLO4FsDfSnW71rsspeV8oUlZEC1iL8C6Y0JTy2RtcUa1rZqdbeVrsJwDobkEmq3JFIAT", + "MTWLblYFJVShA71/Rjr5M16UrYarkCIFpSBLfNDsNtCqZKz46NQb8ISAI8DVLEQJMqPyysCenW+Fs0pg", + "rsjdH39R924AXisKbkasdduNoLdyDXLSXhfqYdNvIrj25CHZUQnEiwao9RLLIgen94qgcCec9O5fG6LO", + "Ll4dLagYYp+Y4v0kVyOgCtRPTO9XhbYsEnN/R8pG2q+nbImSGKdcKEgFz1R/cddtbBmLiARrUWYFASeM", + "cWIcuOfB+ZIq/daZQMJaX0GxEjPFhmq0fYnyzci/VGnyO2On5j7kqlRVLn2n1ojX2+Kw2jDXa1hVc6EN", + "yo9d6U20IKWCbSP3YSkY3yFLheVldWA8wlIi3cVhphPqFBRdVDaAqBGxCZAT36pRSK42bPQAwlSN6Kpm", + "ZJNygqJZSouiwFp2Scmrfn1oOrGtj/TPddsucblyRnhvZwJUqNNykF9YzCoM5VhQRRwcZEnPnNpr7jJB", + "RQp9sSUkaK5ONlG+OZYnplV4BLYe0rKYS5ph2VEaUaX8bD8T+3nTALjjnjyxpnMyhVm0NInZ9JqSZa+K", + "qBpa4HgqJjxiCWhFUnMEZ1grxxOI671l5Ax66k+fBjUxXXOcK7pFfjxctt3qHrWUGcPsuCUHhNgx9CHw", + "9qChGvnymMDOSa09aE/xd1BugkqM2H2SNai+JdTj77SAtjYvvL8aF0WLu7cYcJRr9nKxLWyk78TG9Idf", + "ZKhf25j7CT3VmvrT4P03uczb9uCCMp3MhHRV9elMg4yo8lqFDijTPpLQmlW0cH4UBEdw16Ybx1Vnr9Nx", + "OCZiQSC+didbRrL7mKm+E3JQOFDT6Y0yTUquWR6ERFcv5c9PX3irA7jVAdzqAG51ALc6gFsdwK0O4FYH", + "cKsDuNUB3OoAbnUAf1kdwE3F9yVe4PBez1zwhMOcanYOVeDfbUqiP1U8THVVeZ0EajEuKNMuwSehXgzA", + "L1cLB9RAc8QBy5HHFkL1Zk7CctBKlDIFkhoIGSdFTs3TAFa6SjfXTGTqUyu7gtCYG5UqePyInPxw5N32", + "F869vNn27pFLUa70Ood7LqFDVbHVZ3YAbpDuEjtQfyX4tHQuSR/LgSiD3m+x9Qs4h1wUIK1HMNGyjGh8", + "ToHmzx1utih8GgU3zWi/jxt6Joe2JS2Cwve4VqoItbEczXqZM5qr/oKZdrwlLWKZ4aqLz6qCkJs8E9m6", + "dULMrh3gBjbPRu28zziV60jgTudEdEhDC8OvHGF1dVkf9x5i0iXaLplto7CYtC5BRc/xJiqPxlZUG9YZ", + "yoYAzVp0Ei0o3Q4oGFUADnGANfTs94S8tf1uNoAdIXJHrGbmn43fYLNlxTSwrXlEONbzpUabe8RHTy+e", + "/bEh7KxMgTCtiI9S2X69jEerxIw0B544BpRMRbZOGuxr1LiFMqaoUrCcbr+JQv7pciG7y8d82XxP3cw1", + "8iJY3CaeHBLNKnEMuIc7rzUM5s0VtnBEx54DjH9qFt3HRkMQiONPMaVSuwLNjkyvnmZ9y/huGV9wGlsS", + 
"AeMuqq/NRCafkPHJtSx5P8/7dgVpaYALT/Jd1M6jSQ5WumHXzGBazueY07ljozNLAxyPCX5DrNAudygX", + "3I2C7OBVns+rppZqD9flLkEE210hyVyKsrhni1fxNRozlgXla2/yhUSxZZlbHNp0ePtltDbwrusIgOZY", + "p/vr02q/8Sq/QHfrrtrm7xYt5IIqYvcXMlLyzEUOdcJzV3x4Pmk79OmK12x6Y0Zpu97I6ty8Q64Iv8su", + "xKUycxcgE73i9kA1k77bMGB7cie3uWz/GtfGG5uHoYfBdkNaa4awp9tDBnwNr48gcUkdCteswGXrA/YF", + "joRZTGzLvTqPdIZv+pAE1fmsjRTyglBfaCAVXGlZpvodp2ijCRY26fqXeG10P3977pvEzYQRK54b6h2n", + "mIe+stxE+dwMImaK7wA8G1XlfA7K8MqQSGYA77hrxTgpuXlpiRlZslSKxIahmjNk5JOJbbmkazKjORoZ", + "/wApyNTc7MGuW4Wx0izPnUOLmYaI2TtONcmBKk1eMcNlzXA+w1jlyQX6QsizCgvxpBZz4KCYSuLKl+/t", + "V8wb4ZbvlXyosLSf63jv600Y4WFnWS/kxy8M3BRT5ORM6doHogP7tdm/l4wnUSI7XQBxLmFt2iJ3MYOM", + "I6B7TeuQXsA7bm44LQhydaovRw5tM0/nLNrT0aKaxka0rEF+rYOeeHvhMiTCZG5NK3+iwMyADrz5Ejce", + "K9S0935HM8rGopexry7PWE8j90hoKMKaF/eJa3HaAPnPm6P+/b60ZsNtGgtAnlnp+EPOqgWp9ujTas42", + "upTUOeWWS8gY1ZCvSSEhBczkgr429YNzYmP2SbqgfI73jhTl3JWDtuNcgIQq/ZZ547WHiGf8WPEEBd2I", + "48ARsco6T17m6GHBwxCd2NlyZ/Oo9Ki1CRWGPBsjx+F7M2bfK3I86pUSDVLPa98ui5zmGYlj4UKUeZao", + "Mk0h5qFxHPWaqpbaFOBwLCu7uQEhI1kpbWVsQlNdYg2aKebFFLbkFOXr5vWPtbuEDPJnEkoUm3OqSwlj", + "uzb0FJsCQYPjJOI+07rcG9d0sPM1Stuo2Ed5gdtjcHsM/nzHoHPpvHVEMmupEixlhAT4pyoMUWeOO3Il", + "Ntq50f6sRSL61clus4W8TtXZ9Zdi+JTPi0+9mk/1WvFsXBGK9X5DTtDKpmgZA1WEacctp0DgnOYlclOX", + "L9694SfktOa1lZd4qVwa03RBGXfZa6p4BoRDu1TL2ud23JdCk17oFXf6TMs2UZFpsAFpKZle40OGFuy3", + "MzD/f29eArb+qH3jlDIfHY4WWheHBwdYsn8hlD4YfRyH31Tr4/sK/A/+eVJIdo4VfN5//P8BAAD//6G6", + "O0DCSQEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go index 3fd51bda92..b513cffac6 100644 --- a/daemon/algod/api/server/v2/generated/participating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go @@ -312,19 +312,19 @@ var swaggerSpec = []string{ "VBTN8P7XmcvqnK21LtXh/v7l5eVe2GV/hc6ERIsqXe/7efovGb8/rjNzrGqJO2qTLrzJwJPCEX778P3J", "KTl6f7wXvJR5ODvYO9h7gg8plsBpyWaHs2f4E56eNe77viO22eHnq/lsfw00R9+7+aMALVnqP6lLulqB", "3HNv7JifLp7ue1Fi/7NzpFyNfdsPy1Xvf275m7IdPbGc7f5nX6VlvHWrDIrzswUdJkIx1mx/gcmfU5uC", - "ChoPLwUVDLX/GUXkwd/3XUZc/COqKvYM7HunbLxlC0uf9cbA2unhnuTf/4z/QZoMwLLxp31wbabYvn3M", - "sv/zlqfRH/sD9R5VWEE0yQ3TzejYs/F4CuwBOs6Qr+neK/hYodlaOfBwPD04+HM8iP/8moCO2lRaEaMR", - "YF7SjPh8Q5z7yf3NfcwxusPwOmJ5OULw/P4gaJfD/hG25J3Q5AdUOq7ms2/ucyeOuRGBaE6wZVDtpn9E", - "fubnXFxy39IIAVVRULmdfHw0XSl0OEh2QZ0IFryQMPuEHi6bg9o+akdZ1iN6KwyB0i9Fth3BWKFWpcsP", - "aZDWyIKMmyX0lcn+k5O9V+vPYUus/987CrjIYBZKaVpWcHVLnvCnfWD/K0/5ylOknf7Z/U1/AvKCpUBO", - "oSiFpJLlW/Izr7N7b8zjjrIsGlvZPvo7eZzRs1ORwQp44hhYshDZ1lcwbE1wDlbt6wky+5/bZcitCDjL", - "IAcdjRszv9cPRPYXsdiS49c9Ccd263Lel1tsGpT3Pvz42epNRilo1JouiD3OGFaW7vKmT3GuOUb2ZiEr", - "oYnFQuYW9ZURfWVEtxJuJh+eKfJNVPuwtTNo786e+zIYsQJIVPdBmaKjfNHjeycb39d/YvqOjVGFjAQf", - "bDJFF81fWcRXFnE7FvEGIocRT61jGhGiu54+NJVhYLBN1n3sB10HvnmVU0kUTDVzHOGIzrhxH1zjvpW6", - "KK6sTkd58x5aZAPvVs/7yvK+srw/D8s72s1o2oLJrTWjc9gWtKz1IbWudCYuA08CwmIje/p24Pr50dbf", - "+5eU6WQppMt4wmLY/c4aaL7vavl0fm3S53tfsCZA8GMYrhj9db9+ayD6seuEiH11RviBRr4Sm//cOCFD", - "px6y9tqd9/GTYctYydZx/cZHdbi/j1kEa6H0/uxq/rnjvwo/fqpJ4HN9VzhSuPp09f8CAAD//9hbr4Fb", - "yAAA", + "ChoPLwUVDLX/GUXkwd/3XUZc/COqKvYM7HunbLxlC0uf9cbA2unhnuTf/4z/QZoMwLLxp31wYVOCZEbI", + "Qvez+9Xmj+3bJy77P295Gv2xP3zvqYUVRFPfMAmNjj0mj2fDHqvjDLmd7r2Nj3Wbre0Dj8zTg4M/xzP5", + "z68J6KilpRVHGgHmJc2Iz0LEuZ/c39zHHGM+DAcklsMjBM/vD4J2kewfYUveCU1+QFXkaj775j534pgb", + 
"wYjmBFsGNXD6R+Rnfs7FJfctjWhQFQWV28nHR9OVQjeEZBfUCWbBuwmzT+j3spmp7aN2lGU9orciEij9", + "UmTbEYwValW6rJEGaY2EyLhZQl/F7D9E2XvL/hy2xEYFePcBFxnMQtlNywqubskT/rTP7n/lKV95irTT", + "P7u/6U9AXrAUyCkUpZBUsnxLfuZ1zu+NedxRlkUjLttHfyePM9p3KjJYAU8cA0sWItv6uoatCc7BKoM9", + "QWb/c7s4uRUMZxnkoKPRZOb3+tnI/iIWW3L8uifh2G5dzvtyi02Dot+HHz9bbcqoCo2y0wWxxxnDetNd", + "3vQpzjXHyN4sZCU0sVjI3KK+MqKvjOhWws3kwzNFvolqH7aiBu3d2XNfHCNWFonqPihTdJQvenzvZOP7", + "+k9M37GRq5CR4INNseii+SuL+Moibsci3kDkMOKpdUwjQnTX04emMgwMwcm6TwChQ8E3r3IqiYKpZo4j", + "HNEZN+6Da9y3UhfFldXpKG9eSYts4N3qeV9Z3leW9+dheUe7GU1bMLm1ZnQO24KWtT6k1pXOxGXgX0BY", + "bLxP3w5cP0ra+nv/kjKdLIV0eVBYIrvfWQPN912Fn86vTVJ97wtWCgh+DIMYo7/u1y8QRD92XROxr840", + "P9DI12fznxvXZOjqQ9ZeO/k+fjJsGevbOq7feK4O9/cxt2AtlN6fXc0/d7xa4cdPNQl8ru8KRwpXn67+", + "XwAAAP//qWbn/XHIAAA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go index 92c4525894..f197fd7aa7 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go @@ -340,18 +340,18 @@ var swaggerSpec = []string{ "LnjC8fmRayCBKvjk+OHvdoVnHAPwjDhKrLj9cTr56ne8ZWfcCDY0J9jSrubx73Y15yCvWQrkAopSSCpZ", "viE/8Tp9OiiZ1md/P/ErLlbcI8JoklVRULlxQjSteU7Fg4T2rfynF7vQCNrIRelCoZsbRdRJ65ktvpi8", "/+h1gJGKxbZmRzOs5zK2Kaig8bB2gj4DdfQBrd6Dvx+5Ihfxj+h9sGrtkY+zjLdsKT4f9NrA2umRUp0u", - "q/LoA/4H1cwALJtS1gfXFn84su/T93/e8DT6Y3+g7jtpsZ+PPrTr9LcQqpaVzsQq6It2desU6s9Xv1zV", - "+vtoRZk2EoILlsU6iv3OGmh+5NLAO782mVe9L5hOFvzYkSlKYSt1tNW5t3R10fIaS1u645nINlu4zTqZ", - "MY5HMGQRjbXMfuzrB/1HuZdgyw97h2NEANOCzKSgWUoVludzBRN6iuHHWyofHblxfRZxJyGYqGv34y7N", - "YTrc6WPAcfd8rzyoaouSrlL+3fHfUirpQfSMZsSXdknIK5qbDYeMnDrZt4WN31qi+PwiwGe+sz/ZJfvM", - "Hz5FKEaWtbQjGYnucTFQ7qCOuVGNCmUYwAJ44lhQMhPZxhdplnSl1zYOrcvcjupq29GPd2CG++e2ve0y", - "uX2xdH2xdH2xhXyxdH3Z3S+WrpGWri92oC92oH9JO9A+xp+YmOmMH8PSJla8pK15rW5Hm2TLmsW3o/2Z", - "rmWyfnFjpg8JucBUNmpuCbgGSXN8AEIFuakFBhZizgBkJ5c8aUFiw/fMxPeb/9q4Sfe+/vGDbh+lWZ6H", - "vLnfF+Vd/GSrvnxDLieXk95IEgpxDZnNkA9Te2yvncP+n3rcH3tZgphcja86+9QCoqr5nKXMojwXfEHo", - "QjQxv4ZvEy7wC0gDnK21QJieuuIdTJGVWbyrO9rOQGpL7n0J4KzZwp0+8w65xN3lhvD29JX/xxhH+b+0", - "lH6LZIVbMdKtY/e46heu8im4ymfnK793L2RgPvxfKWY+OX7yu11QaGx+LTT5DuPZbyeO1bWcYyUnbipo", - "+cLg3tzXxMSGMaZ4i9bRpe/em4sAH1ZxF2wTMnlydIRJ7Uuh9NHEXH/tcMrw4/saZl9xf1JKdo0F/N5/", - "/J8AAAD//7X6ZKDq1gAA", + "q/LoA/4H1cwALJtS1gcX1iVIZng5RpS6X21JiCP7an3/5w1Poz/2h+++nhb7+ehDu3p/C81qWelMrIK+", + "aG23rqL+fPV7Vq2/j1aUaSM3uBBarK7Y76yB5kcuObzza5OP1fuCSWbBjx1JoxS2fkdbyXtLVxctX7K0", + "BT2eiWyzhQetkxnjeDBDxtHY0OzHvtbQf6p7CbYosXdDRsQyLchMCpqlVGHRPldGoacufrylStKRJtdn", + "EScTgokaeD8a0xyxw52eBxx3z1fMg1q3KP8q5V8j/y1llR5Ez2hGfMGXhLyiudlwyMipk4hb2Pit5YzP", + "Lxh85pv8k129z/zhU4RivFlLZ5KRmB8XGeUO6ph71ihWhgEsgCeOBSUzkW186WZJV3pto9O6zO2orsEd", + "/XgHxrl/bovcLkPcF/vXF/vXFwvJF/vXl939Yv8aaf/6Yh36Yh36l7QO7WMSiomZziQyLG1iHUzamtfq", + "drRJwaxZfDsHgOlaJuuXPGb6kJALTHCj5paAa5A0x2chVJCxWmC4IWYSQHZyyZMWJDaoz0x8v/mvjaZ0", + "r+4fP+j2UZrlecib+31R3sVPthbMN+RycjnpjSShENeQ2bz5MOHH9to57P+px/2xlzuIKdf41rNPOCCq", + "ms9ZyizKc8EXhC5EEwls+DbhAr+ANMDZCgyE6akr6cEUWZnFu2qk7byktuTelwDOmi3c6UnvkEvciW4I", + "b08P+n+McZ//S0vpt0hhuBUj3Tp2j6t+4Sqfgqt8dr7ye/dNBubD/5Vi5pPjJ7/bBYXG5tdCk+8wyv12", + "4lhd4TlWiOKmgpYvF+7NfU2kbBh5irdoHXP67r25CPC5FXfBNoGUJ0dHmOq+FEofTcz11w6yDD++r2H2", + "dfgnpWTXWNbv/cf/CQAA///C44qjANcAAA==", } // GetSwagger returns the content of the embedded swagger specification file 
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 3af95fa1c5..d9e53216d9 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -942,12 +942,6 @@ type preEncodedSimulateResponse struct { // SimulateTransaction simulates broadcasting a raw transaction to the network, returning relevant simulation results. // (POST /v2/transactions/simulate) func (v2 *Handlers) SimulateTransaction(ctx echo.Context, params model.SimulateTransactionParams) error { - if !v2.Node.Config().EnableExperimentalAPI { - // Right now this is a redundant/useless check at runtime, since experimental APIs are not registered when EnableExperimentalAPI=false. - // However, this endpoint won't always be experimental, so I've left this here as a reminder to have some other flag guarding its usage. - return ctx.String(http.StatusNotFound, fmt.Sprintf("%s was not enabled in the configuration file by setting EnableExperimentalAPI to true", ctx.Request().URL.Path)) - } - stat, err := v2.Node.Status() if err != nil { return internalError(ctx, err, errFailedRetrievingNodeStatus, v2.Log) @@ -1662,3 +1656,8 @@ func (v2 *Handlers) TealDisassemble(ctx echo.Context) error { } return ctx.JSON(http.StatusOK, response) } + +// ExperimentalCheck is only available when EnabledExperimentalAPI is true +func (v2 *Handlers) ExperimentalCheck(ctx echo.Context) error { + return ctx.JSON(http.StatusOK, true) +} diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index fe02164f2d..0c7c5135c4 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -653,14 +653,13 @@ func TestPendingTransactionsByAddress(t *testing.T) { pendingTransactionsByAddressTest(t, -1, "json", 400) } -func prepareTransactionTest(t *testing.T, txnToUse, expectedCode int, enableTransactionSimulator bool) (handler v2.Handlers, c echo.Context, rec *httptest.ResponseRecorder, releasefunc func()) { +func prepareTransactionTest(t *testing.T, txnToUse, expectedCode int) (handler v2.Handlers, c echo.Context, rec *httptest.ResponseRecorder, releasefunc func()) { numAccounts := 5 numTransactions := 5 offlineAccounts := true mockLedger, _, _, stxns, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) dummyShutdownChan := make(chan struct{}) mockNode := makeMockNode(mockLedger, t.Name(), nil, false) - mockNode.config.EnableExperimentalAPI = enableTransactionSimulator handler = v2.Handlers{ Node: mockNode, @@ -681,7 +680,7 @@ func prepareTransactionTest(t *testing.T, txnToUse, expectedCode int, enableTran } func postTransactionTest(t *testing.T, txnToUse, expectedCode int) { - handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode, false) + handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode) defer releasefunc() err := handler.RawTransaction(c) require.NoError(t, err) @@ -696,8 +695,8 @@ func TestPostTransaction(t *testing.T) { postTransactionTest(t, 0, 200) } -func simulateTransactionTest(t *testing.T, txnToUse int, format string, expectedCode int, enableTransactionSimulator bool) { - handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode, enableTransactionSimulator) +func simulateTransactionTest(t *testing.T, txnToUse int, format string, expectedCode int) { + handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode) defer releasefunc() err := 
handler.SimulateTransaction(c, model.SimulateTransactionParams{Format: (*model.SimulateTransactionParamsFormat)(&format)}) require.NoError(t, err) @@ -709,52 +708,29 @@ func TestPostSimulateTransaction(t *testing.T) { t.Parallel() testCases := []struct { - txnIndex int - format string - expectedStatus int - enableTransactionSimulator bool + txnIndex int + format string + expectedStatus int }{ { - txnIndex: -1, - format: "json", - expectedStatus: 400, - enableTransactionSimulator: true, + txnIndex: -1, + format: "json", + expectedStatus: 400, }, { - txnIndex: 0, - format: "json", - expectedStatus: 404, - enableTransactionSimulator: false, + txnIndex: 0, + format: "json", + expectedStatus: 200, }, { - txnIndex: 0, - format: "msgpack", - expectedStatus: 404, - enableTransactionSimulator: false, + txnIndex: 0, + format: "msgpack", + expectedStatus: 200, }, { - txnIndex: 0, - format: "bad format", - expectedStatus: 404, - enableTransactionSimulator: false, - }, - { - txnIndex: 0, - format: "json", - expectedStatus: 200, - enableTransactionSimulator: true, - }, - { - txnIndex: 0, - format: "msgpack", - expectedStatus: 200, - enableTransactionSimulator: true, - }, - { - txnIndex: 0, - format: "bad format", - expectedStatus: 400, - enableTransactionSimulator: true, + txnIndex: 0, + format: "bad format", + expectedStatus: 400, }, } @@ -762,7 +738,7 @@ func TestPostSimulateTransaction(t *testing.T) { testCase := testCase t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) { t.Parallel() - simulateTransactionTest(t, testCase.txnIndex, testCase.format, testCase.expectedStatus, testCase.enableTransactionSimulator) + simulateTransactionTest(t, testCase.txnIndex, testCase.format, testCase.expectedStatus) }) } } @@ -877,7 +853,6 @@ func TestSimulateTransaction(t *testing.T) { defer releasefunc() dummyShutdownChan := make(chan struct{}) mockNode := makeMockNode(mockLedger, t.Name(), nil, false) - mockNode.config.EnableExperimentalAPI = true handler := v2.Handlers{ Node: mockNode, Log: logging.Base(), @@ -1026,7 +1001,6 @@ func TestSimulateTransactionVerificationFailure(t *testing.T) { defer releasefunc() dummyShutdownChan := make(chan struct{}) mockNode := makeMockNode(mockLedger, t.Name(), nil, false) - mockNode.config.EnableExperimentalAPI = true handler := v2.Handlers{ Node: mockNode, Log: logging.Base(), @@ -1844,3 +1818,19 @@ func TestStateproofTransactionForRoundShutsDown(t *testing.T) { _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofIntervalForHandlerTests*2+1), 1000, stoppedChan) a.ErrorIs(err, v2.ErrShutdown) } + +func TestExperimentalCheck(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false) + defer releasefunc() + + // Since we are invoking the method directly, it doesn't matter if EnableExperimentalAPI is true. + // When this is false, the router never even registers this endpoint. 
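+	// (So a 404 from a live node means the flag was off at startup; when it is
+	// registered, ExperimentalCheck unconditionally replies 200 with JSON true.)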
+ err := handler.ExperimentalCheck(c) + require.NoError(t, err) + + require.Equal(t, 200, rec.Code) + require.Equal(t, "true\n", string(rec.Body.Bytes())) +} From c5936d285c36a79b692366c775471027624aea02 Mon Sep 17 00:00:00 2001 From: algobarb <78746954+algobarb@users.noreply.github.com> Date: Wed, 1 Mar 2023 15:01:17 -0500 Subject: [PATCH 64/81] deploy templates: Recipe Changes (#5155) --- .../generate-recipe/generate_network_tpl.py | 202 ++++++++++++++++++ .../recipes/custom/README.md | 25 ++- .../recipes/custom/configs/node.json | 2 +- .../custom/example/npr/five-relays.txt | 20 ++ .../custom/network_templates/five-relays.json | 81 +++++++ .../Makefile | 0 .../recipes/legacy-mainnet-model/README.md | 2 + .../configs/node.json | 0 .../configs/nonPartNode.json | 0 .../configs/relay.json | 0 .../data/bandwidth.json | 0 .../data/countries.json | 0 .../data/latency.json | 0 .../generate_network_rules.js | 0 .../generated/genesis.json | 0 .../generated/net.json | 0 .../generated/topology.json | 0 .../network-tpl.json | 0 .../network_performance_rules | 0 .../recipe.json | 0 20 files changed, 327 insertions(+), 5 deletions(-) create mode 100644 test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py create mode 100644 test/testdata/deployednettemplates/recipes/custom/example/npr/five-relays.txt create mode 100644 test/testdata/deployednettemplates/recipes/custom/network_templates/five-relays.json rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/Makefile (100%) create mode 100644 test/testdata/deployednettemplates/recipes/legacy-mainnet-model/README.md rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/configs/node.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/configs/nonPartNode.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/configs/relay.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/data/bandwidth.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/data/countries.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/data/latency.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/generate_network_rules.js (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/generated/genesis.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/generated/net.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/generated/topology.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/network-tpl.json (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/network_performance_rules (100%) rename test/testdata/deployednettemplates/recipes/{mainnet-model => legacy-mainnet-model}/recipe.json (100%) diff --git a/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py b/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py new file mode 100644 index 0000000000..74083f35bd --- /dev/null +++ b/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 +""" +generate_network_tpl.py + +reads a 
network_performance_rules file and returns a network-tpl.json that can be used by generate_recipe.py. + +v2 format: + + ``` + group1 group2 minrtt + ``` + + > group1 and group2 are referring to individual hosts (e.g. R1 and R2), so there should be one group per host. + +""" +import argparse +import json +import math + +DEFAULT_NUM_N = 5 +DEFAULT_NUM_NPN = 5 +DEFAULT_REGION = 'us-west-1' + +def main(): + args = parse_args() + + # initialize network_tpl with defaults + network_tpl = get_default_network_tpl() + + groups = [] + + if args.network_rules_file is not None: + network_tpl_from_rules = gen_network_tpl_from_rules_v2(args.network_rules_file) + merge(network_tpl_from_rules, network_tpl) + + # write network_tpl to file + with open(args.out, 'w') as out: + out.write(json.dumps(network_tpl, indent=2)) + out.write('\n') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate a network-tpl.json file for generate_network.py' + ) + parser.add_argument('-n', '--network-rules-file', help='Path of network_performance_rules file', required=True) + parser.add_argument('-o', '--out', help='Path to write output', default='network-tpl.json') + return parser.parse_args() + + +def get_default_network_tpl(): + return { + 'network': { + 'wallets': 5 + }, + 'instances': { + 'relays': { + 'config': './configs/relay.json', + 'type': 'c5.xlarge', + 'count': 5 + }, + 'participatingNodes': { + 'config': './configs/node.json', + 'type': 'c5.xlarge', + 'count': 5 + }, + 'nonParticipatingNodes': { + 'config': './configs/nonPartNode.json', + 'type': 'c5.xlarge', + 'count': 5 + } + } + } + +def merge(source, destination): + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = destination.setdefault(key, {}) + merge(value, node) + else: + destination[key] = value + + return destination + + +def gen_network_tpl_from_rules_v2(path): + """ + Loads network_performance_rules v2 file, and generates a network-tpl.json file + @param path: the filesystem path to the network_performance_rules file + @return list of network-tpl.json groups + """ + groups = [] + + with open(path) as network_performance_rules: + npr = network_performance_rules.readlines() + + found = {} + num_relays = 0 + num_npn = 0 + num_n = 0 + + # loop over rules and get counts of types of instances + for rule in npr: + # algonet retrieves groups from terraform-inventory and they are lowercase + name = rule.split(' ')[0].lower() + relays = 0 + nonParticipatingNodes = 0 + participatingNodes = 0 + + if name in found: + continue + + found[name] = None + + if name.startswith('r'): + num_relays += 1 + elif name.startswith('npn'): + num_npn += 1 + else: + num_n += 1 + + # If no participation nodes are defined in the network_performance_rules file, set the default group. + if num_n == 0: + num_n = DEFAULT_NUM_N + group = { + 'name': 'n', + 'region': DEFAULT_REGION, + 'percent': { + 'relays': 0, + 'nonParticipatingNodes': 0, + 'participatingNodes': 100 + } + } + groups.append(group) + + # If no non-participation nodes are defined in the network_performance_rules file, set the default group. 
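+    # (Same shape as the participating-node fallback above: the synthetic
+    # group takes 100% of its node class, and the count falls back to the
+    # DEFAULT_NUM_NPN constant at the top of the file.)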
+ if num_npn == 0: + num_npn = DEFAULT_NUM_NPN + group = { + 'name': 'npn', + 'region': DEFAULT_REGION, + 'percent': { + 'relays': 0, + 'nonParticipatingNodes': 100, + 'participatingNodes': 0 + } + } + groups.append(group) + + for item in found: + group = {'name': item, 'region': DEFAULT_REGION} + percent = {} + if item.startswith('r'): + percent = { + 'relays': math.ceil(1 / num_relays * 100), + 'nonParticipatingNodes': 0, + 'participatingNodes': 0 + } + elif item.startswith('npn'): + percent = { + 'relays': 0, + 'nonParticipatingNodes': math.ceil(1 / num_npn * 100), + 'participatingNodes': 0 + } + else: + percent = { + 'relays': 0, + 'nonParticipatingNodes': 0, + 'participatingNodes': math.ceil(1 / num_n * 100) + } + + group['percent'] = percent + groups.append(group) + + network = { + 'relays': num_relays, + 'nodes': num_n, + 'npn': num_npn + } + + instances = { + 'relays': { + 'count': num_relays + }, + 'participatingNodes': { + 'count': num_n + }, + 'nonParticipatingNodes': { + 'count': num_npn + } + } + + return { + 'network': network, + 'instances': instances, + 'groups': groups + } + + +if __name__ == '__main__': + main() diff --git a/test/testdata/deployednettemplates/recipes/custom/README.md b/test/testdata/deployednettemplates/recipes/custom/README.md index a74784c25d..9b7a76c918 100644 --- a/test/testdata/deployednettemplates/recipes/custom/README.md +++ b/test/testdata/deployednettemplates/recipes/custom/README.md @@ -1,5 +1,7 @@ # Custom Recipe -This custom recipe serves as a template for performance testing on algonet (new network on AWS EC2 machines). With this recipe, you can modify the number of nodes, the type of machines, introduce new parameters to modify the network's configs and consensus parameters. +This custom recipe serves as a template for performance testing on algonet (new network on AWS EC2 machines). +With this recipe, you can modify the number of nodes, the type of machines, introduce new parameters to modify the +network's configs and consensus parameters. N = participating Nodes NPN = Non-Participating Nodes @@ -8,7 +10,8 @@ R = relays ## Running a Small Network (less than 20 total nodes) If you are running a network with less than 20 nodes, then you will need to update the default "FractionApply" 1. Modify `configs/node.json` folder - - `"FractionApply"` in configs/node.json represents the number of nodes to report to telemetry. We don't want to overwhelm the telemetry server, so use something small like "0.2" on a large network. + - `"FractionApply"` in configs/node.json represents the number of nodes to report to telemetry. We don't want to + overwhelm the telemetry server, so use something small like "0.2" on a large network. - For small networks, update this value to "1.0" ## Quick Start - Jenkins @@ -21,16 +24,30 @@ Build and create the recipe. - See Update config.json (below) to update config.json ## "Quick" Start - Manual recipe generation (not using Jenkins) -Generate the recipe with the `network-tpl.json` file +Generate the recipe with the `network-tpl.json` file. You will need netgoal set up in your local environment/path. - (See the first section above for small networks. See Troubleshooting for netgoal path set up) 1. Make sure you're in the same directory as this README and `cp network_templates/network-tpl.json network-tpl.json` 2. 
Generate the recipe with a python script:
```
cd go-algorand
-python3 test/testdata/deployednettemplates/generate-recipe/generate_network.py -f test/testdata/deployednettemplates/recipes/custom/network-tpl.json
+python3 test/testdata/deployednettemplates/generate-recipe/generate_network.py -f test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
```
3. This will create a new set of files in the `generated` folder
+## "Quick" Start - Manual recipe generation based on Network Performance Rules (not using Jenkins)
+If you have a network_performance_rules file in the following format on each line `group1 group2 minrtt`, you can
+first generate a template and then generate the recipe. You will need netgoal set up in your local environment/path.
+1. Generate the template:
+```
+cd go-algorand
+python3 test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py --network-rules-file example/npr/five-relays.txt --out test/testdata/deployednettemplates/recipes/custom/network_templates/five-relays.json
+```
+2. Generate the recipe:
+```
+cp test/testdata/deployednettemplates/recipes/custom/network_templates/five-relays.json test/testdata/deployednettemplates/recipes/custom/.
+python3 test/testdata/deployednettemplates/generate-recipe/generate_network.py -f test/testdata/deployednettemplates/recipes/custom/five-relays.json
+```
+
 ## Network Templates
 With the custom recipe, you can store multiple network templates in the network_templates directory.
 Variables to modify:
diff --git a/test/testdata/deployednettemplates/recipes/custom/configs/node.json b/test/testdata/deployednettemplates/recipes/custom/configs/node.json
index 0b310ce098..4f95d0d051 100644
--- a/test/testdata/deployednettemplates/recipes/custom/configs/node.json
+++ b/test/testdata/deployednettemplates/recipes/custom/configs/node.json
@@ -17,7 +17,7 @@
       "EnableMetrics": true,
       "MetricsURI": "{{MetricsURI}}",
       "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0 }",
-      "FractionApply": 1.0
+      "FractionApply": 0.01
     }
   ]
 }
diff --git a/test/testdata/deployednettemplates/recipes/custom/example/npr/five-relays.txt b/test/testdata/deployednettemplates/recipes/custom/example/npr/five-relays.txt
new file mode 100644
index 0000000000..c1dbf7fd61
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/custom/example/npr/five-relays.txt
@@ -0,0 +1,20 @@
+R1 R2 116818
+R2 R1 116818
+R1 R3 99721
+R3 R1 99721
+R1 R4 70
+R4 R1 70
+R1 R5 865
+R5 R1 865
+R2 R3 65917
+R3 R2 65917
+R2 R4 116664
+R4 R2 116664
+R2 R5 47849
+R5 R2 47849
+R3 R4 95230
+R4 R3 95230
+R3 R5 44367
+R5 R3 44367
+R4 R5 554
+R5 R4 554
diff --git a/test/testdata/deployednettemplates/recipes/custom/network_templates/five-relays.json b/test/testdata/deployednettemplates/recipes/custom/network_templates/five-relays.json
new file mode 100644
index 0000000000..6b0bad3dc8
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/custom/network_templates/five-relays.json
@@ -0,0 +1,81 @@
+{
+  "network": {
+    "wallets": 5,
+    "relays": 5,
+    "nodes": 5,
+    "npn": 5
+  },
+  "instances": {
+    "relays": {
+      "config": "./configs/relay.json",
+      "type": "c5.xlarge",
+      "count": 5
+    },
+
"participatingNodes": { + "config": "./configs/node.json", + "type": "c5.xlarge", + "count": 5 + }, + "nonParticipatingNodes": { + "config": "./configs/nonPartNode.json", + "type": "c5.xlarge", + "count": 5 + } + }, + "groups": [ + { + "name": "r1", + "percent": { + "relays": 20, + "nonParticipatingNodes": 0, + "participatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "r2", + "percent": { + "relays": 20, + "nonParticipatingNodes": 0, + "participatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "r3", + "percent": { + "relays": 20, + "nonParticipatingNodes": 0, + "participatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "r4", + "percent": { + "relays": 20, + "nonParticipatingNodes": 0, + "participatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "r5", + "percent": { + "relays": 20, + "nonParticipatingNodes": 0, + "participatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "n", + "percent": { + "relays": 0, + "nonParticipatingNodes": 100, + "participatingNodes": 100 + }, + "region": "us-west-1" + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/Makefile b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/Makefile similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/Makefile rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/Makefile diff --git a/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/README.md b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/README.md new file mode 100644 index 0000000000..90fca938b3 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/README.md @@ -0,0 +1,2 @@ +## Legacy Mainnet Model +This is a previously used scenario for mainnet model. The recipe in this folder still works, but the network_performance_rules will not. 
diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/configs/node.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/configs/node.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/configs/node.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/configs/node.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/configs/nonPartNode.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/configs/nonPartNode.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/configs/nonPartNode.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/configs/nonPartNode.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/configs/relay.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/configs/relay.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/configs/relay.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/configs/relay.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/data/bandwidth.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/data/bandwidth.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/data/bandwidth.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/data/bandwidth.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/data/countries.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/data/countries.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/data/countries.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/data/countries.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/data/latency.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/data/latency.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/data/latency.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/data/latency.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generate_network_rules.js b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generate_network_rules.js similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/generate_network_rules.js rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generate_network_rules.js diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generated/genesis.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generated/genesis.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/generated/genesis.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generated/genesis.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generated/net.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generated/net.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/generated/net.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generated/net.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generated/topology.json 
b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generated/topology.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/generated/topology.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/generated/topology.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/network-tpl.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/network-tpl.json diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/network_performance_rules b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/network_performance_rules similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/network_performance_rules rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/network_performance_rules diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/recipe.json b/test/testdata/deployednettemplates/recipes/legacy-mainnet-model/recipe.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/mainnet-model/recipe.json rename to test/testdata/deployednettemplates/recipes/legacy-mainnet-model/recipe.json From 09a2c19e95f25d694b64589352cafde0f4123601 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 2 Mar 2023 15:58:29 -0500 Subject: [PATCH 65/81] deploy templates: increase default npn number for mmnet-model (#5178) --- .../generate-recipe/generate_network_tpl.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py b/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py index 74083f35bd..bbb6cbb1fc 100644 --- a/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py +++ b/test/testdata/deployednettemplates/generate-recipe/generate_network_tpl.py @@ -18,7 +18,7 @@ import math DEFAULT_NUM_N = 5 -DEFAULT_NUM_NPN = 5 +DEFAULT_NUM_NPN = 10 DEFAULT_REGION = 'us-west-1' def main(): @@ -176,7 +176,8 @@ def gen_network_tpl_from_rules_v2(path): network = { 'relays': num_relays, 'nodes': num_n, - 'npn': num_npn + 'npn': num_npn, + 'wallets': num_n, } instances = { From 25e8e94b9594ef3faa7535d413785a118f14a83c Mon Sep 17 00:00:00 2001 From: AlgoAxel <113933518+AlgoAxel@users.noreply.github.com> Date: Thu, 2 Mar 2023 16:51:19 -0500 Subject: [PATCH 66/81] refactor: Push test-only functionality of storage interfaces into test-interface. 
(#5175) --- ledger/acctdeltas_test.go | 24 ++--- ledger/acctonline_test.go | 93 +++++++++++++++++++ ledger/acctupdates_test.go | 6 +- ledger/ledger_test.go | 6 +- ledger/store/trackerdb/interface.go | 4 +- .../trackerdb/sqlitedriver/accountsV2.go | 7 ++ .../sqlitedriver/store_sqlite_impl.go | 22 +++++ .../store/trackerdb/sqlitedriver/testing.go | 9 ++ ledger/store/trackerdb/store.go | 20 +--- ledger/store/trackerdb/testinterface.go | 63 +++++++++++++ ledger/txtail_test.go | 2 +- 11 files changed, 216 insertions(+), 40 deletions(-) create mode 100644 ledger/store/trackerdb/testinterface.go diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 1b405dfc1f..829db03086 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -59,7 +59,7 @@ func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round require.NoError(t, err) require.Equal(t, r, rnd) - aor, err := tx.MakeAccountsOptimizedReader() + aor, err := tx.Testing().MakeAccountsOptimizedReader() require.NoError(t, err) var totalOnline, totalOffline, totalNotPart uint64 @@ -83,7 +83,7 @@ func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round } } - all, err := arw.AccountsAllTest() + all, err := arw.Testing().AccountsAllTest() require.NoError(t, err) require.Equal(t, all, accts) @@ -155,12 +155,12 @@ func TestAccountDBInit(t *testing.T) { err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { accts := ledgertesting.RandomAccounts(20, true) - newDB := tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + newDB := tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) require.True(t, newDB) checkAccounts(t, tx, 0, accts) - newDB, err = tx.AccountsInitLightTest(t, accts, proto) + newDB, err = tx.Testing().AccountsInitLightTest(t, accts, proto) require.NoError(t, err) require.False(t, newDB) checkAccounts(t, tx, 0, accts) @@ -219,7 +219,7 @@ func TestAccountDBRound(t *testing.T) { require.NoError(t, err) accts := ledgertesting.RandomAccounts(20, true) - tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) checkAccounts(t, tx, 0, accts) totals, err := arw.AccountsTotals(context.Background(), false) require.NoError(t, err) @@ -297,7 +297,7 @@ func TestAccountDBRound(t *testing.T) { require.NotEmpty(t, updatedOnlineAccts) checkAccounts(t, tx, basics.Round(i), accts) - arw.CheckCreatablesTest(t, i, expectedDbImage) + arw.Testing().CheckCreatablesTest(t, i, expectedDbImage) } // test the accounts totals @@ -372,7 +372,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) { dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { accts := ledgertesting.RandomAccounts(1, true) - tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) addr := ledgertesting.RandomAddress() // lastCreatableID stores asset or app max used index to get rid of conflicts @@ -443,7 +443,7 @@ func TestAccountStorageWithStateProofID(t *testing.T) { dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { accts := ledgertesting.RandomAccounts(20, false) - _ = tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + _ = tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) checkAccounts(t, tx, 0, accts) require.True(t, allAccountsHaveStateProofPKs(accts)) return nil @@ -600,7 +600,7 @@ 
func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A func benchmarkInitBalances(b testing.TB, numAccounts int, tx trackerdb.TransactionScope, proto protocol.ConsensusVersion) (updates map[basics.Address]basics.AccountData) { updates = generateRandomTestingAccountBalances(numAccounts) - tx.AccountsInitTest(b, updates, proto) + tx.Testing().AccountsInitTest(b, updates, proto) return } @@ -626,7 +626,7 @@ func benchmarkReadingAllBalances(b *testing.B, inMemory bool) { b.ResetTimer() // read all the balances in the database. var err2 error - bal, err2 = arw.AccountsAllTest() + bal, err2 = arw.Testing().AccountsAllTest() require.NoError(b, err2) return nil }) @@ -2091,7 +2091,7 @@ func TestAccountOnlineQueries(t *testing.T) { } var accts map[basics.Address]basics.AccountData - tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) + tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion) totals, err := arw.AccountsTotals(context.Background(), false) require.NoError(t, err) @@ -2194,7 +2194,7 @@ func TestAccountOnlineQueries(t *testing.T) { addRound(2, ledgercore.StateDelta{Accts: delta2}) addRound(3, ledgercore.StateDelta{Accts: delta3}) - queries, err := tx.MakeOnlineAccountsOptimizedReader() + queries, err := tx.Testing().MakeOnlineAccountsOptimizedReader() require.NoError(t, err) // check round 1 diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index 98d1c1175b..eeaf434bfb 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -1611,6 +1611,99 @@ func TestAcctOnlineTopBetweenCommitAndPostCommit(t *testing.T) { } } +func TestAcctOnlineTopDBBehindMemRound(t *testing.T) { + partitiontest.PartitionTest(t) + a := require.New(t) + + const numAccts = 20 + allAccts := make([]basics.BalanceRecord, numAccts) + genesisAccts := []map[basics.Address]basics.AccountData{{}} + genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts) + + for i := 0; i < numAccts; i++ { + allAccts[i] = basics.BalanceRecord{ + Addr: ledgertesting.RandomAddress(), + AccountData: basics.AccountData{ + MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)}, + Status: basics.Online, + VoteLastValid: 1000, + VoteFirstValid: 0, + RewardsBase: 0}, + } + genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData + } + addSinkAndPoolAccounts(genesisAccts) + + ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts) + defer ml.Close() + + stallingTracker := &blockingTracker{ + postCommitUnlockedEntryLock: make(chan struct{}), + postCommitUnlockedReleaseLock: make(chan struct{}), + postCommitEntryLock: make(chan struct{}), + postCommitReleaseLock: make(chan struct{}), + alwaysLock: false, + shouldLockPostCommit: false, + } + + conf := config.GetDefaultLocal() + au, oa := newAcctUpdates(t, ml, conf) + defer oa.close() + ml.trackers.trackers = append([]ledgerTracker{stallingTracker}, ml.trackers.trackers...) + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + top, _, err := oa.TopOnlineAccounts(0, 0, 5, &proto, 0) + a.NoError(err) + compareTopAccounts(a, top, allAccts) + + _, totals, err := au.LatestTotals() + require.NoError(t, err) + + // apply some rounds so the db round will make progress (not be 0) - i.e since the max lookback in memory is 8. 
deltas
+	// will get committed at round 9
+	i := 1
+	for ; i < 10; i++ {
+		var updates ledgercore.AccountDeltas
+		updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+			AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+		newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+	}
+
+	stallingTracker.shouldLockPostCommit = true
+
+	updateAccountsRoutine := func() {
+		var updates ledgercore.AccountDeltas
+		updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+			AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+		newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+	}
+
+	// This goroutine will trigger a commit producer. We added a special blockingTracker that will cause our
+	// onlineAccountsTracker to be "stuck" between commit and post-commit.
+	// Thus, when we call onlineTop - it should wait for the post-commit to happen.
+	// In a different goroutine we will wait 2 seconds and release the commit.
+	go updateAccountsRoutine()
+
+	select {
+	case <-stallingTracker.postCommitEntryLock:
+		go func() {
+			time.Sleep(2 * time.Second)
+			// tweak the database to move backwards
+			err = oa.dbs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+				return tx.Testing().ModifyAcctBaseTest()
+			})
+			stallingTracker.postCommitReleaseLock <- struct{}{}
+		}()
+
+		_, _, err = oa.TopOnlineAccounts(2, 2, 5, &proto, 0)
+		a.Error(err)
+		a.Contains(err.Error(), "is behind in-memory round")
+
+	case <-time.After(1 * time.Minute):
+		a.FailNow("timed out while waiting for post commit")
+	}
+}
+
 func TestAcctOnlineTop_ChangeOnlineStake(t *testing.T) {
 	partitiontest.PartitionTest(t)
 	a := require.New(t)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index baa3e8aa81..70b933a92f 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -268,7 +268,7 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address
 		if err != nil {
 			return err
 		}
-		bals, err0 = arw.AccountsAllTest()
+		bals, err0 = arw.Testing().AccountsAllTest()
 		return err0
 	})
 	if err != nil {
@@ -1006,11 +1006,11 @@ func TestListCreatables(t *testing.T) {
 	proto := config.Consensus[protocol.ConsensusCurrentVersion]
 
 	accts := make(map[basics.Address]basics.AccountData)
-	_ = tx.AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
+	_ = tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
 	require.NoError(t, err)
 
 	au := &accountUpdates{}
-	au.accountsq, err = tx.MakeAccountsOptimizedReader()
+	au.accountsq, err = tx.Testing().MakeAccountsOptimizedReader()
 	require.NoError(t, err)
 
 	// ******* All results are obtained from the cache. 
Empty database ******* diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 54a2f60fa7..d0324e249a 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -2304,12 +2304,12 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) { DbPathPrefix: l.catchpoint.dbDirectory, BlockDb: l.blockDBs, } - _, err0 = tx.RunMigrations(ctx, tp, l.log, preReleaseDBVersion /*target database version*/) + _, err0 = tx.Testing().RunMigrations(ctx, tp, l.log, preReleaseDBVersion /*target database version*/) if err0 != nil { return err0 } - if err0 := tx.AccountsUpdateSchemaTest(ctx); err != nil { + if err0 := tx.Testing().AccountsUpdateSchemaTest(ctx); err != nil { return err0 } @@ -2455,7 +2455,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) { }() // create tables so online accounts can still be written err = trackerDB.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error { - if err := tx.AccountsUpdateSchemaTest(ctx); err != nil { + if err := tx.Testing().AccountsUpdateSchemaTest(ctx); err != nil { return err } return nil diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go index e03b6b5c3a..121dcdf211 100644 --- a/ledger/store/trackerdb/interface.go +++ b/ledger/store/trackerdb/interface.go @@ -18,7 +18,6 @@ package trackerdb import ( "context" - "testing" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" @@ -91,14 +90,13 @@ type AccountsReaderExt interface { TotalAccounts(ctx context.Context) (total uint64, err error) TotalKVs(ctx context.Context) (total uint64, err error) AccountsRound() (rnd basics.Round, err error) - AccountsAllTest() (bals map[basics.Address]basics.AccountData, err error) - CheckCreatablesTest(t *testing.T, iteration int, expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable) LookupOnlineAccountDataByAddress(addr basics.Address) (rowid int64, data []byte, err error) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) AccountsOnlineRoundParams() (onlineRoundParamsData []ledgercore.OnlineRoundParamsData, endRound basics.Round, err error) OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error) LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) LoadAllFullAccounts(ctx context.Context, balancesTable string, resourcesTable string, acctCb func(basics.Address, basics.AccountData)) (count int, err error) + Testing() TestAccountsReaderExt } // AccountsReaderWriter is AccountsReader+AccountsWriter diff --git a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go index 7776409346..adee2f1fce 100644 --- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go @@ -56,6 +56,11 @@ func NewAccountsSQLReaderWriter(e db.Executable) *accountsV2ReaderWriter { } } +// Testing returns this reader, exposed as an interface with test functions +func (r *accountsV2Reader) Testing() trackerdb.TestAccountsReaderExt { + return r +} + func (r *accountsV2Reader) getOrPrepare(queryString string) (stmt *sql.Stmt, err error) { // fetch statement (use the query as the key) if stmt, ok := r.preparedStatements[queryString]; ok { @@ -89,6 +94,7 @@ func (r *accountsV2Reader) AccountsTotals(ctx context.Context, catchpointStaging // AccountsAllTest iterates the account table and returns a map of the data 
// It is meant only for testing purposes - it is heavy and has no production use case. +// implements Testing interface func (r *accountsV2Reader) AccountsAllTest() (bals map[basics.Address]basics.AccountData, err error) { rows, err := r.q.Query("SELECT rowid, address, data FROM accountbase") if err != nil { @@ -132,6 +138,7 @@ func (r *accountsV2Reader) AccountsAllTest() (bals map[basics.Address]basics.Acc return } +// implements Testing interface func (r *accountsV2Reader) CheckCreatablesTest(t *testing.T, iteration int, expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable) { diff --git a/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go index cf222c31d0..3696ed37e6 100644 --- a/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go +++ b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go @@ -157,6 +157,11 @@ func (s *trackerSQLStore) Close() { s.pair.Close() } +// Testing returns this scope, exposed as an interface with test functions +func (txs sqlTransactionScope) Testing() trackerdb.TestTransactionScope { + return txs +} + func (txs sqlTransactionScope) MakeCatchpointReaderWriter() (trackerdb.CatchpointReaderWriter, error) { return NewCatchpointSQLReaderWriter(txs.tx), nil } @@ -165,6 +170,7 @@ func (txs sqlTransactionScope) MakeAccountsReaderWriter() (trackerdb.AccountsRea return NewAccountsSQLReaderWriter(txs.tx), nil } +// implements Testing interface func (txs sqlTransactionScope) MakeAccountsOptimizedReader() (trackerdb.AccountsReader, error) { return AccountsInitDbQueries(txs.tx) } @@ -177,6 +183,7 @@ func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedWriter(hasAccounts boo return MakeOnlineAccountsSQLWriter(txs.tx, hasAccounts) } +// implements Testing interface func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedReader() (r trackerdb.OnlineAccountsReader, err error) { return OnlineAccountsInitDbQueries(txs.tx) } @@ -205,14 +212,21 @@ func (txs sqlTransactionScope) ResetTransactionWarnDeadline(ctx context.Context, return db.ResetTransactionWarnDeadline(ctx, txs.tx, deadline) } +// implements Testing interface func (txs sqlTransactionScope) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) { return AccountsInitTest(tb, txs.tx, initAccounts, proto) } +// implements Testing interface func (txs sqlTransactionScope) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) { return AccountsInitLightTest(tb, txs.tx, initAccounts, proto) } +// Testing returns this scope, exposed as an interface with test functions +func (bs sqlBatchScope) Testing() trackerdb.TestBatchScope { + return bs +} + func (bs sqlBatchScope) MakeCatchpointWriter() (trackerdb.CatchpointWriter, error) { return NewCatchpointSQLReaderWriter(bs.tx), nil } @@ -225,6 +239,7 @@ func (bs sqlBatchScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, h return MakeAccountsSQLWriter(bs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables) } +// implements Testing interface func (bs sqlBatchScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) { return RunMigrations(ctx, bs.tx, params, log, targetVersion) } @@ -233,10 +248,17 @@ func (bs sqlBatchScope) ResetTransactionWarnDeadline(ctx context.Context, deadli return 
db.ResetTransactionWarnDeadline(ctx, bs.tx, deadline)
 }
 
+// implements Testing interface
 func (bs sqlBatchScope) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
 	return AccountsInitTest(tb, bs.tx, initAccounts, proto)
 }
 
+// implements Testing interface
+func (bs sqlBatchScope) ModifyAcctBaseTest() error {
+	return modifyAcctBaseTest(bs.tx)
+}
+
+// implements Testing interface
 func (bs sqlBatchScope) AccountsUpdateSchemaTest(ctx context.Context) (err error) {
 	return AccountsUpdateSchemaTest(ctx, bs.tx)
 }
diff --git a/ledger/store/trackerdb/sqlitedriver/testing.go b/ledger/store/trackerdb/sqlitedriver/testing.go
index a1965fac7d..2a96a661b1 100644
--- a/ledger/store/trackerdb/sqlitedriver/testing.go
+++ b/ledger/store/trackerdb/sqlitedriver/testing.go
@@ -50,13 +50,22 @@ func SetDbTrackerTestLogging(t testing.TB, dbs trackerdb.TrackerStore) {
 }
 
 // AccountsInitLightTest initializes an empty database for testing without the extra methods being called.
+// implements Testing interface, test function only
 func AccountsInitLightTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
 	newDB, err := accountsInit(tx, initAccounts, proto)
 	require.NoError(tb, err)
 	return newDB, err
 }
 
+// modifyAcctBaseTest tweaks the database to move backwards.
+// implements Testing interface, test function only
+func modifyAcctBaseTest(tx *sql.Tx) error {
+	_, err := tx.Exec("update acctrounds set rnd = 1 WHERE id='acctbase' ")
+	return err
+}
+
 // AccountsInitTest initializes an empty database for testing.
+// implements Testing interface, test function only
 func AccountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
 	newDB, err := accountsInit(tx, initAccounts, config.Consensus[proto])
 	require.NoError(tb, err)
diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go
index c339736b2b..879f0cf95f 100644
--- a/ledger/store/trackerdb/store.go
+++ b/ledger/store/trackerdb/store.go
@@ -18,13 +18,9 @@ package trackerdb
 
 import (
 	"context"
-	"testing"
 	"time"
 
-	"github.com/algorand/go-algorand/config"
-	"github.com/algorand/go-algorand/data/basics"
 	"github.com/algorand/go-algorand/logging"
-	"github.com/algorand/go-algorand/protocol"
 	"github.com/algorand/go-algorand/util/db"
 )
 
@@ -33,19 +29,14 @@ type BatchScope interface {
 	MakeCatchpointWriter() (CatchpointWriter, error)
 	MakeAccountsWriter() (AccountsWriterExt, error)
 	MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error)
-
-	RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error)
 	ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
-
-	AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool)
-	AccountsUpdateSchemaTest(ctx context.Context) (err error)
+	Testing() TestBatchScope
 }
 
 // SnapshotScope is the read scope to the store. 
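// A typical caller-side use would look roughly like the following (an
// illustrative sketch only; `dbs` is an assumed, already-opened tracker
// store variable, not part of this diff):
//
//	err := dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
//		ar, err := tx.MakeAccountsReader()
//		if err != nil {
//			return err
//		}
//		rnd, err := ar.AccountsRound() // latest round persisted in the account DB
//		_ = rnd
//		return err
//	})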
type SnapshotScope interface {
 	MakeAccountsReader() (AccountsReaderExt, error)
 	MakeCatchpointReader() (CatchpointReader, error)
-
 	MakeCatchpointPendingHashesIterator(hashCount int) CatchpointPendingHashesIter
 }
 
@@ -53,22 +44,15 @@ type SnapshotScope interface {
 type TransactionScope interface {
 	MakeCatchpointReaderWriter() (CatchpointReaderWriter, error)
 	MakeAccountsReaderWriter() (AccountsReaderWriter, error)
-	MakeAccountsOptimizedReader() (AccountsReader, error)
 	MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error)
 	MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error)
-	MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error)
-
 	MakeMerkleCommitter(staging bool) (MerkleCommitter, error)
-
 	MakeOrderedAccountsIter(accountCount int) OrderedAccountsIter
 	MakeKVsIter(ctx context.Context) (KVsIter, error)
 	MakeEncodedAccoutsBatchIter() EncodedAccountsBatchIter
-	RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error)
 	ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
-
-	AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool)
-	AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error)
+	Testing() TestTransactionScope
 }
 
 // BatchFn is the callback lambda used in `Batch`.
diff --git a/ledger/store/trackerdb/testinterface.go b/ledger/store/trackerdb/testinterface.go
new file mode 100644
index 0000000000..961857e874
--- /dev/null
+++ b/ledger/store/trackerdb/testinterface.go
@@ -0,0 +1,63 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package trackerdb
+
+import (
+	"context"
+	"testing"
+
+	"github.com/algorand/go-algorand/config"
+	"github.com/algorand/go-algorand/data/basics"
+	"github.com/algorand/go-algorand/ledger/ledgercore"
+	"github.com/algorand/go-algorand/logging"
+	"github.com/algorand/go-algorand/protocol"
+)
+
+// testinterface.go contains interface extensions specific to testing
+// testing interfaces should be made accessible by calling the Testing() method
+// on the related interface. Example:
+//	testTx := tx.Testing()
+// these can also be inlined:
+//	tx.Testing().AccountsInitTest(...) 
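+//
+// A fuller, illustrative use from a test body (a sketch, not part of this
+// patch; it assumes `dbs`, `accts` and `t` are in scope, as in the ledger
+// tests above):
+//
+//	err := dbs.Transaction(func(ctx context.Context, tx TransactionScope) error {
+//		// test-only helpers hang off Testing(), keeping them out of the
+//		// production TransactionScope interface
+//		tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
+//		return nil
+//	})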
+ +// TestBatchScope is an interface to extend BatchScope with test-only methods +type TestBatchScope interface { + BatchScope + + AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) + AccountsUpdateSchemaTest(ctx context.Context) (err error) + RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error) + ModifyAcctBaseTest() error +} + +// TestTransactionScope is an interface to extend TransactionScope with test-only methods +type TestTransactionScope interface { + TransactionScope + + MakeAccountsOptimizedReader() (AccountsReader, error) + MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error) + AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) + AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) +} + +// TestAccountsReaderExt is an interface to extend AccountsReaderExt with test-only methods +type TestAccountsReaderExt interface { + AccountsReaderExt + + AccountsAllTest() (bals map[basics.Address]basics.AccountData, err error) + CheckCreatablesTest(t *testing.T, iteration int, expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable) +} diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index 4a232ff054..ba4a09755d 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -162,7 +162,7 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse accts := ledgertesting.RandomAccounts(20, true) proto := config.Consensus[protoVersion] - newDB := tx.AccountsInitTest(ts, accts, protoVersion) + newDB := tx.Testing().AccountsInitTest(ts, accts, protoVersion) require.True(ts, newDB) roundData := make([][]byte, 0, proto.MaxTxnLife) From 7fb8ebda3dadcf5fd8a5e95c8cc76d280193c680 Mon Sep 17 00:00:00 2001 From: Ignacio Corderi Date: Tue, 7 Mar 2023 16:46:54 -0300 Subject: [PATCH 67/81] ledger: remove "rowid" from the public interface (#5177) --- ledger/acctdeltas.go | 130 +++++---- ledger/acctdeltas_test.go | 246 ++++++++++-------- ledger/acctonline.go | 2 +- ledger/acctonline_test.go | 40 +-- ledger/acctupdates.go | 12 +- ledger/acctupdates_test.go | 10 +- ledger/applications_test.go | 8 +- ledger/catchpointtracker.go | 4 +- ledger/lruaccts_test.go | 20 +- ledger/lruonlineaccts_test.go | 18 +- ledger/lruresources_test.go | 62 ++--- ledger/store/trackerdb/data.go | 12 +- ledger/store/trackerdb/interface.go | 50 ++-- .../trackerdb/sqlitedriver/accountsV2.go | 32 ++- .../sqlitedriver/orderedAccountsIter.go | 4 +- ledger/store/trackerdb/sqlitedriver/sql.go | 83 ++++-- 16 files changed, 415 insertions(+), 318 deletions(-) diff --git a/ledger/acctdeltas.go b/ledger/acctdeltas.go index ea9b8be850..f1e10d21ce 100644 --- a/ledger/acctdeltas.go +++ b/ledger/acctdeltas.go @@ -238,13 +238,13 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba // baseResources caches deleted entries, and they have addrid = 0 // need to handle this and prevent such entries to be treated as fully resolved baseResourceData, has := baseResources.read(res.Addr, basics.CreatableIndex(res.Aidx)) - existingAcctCacheEntry := has && baseResourceData.Addrid != 0 + existingAcctCacheEntry := has && baseResourceData.AcctRef != nil if existingAcctCacheEntry { newEntry.oldResource = baseResourceData 
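 				// a non-nil AcctRef means the cached entry is backed by a live account row,
 				// so the old resource value is fully resolved and safe to use as-is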
outResourcesDeltas.insert(newEntry) } else { if pad, has := baseAccounts.read(res.Addr); has { - newEntry.oldResource = trackerdb.PersistedResourcesData{Addrid: pad.Rowid} + newEntry.oldResource = trackerdb.PersistedResourcesData{AcctRef: pad.Ref} } newEntry.oldResource.Aidx = basics.CreatableIndex(res.Aidx) outResourcesDeltas.insertMissing(newEntry) @@ -274,13 +274,13 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba } newEntry.newResource.SetAppData(res.Params, res.State) baseResourceData, has := baseResources.read(res.Addr, basics.CreatableIndex(res.Aidx)) - existingAcctCacheEntry := has && baseResourceData.Addrid != 0 + existingAcctCacheEntry := has && baseResourceData.AcctRef != nil if existingAcctCacheEntry { newEntry.oldResource = baseResourceData outResourcesDeltas.insert(newEntry) } else { if pad, has := baseAccounts.read(res.Addr); has { - newEntry.oldResource = trackerdb.PersistedResourcesData{Addrid: pad.Rowid} + newEntry.oldResource = trackerdb.PersistedResourcesData{AcctRef: pad.Ref} } newEntry.oldResource.Aidx = basics.CreatableIndex(res.Aidx) outResourcesDeltas.insertMissing(newEntry) @@ -294,7 +294,7 @@ func makeCompactResourceDeltas(stateDeltas []ledgercore.StateDelta, baseRound ba // resourcesLoadOld updates the entries on the deltas.oldResource map that matches the provided addresses. // The round number of the persistedAccountData is not updated by this function, and the caller is responsible // for populating this field. -func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope, knownAddresses map[basics.Address]int64) (err error) { +func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope, knownAddresses map[basics.Address]trackerdb.AccountRef) (err error) { if len(a.misses) == 0 { return nil } @@ -306,7 +306,7 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope, defer func() { a.misses = nil }() - var addrid int64 + var acctRef trackerdb.AccountRef var aidx basics.CreatableIndex var resDataBuf []byte var ok bool @@ -314,10 +314,10 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope, delta := a.deltas[missIdx] addr := delta.address aidx = delta.oldResource.Aidx - if delta.oldResource.Addrid != 0 { - addrid = delta.oldResource.Addrid - } else if addrid, ok = knownAddresses[addr]; !ok { - addrid, err = arw.LookupAccountRowID(addr) + if delta.oldResource.AcctRef != nil { + acctRef = delta.oldResource.AcctRef + } else if acctRef, ok = knownAddresses[addr]; !ok { + acctRef, err = arw.LookupAccountRowID(addr) if err != nil { if err != sql.ErrNoRows { err = fmt.Errorf("base account cannot be read while processing resource for addr=%s, aidx=%d: %w", addr.String(), aidx, err) @@ -330,23 +330,23 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope, continue } } - resDataBuf, err = arw.LookupResourceDataByAddrID(addrid, aidx) + resDataBuf, err = arw.LookupResourceDataByAddrID(acctRef, aidx) switch err { case nil: if len(resDataBuf) > 0 { - persistedResData := trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx} + persistedResData := trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx} err = protocol.Decode(resDataBuf, &persistedResData.Data) if err != nil { return err } a.updateOld(missIdx, persistedResData) } else { - err = fmt.Errorf("empty resource record: addrid=%d, aidx=%d", addrid, aidx) + err = fmt.Errorf("empty resource record: addrid=%d, aidx=%d", acctRef, aidx) return err } case 
sql.ErrNoRows:
 			// we don't have that account, just return an empty record.
-			a.updateOld(missIdx, trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx})
+			a.updateOld(missIdx, trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx})
 			err = nil
 		default:
 			// unexpected error - let the caller know that we couldn't complete the operation.
@@ -476,11 +476,11 @@ func (a *compactAccountDeltas) accountsLoadOld(tx trackerdb.TransactionScope) (e
 	}()
 	for _, idx := range a.misses {
 		addr := a.deltas[idx].address
-		rowid, acctDataBuf, err := arw.LookupAccountDataByAddress(addr)
+		ref, acctDataBuf, err := arw.LookupAccountDataByAddress(addr)
 		switch err {
 		case nil:
 			if len(acctDataBuf) > 0 {
-				persistedAcctData := &trackerdb.PersistedAccountData{Addr: addr, Rowid: rowid}
+				persistedAcctData := &trackerdb.PersistedAccountData{Addr: addr, Ref: ref}
 				err = protocol.Decode(acctDataBuf, &persistedAcctData.AccountData)
 				if err != nil {
 					return err
@@ -488,7 +488,7 @@ func (a *compactAccountDeltas) accountsLoadOld(tx trackerdb.TransactionScope) (e
 				a.updateOld(idx, *persistedAcctData)
 			} else {
 				// to retain backward compatibility, we will treat this condition as if we don't have the account.
-				a.updateOld(idx, trackerdb.PersistedAccountData{Addr: addr, Rowid: rowid})
+				a.updateOld(idx, trackerdb.PersistedAccountData{Addr: addr, Ref: ref})
 			}
 		case sql.ErrNoRows:
 			// we don't have that account, just return an empty record.
@@ -616,11 +616,11 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx trackerdb.TransactionSco
 	}()
 	for _, idx := range a.misses {
 		addr := a.deltas[idx].address
-		rowid, acctDataBuf, err := arw.LookupOnlineAccountDataByAddress(addr)
+		ref, acctDataBuf, err := arw.LookupOnlineAccountDataByAddress(addr)
 		switch err {
 		case nil:
 			if len(acctDataBuf) > 0 {
-				persistedAcctData := &trackerdb.PersistedOnlineAccountData{Addr: addr, Rowid: rowid}
+				persistedAcctData := &trackerdb.PersistedOnlineAccountData{Addr: addr, Ref: ref}
 				err = protocol.Decode(acctDataBuf, &persistedAcctData.AccountData)
 				if err != nil {
 					return err
@@ -628,7 +628,7 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx trackerdb.TransactionSco
 				a.updateOld(idx, *persistedAcctData)
 			} else {
 				// empty data means offline account
-				a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr, Rowid: rowid})
+				a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr, Ref: ref})
 			}
 		case sql.ErrNoRows:
 			// we don't have that account, just return an empty record.
@@ -748,10 +748,10 @@ func accountsNewRoundImpl(
 ) (updatedAccounts []trackerdb.PersistedAccountData, updatedResources map[basics.Address][]trackerdb.PersistedResourcesData, updatedKVs map[string]trackerdb.PersistedKVData, err error) {
 	updatedAccounts = make([]trackerdb.PersistedAccountData, updates.len())
 	updatedAccountIdx := 0
-	newAddressesRowIDs := make(map[basics.Address]int64)
+	newAddressesRowIDs := make(map[basics.Address]trackerdb.AccountRef)
 	for i := 0; i < updates.len(); i++ {
 		data := updates.getByIdx(i)
-		if data.oldAcct.Rowid == 0 {
+		if data.oldAcct.Ref == nil {
 			// zero rowid means we don't have a previous value.
 			if data.newAcct.IsEmpty() {
 				// IsEmpty means we don't have a previous value. Note, can't use newAcct.MsgIsZero
 				// if we didn't have it before, and we don't have anything now, just skip it.
 			} else {
 				// create a new entry. 
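 				// InsertAccount now hands back an opaque trackerdb.AccountRef in place of the
 				// old raw sqlite rowid; the ref is cached below so that resource writes for
 				// this new address can be resolved within the same commit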
- var rowid int64 normBalance := data.newAcct.NormalizedOnlineBalance(proto) - rowid, err = writer.InsertAccount(data.address, normBalance, data.newAcct) + ref, err := writer.InsertAccount(data.address, normBalance, data.newAcct) if err == nil { - updatedAccounts[updatedAccountIdx].Rowid = rowid + updatedAccounts[updatedAccountIdx].Ref = ref updatedAccounts[updatedAccountIdx].AccountData = data.newAcct - newAddressesRowIDs[data.address] = rowid + newAddressesRowIDs[data.address] = ref } } } else { @@ -773,25 +772,25 @@ func accountsNewRoundImpl( if data.newAcct.IsEmpty() { // new value is zero, which means we need to delete the current value. var rowsAffected int64 - rowsAffected, err = writer.DeleteAccount(data.oldAcct.Rowid) + rowsAffected, err = writer.DeleteAccount(data.oldAcct.Ref) if err == nil { // we deleted the entry successfully. - updatedAccounts[updatedAccountIdx].Rowid = 0 + updatedAccounts[updatedAccountIdx].Ref = nil updatedAccounts[updatedAccountIdx].AccountData = trackerdb.BaseAccountData{} if rowsAffected != 1 { - err = fmt.Errorf("failed to delete accountbase row for account %v, rowid %d", data.address, data.oldAcct.Rowid) + err = fmt.Errorf("failed to delete accountbase row for account %v, rowid %d", data.address, data.oldAcct.Ref) } } } else { var rowsAffected int64 normBalance := data.newAcct.NormalizedOnlineBalance(proto) - rowsAffected, err = writer.UpdateAccount(data.oldAcct.Rowid, normBalance, data.newAcct) + rowsAffected, err = writer.UpdateAccount(data.oldAcct.Ref, normBalance, data.newAcct) if err == nil { // rowid doesn't change on update. - updatedAccounts[updatedAccountIdx].Rowid = data.oldAcct.Rowid + updatedAccounts[updatedAccountIdx].Ref = data.oldAcct.Ref updatedAccounts[updatedAccountIdx].AccountData = data.newAcct if rowsAffected != 1 { - err = fmt.Errorf("failed to update accountbase row for account %v, rowid %d", data.address, data.oldAcct.Rowid) + err = fmt.Errorf("failed to update accountbase row for account %v, rowid %d", data.address, data.oldAcct.Ref) } } } @@ -818,21 +817,21 @@ func accountsNewRoundImpl( // that at all times there are no two representations of the same entry in the resources table. 
// ( which would trigger a constrain violation ) type resourceKey struct { - addrid int64 - aidx basics.CreatableIndex + acctRef trackerdb.AccountRef + aidx basics.CreatableIndex } var pendingResourcesDeletion map[resourceKey]struct{} // map to indicate which resources need to be deleted for i := 0; i < resources.len(); i++ { data := resources.getByIdx(i) - if data.oldResource.Addrid == 0 || data.oldResource.Data.IsEmpty() || !data.newResource.IsEmpty() { + if data.oldResource.AcctRef == nil || data.oldResource.Data.IsEmpty() || !data.newResource.IsEmpty() { continue } if pendingResourcesDeletion == nil { pendingResourcesDeletion = make(map[resourceKey]struct{}) } - pendingResourcesDeletion[resourceKey{addrid: data.oldResource.Addrid, aidx: data.oldResource.Aidx}] = struct{}{} + pendingResourcesDeletion[resourceKey{acctRef: data.oldResource.AcctRef, aidx: data.oldResource.Aidx}] = struct{}{} - entry := trackerdb.PersistedResourcesData{Addrid: 0, Aidx: data.oldResource.Aidx, Data: trackerdb.MakeResourcesData(0), Round: lastUpdateRound} + entry := trackerdb.PersistedResourcesData{AcctRef: nil, Aidx: data.oldResource.Aidx, Data: trackerdb.MakeResourcesData(0), Round: lastUpdateRound} deltas := updatedResources[data.address] deltas = append(deltas, entry) updatedResources[data.address] = deltas @@ -842,15 +841,15 @@ func accountsNewRoundImpl( data := resources.getByIdx(i) addr := data.address aidx := data.oldResource.Aidx - addrid := data.oldResource.Addrid - if addrid == 0 { + acctRef := data.oldResource.AcctRef + if acctRef == nil { // new entry, data.oldResource does not have addrid // check if this delta is part of in-memory only account // that is created, funded, transferred, and closed within a commit range inMemEntry := data.oldResource.Data.IsEmpty() && data.newResource.IsEmpty() - addrid = newAddressesRowIDs[addr] - if addrid == 0 && !inMemEntry { - err = fmt.Errorf("cannot resolve address %s (%d), aidx %d, data %v", addr.String(), addrid, aidx, data.newResource) + acctRef = newAddressesRowIDs[addr] + if acctRef == nil && !inMemEntry { + err = fmt.Errorf("cannot resolve address %s (%d), aidx %d, data %v", addr.String(), acctRef, aidx, data.newResource) return } } @@ -863,33 +862,33 @@ func accountsNewRoundImpl( // if we didn't had it before, and we don't have anything now, just skip it. // set zero addrid to mark this entry invalid for subsequent addr to addrid resolution // because the base account might gone. - entry = trackerdb.PersistedResourcesData{Addrid: 0, Aidx: aidx, Data: trackerdb.MakeResourcesData(0), Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{AcctRef: nil, Aidx: aidx, Data: trackerdb.MakeResourcesData(0), Round: lastUpdateRound} } else { // create a new entry. if !data.newResource.IsApp() && !data.newResource.IsAsset() { - err = fmt.Errorf("unknown creatable for addr %v (%d), aidx %d, data %v", addr, addrid, aidx, data.newResource) + err = fmt.Errorf("unknown creatable for addr %v (%d), aidx %d, data %v", addr, acctRef, aidx, data.newResource) return } // check if we need to "upgrade" this insert operation into an update operation due to a scheduled // delete operation of the same resource. - if _, pendingDeletion := pendingResourcesDeletion[resourceKey{addrid: addrid, aidx: aidx}]; pendingDeletion { + if _, pendingDeletion := pendingResourcesDeletion[resourceKey{acctRef: acctRef, aidx: aidx}]; pendingDeletion { // yes - we've had this entry being deleted and re-created in the same commit range. 
This means that we can safely // update the database entry instead of deleting + inserting. - delete(pendingResourcesDeletion, resourceKey{addrid: addrid, aidx: aidx}) + delete(pendingResourcesDeletion, resourceKey{acctRef: acctRef, aidx: aidx}) var rowsAffected int64 - rowsAffected, err = writer.UpdateResource(addrid, aidx, data.newResource) + rowsAffected, err = writer.UpdateResource(acctRef, aidx, data.newResource) if err == nil { // rowid doesn't change on update. - entry = trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} if rowsAffected != 1 { - err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, addrid, aidx) + err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, acctRef, aidx) } } } else { - _, err = writer.InsertResource(addrid, aidx, data.newResource) + _, err = writer.InsertResource(acctRef, aidx, data.newResource) if err == nil { // set the returned persisted account states so that we could store that as the baseResources in commitRound - entry = trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} } } } @@ -901,16 +900,16 @@ func accountsNewRoundImpl( continue } else { if !data.newResource.IsApp() && !data.newResource.IsAsset() { - err = fmt.Errorf("unknown creatable for addr %v (%d), aidx %d, data %v", addr, addrid, aidx, data.newResource) + err = fmt.Errorf("unknown creatable for addr %v (%d), aidx %d, data %v", addr, acctRef, aidx, data.newResource) return } var rowsAffected int64 - rowsAffected, err = writer.UpdateResource(addrid, aidx, data.newResource) + rowsAffected, err = writer.UpdateResource(acctRef, aidx, data.newResource) if err == nil { // rowid doesn't change on update. - entry = trackerdb.PersistedResourcesData{Addrid: addrid, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} + entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound} if rowsAffected != 1 { - err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, addrid, aidx) + err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, acctRef, aidx) } } } @@ -929,13 +928,13 @@ func accountsNewRoundImpl( for delRes := range pendingResourcesDeletion { // new value is zero, which means we need to delete the current value. var rowsAffected int64 - rowsAffected, err = writer.DeleteResource(delRes.addrid, delRes.aidx) + rowsAffected, err = writer.DeleteResource(delRes.acctRef, delRes.aidx) if err == nil { // we deleted the entry successfully. // set zero addrid to mark this entry invalid for subsequent addr to addrid resolution // because the base account might gone. if rowsAffected != 1 { - err = fmt.Errorf("failed to delete resources row (%d), aidx %d", delRes.addrid, delRes.aidx) + err = fmt.Errorf("failed to delete resources row (%d), aidx %d", delRes.acctRef, delRes.aidx) } } if err != nil { @@ -990,7 +989,7 @@ func onlineAccountsNewRoundImpl( newAcct := data.newAcct[j] updRound := data.updRound[j] newStatus := data.newStatus[j] - if prevAcct.Rowid == 0 { + if prevAcct.Ref == nil { // zero rowid means we don't have a previous value. 
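 				// (with opaque references, a nil Ref now plays the role the zero rowid used to)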
if newAcct.IsEmpty() { // IsEmpty means we don't have a previous value. @@ -1001,15 +1000,14 @@ func onlineAccountsNewRoundImpl( err = fmt.Errorf("empty voting data for online account %s: %v", data.address.String(), newAcct) } else { // create a new entry. - var rowid int64 normBalance := newAcct.NormalizedOnlineBalance(proto) - rowid, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) + ref, err := writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) if err == nil { updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, AccountData: newAcct, Round: lastUpdateRound, - Rowid: rowid, + Ref: ref, UpdRound: basics.Round(updRound), } updatedAccounts = append(updatedAccounts, updated) @@ -1027,14 +1025,13 @@ func onlineAccountsNewRoundImpl( if newStatus == basics.Online { err = fmt.Errorf("empty voting data but online account %s: %v", data.address.String(), newAcct) } else { - var rowid int64 - rowid, err = writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0) + ref, err := writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0) if err == nil { updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, AccountData: trackerdb.BaseOnlineAccountData{}, Round: lastUpdateRound, - Rowid: rowid, + Ref: ref, UpdRound: basics.Round(updRound), } @@ -1044,15 +1041,14 @@ func onlineAccountsNewRoundImpl( } } else { if prevAcct.AccountData != newAcct { - var rowid int64 normBalance := newAcct.NormalizedOnlineBalance(proto) - rowid, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) + ref, err := writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) if err == nil { updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, AccountData: newAcct, Round: lastUpdateRound, - Rowid: rowid, + Ref: ref, UpdRound: basics.Round(updRound), } diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 829db03086..78de0b7fa1 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -261,9 +261,9 @@ func TestAccountDBRound(t *testing.T) { err = updatesOnlineCnt.accountsLoadOld(tx) require.NoError(t, err) - knownAddresses := make(map[basics.Address]int64) + knownAddresses := make(map[basics.Address]trackerdb.AccountRef) for _, delta := range updatesCnt.deltas { - knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid + knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Ref } err = resourceUpdatesCnt.resourcesLoadOld(tx, knownAddresses) @@ -405,9 +405,9 @@ func TestAccountDBInMemoryAcct(t *testing.T) { err := outAccountDeltas.accountsLoadOld(tx) require.NoError(t, err) - knownAddresses := make(map[basics.Address]int64) + knownAddresses := make(map[basics.Address]trackerdb.AccountRef) for _, delta := range outAccountDeltas.deltas { - knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid + knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Ref } err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses) @@ -423,7 +423,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) { require.Equal(t, 1, len(updatesResources[addr])) // we store empty even for deleted resources require.Equal(t, - trackerdb.PersistedResourcesData{Addrid: 0, Aidx: 100, Data: trackerdb.MakeResourcesData(0), Round: basics.Round(lastRound)}, + trackerdb.PersistedResourcesData{AcctRef: nil, Aidx: 100, Data: 
trackerdb.MakeResourcesData(0), Round: basics.Round(lastRound)}, updatesResources[addr][0], ) @@ -1240,7 +1240,7 @@ func TestCompactResourceDeltas(t *testing.T) { a.Equal(addr, data.address) a.Equal(sample2, data) - old1 := trackerdb.PersistedResourcesData{Addrid: 111, Aidx: 1, Data: trackerdb.ResourcesData{Total: 789}} + old1 := trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{111}, Aidx: 1, Data: trackerdb.ResourcesData{Total: 789}} ad.upsertOld(addr, old1) a.Equal(1, ad.len()) data = ad.getByIdx(0) @@ -1248,7 +1248,7 @@ func TestCompactResourceDeltas(t *testing.T) { a.Equal(resourceDelta{newResource: sample2.newResource, oldResource: old1, address: addr}, data) addr1 := ledgertesting.RandomAddress() - old2 := trackerdb.PersistedResourcesData{Addrid: 222, Aidx: 2, Data: trackerdb.ResourcesData{Total: 789}} + old2 := trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{222}, Aidx: 2, Data: trackerdb.ResourcesData{Total: 789}} ad.upsertOld(addr1, old2) a.Equal(2, ad.len()) data = ad.getByIdx(0) @@ -1282,35 +1282,39 @@ func TestCompactResourceDeltas(t *testing.T) { func TestLookupAccountAddressFromAddressID(t *testing.T) { partitiontest.PartitionTest(t) - dbs, _ := storetesting.DbOpenTest(t, true) - storetesting.SetDbLogging(t, dbs) + dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true) + dbs.SetLogger(logging.TestingLog(t)) defer dbs.Close() addrs := make([]basics.Address, 100) for i := range addrs { addrs[i] = ledgertesting.RandomAddress() } - addrsids := make(map[basics.Address]int64) - err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { - sqlitedriver.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion) + addrsids := make(map[basics.Address]trackerdb.AccountRef) + err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + tx.Testing().AccountsInitTest(t, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion) + + aw, err := tx.MakeAccountsOptimizedWriter(true, false, false, false) + if err != nil { + return err + } for i := range addrs { - res, err := tx.ExecContext(ctx, "INSERT INTO accountbase (address, data) VALUES (?, ?)", addrs[i][:], []byte{12, 3, 4}) + ref, err := aw.InsertAccount(addrs[i], 0, trackerdb.BaseAccountData{}) if err != nil { return err } - rowid, err := res.LastInsertId() - if err != nil { - return err - } - addrsids[addrs[i]] = rowid + addrsids[addrs[i]] = ref } return nil }) require.NoError(t, err) - err = dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { - arw := sqlitedriver.NewAccountsSQLReaderWriter(tx) + err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + arw, err := tx.MakeAccountsReaderWriter() + if err != nil { + return nil + } for addr, addrid := range addrsids { retAddr, err := arw.LookupAccountAddressFromAddressID(ctx, addrid) @@ -1322,7 +1326,7 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) { } } // test fail case: - retAddr, err := arw.LookupAccountAddressFromAddressID(ctx, -1) + retAddr, err := arw.LookupAccountAddressFromAddressID(ctx, nil) if !errors.Is(err, sql.ErrNoRows) { return fmt.Errorf("unexpected error : %w", err) @@ -1336,37 +1340,48 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) { } type mockResourcesKey struct { - addrid int64 - aidx basics.CreatableIndex + acctRef trackerdb.AccountRef + aidx basics.CreatableIndex } type mockAccountWriter struct { // rowid to data - accounts map[int64]ledgercore.AccountData + accounts 
map[trackerdb.AccountRef]ledgercore.AccountData // addr to rowid - addresses map[basics.Address]int64 + addresses map[basics.Address]trackerdb.AccountRef // rowid to addr - rowids map[int64]basics.Address + rowids map[trackerdb.AccountRef]basics.Address resources map[mockResourcesKey]ledgercore.AccountResource kvStore map[string][]byte - lastRowid int64 - availRowIds []int64 + lastAcctRef int64 + availAcctRefs []trackerdb.AccountRef } +// mockEntryRef is to be used exclusively with mock implementations +// any attempt to pass this ref to an actual db implementation during a test will result in a runtime error. +type mockEntryRef struct { + id int64 +} + +func (ref mockEntryRef) AccountRefMarker() {} +func (ref mockEntryRef) OnlineAccountRefMarker() {} +func (ref mockEntryRef) ResourceRefMarker() {} +func (ref mockEntryRef) CreatableRefMarker() {} + func makeMockAccountWriter() (m mockAccountWriter) { - m.accounts = make(map[int64]ledgercore.AccountData) + m.accounts = make(map[trackerdb.AccountRef]ledgercore.AccountData) m.resources = make(map[mockResourcesKey]ledgercore.AccountResource) - m.addresses = make(map[basics.Address]int64) - m.rowids = make(map[int64]basics.Address) + m.addresses = make(map[basics.Address]trackerdb.AccountRef) + m.rowids = make(map[trackerdb.AccountRef]basics.Address) return } func (m mockAccountWriter) clone() (m2 mockAccountWriter) { - m2.accounts = make(map[int64]ledgercore.AccountData, len(m.accounts)) + m2.accounts = make(map[trackerdb.AccountRef]ledgercore.AccountData, len(m.accounts)) m2.resources = make(map[mockResourcesKey]ledgercore.AccountResource, len(m.resources)) - m2.addresses = make(map[basics.Address]int64, len(m.resources)) - m2.rowids = make(map[int64]basics.Address, len(m.rowids)) + m2.addresses = make(map[basics.Address]trackerdb.AccountRef, len(m.resources)) + m2.rowids = make(map[trackerdb.AccountRef]basics.Address, len(m.rowids)) for k, v := range m.accounts { m2.accounts[k] = v } @@ -1379,67 +1394,67 @@ func (m mockAccountWriter) clone() (m2 mockAccountWriter) { for k, v := range m.rowids { m2.rowids[k] = v } - m2.lastRowid = m.lastRowid - m2.availRowIds = m.availRowIds + m2.lastAcctRef = m.lastAcctRef + m2.availAcctRefs = m.availAcctRefs return m2 } -func (m *mockAccountWriter) nextRowid() (rowid int64) { - if len(m.availRowIds) > 0 { - rowid = m.availRowIds[len(m.availRowIds)-1] - m.availRowIds = m.availRowIds[:len(m.availRowIds)-1] +func (m *mockAccountWriter) nextAcctRef() (ref trackerdb.AccountRef) { + if len(m.availAcctRefs) > 0 { + ref = m.availAcctRefs[len(m.availAcctRefs)-1] + m.availAcctRefs = m.availAcctRefs[:len(m.availAcctRefs)-1] } else { - m.lastRowid++ - rowid = m.lastRowid + m.lastAcctRef++ + ref = mockEntryRef{m.lastAcctRef} } return } func (m *mockAccountWriter) setAccount(addr basics.Address, data ledgercore.AccountData) { - var rowid int64 + var acctRef trackerdb.AccountRef var ok bool - if rowid, ok = m.addresses[addr]; !ok { - rowid = m.nextRowid() - m.rowids[rowid] = addr - m.addresses[addr] = rowid + if acctRef, ok = m.addresses[addr]; !ok { + acctRef = m.nextAcctRef() + m.rowids[acctRef] = addr + m.addresses[addr] = acctRef } - m.accounts[rowid] = data + m.accounts[acctRef] = data } func (m *mockAccountWriter) setResource(addr basics.Address, cidx basics.CreatableIndex, data ledgercore.AccountResource) error { - var rowid int64 + var acctRef trackerdb.AccountRef var ok bool - if rowid, ok = m.addresses[addr]; !ok { + if acctRef, ok = m.addresses[addr]; !ok { return fmt.Errorf("account %s does not exist", 
addr.String()) } - key := mockResourcesKey{rowid, cidx} + key := mockResourcesKey{acctRef, cidx} m.resources[key] = data return nil } func (m *mockAccountWriter) Lookup(addr basics.Address) (pad trackerdb.PersistedAccountData, ok bool, err error) { - rowid, ok := m.addresses[addr] + ref, ok := m.addresses[addr] if !ok { return } - data, ok := m.accounts[rowid] + data, ok := m.accounts[ref] if !ok { err = fmt.Errorf("not found %s", addr.String()) return } pad.AccountData.SetCoreAccountData(&data) pad.Addr = addr - pad.Rowid = rowid + pad.Ref = ref return } func (m *mockAccountWriter) LookupResource(addr basics.Address, cidx basics.CreatableIndex) (prd trackerdb.PersistedResourcesData, ok bool, err error) { - rowid, ok := m.addresses[addr] + acctRef, ok := m.addresses[addr] if !ok { return } - res, ok := m.resources[mockResourcesKey{rowid, cidx}] + res, ok := m.resources[mockResourcesKey{acctRef, cidx}] if !ok { err = fmt.Errorf("not found (%s, %d)", addr.String(), cidx) return @@ -1456,67 +1471,67 @@ func (m *mockAccountWriter) LookupResource(addr basics.Address, cidx basics.Crea if res.AssetParams != nil { prd.Data.SetAssetParams(*res.AssetParams, prd.Data.IsHolding()) } - prd.Addrid = rowid + prd.AcctRef = acctRef prd.Aidx = cidx return } -func (m *mockAccountWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (rowid int64, err error) { - rowid, ok := m.addresses[addr] +func (m *mockAccountWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (ref trackerdb.AccountRef, err error) { + ref, ok := m.addresses[addr] if ok { - err = fmt.Errorf("insertAccount: addr %s, rowid %d: UNIQUE constraint failed", addr.String(), rowid) + err = fmt.Errorf("insertAccount: addr %s, rowid %d: UNIQUE constraint failed", addr.String(), ref) return } - rowid = m.nextRowid() - m.addresses[addr] = rowid - m.rowids[rowid] = addr - m.accounts[rowid] = data.GetLedgerCoreAccountData() + ref = m.nextAcctRef() + m.addresses[addr] = ref + m.rowids[ref] = addr + m.accounts[ref] = data.GetLedgerCoreAccountData() return } -func (m *mockAccountWriter) DeleteAccount(rowid int64) (rowsAffected int64, err error) { +func (m *mockAccountWriter) DeleteAccount(ref trackerdb.AccountRef) (rowsAffected int64, err error) { var addr basics.Address var ok bool - if addr, ok = m.rowids[rowid]; !ok { + if addr, ok = m.rowids[ref]; !ok { return 0, nil } delete(m.addresses, addr) - delete(m.rowids, rowid) - delete(m.accounts, rowid) - m.availRowIds = append(m.availRowIds, rowid) + delete(m.rowids, ref) + delete(m.accounts, ref) + m.availAcctRefs = append(m.availAcctRefs, ref) return 1, nil } -func (m *mockAccountWriter) UpdateAccount(rowid int64, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) { - if _, ok := m.rowids[rowid]; !ok { - return 0, fmt.Errorf("updateAccount: not found rowid %d", rowid) +func (m *mockAccountWriter) UpdateAccount(ref trackerdb.AccountRef, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) { + if _, ok := m.rowids[ref]; !ok { + return 0, fmt.Errorf("updateAccount: not found rowid %d", ref) } - old, ok := m.accounts[rowid] + old, ok := m.accounts[ref] if !ok { - return 0, fmt.Errorf("updateAccount: not found data for %d", rowid) + return 0, fmt.Errorf("updateAccount: not found data for %d", ref) } if old == data.GetLedgerCoreAccountData() { return 0, nil } - m.accounts[rowid] = data.GetLedgerCoreAccountData() + m.accounts[ref] = data.GetLedgerCoreAccountData() 
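	// mirror sql.Result semantics: report exactly one updated row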
return 1, nil } -func (m *mockAccountWriter) InsertResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowid int64, err error) { - key := mockResourcesKey{addrid, aidx} +func (m *mockAccountWriter) InsertResource(acctRef trackerdb.AccountRef, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (ref trackerdb.ResourceRef, err error) { + key := mockResourcesKey{acctRef, aidx} if _, ok := m.resources[key]; ok { - return 0, fmt.Errorf("insertResource: (%d, %d): UNIQUE constraint failed", addrid, aidx) + return nil, fmt.Errorf("insertResource: (%d, %d): UNIQUE constraint failed", acctRef, aidx) } // use persistedResourcesData.AccountResource for conversion prd := trackerdb.PersistedResourcesData{Data: data} new := prd.AccountResource() m.resources[key] = new - return 1, nil + return mockEntryRef{1}, nil } -func (m *mockAccountWriter) DeleteResource(addrid int64, aidx basics.CreatableIndex) (rowsAffected int64, err error) { - key := mockResourcesKey{addrid, aidx} +func (m *mockAccountWriter) DeleteResource(acctRef trackerdb.AccountRef, aidx basics.CreatableIndex) (rowsAffected int64, err error) { + key := mockResourcesKey{acctRef, aidx} if _, ok := m.resources[key]; !ok { return 0, nil } @@ -1524,11 +1539,11 @@ func (m *mockAccountWriter) DeleteResource(addrid int64, aidx basics.CreatableIn return 1, nil } -func (m *mockAccountWriter) UpdateResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) { - key := mockResourcesKey{addrid, aidx} +func (m *mockAccountWriter) UpdateResource(acctRef trackerdb.AccountRef, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) { + key := mockResourcesKey{acctRef, aidx} old, ok := m.resources[key] if !ok { - return 0, fmt.Errorf("updateResource: not found (%d, %d)", addrid, aidx) + return 0, fmt.Errorf("updateResource: not found (%d, %d)", acctRef, aidx) } // use persistedResourcesData.AccountResource for conversion prd := trackerdb.PersistedResourcesData{Data: data} @@ -1550,8 +1565,8 @@ func (m *mockAccountWriter) DeleteKvPair(key string) error { return nil } -func (m *mockAccountWriter) InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) { - return 0, fmt.Errorf("insertCreatable: not implemented") +func (m *mockAccountWriter) InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (ref trackerdb.CreatableRef, err error) { + return nil, fmt.Errorf("insertCreatable: not implemented") } func (m *mockAccountWriter) DeleteCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType) (rowsAffected int64, err error) { @@ -1890,7 +1905,7 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) { matches := 0 for _, upd := range updatedAccounts { if addressesToCheck[upd.Addr] { - a.Equal(int64(0), upd.Rowid) + a.Nil(upd.Ref) a.Empty(upd.AccountData) matches++ } @@ -1900,7 +1915,7 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) { for addr := range addressesToCheck { upd := updatedResources[addr] a.Equal(1, len(upd)) - a.Equal(int64(0), upd[0].Addrid) + a.Nil(upd[0].AcctRef) a.Equal(basics.CreatableIndex(aidx), upd[0].Aidx) a.Equal(trackerdb.MakeResourcesData(uint64(0)), upd[0].Data) } @@ -2162,7 +2177,7 @@ func TestAccountOnlineQueries(t *testing.T) { delta3.Upsert(addrB, dataB2) delta3.Upsert(addrC, dataC3) - addRound := func(rnd basics.Round, updates ledgercore.StateDelta) { + addRound := func(rnd basics.Round, updates 
ledgercore.StateDelta) (updatedOnlineAccts []trackerdb.PersistedOnlineAccountData) { totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates.Accts, 0, proto, accts, totals) accts = applyPartialDeltas(accts, updates.Accts) @@ -2182,17 +2197,37 @@ func TestAccountOnlineQueries(t *testing.T) { require.NoError(t, err) require.Equal(t, updatesCnt.len(), len(updatedAccts)) - updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, rnd) + updatedOnlineAccts, err = onlineAccountsNewRound(tx, updatesOnlineCnt, proto, rnd) require.NoError(t, err) require.NotEmpty(t, updatedOnlineAccts) err = arw.UpdateAccountsRound(rnd) require.NoError(t, err) + + return } - addRound(1, ledgercore.StateDelta{Accts: delta1}) - addRound(2, ledgercore.StateDelta{Accts: delta2}) - addRound(3, ledgercore.StateDelta{Accts: delta3}) + // add round 1 + round1poads := addRound(1, ledgercore.StateDelta{Accts: delta1}) + require.Equal(t, 2, len(round1poads)) + require.Equal(t, addrA, round1poads[0].Addr) + require.Equal(t, addrB, round1poads[1].Addr) + refoaA1 := round1poads[0].Ref + refoaB1 := round1poads[1].Ref + + // add round 2 + round2poads := addRound(2, ledgercore.StateDelta{Accts: delta2}) + require.Equal(t, 1, len(round2poads)) + require.Equal(t, addrA, round2poads[0].Addr) + refoaA2 := round2poads[0].Ref + + // add round 3 + round3poads := addRound(3, ledgercore.StateDelta{Accts: delta3}) + require.Equal(t, 2, len(round3poads)) + require.Equal(t, addrB, round3poads[0].Addr) + require.Equal(t, addrC, round3poads[1].Addr) + refoaB3 := round3poads[0].Ref + refoaC3 := round3poads[1].Ref queries, err := tx.Testing().MakeOnlineAccountsOptimizedReader() require.NoError(t, err) @@ -2317,25 +2352,20 @@ func TestAccountOnlineQueries(t *testing.T) { // A | 2 | 0 checkAddrB := func() { - require.Equal(t, int64(2), paods[0].Rowid) require.Equal(t, basics.Round(1), paods[0].UpdRound) require.Equal(t, addrB, paods[0].Addr) - require.Equal(t, int64(4), paods[1].Rowid) require.Equal(t, basics.Round(3), paods[1].UpdRound) require.Equal(t, addrB, paods[1].Addr) } checkAddrC := func() { - require.Equal(t, int64(5), paods[2].Rowid) require.Equal(t, basics.Round(3), paods[2].UpdRound) require.Equal(t, addrC, paods[2].Addr) } checkAddrA := func() { - require.Equal(t, int64(1), paods[3].Rowid) require.Equal(t, basics.Round(1), paods[3].UpdRound) require.Equal(t, addrA, paods[3].Addr) - require.Equal(t, int64(3), paods[4].Rowid) require.Equal(t, basics.Round(2), paods[4].UpdRound) require.Equal(t, addrA, paods[4].Addr) } @@ -2366,26 +2396,26 @@ func TestAccountOnlineQueries(t *testing.T) { require.NoError(t, err) require.Equal(t, basics.Round(3), rnd) require.Equal(t, 2, len(paods)) - require.Equal(t, int64(1), paods[0].Rowid) require.Equal(t, basics.Round(1), paods[0].UpdRound) - require.Equal(t, int64(3), paods[1].Rowid) + require.Equal(t, refoaA1, paods[0].Ref) require.Equal(t, basics.Round(2), paods[1].UpdRound) + require.Equal(t, refoaA2, paods[1].Ref) paods, rnd, err = queries.LookupOnlineHistory(addrB) require.NoError(t, err) require.Equal(t, basics.Round(3), rnd) require.Equal(t, 2, len(paods)) - require.Equal(t, int64(2), paods[0].Rowid) require.Equal(t, basics.Round(1), paods[0].UpdRound) - require.Equal(t, int64(4), paods[1].Rowid) + require.Equal(t, refoaB1, paods[0].Ref) require.Equal(t, basics.Round(3), paods[1].UpdRound) + require.Equal(t, refoaB3, paods[1].Ref) paods, rnd, err = queries.LookupOnlineHistory(addrC) require.NoError(t, err) require.Equal(t, basics.Round(3), rnd) require.Equal(t, 1, 
len(paods)) - require.Equal(t, int64(5), paods[0].Rowid) require.Equal(t, basics.Round(3), paods[0].UpdRound) + require.Equal(t, refoaC3, paods[0].Ref) return nil }) @@ -2396,9 +2426,9 @@ type mockOnlineAccountsWriter struct { rowid int64 } -func (w *mockOnlineAccountsWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) { +func (w *mockOnlineAccountsWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (ref trackerdb.OnlineAccountRef, err error) { w.rowid++ - return w.rowid, nil + return mockEntryRef{w.rowid}, nil } func (w *mockOnlineAccountsWriter) Close() {} @@ -2448,7 +2478,7 @@ func TestAccountOnlineAccountsNewRound(t *testing.T) { MicroAlgos: basics.MicroAlgos{Raw: 400_000_000}, BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 500}, }, - Rowid: 1, + Ref: mockEntryRef{1}, }, newAcct: []trackerdb.BaseOnlineAccountData{{ MicroAlgos: basics.MicroAlgos{Raw: 400_000_000}, @@ -2466,7 +2496,7 @@ func TestAccountOnlineAccountsNewRound(t *testing.T) { MicroAlgos: basics.MicroAlgos{Raw: 500_000_000}, BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 500}, }, - Rowid: 2, + Ref: mockEntryRef{2}, }, newAcct: []trackerdb.BaseOnlineAccountData{{ MicroAlgos: basics.MicroAlgos{Raw: 500_000_000}, @@ -2557,7 +2587,7 @@ func TestAccountOnlineAccountsNewRoundFlip(t *testing.T) { MicroAlgos: basics.MicroAlgos{Raw: 300_000_000}, BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 300}, }, - Rowid: 1, + Ref: mockEntryRef{1}, }, newAcct: []trackerdb.BaseOnlineAccountData{ { diff --git a/ledger/acctonline.go b/ledger/acctonline.go index 6e80d1bcfc..dc7335c30b 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -693,7 +693,7 @@ func (ao *onlineAccounts) lookupOnlineAccountData(rnd basics.Round, addr basics. // a separate transaction here, and directly use a prepared SQL query // against the database. 
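// Aside: mockEntryRef appears throughout the test hunks above but is defined
// outside this excerpt. A plausible minimal sketch (assumed, not taken from
// this patch): one test-only struct satisfying all four opaque ref marker
// interfaces introduced later in this series (trackerdb/interface.go), so the
// mock writers can hand back a single ref type everywhere.

type mockEntryRef struct {
	id int64
}

func (ref mockEntryRef) AccountRefMarker()       {}
func (ref mockEntryRef) OnlineAccountRefMarker() {}
func (ref mockEntryRef) ResourceRefMarker()      {}
func (ref mockEntryRef) CreatableRefMarker()     {}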
persistedData, err = ao.accountsq.LookupOnline(addr, rnd) - if err != nil || persistedData.Rowid == 0 { + if err != nil || persistedData.Ref == nil { // no such online account, return empty return ledgercore.OnlineAccountData{}, err } diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index eeaf434bfb..7360f30e97 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -225,13 +225,13 @@ func TestAcctOnline(t *testing.T) { data, err := oa.accountsq.LookupOnline(bal.Addr, rnd) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.Empty(t, data.AccountData) data, has := oa.baseOnlineAccounts.read(bal.Addr) require.True(t, has) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Empty(t, data.AccountData) oad, err := oa.lookupOnlineAccountData(rnd, bal.Addr) @@ -242,7 +242,7 @@ func TestAcctOnline(t *testing.T) { data, err = oa.accountsq.LookupOnline(bal.Addr, rnd-1) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.NotEmpty(t, data.AccountData) } @@ -258,13 +258,13 @@ func TestAcctOnline(t *testing.T) { data, err := oa.accountsq.LookupOnline(bal.Addr, rnd) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.Empty(t, data.Rowid) + require.Empty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.Empty(t, data.AccountData) data, has := oa.baseOnlineAccounts.read(bal.Addr) require.True(t, has) - require.NotEmpty(t, data.Rowid) // TODO: FIXME: set rowid to empty for these items + require.NotEmpty(t, data.Ref) // TODO: FIXME: set rowid to empty for these items require.Empty(t, data.AccountData) // committed round i => dbRound = i - maxDeltaLookback (= 13 for the account 0) @@ -283,14 +283,14 @@ func TestAcctOnline(t *testing.T) { data, err := oa.accountsq.LookupOnline(bal.Addr, rnd) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.NotEmpty(t, data.AccountData) // the most recent value is empty because the account is scheduled for removal data, has := oa.baseOnlineAccounts.read(bal.Addr) require.True(t, has) - require.NotEmpty(t, data.Rowid) // TODO: FIXME: set rowid to empty for these items + require.NotEmpty(t, data.Ref) // TODO: FIXME: set rowid to empty for these items require.Empty(t, data.AccountData) // account 1 went offline at round 2 => it offline at requested round 1+1=2 @@ -307,14 +307,14 @@ func TestAcctOnline(t *testing.T) { data, err := oa.accountsq.LookupOnline(bal.Addr, rnd) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.NotEmpty(t, data.AccountData) // the most recent value is empty because the account is scheduled for removal data, has := oa.baseOnlineAccounts.read(bal.Addr) require.True(t, has) - require.NotEmpty(t, data.Rowid) // TODO: FIXME: set rowid to empty for these items + require.NotEmpty(t, data.Ref) // TODO: FIXME: set rowid to empty for these items require.Empty(t, data.AccountData) // account 2 went offline at round 3 => it online at requested round 1+1=2 @@ -337,13 +337,13 @@ func TestAcctOnline(t *testing.T) { data, err := 
oa.accountsq.LookupOnline(bal.Addr, basics.Round(i+1)) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.Empty(t, data.AccountData) data, has := oa.baseOnlineAccounts.read(bal.Addr) require.True(t, has) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Empty(t, data.AccountData) oad, err := oa.lookupOnlineAccountData(basics.Round(i+1), bal.Addr) @@ -354,7 +354,7 @@ func TestAcctOnline(t *testing.T) { data, err = oa.accountsq.LookupOnline(bal.Addr, basics.Round(i)) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.NotEmpty(t, data.AccountData) } @@ -370,7 +370,7 @@ func TestAcctOnline(t *testing.T) { data, err := oa.accountsq.LookupOnline(bal.Addr, basics.Round(i)) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) require.NotEmpty(t, data.AccountData) @@ -513,7 +513,7 @@ func TestAcctOnlineCache(t *testing.T) { data, err := oa.accountsq.LookupOnline(bal.Addr, rnd) require.NoError(t, err) require.Equal(t, bal.Addr, data.Addr) - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.Equal(t, oa.cachedDBRoundOnline, data.Round) if (rnd-1)%(numAccts*2) >= numAccts { require.Empty(t, data.AccountData) @@ -549,9 +549,9 @@ func TestAcctOnlineCache(t *testing.T) { require.Equal(t, oa.cachedDBRoundOnline, data.Round) if (rnd-1)%(numAccts*2) >= numAccts { require.Empty(t, data.AccountData) - require.Empty(t, data.Rowid) + require.Empty(t, data.Ref) } else { - require.NotEmpty(t, data.Rowid) + require.NotEmpty(t, data.Ref) require.NotEmpty(t, data.AccountData) } @@ -1049,7 +1049,7 @@ func TestAcctOnlineCacheDBSync(t *testing.T) { pad, err := oa.accountsq.LookupOnline(addrA, 1) require.NoError(t, err) require.Equal(t, addrA, pad.Addr) - require.NotEmpty(t, pad.Rowid) + require.NotEmpty(t, pad.Ref) require.Empty(t, pad.AccountData.VoteLastValid) // commit a block to get these entries removed @@ -1082,7 +1082,7 @@ func TestAcctOnlineCacheDBSync(t *testing.T) { pad, err = oa.accountsq.LookupOnline(addrB, 1) require.NoError(t, err) require.Equal(t, addrB, pad.Addr) - require.NotEmpty(t, pad.Rowid) + require.NotEmpty(t, pad.Ref) require.NotEmpty(t, pad.AccountData.VoteLastValid) }() @@ -1098,7 +1098,7 @@ func TestAcctOnlineCacheDBSync(t *testing.T) { pad, err = oa.accountsq.LookupOnline(addrA, 1) require.NoError(t, err) require.Equal(t, addrA, pad.Addr) - require.Empty(t, pad.Rowid) + require.Empty(t, pad.Ref) require.Empty(t, pad.AccountData.VoteLastValid) _, has = oa.accounts[addrB] @@ -1114,7 +1114,7 @@ func TestAcctOnlineCacheDBSync(t *testing.T) { pad, err = oa.accountsq.LookupOnline(addrB, 1) require.NoError(t, err) require.Equal(t, addrB, pad.Addr) - require.NotEmpty(t, pad.Rowid) + require.NotEmpty(t, pad.Ref) require.NotEmpty(t, pad.AccountData.VoteLastValid) }) } diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 378cd479dd..26649caff4 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1210,7 +1210,7 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account // we don't technically need this, since it's already in the baseResources, however, writing this over // would ensure that we promote this field. 
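// Aside: a hedged one-liner capturing the convention these hunks switch to
// (helper name resourceExists is hypothetical, not part of this patch):
// existence on disk is now signalled by a non-nil ref rather than a
// non-zero rowid.

func resourceExists(prd trackerdb.PersistedResourcesData) bool {
	// a nil AcctRef marks a deleted or never-created resource entry
	return prd.AcctRef != nil
}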
au.baseResources.writePending(prd, addr) - if prd.Addrid != 0 { + if prd.AcctRef != nil { if err := addResource(prd.Aidx, rnd, prd.AccountResource()); err != nil { return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err } @@ -1235,7 +1235,7 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err } if persistedData.Round == currentDbRound { - if persistedData.Rowid != 0 { + if persistedData.Ref != nil { // if we read actual data return it au.baseAccounts.writePending(persistedData) ad = persistedData.AccountData.GetLedgerCoreAccountData() @@ -1365,7 +1365,7 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address, return ledgercore.AccountResource{}, basics.Round(0), err } if persistedData.Round == currentDbRound { - if persistedData.Addrid != 0 { + if persistedData.AcctRef != nil { // if we read actual data return it au.baseResources.writePending(persistedData, addr) return persistedData.AccountResource(), rnd, nil @@ -1490,7 +1490,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add return ledgercore.AccountData{}, basics.Round(0), "", 0, err } if persistedData.Round == currentDbRound { - if persistedData.Rowid != 0 { + if persistedData.Ref != nil { // if we read actual data return it au.baseAccounts.writePending(persistedData) return persistedData.AccountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil @@ -1694,9 +1694,9 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx trackerdb.Transact return err } - knownAddresses := make(map[basics.Address]int64, len(dcc.compactAccountDeltas.deltas)) + knownAddresses := make(map[basics.Address]trackerdb.AccountRef, len(dcc.compactAccountDeltas.deltas)) for _, delta := range dcc.compactAccountDeltas.deltas { - knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Rowid + knownAddresses[delta.oldAcct.Addr] = delta.oldAcct.Ref } err = dcc.compactResourcesDeltas.resourcesLoadOld(tx, knownAddresses) diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 70b933a92f..ff9ff9efeb 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -1628,9 +1628,9 @@ func TestCompactDeltasResources(t *testing.T) { // check deltas without missing accounts for i := int64(0); i < 4; i++ { - baseResources.write(trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(100 + i)}, addrs[i]) + baseResources.write(trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{i + 1}, Aidx: basics.CreatableIndex(100 + i)}, addrs[i]) if i%2 == 0 { - baseResources.write(trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(200 + i)}, addrs[i]) + baseResources.write(trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{i + 1}, Aidx: basics.CreatableIndex(200 + i)}, addrs[i]) } } @@ -1642,11 +1642,11 @@ func TestCompactDeltasResources(t *testing.T) { for i := int64(0); i < 4; i++ { delta, idx := outResourcesDeltas.get(addrs[i], basics.CreatableIndex(100+i)) require.NotEqual(t, -1, idx) - require.Equal(t, trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(100 + i)}, delta.oldResource) + require.Equal(t, trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{i + 1}, Aidx: basics.CreatableIndex(100 + i)}, delta.oldResource) if i%2 == 0 { delta, idx = outResourcesDeltas.get(addrs[i], basics.CreatableIndex(200+i)) require.NotEqual(t, -1, idx) - require.Equal(t, 
trackerdb.PersistedResourcesData{Addrid: i + 1, Aidx: basics.CreatableIndex(200 + i)}, delta.oldResource) + require.Equal(t, trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{i + 1}, Aidx: basics.CreatableIndex(200 + i)}, delta.oldResource) } } @@ -1662,7 +1662,7 @@ func TestCompactDeltasResources(t *testing.T) { appLocalState204 := basics.AppLocalState{KeyValue: basics.TealKeyValue{"204": basics.TealValue{Type: basics.TealBytesType, Bytes: "204"}}} stateDeltas[1].Accts.UpsertAppResource(addrs[4], 104, ledgercore.AppParamsDelta{Params: &appParams104}, ledgercore.AppLocalStateDelta{LocalState: &appLocalState204}) - baseResources.write(trackerdb.PersistedResourcesData{Addrid: 5 /* 4+1 */, Aidx: basics.CreatableIndex(104)}, addrs[4]) + baseResources.write(trackerdb.PersistedResourcesData{AcctRef: mockEntryRef{5} /* 4+1 */, Aidx: basics.CreatableIndex(104)}, addrs[4]) outResourcesDeltas = makeCompactResourceDeltas(stateDeltas, basics.Round(1), true, baseAccounts, baseResources) require.Equal(t, 0, len(outResourcesDeltas.misses)) diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 03041dcb50..436791b66a 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -758,10 +758,10 @@ return` pad, err := l.accts.accountsq.LookupAccount(userLocal) a.NoError(err) a.Equal(trackerdb.BaseAccountData{}, pad.AccountData) - a.Zero(pad.Rowid) + a.Nil(pad.Ref) prd, err := l.accts.accountsq.LookupResources(userLocal, basics.CreatableIndex(appIdx), basics.AppCreatable) a.NoError(err) - a.Zero(prd.Addrid) + a.Nil(prd.AcctRef) emptyResourceData := trackerdb.MakeResourcesData(0) a.Equal(emptyResourceData, prd.Data) } @@ -894,10 +894,10 @@ return` pad, err := l.accts.accountsq.LookupAccount(creator) a.NoError(err) a.Empty(pad.AccountData) - a.Zero(pad.Rowid) + a.Nil(pad.Ref) prd, err := l.accts.accountsq.LookupResources(creator, basics.CreatableIndex(appIdx), basics.AppCreatable) a.NoError(err) - a.Zero(prd.Addrid) + a.Nil(prd.AcctRef) emptyResourceData := trackerdb.MakeResourcesData(0) a.Equal(emptyResourceData, prd.Data) } diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 154fe86ce0..ef3834c125 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -1416,9 +1416,9 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb. if !added { // we need to translate the "addrid" into actual account address so that // we can report the failure. 
- addr, err := arw.LookupAccountAddressFromAddressID(ctx, acct.Addrid) + addr, err := arw.LookupAccountAddressFromAddressID(ctx, acct.AccountRef) if err != nil { - ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.Digest), acct.Addrid, err) + ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.Digest), acct.AccountRef, err) } else { ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.Digest), addr) } diff --git a/ledger/lruaccts_test.go b/ledger/lruaccts_test.go index e77a0c3bf9..84674fc627 100644 --- a/ledger/lruaccts_test.go +++ b/ledger/lruaccts_test.go @@ -42,7 +42,7 @@ func TestLRUBasicAccounts(t *testing.T) { acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.write(acct) @@ -56,7 +56,7 @@ func TestLRUBasicAccounts(t *testing.T) { require.Equal(t, basics.Round(i), acct.Round) require.Equal(t, addr, acct.Addr) require.Equal(t, uint64(i), acct.AccountData.MicroAlgos.Raw) - require.Equal(t, int64(i), acct.Rowid) + require.Equal(t, mockEntryRef{int64(i)}, acct.Ref) } // verify expected missing entries @@ -80,7 +80,7 @@ func TestLRUBasicAccounts(t *testing.T) { require.Equal(t, basics.Round(i), acct.Round) require.Equal(t, addr, acct.Addr) require.Equal(t, uint64(i), acct.AccountData.MicroAlgos.Raw) - require.Equal(t, int64(i), acct.Rowid) + require.Equal(t, mockEntryRef{int64(i)}, acct.Ref) } else { require.False(t, has) require.Equal(t, trackerdb.PersistedAccountData{}, acct) @@ -102,7 +102,7 @@ func TestLRUAccountsDisable(t *testing.T) { acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) @@ -116,7 +116,7 @@ func TestLRUAccountsDisable(t *testing.T) { acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.write(acct) @@ -137,7 +137,7 @@ func TestLRUAccountsPendingWrites(t *testing.T) { acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) @@ -189,7 +189,7 @@ func TestLRUAccountsPendingWritesWarning(t *testing.T) { acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) @@ -215,7 +215,7 @@ func TestLRUAccountsOmittedPendingWrites(t *testing.T) { acct := trackerdb.PersistedAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: 
trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseAcct.writePending(acct) @@ -231,7 +231,7 @@ func TestLRUAccountsOmittedPendingWrites(t *testing.T) { require.Equal(t, basics.Round(i), acct.Round) require.Equal(t, addr, acct.Addr) require.Equal(t, uint64(i), acct.AccountData.MicroAlgos.Raw) - require.Equal(t, int64(i), acct.Rowid) + require.Equal(t, mockEntryRef{int64(i)}, acct.Ref) } // verify expected missing entries @@ -286,7 +286,7 @@ func generatePersistedAccountData(startRound, endRound int) []trackerdb.Persiste accounts[i-startRound] = trackerdb.PersistedAccountData{ Addr: basics.Address(digest), Round: basics.Round(i + startRound), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } } diff --git a/ledger/lruonlineaccts_test.go b/ledger/lruonlineaccts_test.go index a433973368..ae1a8a7736 100644 --- a/ledger/lruonlineaccts_test.go +++ b/ledger/lruonlineaccts_test.go @@ -41,7 +41,7 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.write(acct) @@ -55,7 +55,7 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { require.Equal(t, basics.Round(i), acct.Round) require.Equal(t, addr, acct.Addr) require.Equal(t, uint64(i), acct.AccountData.MicroAlgos.Raw) - require.Equal(t, int64(i), acct.Rowid) + require.Equal(t, mockEntryRef{int64(i)}, acct.Ref) } // verify expected missing entries @@ -79,7 +79,7 @@ func TestLRUOnlineAccountsBasic(t *testing.T) { require.Equal(t, basics.Round(i), acct.Round) require.Equal(t, addr, acct.Addr) require.Equal(t, uint64(i), acct.AccountData.MicroAlgos.Raw) - require.Equal(t, int64(i), acct.Rowid) + require.Equal(t, mockEntryRef{int64(i)}, acct.Ref) } else { require.False(t, has) require.Equal(t, trackerdb.PersistedOnlineAccountData{}, acct) @@ -101,7 +101,7 @@ func TestLRUOnlineAccountsDisable(t *testing.T) { acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) @@ -115,7 +115,7 @@ func TestLRUOnlineAccountsDisable(t *testing.T) { acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.write(acct) @@ -136,7 +136,7 @@ func TestLRUOnlineAccountsPendingWrites(t *testing.T) { acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) @@ -178,7 +178,7 @@ func TestLRUOnlineAccountsPendingWritesWarning(t *testing.T) { acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, 
} baseOnlineAcct.writePending(acct) @@ -204,7 +204,7 @@ func TestLRUOnlineAccountsOmittedPendingWrites(t *testing.T) { acct := trackerdb.PersistedOnlineAccountData{ Addr: basics.Address(crypto.Hash([]byte{byte(i)})), Round: basics.Round(i), - Rowid: int64(i), + Ref: mockEntryRef{int64(i)}, AccountData: trackerdb.BaseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}}, } baseOnlineAcct.writePending(acct) @@ -220,7 +220,7 @@ func TestLRUOnlineAccountsOmittedPendingWrites(t *testing.T) { require.Equal(t, basics.Round(i), acct.Round) require.Equal(t, addr, acct.Addr) require.Equal(t, uint64(i), acct.AccountData.MicroAlgos.Raw) - require.Equal(t, int64(i), acct.Rowid) + require.Equal(t, mockEntryRef{int64(i)}, acct.Ref) } // verify expected missing entries diff --git a/ledger/lruresources_test.go b/ledger/lruresources_test.go index a000adffec..9268889a58 100644 --- a/ledger/lruresources_test.go +++ b/ledger/lruresources_test.go @@ -41,10 +41,10 @@ func TestLRUBasicResources(t *testing.T) { for i := 0; i < resourcesNum; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) res := trackerdb.PersistedResourcesData{ - Addrid: int64(i), - Aidx: basics.CreatableIndex(i), - Round: basics.Round(i), - Data: trackerdb.ResourcesData{Total: uint64(i)}, + AcctRef: mockEntryRef{int64(i)}, + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.write(res, addr) } @@ -55,7 +55,7 @@ func TestLRUBasicResources(t *testing.T) { res, has := baseRes.read(addr, basics.CreatableIndex(i)) require.True(t, has) require.Equal(t, basics.Round(i), res.Round) - require.Equal(t, int64(i), res.Addrid) + require.Equal(t, mockEntryRef{int64(i)}, res.AcctRef) require.Equal(t, uint64(i), res.Data.Total) require.Equal(t, basics.CreatableIndex(i), res.Aidx) } @@ -79,7 +79,7 @@ func TestLRUBasicResources(t *testing.T) { // expected to have it. 
require.True(t, has) require.Equal(t, basics.Round(i), res.Round) - require.Equal(t, int64(i), res.Addrid) + require.Equal(t, mockEntryRef{int64(i)}, res.AcctRef) require.Equal(t, uint64(i), res.Data.Total) require.Equal(t, basics.CreatableIndex(i), res.Aidx) } else { @@ -102,10 +102,10 @@ func TestLRUResourcesDisable(t *testing.T) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) addr := basics.Address(crypto.Hash([]byte{byte(i)})) res := trackerdb.PersistedResourcesData{ - Addrid: int64(i), - Aidx: basics.CreatableIndex(i), - Round: basics.Round(i), - Data: trackerdb.ResourcesData{Total: uint64(i)}, + AcctRef: mockEntryRef{int64(i)}, + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) baseRes.writeNotFoundPending(addr, basics.CreatableIndex(i)) @@ -120,10 +120,10 @@ func TestLRUResourcesDisable(t *testing.T) { for i := 0; i < resourceNum; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) res := trackerdb.PersistedResourcesData{ - Addrid: int64(i), - Aidx: basics.CreatableIndex(i), - Round: basics.Round(i), - Data: trackerdb.ResourcesData{Total: uint64(i)}, + AcctRef: mockEntryRef{int64(i)}, + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.write(res, addr) } @@ -143,10 +143,10 @@ func TestLRUResourcesPendingWrites(t *testing.T) { time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond) addr := basics.Address(crypto.Hash([]byte{byte(i)})) res := trackerdb.PersistedResourcesData{ - Addrid: int64(i), - Aidx: basics.CreatableIndex(i), - Round: basics.Round(i), - Data: trackerdb.ResourcesData{Total: uint64(i)}, + AcctRef: mockEntryRef{int64(i)}, + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) }(i) @@ -196,10 +196,10 @@ func TestLRUResourcesPendingWritesWarning(t *testing.T) { for i := 0; i < j; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) res := trackerdb.PersistedResourcesData{ - Addrid: int64(i), - Aidx: basics.CreatableIndex(i), - Round: basics.Round(i), - Data: trackerdb.ResourcesData{Total: uint64(i)}, + AcctRef: mockEntryRef{int64(i)}, + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) } @@ -223,10 +223,10 @@ func TestLRUResourcesOmittedPendingWrites(t *testing.T) { for i := 0; i < pendingWritesBuffer*2; i++ { addr := basics.Address(crypto.Hash([]byte{byte(i)})) res := trackerdb.PersistedResourcesData{ - Addrid: int64(i), - Aidx: basics.CreatableIndex(i), - Round: basics.Round(i), - Data: trackerdb.ResourcesData{Total: uint64(i)}, + AcctRef: mockEntryRef{int64(i)}, + Aidx: basics.CreatableIndex(i), + Round: basics.Round(i), + Data: trackerdb.ResourcesData{Total: uint64(i)}, } baseRes.writePending(res, addr) } @@ -239,7 +239,7 @@ func TestLRUResourcesOmittedPendingWrites(t *testing.T) { res, has := baseRes.read(addr, basics.CreatableIndex(i)) require.True(t, has) require.Equal(t, basics.Round(i), res.Round) - require.Equal(t, int64(i), res.Addrid) + require.Equal(t, mockEntryRef{int64(i)}, res.AcctRef) require.Equal(t, uint64(i), res.Data.Total) require.Equal(t, basics.CreatableIndex(i), res.Aidx) } @@ -295,10 +295,10 @@ func generatePersistedResourcesData(startRound, endRound int) []cachedResourceDa accounts[i-startRound] = cachedResourceData{ PersistedResourcesData: 
trackerdb.PersistedResourcesData{
-				Addrid: int64(i),
-				Aidx:   basics.CreatableIndex(i),
-				Round:  basics.Round(i + startRound),
-				Data:   trackerdb.ResourcesData{Total: uint64(i)},
+				AcctRef: mockEntryRef{int64(i)},
+				Aidx:    basics.CreatableIndex(i),
+				Round:   basics.Round(i + startRound),
+				Data:    trackerdb.ResourcesData{Total: uint64(i)},
 			},
 			address: basics.Address(digest),
 		}
diff --git a/ledger/store/trackerdb/data.go b/ledger/store/trackerdb/data.go
index 5ab3faad1f..264c055821 100644
--- a/ledger/store/trackerdb/data.go
+++ b/ledger/store/trackerdb/data.go
@@ -171,9 +171,9 @@ type PersistedAccountData struct {
 	Addr basics.Address
 	// The underlaying account data
 	AccountData BaseAccountData
-	// The rowid, when available. If the entry was loaded from the disk, then we have the rowid for it. Entries
-	// that doesn't have rowid ( hence, rowid == 0 ) represent either deleted accounts or non-existing accounts.
-	Rowid int64
+	// The reference to the stored object, when available. If the entry was loaded from the disk, then we have the ref for it.
+	// Entries that don't have a ref (hence, ref == nil) represent either deleted accounts or non-existing accounts.
+	Ref AccountRef
 	// the round number that is associated with the accountData. This field is needed so that we can maintain a correct
 	// lruAccounts cache. We use it to ensure that the entries on the lruAccounts.accountsList are the latest ones.
 	// this becomes an issue since while we attempt to write an update to disk, we might be reading an entry and placing
@@ -185,14 +185,14 @@ type PersistedAccountData struct {
 
 // PersistedResourcesData is exported view of persistedResourcesData
 type PersistedResourcesData struct {
-	// addrid is the rowid of the account address that holds this resource.
+	// AcctRef is the stored object reference of the account address that holds this resource.
 	// it is used in update/delete operations so must be filled for existing records.
 	// resolution is a multi stage process:
 	// - baseResources cache might have valid entries
 	// - baseAccount cache might have an entry for the address with rowid set
 	// - when loading non-cached resources in resourcesLoadOld
 	// - when creating new accounts in accountsNewRound
-	Addrid int64
+	AcctRef AccountRef
 	// creatable index
 	Aidx basics.CreatableIndex
 	// actual resource data
@@ -206,7 +206,7 @@ type PersistedResourcesData struct {
 type PersistedOnlineAccountData struct {
 	Addr        basics.Address
 	AccountData BaseOnlineAccountData
-	Rowid       int64
+	Ref         OnlineAccountRef
 	// the round number that is associated with the baseOnlineAccountData. This field is the corresponding one to the round field
 	// in persistedAccountData, and serves the same purpose. This value comes from account rounds table and correspond to
 	// the last trackers db commit round.
diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go
index 121dcdf211..b5c93cb017 100644
--- a/ledger/store/trackerdb/interface.go
+++ b/ledger/store/trackerdb/interface.go
@@ -26,21 +26,41 @@ import (
 	"github.com/algorand/go-algorand/ledger/ledgercore"
 )
 
+// AccountRef is an opaque ref to an account in the db.
+type AccountRef interface {
+	AccountRefMarker()
+}
+
+// OnlineAccountRef is an opaque ref to an "online" account in the db.
+type OnlineAccountRef interface {
+	OnlineAccountRefMarker()
+}
+
+// ResourceRef is an opaque ref to a resource in the db.
+type ResourceRef interface {
+	ResourceRefMarker()
+}
+
+// CreatableRef is an opaque ref to a creatable in the db.
+type CreatableRef interface { + CreatableRefMarker() +} + // AccountsWriter is the write interface for: // - accounts, resources, app kvs, creatables type AccountsWriter interface { - InsertAccount(addr basics.Address, normBalance uint64, data BaseAccountData) (rowid int64, err error) - DeleteAccount(rowid int64) (rowsAffected int64, err error) - UpdateAccount(rowid int64, normBalance uint64, data BaseAccountData) (rowsAffected int64, err error) + InsertAccount(addr basics.Address, normBalance uint64, data BaseAccountData) (ref AccountRef, err error) + DeleteAccount(ref AccountRef) (rowsAffected int64, err error) + UpdateAccount(ref AccountRef, normBalance uint64, data BaseAccountData) (rowsAffected int64, err error) - InsertResource(addrid int64, aidx basics.CreatableIndex, data ResourcesData) (rowid int64, err error) - DeleteResource(addrid int64, aidx basics.CreatableIndex) (rowsAffected int64, err error) - UpdateResource(addrid int64, aidx basics.CreatableIndex, data ResourcesData) (rowsAffected int64, err error) + InsertResource(accountRef AccountRef, aidx basics.CreatableIndex, data ResourcesData) (ref ResourceRef, err error) + DeleteResource(accountRef AccountRef, aidx basics.CreatableIndex) (rowsAffected int64, err error) + UpdateResource(accountRef AccountRef, aidx basics.CreatableIndex, data ResourcesData) (rowsAffected int64, err error) UpsertKvPair(key string, value []byte) error DeleteKvPair(key string) error - InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) + InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (ref CreatableRef, err error) DeleteCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType) (rowsAffected int64, err error) Close() @@ -82,15 +102,15 @@ type AccountsReader interface { type AccountsReaderExt interface { AccountsTotals(ctx context.Context, catchpointStaging bool) (totals ledgercore.AccountTotals, err error) AccountsHashRound(ctx context.Context) (hashrnd basics.Round, err error) - LookupAccountAddressFromAddressID(ctx context.Context, addrid int64) (address basics.Address, err error) - LookupAccountDataByAddress(basics.Address) (rowid int64, data []byte, err error) - LookupAccountRowID(basics.Address) (addrid int64, err error) - LookupResourceDataByAddrID(addrid int64, aidx basics.CreatableIndex) (data []byte, err error) + LookupAccountAddressFromAddressID(ctx context.Context, ref AccountRef) (address basics.Address, err error) + LookupAccountDataByAddress(basics.Address) (ref AccountRef, data []byte, err error) + LookupAccountRowID(basics.Address) (ref AccountRef, err error) + LookupResourceDataByAddrID(accountRef AccountRef, aidx basics.CreatableIndex) (data []byte, err error) TotalResources(ctx context.Context) (total uint64, err error) TotalAccounts(ctx context.Context) (total uint64, err error) TotalKVs(ctx context.Context) (total uint64, err error) AccountsRound() (rnd basics.Round, err error) - LookupOnlineAccountDataByAddress(addr basics.Address) (rowid int64, data []byte, err error) + LookupOnlineAccountDataByAddress(addr basics.Address) (ref OnlineAccountRef, data []byte, err error) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) AccountsOnlineRoundParams() (onlineRoundParamsData []ledgercore.OnlineRoundParamsData, endRound basics.Round, err error) OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error) @@ -110,7 +130,7 
@@ type AccountsReaderWriter interface { // OnlineAccountsWriter is the write interface for: // - online accounts type OnlineAccountsWriter interface { - InsertOnlineAccount(addr basics.Address, normBalance uint64, data BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) + InsertOnlineAccount(addr basics.Address, normBalance uint64, data BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (ref OnlineAccountRef, err error) Close() } @@ -185,8 +205,8 @@ type OrderedAccountsIter interface { // AccountAddressHash is used by Next to return a single account address and the associated hash. type AccountAddressHash struct { - Addrid int64 - Digest []byte + AccountRef AccountRef + Digest []byte } // KVsIter is an iterator for an application Key/Values. diff --git a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go index adee2f1fce..78083c95ac 100644 --- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go @@ -267,11 +267,13 @@ func (r *accountsV2Reader) OnlineAccountsAll(maxAccounts uint64) ([]trackerdb.Pe for rows.Next() { var addrbuf []byte var buf []byte + var rowid int64 data := trackerdb.PersistedOnlineAccountData{} - err := rows.Scan(&data.Rowid, &addrbuf, &data.UpdRound, &buf) + err := rows.Scan(&rowid, &addrbuf, &data.UpdRound, &buf) if err != nil { return nil, err } + data.Ref = sqlRowRef{rowid} if len(addrbuf) != len(data.Addr) { err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(data.Addr)) return nil, err @@ -365,7 +367,12 @@ func (r *accountsV2Reader) LoadTxTail(ctx context.Context, dbRound basics.Round) } // LookupAccountAddressFromAddressID looks up an account based on a rowid -func (r *accountsV2Reader) LookupAccountAddressFromAddressID(ctx context.Context, addrid int64) (address basics.Address, err error) { +func (r *accountsV2Reader) LookupAccountAddressFromAddressID(ctx context.Context, accountRef trackerdb.AccountRef) (address basics.Address, err error) { + if accountRef == nil { + err = sql.ErrNoRows + return address, fmt.Errorf("no matching address could be found for rowid = nil: %w", err) + } + addrid := accountRef.(sqlRowRef).rowid var addrbuf []byte err = r.q.QueryRowContext(ctx, "SELECT address FROM accountbase WHERE rowid = ?", addrid).Scan(&addrbuf) if err != nil { @@ -382,52 +389,59 @@ func (r *accountsV2Reader) LookupAccountAddressFromAddressID(ctx context.Context return } -func (r *accountsV2Reader) LookupAccountDataByAddress(addr basics.Address) (rowid int64, data []byte, err error) { +func (r *accountsV2Reader) LookupAccountDataByAddress(addr basics.Address) (ref trackerdb.AccountRef, data []byte, err error) { // optimize this query for repeated usage selectStmt, err := r.getOrPrepare("SELECT rowid, data FROM accountbase WHERE address=?") if err != nil { return } + var rowid int64 err = selectStmt.QueryRow(addr[:]).Scan(&rowid, &data) if err != nil { return } - return rowid, data, err + return sqlRowRef{rowid}, data, err } // LookupOnlineAccountDataByAddress looks up online account data by address. -func (r *accountsV2Reader) LookupOnlineAccountDataByAddress(addr basics.Address) (rowid int64, data []byte, err error) { +func (r *accountsV2Reader) LookupOnlineAccountDataByAddress(addr basics.Address) (ref trackerdb.OnlineAccountRef, data []byte, err error) { // optimize this query for repeated usage selectStmt, err := r.getOrPrepare("SELECT rowid, data FROM onlineaccounts WHERE address=? 
ORDER BY updround DESC LIMIT 1") if err != nil { return } + var rowid int64 err = selectStmt.QueryRow(addr[:]).Scan(&rowid, &data) if err != nil { return } - return rowid, data, err + return sqlRowRef{rowid}, data, err } // LookupAccountRowID looks up the rowid of an account based on its address. -func (r *accountsV2Reader) LookupAccountRowID(addr basics.Address) (rowid int64, err error) { +func (r *accountsV2Reader) LookupAccountRowID(addr basics.Address) (ref trackerdb.AccountRef, err error) { // optimize this query for repeated usage addrRowidStmt, err := r.getOrPrepare("SELECT rowid FROM accountbase WHERE address=?") if err != nil { return } + var rowid int64 err = addrRowidStmt.QueryRow(addr[:]).Scan(&rowid) if err != nil { return } - return rowid, err + return sqlRowRef{rowid}, err } // LookupResourceDataByAddrID looks up the resource data by account rowid + resource aidx. -func (r *accountsV2Reader) LookupResourceDataByAddrID(addrid int64, aidx basics.CreatableIndex) (data []byte, err error) { +func (r *accountsV2Reader) LookupResourceDataByAddrID(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex) (data []byte, err error) { + if accountRef == nil { + return data, sql.ErrNoRows + } + addrid := accountRef.(sqlRowRef).rowid // optimize this query for repeated usage selectStmt, err := r.getOrPrepare("SELECT data FROM resources WHERE addrid = ? AND aidx = ?") if err != nil { diff --git a/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go index 3175010c7e..2e2c40dbb9 100644 --- a/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go @@ -224,7 +224,9 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb acct = make([]trackerdb.AccountAddressHash, iterator.accountCount) acctIdx := 0 for iterator.hashesRows.Next() { - err = iterator.hashesRows.Scan(&(acct[acctIdx].Addrid), &(acct[acctIdx].Digest)) + var addrid int64 + err = iterator.hashesRows.Scan(&addrid, &(acct[acctIdx].Digest)) + acct[acctIdx].AccountRef = sqlRowRef{addrid} if err != nil { iterator.Close(ctx) return diff --git a/ledger/store/trackerdb/sqlitedriver/sql.go b/ledger/store/trackerdb/sqlitedriver/sql.go index 52e11e6a3c..1c7becb916 100644 --- a/ledger/store/trackerdb/sqlitedriver/sql.go +++ b/ledger/store/trackerdb/sqlitedriver/sql.go @@ -56,6 +56,15 @@ type onlineAccountsSQLWriter struct { insertStmt, updateStmt *sql.Stmt } +type sqlRowRef struct { + rowid int64 +} + +func (ref sqlRowRef) AccountRefMarker() {} +func (ref sqlRowRef) OnlineAccountRefMarker() {} +func (ref sqlRowRef) ResourceRefMarker() {} +func (ref sqlRowRef) CreatableRefMarker() {} + // AccountsInitDbQueries constructs an AccountsReader backed by sql queries. 
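// Aside: a hedged sketch of the narrowing pattern the readers and writers
// below use (helper name rowidOf is hypothetical, not part of this patch).
// The sqlite driver trusts that any non-nil ref it receives is its own
// sqlRowRef, and recovers the raw rowid with a type assertion:

func rowidOf(ref trackerdb.AccountRef) (int64, error) {
	if ref == nil {
		// a nil ref means the row was never read from (or written to) the db
		return 0, sql.ErrNoRows
	}
	// a ref originating from a different driver would panic here, by design
	return ref.(sqlRowRef).rowid, nil
}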
func AccountsInitDbQueries(q db.Queryable) (*accountsDbQueries, error) { var err error @@ -379,7 +388,7 @@ func (qs *accountsDbQueries) LookupResources(addr basics.Address, aidx basics.Cr if err == nil { data.Aidx = aidx if len(buf) > 0 && rowid.Valid { - data.Addrid = rowid.Int64 + data.AcctRef = sqlRowRef{rowid.Int64} err = protocol.Decode(buf, &data.Data) if err != nil { return err @@ -441,10 +450,10 @@ func (qs *accountsDbQueries) LookupAllResources(addr basics.Address) (data []tra return err } data = append(data, trackerdb.PersistedResourcesData{ - Addrid: addrid.Int64, - Aidx: basics.CreatableIndex(aidx.Int64), - Data: resData, - Round: dbRound, + AcctRef: sqlRowRef{addrid.Int64}, + Aidx: basics.CreatableIndex(aidx.Int64), + Data: resData, + Round: dbRound, }) rnd = dbRound } @@ -464,7 +473,7 @@ func (qs *accountsDbQueries) LookupAccount(addr basics.Address) (data trackerdb. if err == nil { data.Addr = addr if len(buf) > 0 && rowid.Valid { - data.Rowid = rowid.Int64 + data.Ref = sqlRowRef{rowid.Int64} err = protocol.Decode(buf, &data.AccountData) return err } @@ -493,7 +502,7 @@ func (qs *onlineAccountsDbQueries) LookupOnline(addr basics.Address, rnd basics. if err == nil { data.Addr = addr if len(buf) > 0 && rowid.Valid && updround.Valid { - data.Rowid = rowid.Int64 + data.Ref = sqlRowRef{rowid.Int64} data.UpdRound = basics.Round(updround.Int64) err = protocol.Decode(buf, &data.AccountData) return err @@ -542,10 +551,12 @@ func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (res for rows.Next() { var buf []byte data := trackerdb.PersistedOnlineAccountData{} - err := rows.Scan(&data.Rowid, &data.UpdRound, &rnd, &buf) + var rowid int64 + err := rows.Scan(&rowid, &data.UpdRound, &rnd, &buf) if err != nil { return err } + data.Ref = sqlRowRef{rowid} err = protocol.Decode(buf, &data.AccountData) if err != nil { return err @@ -614,16 +625,20 @@ func (w *onlineAccountsSQLWriter) Close() { } } -func (w accountsSQLWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (rowid int64, err error) { +func (w accountsSQLWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (ref trackerdb.AccountRef, err error) { result, err := w.insertStmt.Exec(addr[:], normBalance, protocol.Encode(&data)) if err != nil { return } - rowid, err = result.LastInsertId() - return + rowid, err := result.LastInsertId() + return sqlRowRef{rowid}, err } -func (w accountsSQLWriter) DeleteAccount(rowid int64) (rowsAffected int64, err error) { +func (w accountsSQLWriter) DeleteAccount(ref trackerdb.AccountRef) (rowsAffected int64, err error) { + if ref == nil { + return 0, nil + } + rowid := ref.(sqlRowRef).rowid result, err := w.deleteByRowIDStmt.Exec(rowid) if err != nil { return @@ -632,7 +647,12 @@ func (w accountsSQLWriter) DeleteAccount(rowid int64) (rowsAffected int64, err e return } -func (w accountsSQLWriter) UpdateAccount(rowid int64, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) { +func (w accountsSQLWriter) UpdateAccount(ref trackerdb.AccountRef, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) { + if ref == nil { + err = sql.ErrNoRows + return 0, fmt.Errorf("no account could be found for rowid = nil: %w", err) + } + rowid := ref.(sqlRowRef).rowid result, err := w.updateStmt.Exec(normBalance, protocol.Encode(&data), rowid) if err != nil { return @@ -641,16 +661,26 @@ func (w accountsSQLWriter) UpdateAccount(rowid int64, normBalance 
uint64, data t return } -func (w accountsSQLWriter) InsertResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowid int64, err error) { +func (w accountsSQLWriter) InsertResource(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (ref trackerdb.ResourceRef, err error) { + if accountRef == nil { + err = sql.ErrNoRows + return nil, fmt.Errorf("no account could be found for rowid = nil: %w", err) + } + addrid := accountRef.(sqlRowRef).rowid result, err := w.insertResourceStmt.Exec(addrid, aidx, protocol.Encode(&data)) if err != nil { return } - rowid, err = result.LastInsertId() - return + rowid, err := result.LastInsertId() + return sqlRowRef{rowid}, err } -func (w accountsSQLWriter) DeleteResource(addrid int64, aidx basics.CreatableIndex) (rowsAffected int64, err error) { +func (w accountsSQLWriter) DeleteResource(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex) (rowsAffected int64, err error) { + if accountRef == nil { + err = sql.ErrNoRows + return 0, fmt.Errorf("no account could be found for rowid = nil: %w", err) + } + addrid := accountRef.(sqlRowRef).rowid result, err := w.deleteResourceStmt.Exec(addrid, aidx) if err != nil { return @@ -659,7 +689,12 @@ func (w accountsSQLWriter) DeleteResource(addrid int64, aidx basics.CreatableInd return } -func (w accountsSQLWriter) UpdateResource(addrid int64, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) { +func (w accountsSQLWriter) UpdateResource(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) { + if accountRef == nil { + err = sql.ErrNoRows + return 0, fmt.Errorf("no account could be found for rowid = nil: %w", err) + } + addrid := accountRef.(sqlRowRef).rowid result, err := w.updateResourceStmt.Exec(protocol.Encode(&data), addrid, aidx) if err != nil { return @@ -693,13 +728,13 @@ func (w accountsSQLWriter) DeleteKvPair(key string) error { return err } -func (w accountsSQLWriter) InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) { +func (w accountsSQLWriter) InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (ref trackerdb.CreatableRef, err error) { result, err := w.insertCreatableIdxStmt.Exec(cidx, creator, ctype) if err != nil { return } - rowid, err = result.LastInsertId() - return + rowid, err := result.LastInsertId() + return sqlRowRef{rowid}, err } func (w accountsSQLWriter) DeleteCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType) (rowsAffected int64, err error) { @@ -711,11 +746,11 @@ func (w accountsSQLWriter) DeleteCreatable(cidx basics.CreatableIndex, ctype bas return } -func (w onlineAccountsSQLWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) { +func (w onlineAccountsSQLWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (ref trackerdb.OnlineAccountRef, err error) { result, err := w.insertStmt.Exec(addr[:], normBalance, protocol.Encode(&data), updRound, voteLastValid) if err != nil { return } - rowid, err = result.LastInsertId() - return + rowid, err := result.LastInsertId() + return sqlRowRef{rowid}, err } From 843637c3fa94c65273039feb67cb3c3aa4ed558d Mon Sep 17 00:00:00 2001 From: Michael Diamant Date: Tue, 7 
Mar 2023 14:51:13 -0500 Subject: [PATCH 68/81] tests: Fix t.Parallel() errors in netdeploy package (#4993) --- .golangci.yml | 3 --- netdeploy/networkTemplates_test.go | 6 ++++++ netdeploy/network_test.go | 2 ++ netdeploy/remote/bootstrappedNetwork_test.go | 1 + netdeploy/remote/deployedNetwork_test.go | 3 +++ 5 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 46463c18bf..ac4bea1a3c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -149,9 +149,6 @@ issues: - path: ^logging.*_test\.go linters: - paralleltest - - path: ^netdeploy.*_test\.go - linters: - - paralleltest - path: ^network.*_test\.go linters: - paralleltest diff --git a/netdeploy/networkTemplates_test.go b/netdeploy/networkTemplates_test.go index e4bc38e77b..63bff9f2d8 100644 --- a/netdeploy/networkTemplates_test.go +++ b/netdeploy/networkTemplates_test.go @@ -33,6 +33,7 @@ import ( func TestLoadConfig(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) @@ -46,6 +47,7 @@ func TestLoadConfig(t *testing.T) { func TestLoadMissingConfig(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) @@ -57,6 +59,7 @@ func TestLoadMissingConfig(t *testing.T) { func TestGenerateGenesis(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) @@ -76,6 +79,7 @@ func TestGenerateGenesis(t *testing.T) { func TestValidate(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) @@ -235,6 +239,8 @@ type overlayTestStruct struct { // TestJsonOverlay ensures that encoding/json will only clobber fields present in the json and leave other fields unchanged func TestJsonOverlay(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + before := overlayTestStruct{A: "one", B: "two"} setB := "{\"B\":\"other\"}" dec := json.NewDecoder(strings.NewReader(setB)) diff --git a/netdeploy/network_test.go b/netdeploy/network_test.go index dce8a10e4d..6b035547cd 100644 --- a/netdeploy/network_test.go +++ b/netdeploy/network_test.go @@ -29,6 +29,7 @@ import ( func TestSaveNetworkCfg(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) @@ -48,6 +49,7 @@ func TestSaveNetworkCfg(t *testing.T) { func TestSaveConsensus(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() a := require.New(t) diff --git a/netdeploy/remote/bootstrappedNetwork_test.go b/netdeploy/remote/bootstrappedNetwork_test.go index ac0afa6428..45cb4162c1 100644 --- a/netdeploy/remote/bootstrappedNetwork_test.go +++ b/netdeploy/remote/bootstrappedNetwork_test.go @@ -26,6 +26,7 @@ import ( func TestLoadBootstrappedData(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() badSpecPath := filepath.Join("./../../test", "testdata/deployednettemplates/networks/bootstrapped/badSpec.json") _, err := LoadBootstrappedData(badSpecPath) diff --git a/netdeploy/remote/deployedNetwork_test.go b/netdeploy/remote/deployedNetwork_test.go index 98149ac32e..f6ac76b3e8 100644 --- a/netdeploy/remote/deployedNetwork_test.go +++ b/netdeploy/remote/deployedNetwork_test.go @@ -30,6 +30,7 @@ import ( func TestCreateSignedTxBasic(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() var networkState netState networkState.nApplications = 2 @@ -90,6 +91,7 @@ func TestCreateSignedTxBasic(t *testing.T) { func TestCreateSignedTxAssets(t *testing.T) { // assets per account should not exceed limit partitiontest.PartitionTest(t) + t.Parallel() params := config.Consensus[protocol.ConsensusCurrentVersion] secretDst := keypair() @@ 
-127,6 +129,7 @@ func TestCreateSignedTxAssets(t *testing.T) { func TestAccountsNeeded(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() params := config.Consensus[protocol.ConsensusCurrentVersion] params.MaxAppsCreated = 10 From 4bbedc00180fb4e20a3496ba5ee8a31011d29159 Mon Sep 17 00:00:00 2001 From: Michael Diamant Date: Tue, 7 Mar 2023 14:54:45 -0500 Subject: [PATCH 69/81] tests: Fix t.Parallel() errors in cmd package (#4991) Co-authored-by: algochoi <86622919+algochoi@users.noreply.github.com> --- .golangci.yml | 3 - cmd/algocfg/getCommand_test.go | 3 + cmd/algofix/deadlock_test.go | 4 ++ cmd/algofix/main_test.go | 2 + cmd/algoh/blockWatcher_test.go | 10 ++++ cmd/algoh/blockstats_test.go | 6 ++ cmd/catchupsrv/download_test.go | 3 + cmd/goal/application_test.go | 3 + cmd/goal/commands_test.go | 10 ++-- cmd/goal/formatting_test.go | 9 +++ cmd/goal/inspect_test.go | 2 + cmd/goal/node_test.go | 3 + cmd/tealdbg/cdtSession_test.go | 17 ++++++ cmd/tealdbg/cdtdbg_test.go | 6 ++ cmd/tealdbg/debugger_test.go | 8 +++ cmd/tealdbg/localLedger_test.go | 2 + cmd/tealdbg/local_test.go | 98 +++++++++++++++++++++------------ cmd/tealdbg/remote_test.go | 2 + cmd/tealdbg/server_test.go | 6 +- cmd/tealdbg/webdbg_test.go | 2 + cmd/updater/version_test.go | 2 + 21 files changed, 157 insertions(+), 44 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ac4bea1a3c..ff7892d4f6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -125,9 +125,6 @@ issues: - path: ^catchup.*_test\.go linters: - paralleltest - - path: ^cmd.*_test\.go - linters: - - paralleltest - path: ^config.*_test\.go linters: - paralleltest diff --git a/cmd/algocfg/getCommand_test.go b/cmd/algocfg/getCommand_test.go index 6c1285bb76..ad1525b7d7 100644 --- a/cmd/algocfg/getCommand_test.go +++ b/cmd/algocfg/getCommand_test.go @@ -28,6 +28,7 @@ import ( func TestPrint(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() testcases := []struct { Input interface{} @@ -55,7 +56,9 @@ func TestPrint(t *testing.T) { }, } for i, tc := range testcases { + tc := tc t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { + t.Parallel() ret, err := serializeObjectProperty(tc, "Input") assert.NoError(t, err) assert.Equal(t, tc.expected, ret) diff --git a/cmd/algofix/deadlock_test.go b/cmd/algofix/deadlock_test.go index 426b4297fc..7d23edb7fd 100644 --- a/cmd/algofix/deadlock_test.go +++ b/cmd/algofix/deadlock_test.go @@ -132,7 +132,11 @@ func main() { func TestDeadlockRewrite(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + + // nolint:paralleltest // Subtests modify shared resources. t.Run("simple", func(t *testing.T) { testDeadlock(t, deadlockSimpleSrc, deadlockSimpleDest) }) + // nolint:paralleltest // Subtests modify shared resources. t.Run("onoff", func(t *testing.T) { testDeadlock(t, deadlockTestSrc, deadlockTestFin) }) } diff --git a/cmd/algofix/main_test.go b/cmd/algofix/main_test.go index fad46ca7cc..2355214e23 100644 --- a/cmd/algofix/main_test.go +++ b/cmd/algofix/main_test.go @@ -76,6 +76,8 @@ func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustB func TestRewrite(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + for _, tt := range testCases { // Apply fix: should get tt.Out. 
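// Aside: a minimal runnable sketch of the idiom these test fixes apply
// (generic Go testing pattern, not code from this patch; assumes imports
// "fmt" and "testing"). Before a subtest calls t.Parallel(), the range
// variables are rebound so each parallel closure captures its own copies.

func TestParallelSubtestSketch(t *testing.T) {
	t.Parallel()
	cases := []string{"a", "b"}
	for i, tc := range cases {
		i, tc := i, tc // rebind: parallel subtests must not share loop variables
		t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
			t.Parallel()
			if tc == "" {
				t.Errorf("case %d: unexpected empty input", i)
			}
		})
	}
}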
out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true) diff --git a/cmd/algoh/blockWatcher_test.go b/cmd/algoh/blockWatcher_test.go index a623897295..79086d097c 100644 --- a/cmd/algoh/blockWatcher_test.go +++ b/cmd/algoh/blockWatcher_test.go @@ -40,6 +40,8 @@ func bw(client Client) *blockWatcher { // Then blockIfStalled will block until the next block is reported func TestBlockIfStalled(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + client := mockClient{ error: []error{nil, nil, nil}, status: makeNodeStatuses(300, 300, 300, 301), @@ -62,6 +64,8 @@ func TestBlockIfStalled(t *testing.T) { // Then blockIfCatchup will block until a block is reported twice func TestBlockIfCatchup(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + client := mockClient{ error: []error{nil, nil, nil}, status: makeNodeStatuses(301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 310), @@ -84,6 +88,8 @@ func TestBlockIfCatchup(t *testing.T) { // Then blockIfCatchup will return after the first status call. func TestBlockIfCaughtUp(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + client := mockClient{ error: []error{nil, nil, nil}, status: makeNodeStatuses(300), @@ -116,6 +122,8 @@ func (l *testlistener) onBlock(rpcs.EncodedBlockCert) { func TestE2E(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + client := makeMockClient( []error{nil, nil, nil}, makeNodeStatuses(300, 301, 302, 302, 302, 302, 302, 302, 310, 320, 321, 321, 321, 322), @@ -165,6 +173,8 @@ func TestE2E(t *testing.T) { func TestAbortDuringStall(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + client := makeMockClient( []error{}, makeNodeStatuses(300), diff --git a/cmd/algoh/blockstats_test.go b/cmd/algoh/blockstats_test.go index 2c58e8c430..abd3e7eeaa 100644 --- a/cmd/algoh/blockstats_test.go +++ b/cmd/algoh/blockstats_test.go @@ -52,6 +52,8 @@ func makeTestBlock(round uint64) rpcs.EncodedBlockCert { func TestConsecutiveBlocks(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sender := MockEventSender{} bs := blockstats{log: &sender} @@ -68,6 +70,8 @@ func TestConsecutiveBlocks(t *testing.T) { func TestEventWithDetails(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sender := MockEventSender{} bs := blockstats{log: &sender} @@ -108,6 +112,8 @@ func TestEventWithDetails(t *testing.T) { func TestAgreementTime(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sleepTime := 50 * time.Millisecond testAttempts := 0 const maxTestAttempts = 10 diff --git a/cmd/catchupsrv/download_test.go b/cmd/catchupsrv/download_test.go index f0f911ed13..975f321655 100644 --- a/cmd/catchupsrv/download_test.go +++ b/cmd/catchupsrv/download_test.go @@ -25,6 +25,7 @@ import ( func TestBlockToPath(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() require.Equal(t, "00/00/000000", blockToPath(0)) require.Equal(t, "00/00/0000rs", blockToPath(1000)) require.Equal(t, "05/yc/05ycfo", blockToPath(10000500)) @@ -33,6 +34,7 @@ func TestBlockToPath(t *testing.T) { func TestBlockToFileName(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() require.Equal(t, "000000", blockToFileName(0)) require.Equal(t, "0000rs", blockToFileName(1000)) require.Equal(t, "05ycfo", blockToFileName(10000500)) @@ -40,6 +42,7 @@ func TestBlockToFileName(t *testing.T) { } func TestBlockToString(t *testing.T) { + t.Parallel() partitiontest.PartitionTest(t) require.Equal(t, "0", blockToString(0)) require.Equal(t, "rs", blockToString(1000)) diff --git 
a/cmd/goal/application_test.go b/cmd/goal/application_test.go index 687518fe0c..d037f77bdd 100644 --- a/cmd/goal/application_test.go +++ b/cmd/goal/application_test.go @@ -26,6 +26,7 @@ import ( func TestParseMethodArgJSONtoByteSlice(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() makeRepeatSlice := func(size int, value string) []string { slice := make([]string, size) @@ -133,7 +134,9 @@ func TestParseMethodArgJSONtoByteSlice(t *testing.T) { } for i, test := range tests { + test := test t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) { + t.Parallel() applicationArgs := [][]byte{} err := parseMethodArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs) require.NoError(t, err) diff --git a/cmd/goal/commands_test.go b/cmd/goal/commands_test.go index 78794793c6..8f5d02510c 100644 --- a/cmd/goal/commands_test.go +++ b/cmd/goal/commands_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestEnsureDataDirReturnsWhenDataDirIsProvided(t *testing.T) { +func TestEnsureDataDirReturnsWhenDataDirIsProvided(t *testing.T) { // nolint:paralleltest // Sets shared OS environment variable. partitiontest.PartitionTest(t) expectedDir := "~/.algorand" os.Setenv("ALGORAND_DATA", expectedDir) @@ -33,7 +33,7 @@ func TestEnsureDataDirReturnsWhenDataDirIsProvided(t *testing.T) { require.Equal(t, expectedDir, actualDir) } -func TestEnsureDataDirReturnsWhenWorkDirIsProvided(t *testing.T) { +func TestEnsureDataDirReturnsWhenWorkDirIsProvided(t *testing.T) { // nolint:paralleltest // Sets shared OS environment variable. partitiontest.PartitionTest(t) expectedDir, err := os.Getwd() if err != nil { @@ -44,7 +44,7 @@ func TestEnsureDataDirReturnsWhenWorkDirIsProvided(t *testing.T) { require.Equal(t, expectedDir, actualDir) } -func TestEnsureDataDirReturnsWhenRelPath1IsProvided(t *testing.T) { +func TestEnsureDataDirReturnsWhenRelPath1IsProvided(t *testing.T) { // nolint:paralleltest // Sets shared OS environment variable. partitiontest.PartitionTest(t) expectedDir, err := os.Getwd() if err != nil { @@ -55,7 +55,7 @@ func TestEnsureDataDirReturnsWhenRelPath1IsProvided(t *testing.T) { require.Equal(t, expectedDir, actualDir) } -func TestEnsureDataDirReturnsWhenRelPath2IsProvided(t *testing.T) { +func TestEnsureDataDirReturnsWhenRelPath2IsProvided(t *testing.T) { // nolint:paralleltest // Sets shared OS environment variable. partitiontest.PartitionTest(t) expectedDir, err := os.Getwd() if err != nil { @@ -66,7 +66,7 @@ func TestEnsureDataDirReturnsWhenRelPath2IsProvided(t *testing.T) { require.Equal(t, expectedDir, actualDir) } -func TestEnsureDataDirReturnsWhenRelPath3IsProvided(t *testing.T) { +func TestEnsureDataDirReturnsWhenRelPath3IsProvided(t *testing.T) { // nolint:paralleltest // Sets shared OS environment variable. 
partitiontest.PartitionTest(t) expectedDir, err := os.Getwd() if err != nil { diff --git a/cmd/goal/formatting_test.go b/cmd/goal/formatting_test.go index f368c54c9a..29d27483aa 100644 --- a/cmd/goal/formatting_test.go +++ b/cmd/goal/formatting_test.go @@ -25,6 +25,8 @@ import ( func TestUnicodePrintable(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + testUnicodePrintableStrings := []struct { testString string isPrintable bool @@ -45,6 +47,7 @@ func TestUnicodePrintable(t *testing.T) { func TestNewAppCallBytes(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() acb := newAppCallBytes("int:3") require.Equal(t, "int", acb.Encoding) @@ -57,6 +60,8 @@ func TestNewAppCallBytes(t *testing.T) { func TestNewBoxRef(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + br := newBoxRef("str:hello") require.EqualValues(t, 0, br.appID) require.Equal(t, "str", br.name.Encoding) @@ -73,6 +78,8 @@ func TestNewBoxRef(t *testing.T) { func TestStringsToBoxRefs(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + brs := stringsToBoxRefs([]string{"77,str:hello", "55,int:6", "int:88"}) require.EqualValues(t, 77, brs[0].appID) require.EqualValues(t, 55, brs[1].appID) @@ -108,7 +115,9 @@ func TestBytesToAppCallBytes(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.expected, func(t *testing.T) { + t.Parallel() acb := encodeBytesAsAppCallBytes(tc.input) require.Equal(t, tc.expected, acb) }) diff --git a/cmd/goal/inspect_test.go b/cmd/goal/inspect_test.go index 7f4eb7e1c8..9a153bebb1 100644 --- a/cmd/goal/inspect_test.go +++ b/cmd/goal/inspect_test.go @@ -30,6 +30,8 @@ import ( func TestInspect(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + var err error var empty transactions.SignedTxn diff --git a/cmd/goal/node_test.go b/cmd/goal/node_test.go index aa3829cfbf..e34568202d 100644 --- a/cmd/goal/node_test.go +++ b/cmd/goal/node_test.go @@ -33,6 +33,7 @@ var isAlnum = regexp.MustCompile(`^[a-zA-Z0-9_]*$`) func TestGetMissingCatchpointLabel(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() tests := []struct { name string URL string @@ -84,7 +85,9 @@ func TestGetMissingCatchpointLabel(t *testing.T) { } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { + t.Parallel() ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, test.expectedErr, test.statusCode) })) diff --git a/cmd/tealdbg/cdtSession_test.go b/cmd/tealdbg/cdtSession_test.go index 89a2ce1b04..e4cae925cd 100644 --- a/cmd/tealdbg/cdtSession_test.go +++ b/cmd/tealdbg/cdtSession_test.go @@ -33,6 +33,8 @@ import ( func TestCdtSessionProto11Common(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -97,6 +99,8 @@ func TestCdtSessionProto11Common(t *testing.T) { func TestCdtSessionProto11Breakpoints(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -182,6 +186,8 @@ func TestCdtSessionProto11Breakpoints(t *testing.T) { func TestCdtSessionProto11Events(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -216,6 +222,8 @@ func TestCdtSessionProto11Events(t *testing.T) { func TestCdtSessionProto11Controls(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -260,6 
+268,8 @@ func TestCdtSessionProto11Controls(t *testing.T) { func TestCdtSessionProto11Evaluate(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -299,6 +309,8 @@ func TestCdtSessionProto11Evaluate(t *testing.T) { func TestCdtSessionProto11CallOnFunc(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -388,6 +400,8 @@ func TestCdtSessionProto11CallOnFunc(t *testing.T) { func TestCdtSessionProto11GetProps(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -422,6 +436,8 @@ func TestCdtSessionProto11GetProps(t *testing.T) { func TestCdtSessionStateToEvent(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) @@ -461,6 +477,7 @@ func TestCdtSessionStateToEvent(t *testing.T) { func TestCdtSessionGetObjects(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() sid := "test" dbg := MockDebugControl{} ch := make(chan Notification) diff --git a/cmd/tealdbg/cdtdbg_test.go b/cmd/tealdbg/cdtdbg_test.go index a4e8177359..0ef838f954 100644 --- a/cmd/tealdbg/cdtdbg_test.go +++ b/cmd/tealdbg/cdtdbg_test.go @@ -34,6 +34,8 @@ import ( func TestCdtHandlers(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + params := CdtFrontendParams{ router: mux.NewRouter(), apiAddress: "127.0.0.1:12345", @@ -151,6 +153,8 @@ func (c *MockDebugControl) GetStates(s *logic.DebugState) AppState { func TestCdtFrontendSessionStarted(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + params := CdtFrontendParams{ router: mux.NewRouter(), apiAddress: "127.0.0.1:12345", @@ -188,6 +192,8 @@ func TestCdtFrontendSessionStarted(t *testing.T) { func TestCdtAdapterSessionEnded(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + params := CdtFrontendParams{ router: mux.NewRouter(), apiAddress: "127.0.0.1:12345", diff --git a/cmd/tealdbg/debugger_test.go b/cmd/tealdbg/debugger_test.go index a73fb3f4f7..295a30b005 100644 --- a/cmd/tealdbg/debugger_test.go +++ b/cmd/tealdbg/debugger_test.go @@ -94,6 +94,8 @@ func (d *testDbgAdapter) eventLoop() { func TestDebuggerSimple(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + proto := config.Consensus[protocol.ConsensusV18] require.Greater(t, proto.LogicSigVersion, uint64(0)) debugger := MakeDebugger() @@ -148,6 +150,8 @@ func createSessionFromSource(t *testing.T, program string) *session { func TestSession(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + s := createSessionFromSource(t, "#pragma version %d\nint 1\ndup\n+\n") err := s.SetBreakpoint(2) require.NoError(t, err) @@ -199,6 +203,7 @@ func TestSession(t *testing.T) { // that call stack is inspected correctly. func TestCallStackControl(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() newTestCase := func() (*session, chan struct{}, func(), *int) { s := createSessionFromSource(t, "#pragma version %d\nlab1:\nint 1\ncallsub lab1\ndup\n+\n") @@ -344,6 +349,7 @@ func TestCallStackControl(t *testing.T) { }, } + // nolint:paralleltest // Linter is not following formulation of subtests. 
for name, f := range cases { t.Run(name, f) } @@ -351,6 +357,8 @@ func TestCallStackControl(t *testing.T) { func TestSourceMaps(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + s := createSessionFromSource(t, "#pragma version %d\nint 1\n") // Source and source map checks diff --git a/cmd/tealdbg/localLedger_test.go b/cmd/tealdbg/localLedger_test.go index cf53b53d84..05640abd4c 100644 --- a/cmd/tealdbg/localLedger_test.go +++ b/cmd/tealdbg/localLedger_test.go @@ -36,6 +36,8 @@ import ( // intermediate changes. func TestBalanceAdapterStateChanges(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) source := `#pragma version 2 diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go index 22fee98f60..01d5ca3b52 100644 --- a/cmd/tealdbg/local_test.go +++ b/cmd/tealdbg/local_test.go @@ -108,6 +108,8 @@ func allErrors(es []error) assert.Comparison { func TestTxnJSONInput(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) dp := DebugParams{ @@ -129,6 +131,8 @@ func TestTxnJSONInput(t *testing.T) { func TestTxnMessagePackInput(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) var txn transactions.SignedTxn @@ -281,6 +285,8 @@ func makeSampleBalanceRecord(addr basics.Address, assetIdx basics.AssetIndex, ap func TestBalanceJSONInput(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) addr, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -304,6 +310,8 @@ func TestBalanceJSONInput(t *testing.T) { func TestBalanceMessagePackInput(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) addr, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") a.NoError(err) @@ -332,6 +340,8 @@ func TestBalanceMessagePackInput(t *testing.T) { func TestDebugEnvironment(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -570,6 +580,8 @@ byte 0x676c6f62616c // global func TestDebugFromPrograms(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) txnBlob := []byte("[" + strings.Join([]string{txnSample, txnSample}, ",") + "]") @@ -649,6 +661,8 @@ func TestDebugFromPrograms(t *testing.T) { func TestRunMode(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) txnBlob := []byte("[" + strings.Join([]string{txnSample, txnSample}, ",") + "]") @@ -736,6 +750,8 @@ func TestRunMode(t *testing.T) { func TestDebugFromTxn(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -963,6 +979,8 @@ func checkBalanceAdapter( func TestLocalBalanceAdapter(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -1024,6 +1042,8 @@ func TestLocalBalanceAdapter(t *testing.T) { func TestLocalBalanceAdapterIndexer(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -1116,6 +1136,8 @@ func TestLocalBalanceAdapterIndexer(t *testing.T) { func 
TestDebugTxSubmit(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -1189,6 +1211,8 @@ int 1` func TestDebugFeePooling(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -1209,21 +1233,6 @@ int 1` a.NoError(err) prog := ops.Program - stxn := transactions.SignedTxn{ - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: transactions.Header{ - Sender: sender, - Note: []byte{1, 2, 3}, - }, - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: 0, - ApprovalProgram: prog, - ClearStateProgram: prog, - }, - }, - } - appIdx := basics.AppIndex(1) br := basics.BalanceRecord{ Addr: sender, @@ -1256,9 +1265,24 @@ int 1` } for _, test := range tests { + test := test t.Run(fmt.Sprintf("fee=%d", test.fee), func(t *testing.T) { - - stxn.Txn.Fee = basics.MicroAlgos{Raw: test.fee} + t.Parallel() + stxn := transactions.SignedTxn{ + Txn: transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: transactions.Header{ + Fee: basics.MicroAlgos{Raw: test.fee}, + Sender: sender, + Note: []byte{1, 2, 3}, + }, + ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ + ApplicationID: 0, + ApprovalProgram: prog, + ClearStateProgram: prog, + }, + }, + } encoded := protocol.EncodeJSON(&stxn) ds := DebugParams{ @@ -1274,7 +1298,7 @@ int 1` } local := MakeLocalRunner(nil) - err = local.Setup(&ds) + err := local.Setup(&ds) a.NoError(err) r := runAllResultFromInvocation(*local) @@ -1285,6 +1309,8 @@ int 1` func TestDebugCostPooling(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU") @@ -1331,18 +1357,6 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr appIdx := basics.AppIndex(1) trivialAppIdx := basics.AppIndex(2) - trivialStxn := transactions.SignedTxn{ - Txn: transactions.Transaction{ - Type: protocol.ApplicationCallTx, - Header: transactions.Header{ - Sender: sender, - Fee: basics.MicroAlgos{Raw: 1000}, - }, - ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ - ApplicationID: trivialAppIdx, - }, - }, - } br := basics.BalanceRecord{ Addr: sender, @@ -1381,12 +1395,26 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr }}, } for _, test := range tests { + test := test t.Run(fmt.Sprintf("txn-count=%d", test.additionalApps+1), func(t *testing.T) { + t.Parallel() txnBlob := protocol.EncodeMsgp(&stxn) for i := 0; i < test.additionalApps; i++ { val, err := getRandomAddress() a.NoError(err) - trivialStxn.Txn.Note = val[:] + trivialStxn := transactions.SignedTxn{ + Txn: transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: transactions.Header{ + Sender: sender, + Fee: basics.MicroAlgos{Raw: 1000}, + Note: val[:], + }, + ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{ + ApplicationID: trivialAppIdx, + }, + }, + } txnBlob = append(txnBlob, protocol.EncodeMsgp(&trivialStxn)...) 
} @@ -1403,7 +1431,7 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr } local := MakeLocalRunner(nil) - err = local.Setup(&ds) + err := local.Setup(&ds) a.NoError(err) test.expected(*local, runAllResultFromInvocation(*local)) @@ -1412,8 +1440,9 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr } func TestGroupTxnIdx(t *testing.T) { - partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) ddrBlob := `{ @@ -1511,8 +1540,9 @@ func TestGroupTxnIdx(t *testing.T) { } func TestRunAllGloads(t *testing.T) { - partitiontest.PartitionTest(t) + t.Parallel() + a := require.New(t) sourceA := `#pragma version 6 diff --git a/cmd/tealdbg/remote_test.go b/cmd/tealdbg/remote_test.go index 66d077981d..bf76e9c0da 100644 --- a/cmd/tealdbg/remote_test.go +++ b/cmd/tealdbg/remote_test.go @@ -33,6 +33,8 @@ import ( func TestRemoteAdapterHandlers(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + d := MakeDebugger() a := MakeRemoteHook(d) router := mux.NewRouter() diff --git a/cmd/tealdbg/server_test.go b/cmd/tealdbg/server_test.go index 81c6c28370..68dad2e781 100644 --- a/cmd/tealdbg/server_test.go +++ b/cmd/tealdbg/server_test.go @@ -124,13 +124,15 @@ func serverTestImpl(t *testing.T, run func(t *testing.T, ds *DebugServer) bool, require.NotNil(t, ds.server) } -func TestServerRemote(t *testing.T) { +func TestServerRemote(t *testing.T) { // nolint:paralleltest // Modifies global config (`port`). partitiontest.PartitionTest(t) + serverTestImpl(t, tryStartingServerRemote, &DebugParams{}) } -func TestServerLocal(t *testing.T) { +func TestServerLocal(t *testing.T) { // nolint:paralleltest // Modifies global config (`port`). partitiontest.PartitionTest(t) + txnBlob := []byte("[" + strings.Join([]string{txnSample, txnSample}, ",") + "]") dp := DebugParams{ ProgramNames: []string{"test"}, diff --git a/cmd/tealdbg/webdbg_test.go b/cmd/tealdbg/webdbg_test.go index d45ac9f217..40ddd41f84 100644 --- a/cmd/tealdbg/webdbg_test.go +++ b/cmd/tealdbg/webdbg_test.go @@ -30,6 +30,8 @@ import ( func TestWebPageFrontendHandlers(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + params := WebPageFrontendParams{ router: mux.NewRouter(), apiAddress: "127.0.0.1:12345", diff --git a/cmd/updater/version_test.go b/cmd/updater/version_test.go index a29658d537..62954d51cf 100644 --- a/cmd/updater/version_test.go +++ b/cmd/updater/version_test.go @@ -27,6 +27,8 @@ import ( func TestGetVersion(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + testValidVersion(t, "algonode_update_0.1.0.log", uint64(0x00010000)) testValidVersion(t, "algo_update_0.1.0", uint64(0x00010000)) testValidVersion(t, "algo_update_65535.1.0", uint64(0xFFFF00010000)) From ce375b18e78ed02b1415f4ebcfd7f329c498e314 Mon Sep 17 00:00:00 2001 From: Hang Su <87964331+ahangsu@users.noreply.github.com> Date: Thu, 9 Mar 2023 11:31:06 -0500 Subject: [PATCH 70/81] Enhancement: Disable LRU `flushPendingWrite` warning if disabled (#5184) --- ledger/acctonline.go | 2 +- ledger/acctupdates.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ledger/acctonline.go b/ledger/acctonline.go index dc7335c30b..6bc8125f95 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -193,7 +193,7 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou if !ao.disableCache { ao.baseOnlineAccounts.init(ao.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold) } else { - ao.baseOnlineAccounts.init(ao.log, 0, 0) + ao.baseOnlineAccounts.init(ao.log, 
0, 1) } return } diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 26649caff4..20431bce7a 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -974,9 +974,9 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold) au.baseKVs.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold) } else { - au.baseAccounts.init(au.log, 0, 0) - au.baseResources.init(au.log, 0, 0) - au.baseKVs.init(au.log, 0, 0) + au.baseAccounts.init(au.log, 0, 1) + au.baseResources.init(au.log, 0, 1) + au.baseKVs.init(au.log, 0, 1) } return } From bee25d7441f3ed15cda02bc64fc806c0e8cf7bd4 Mon Sep 17 00:00:00 2001 From: Will Winder Date: Thu, 9 Mar 2023 15:06:00 -0500 Subject: [PATCH 71/81] docs: Message pack information. (#5160) --- docs/messagepack.md | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 docs/messagepack.md diff --git a/docs/messagepack.md b/docs/messagepack.md new file mode 100644 index 0000000000..c81c19fde0 --- /dev/null +++ b/docs/messagepack.md @@ -0,0 +1,38 @@ +# Message Pack + +The Algorand protocol uses a modified message pack encoding for canonical +messages used throughout the system. Details about [Canonical Messagepack](https://github.com/algorandfoundation/specs/blob/6996ac344158ca90a430bc8601fc29b150b0aa3f/dev/crypto.md#canonical-msgpack) +can be found in the formal specification. What follows here are some of the +implementation details that are useful to know. + +## Why Message Pack + +Generally speaking, message pack is small and fast compared to many alternative +encodings. There are other encodings which would have also been suitable, but a +choice needed to be made. + +## Libraries + +### [algorand/go-codec](https://github.com/algorand/go-codec) +Forked from [ugorji/go](https://github.com/ugorji/go) + +This library uses `codec:` annotations in the go structs to define encodings. +It is used widely throughout the code for message pack and JSON encoding when +needed. It provides features that the builtin `encoding/json` package lacks, +including things like allowing integer map keys. + +### [algorand/msgp](https://github.com/algorand/msgp) +Forked from [tinylib/msgp](https://github.com/tinylib/msgp) + +This library is used to generate code to serialize and deserialize message pack +messages without using reflection. It has been modified to be compatible with +the `codec:` annotations used by go-codec. The generated methods are +significantly faster than the reflection-based go-codec versions. For a rough +idea of the performance difference, [here are some benchmarks for Transactions](https://github.com/algorand/go-algorand/pull/4266). + +## Code generation + +Install `msgp` with [install_buildtools.sh](scripts/buildtools/install_buildtools.sh) and use it with `make msgp`. + +The generated Marshal and Unmarshal utilities are located in `msgp_gen.go` +files. Update `MSGP_GENERATE` in the Makefile to add another package.
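A minimal, self-contained sketch of the round trip these annotations enable is shown below. The `payment` type, its field names, and the values are invented for illustration; the `codec:` tag syntax, the `_struct` options field, and the `MsgpackHandle`/`NewEncoderBytes`/`NewDecoderBytes` API follow the upstream ugorji/go interface that the fork retains.

```go
package main

import (
	"fmt"

	"github.com/algorand/go-codec/codec" // fork of ugorji/go
)

// payment is a hypothetical example type; real protocol structs live in
// data/transactions and use the same annotation style.
type payment struct {
	_struct struct{} `codec:",omitempty,omitemptyarray"`

	Sender   string `codec:"snd"`
	Receiver string `codec:"rcv"`
	Amount   uint64 `codec:"amt"`
}

func main() {
	var h codec.MsgpackHandle
	h.Canonical = true // canonical msgpack requires sorted map keys

	in := payment{Sender: "SENDER", Receiver: "RECEIVER", Amount: 1000}

	// Encode to msgpack bytes; omitempty fields are skipped entirely.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	// Decode back into a fresh struct.
	var out payment
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes, decoded: %+v\n", len(buf), out)
}
```

The msgp-generated `MarshalMsg`/`UnmarshalMsg` methods described above serve the same purpose without reflection, which is where the speedup comes from.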
From d28062360ca7931112c20e9b08317a340dd04851 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Thu, 9 Mar 2023 22:53:30 -0500 Subject: [PATCH 72/81] agreement: update AttachReceivedAt to handle compound (PP) messages (#5142) --- agreement/demux.go | 2 +- agreement/events.go | 17 +++++++++++- agreement/msgp_gen.go | 58 ++++++++++++++++++++-------------------- agreement/player_test.go | 54 ++++++++++++++++++++++++++++++++----- agreement/proposal.go | 5 ---- 5 files changed, 93 insertions(+), 43 deletions(-) diff --git a/agreement/demux.go b/agreement/demux.go index 8d96444d5d..33584490f7 100644 --- a/agreement/demux.go +++ b/agreement/demux.go @@ -201,7 +201,7 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat switch e.t() { case payloadVerified: e = e.(messageEvent).AttachValidatedAt(s.Clock.Since()) - case payloadPresent: + case payloadPresent, votePresent: e = e.(messageEvent).AttachReceivedAt(s.Clock.Since()) } }() diff --git a/agreement/events.go b/agreement/events.go index 0c858aa9f1..a4c717cbd8 100644 --- a/agreement/events.go +++ b/agreement/events.go @@ -946,7 +946,22 @@ func (e messageEvent) AttachValidatedAt(d time.Duration) messageEvent { return e } +// AttachReceivedAt looks for an unauthenticatedProposal inside a +// payloadPresent or votePresent messageEvent, and attaches the given +// time to the proposal's receivedAt field. func (e messageEvent) AttachReceivedAt(d time.Duration) messageEvent { - e.Input.UnauthenticatedProposal.receivedAt = d + if e.T == payloadPresent { + e.Input.UnauthenticatedProposal.receivedAt = d + } else if e.T == votePresent { + // Check for non-nil Tail, indicating this votePresent event + // contains a synthetic payloadPresent event that was attached + // to it by setupCompoundMessage. + if e.Tail != nil && e.Tail.T == payloadPresent { + // The tail event is payloadPresent, serialized together + // with the proposal vote as a single CompoundMessage + // using a protocol.ProposalPayloadTag network message. 
+ e.Tail.Input.UnauthenticatedProposal.receivedAt = d + } + } return e } diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go index 2fa1e11a0a..bf1c46f98c 100644 --- a/agreement/msgp_gen.go +++ b/agreement/msgp_gen.go @@ -3791,7 +3791,7 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values zb0004Len := uint32(29) - var zb0004Mask uint64 /* 39 bits */ + var zb0004Mask uint64 /* 38 bits */ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 { zb0004Len-- zb0004Mask |= 0x40 @@ -3854,59 +3854,59 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { } if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x800000 + zb0004Mask |= 0x400000 } if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x1000000 + zb0004Mask |= 0x800000 } if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x2000000 + zb0004Mask |= 0x1000000 } if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x4000000 + zb0004Mask |= 0x2000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x8000000 + zb0004Mask |= 0x4000000 } if len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0 { zb0004Len-- - zb0004Mask |= 0x10000000 + zb0004Mask |= 0x8000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 { zb0004Len-- - zb0004Mask |= 0x20000000 + zb0004Mask |= 0x10000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 { zb0004Len-- - zb0004Mask |= 0x40000000 + zb0004Mask |= 0x20000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x80000000 + zb0004Mask |= 0x40000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x100000000 + zb0004Mask |= 0x80000000 } if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x200000000 + zb0004Mask |= 0x100000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x400000000 + zb0004Mask |= 0x200000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { zb0004Len-- - zb0004Mask |= 0x800000000 + zb0004Mask |= 0x400000000 } if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false { zb0004Len-- - zb0004Mask |= 0x1000000000 + zb0004Mask |= 0x800000000 } // variable map header, size zb0004Len o = msgp.AppendMapHeader(o, zb0004Len) @@ -3993,32 +3993,32 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate) } - if (zb0004Mask & 0x800000) == 0 { // if not empty + if (zb0004Mask & 0x400000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o) } - if (zb0004Mask & 0x1000000) == 0 { // if not empty + if (zb0004Mask & 0x800000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o = 
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o) } - if (zb0004Mask & 0x2000000) == 0 { // if not empty + if (zb0004Mask & 0x1000000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o) } - if (zb0004Mask & 0x4000000) == 0 { // if not empty + if (zb0004Mask & 0x2000000) == 0 { // if not empty // string "sdpf" o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66) o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o) } - if (zb0004Mask & 0x8000000) == 0 { // if not empty + if (zb0004Mask & 0x4000000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o) } - if (zb0004Mask & 0x10000000) == 0 { // if not empty + if (zb0004Mask & 0x8000000) == 0 { // if not empty // string "spt" o = append(o, 0xa3, 0x73, 0x70, 0x74) if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil { @@ -4038,42 +4038,42 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) { o = zb0002.MarshalMsg(o) } } - if (zb0004Mask & 0x20000000) == 0 { // if not empty + if (zb0004Mask & 0x10000000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter) } - if (zb0004Mask & 0x40000000) == 0 { // if not empty + if (zb0004Mask & 0x20000000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp) } - if (zb0004Mask & 0x80000000) == 0 { // if not empty + if (zb0004Mask & 0x40000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MarshalMsg(o) } - if (zb0004Mask & 0x100000000) == 0 { // if not empty + if (zb0004Mask & 0x80000000) == 0 { // if not empty // string "txn256" o = append(o, 0xa6, 0x74, 0x78, 0x6e, 0x32, 0x35, 0x36) o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MarshalMsg(o) } - if (zb0004Mask & 0x200000000) == 0 { // if not empty + if (zb0004Mask & 0x100000000) == 0 { // if not empty // string "txns" o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73) o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o) } - if (zb0004Mask & 0x400000000) == 0 { // if not empty + if (zb0004Mask & 0x200000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o) } - if (zb0004Mask & 0x800000000) == 0 { // if not empty + if (zb0004Mask & 0x400000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o) } - if (zb0004Mask & 0x1000000000) == 0 { // if not empty + if (zb0004Mask & 0x800000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove) diff --git a/agreement/player_test.go b/agreement/player_test.go index 83368c3d0d..75987c2ed0 100644 --- a/agreement/player_test.go +++ b/agreement/player_test.go @@ -3244,15 
+3244,14 @@ func TestPlayerRetainsReceivedValidatedAt(t *testing.T) { pWhite, pM, helper := setupP(t, r-1, p, soft) pP, pV := helper.MakeRandomProposalPayload(t, r-1) - // send a payload - // store an arbitrary proposal/payload + // send voteVerified message vVote := helper.MakeVerifiedVote(t, 0, r-1, p, propose, *pV) inMsg := messageEvent{T: voteVerified, Input: message{Vote: vVote, UnauthenticatedVote: vVote.u()}} err, panicErr := pM.transition(inMsg) require.NoError(t, err) require.NoError(t, panicErr) - // payloadPresent + // send payloadPresent message m := message{UnauthenticatedProposal: pP.u()} inMsg = messageEvent{T: payloadPresent, Input: m} inMsg = inMsg.AttachReceivedAt(time.Second) @@ -3260,14 +3259,55 @@ func TestPlayerRetainsReceivedValidatedAt(t *testing.T) { require.NoError(t, err) require.NoError(t, panicErr) + assertCorrectReceivedAtSet(t, pWhite, pM, helper, r, p, pP, pV, m) +} + +// test that ReceivedAt and ValidateAt timing information are retained in proposalStore +// when the payloadPresent (as part of the CompoundMessage encoding used by PP messages) +// and payloadVerified events are processed, and that both timings +// are available when the ensureAction is called for the block. +func TestPlayerRetainsReceivedValidatedAtPP(t *testing.T) { + partitiontest.PartitionTest(t) + + const r = round(20239) + const p = period(1001) + pWhite, pM, helper := setupP(t, r-1, p, soft) + pP, pV := helper.MakeRandomProposalPayload(t, r-1) + + // create a PP message for an arbitrary proposal/payload similar to setupCompoundMessage + vVote := helper.MakeVerifiedVote(t, 0, r-1, p, propose, *pV) + voteMsg := message{Vote: vVote, UnauthenticatedVote: vVote.u()} + proposalMsg := message{UnauthenticatedProposal: pP.u()} + compoundMsg := messageEvent{T: votePresent, Input: voteMsg, + Tail: &messageEvent{T: payloadPresent, Input: proposalMsg}} + inMsg := compoundMsg.AttachReceivedAt(time.Second) // call AttachReceivedAt like demux would + err, panicErr := pM.transition(inMsg) + require.NoError(t, err) + require.NoError(t, panicErr) + + // make sure vote verify requests + verifyEvent := ev(cryptoAction{T: verifyVote, M: voteMsg, Round: r - 1, Period: p, Step: propose, TaskIndex: 1}) + require.Truef(t, pM.getTrace().Contains(verifyEvent), "Player should verify vote") + + // send voteVerified + inMsg = messageEvent{T: voteVerified, Input: voteMsg, TaskIndex: 1} + err, panicErr = pM.transition(inMsg) + require.NoError(t, err) + require.NoError(t, panicErr) + + assertCorrectReceivedAtSet(t, pWhite, pM, helper, r, p, pP, pV, proposalMsg) +} + +func assertCorrectReceivedAtSet(t *testing.T, pWhite *player, pM ioAutomata, helper *voteMakerHelper, + r round, p period, pP *proposal, pV *proposalValue, m message) { // make sure payload verify request - verifyEvent := ev(cryptoAction{T: verifyPayload, M: m, TaskIndex: 0}) + verifyEvent := ev(cryptoAction{T: verifyPayload, M: m, Round: r - 1, Period: p, Step: propose, TaskIndex: 0}) require.Truef(t, pM.getTrace().Contains(verifyEvent), "Player should verify payload") // payloadVerified - inMsg = messageEvent{T: payloadVerified, Input: message{Proposal: *pP}, Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion}} - inMsg = inMsg.AttachValidatedAt(2 * time.Second) - err, panicErr = pM.transition(inMsg) + inMsg := messageEvent{T: payloadVerified, Input: message{Proposal: *pP}, Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion}} + inMsg = inMsg.AttachValidatedAt(2 * time.Second) // call AttachValidatedAt like demux would 
+ err, panicErr := pM.transition(inMsg) require.NoError(t, err) require.NoError(t, panicErr) diff --git a/agreement/proposal.go b/agreement/proposal.go index ec382fe947..bf021f2cfe 100644 --- a/agreement/proposal.go +++ b/agreement/proposal.go @@ -99,11 +99,6 @@ type proposal struct { // validated (and thus was ready to be delivered to the state // machine), relative to the zero of that round. validatedAt time.Duration - - // receivedAt indicates the time at which this proposal was - // delivered to the agreement package (as a messageEvent), - // relative to the zero of that round. - receivedAt time.Duration } func makeProposal(ve ValidatedBlock, pf crypto.VrfProof, origPer period, origProp basics.Address) proposal { From 8bc462a16b2bb409246438fa13ea953cd2a4859a Mon Sep 17 00:00:00 2001 From: Will Winder Date: Fri, 10 Mar 2023 09:59:58 -0500 Subject: [PATCH 73/81] feat(algocfg): add development profile and profile descriptions. (#5164) --- cmd/algocfg/profileCommand.go | 101 +++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 27 deletions(-) diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go index ef64dda0be..2cf467d060 100644 --- a/cmd/algocfg/profileCommand.go +++ b/cmd/algocfg/profileCommand.go @@ -19,39 +19,70 @@ package main import ( "bufio" "fmt" - "github.com/spf13/cobra" "os" "path/filepath" "strings" + "github.com/spf13/cobra" + "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/util/codecs" ) -// profileConfigUpdater updates the provided config for non-defaults in a given profile -type profileConfigUpdater func(cfg config.Local) config.Local - -// defaultConfigUpdater leaves all default values in place -func defaultConfigUpdater(cfg config.Local) config.Local { - return cfg -} - -// relayConfigUpdater alters config values to set up a relay node -func relayConfigUpdater(cfg config.Local) config.Local { - cfg.Archival = true - cfg.EnableLedgerService = true - cfg.EnableBlockService = true - cfg.NetAddress = "4160" - return cfg +// configUpdater updates the provided config for non-defaults in a given profile +type configUpdater struct { + updateFunc func(cfg config.Local) config.Local + description string } var ( + development = configUpdater{ + description: "Build on Algorand.", + updateFunc: func(cfg config.Local) config.Local { + cfg.EnableExperimentalAPI = true + cfg.EnableDeveloperAPI = true + return cfg + }, + } + + conduit = configUpdater{ + description: "Provide data for the Conduit tool.", + updateFunc: func(cfg config.Local) config.Local { + cfg.EnableFollowMode = true + cfg.MaxAcctLookback = 64 + cfg.CatchupParallelBlocks = 64 + return cfg + }, + } + + participation = configUpdater{ + description: "Participate in consensus or simply ensure chain health by validating blocks.", + updateFunc: func(cfg config.Local) config.Local { + cfg.CatchupBlockValidateMode = 0b1100 + return cfg + }, + } + + relay = configUpdater{ + description: "Relay consensus messages across the network and support catchup.", + updateFunc: func(cfg config.Local) config.Local { + cfg.Archival = true + cfg.EnableLedgerService = true + cfg.EnableBlockService = true + cfg.NetAddress = "4160" + return cfg + }, + } + // profileNames are the supported pre-configurations of config values - profileNames = map[string]profileConfigUpdater{ - "relay": relayConfigUpdater, - "default": defaultConfigUpdater, + profileNames = map[string]configUpdater{ + "participation": participation, + "conduit": 
conduit, "relay": relay, "development": development, } + forceUpdate bool ) @@ -64,8 +95,19 @@ func init() { var profileCmd = &cobra.Command{ Use: "profile", - Short: "Manipulate config profiles", - Args: cobra.NoArgs, + Short: "Generate config.json file from a profile.", + Long: `Initialize algod config.json files based on a usage profile. + +The config file generated by these profiles can be used as a starting point +for a node's configuration. The defaults for a given profile should be treated +as supplemental to the documentation; you should review the documentation to +understand what the settings are doing. + +For more details about configuration settings refer to the developer portal: +https://developer.algorand.org/docs/run-a-node/reference/config/ + +Profiles are subject to change or removal.`, + Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { cmd.HelpFunc()(cmd, args) }, @@ -73,20 +115,25 @@ var profileCmd = &cobra.Command{ var listProfileCmd = &cobra.Command{ Use: "list", - Short: "List config profiles", + Short: "A list of valid config profiles and a short description.", Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { - var profiles string + longest := 0 for key := range profileNames { - profiles += fmt.Sprintf("%s ", key) + if len(key) > longest { + longest = len(key) + } + } + + for key, value := range profileNames { + reportInfof("%-*s %s", longest, key, value.description) } - reportInfof(profiles) }, } var setProfileCmd = &cobra.Command{ Use: "set", - Short: "Set preconfigured config defaults", + Short: "Set config.json file from a profile.", Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { datadir.OnDataDirs(func(dataDir string) { @@ -120,7 +167,7 @@ var setProfileCmd = &cobra.Command{ func getConfigForArg(configType string) (config.Local, error) { cfg := config.GetDefaultLocal() if updater, ok := profileNames[configType]; ok { - return updater(cfg), nil + return updater.updateFunc(cfg), nil } return config.Local{}, fmt.Errorf("invalid profile type %v", configType) } From a3536913ec128551abe8b04d2ff901466efcfed0 Mon Sep 17 00:00:00 2001 From: Will Winder Date: Fri, 10 Mar 2023 10:33:55 -0500 Subject: [PATCH 74/81] Docs: add follower node documentation. (#5181) --- docs/follower_node.md | 63 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 docs/follower_node.md diff --git a/docs/follower_node.md b/docs/follower_node.md new file mode 100644 index 0000000000..9c58a7a82f --- /dev/null +++ b/docs/follower_node.md @@ -0,0 +1,63 @@ +# Follower Node + +When started with `"EnableFollowMode": true`, algod starts with a special +property which allows it to be paused. This allows software to be written +which runs synchronously with a node. If you need account balances at each +round, or at a particular round, this is the only way to do it. On start +the node will be paused; the REST API is used to resume it and select a new +round. + +It also allows fetching a `Ledger State Delta` object for recent rounds. +This object is not designed for external users, but may be useful for +advanced applications if you're willing to figure things out. It contains +complete information for transitioning a database to the next round, +including new account balances, changes to application and asset states, +and new box information. Such information was previously unavailable to +application developers. + +This mode has a number of [restrictions](#restrictions), which are described +below.
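To make the pause/resume cycle concrete, below is a rough sketch of an application driving a follower node through the sync-round and delta endpoints documented in the Usage section that follows. The address, token, and starting round are illustrative assumptions, not values from this repository.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

const (
	nodeAddr = "http://localhost:8080"   // assumed algod address
	apiToken = "contents-of-algod-token" // assumed API token
)

// call issues one authenticated request against the node's REST API.
func call(method, path string) ([]byte, error) {
	req, err := http.NewRequest(method, nodeAddr+path, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-Algo-API-Token", apiToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s %s: %s", method, path, resp.Status)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	for round := uint64(1); ; round++ {
		// Pin the ledger so data for this round stays available.
		if _, err := call("POST", fmt.Sprintf("/v2/ledger/sync/%d", round)); err != nil {
			panic(err)
		}
		// Fetch the Ledger State Delta for the pinned round.
		delta, err := call("GET", fmt.Sprintf("/v2/deltas/%d", round))
		if err != nil {
			panic(err)
		}
		fmt.Printf("round %d: %d bytes of state delta\n", round, len(delta))
		// ...apply the delta to an application database here...
	}
}
```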
+ +Follower mode was initially created to be a data source for [Conduit](https://github.com/algorand/conduit). + +## Configuration + +Behavior is controlled with the `config.json` file: + +| property | description | +|----------|-------------| +| EnableFollowMode | When set to `true` the node starts as a network follower. | +| MaxAcctLookback | The number of additional `Ledger State Delta` objects available. The default can be used; increasing to 64 or higher could help performance. | +| CatchupParallelBlocks | The number of blocks that are fetched concurrently. The default can be used; increasing to 64 or higher could help performance. | + +## Usage + +On startup, a follower node will be paused (synchronized) with its ledger's +current round. For a new deployment configured as a follower node, the +initial sync round is 0. When a sync round is set, the node advances +`MaxAcctLookback-1` rounds. The node is synchronized for the availability +of `Ledger State Delta` data. This means the sync round is the minimum +available round, and the node advances ahead of it to cache future rounds. + +New public endpoints are available to control the sync round: +* `GET /v2/ledger/sync` - fetch the current sync round. +* `DELETE /v2/ledger/sync` - resume normal catchup by deleting the sync round. +* `POST /v2/ledger/sync/{round}` - set the sync round. + +The `Ledger State Delta` is not designed for external consumption, but may +still be useful for advanced applications. When the sync round is set, this +endpoint can be used to fetch the `Ledger State Delta` for that round and up +to `MaxAcctLookback - 1` ahead of it: +* `GET /v2/deltas/{round}` - fetch the raw Ledger State Delta; optionally provide `format=msgp` for the internal msgp encoded object. + +## Restrictions + +The follower node was stripped of all functionality not directly related to +assisting with data-gathering capabilities. Since it is designed to run +alongside another application, it was made as lightweight as possible. +Other restrictions relate to the fact that this node is designed to be +paused, so there are no guarantees that its internal state matches the +current round of consensus. + +In particular, the follower node cannot participate in consensus or send +transactions to the network. Any attempt to register participation keys or +submit transactions will be rejected.
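Rounding out the endpoints above, the current sync round can also be inspected or cleared in the same style. This sketch reuses the hypothetical `call` helper and constants from the earlier example; clearing the sync round returns the node to normal catchup.

```go
// resumeCatchup reads the current sync round, then deletes it so the node
// resumes normal catchup. It assumes the call() helper and the illustrative
// address/token constants from the sketch above.
func resumeCatchup() error {
	current, err := call("GET", "/v2/ledger/sync")
	if err != nil {
		return err
	}
	fmt.Printf("current sync round: %s\n", current)
	_, err = call("DELETE", "/v2/ledger/sync")
	return err
}
```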
From 485ad921be5f6d4d40cd29ec36f3db929180fa22 Mon Sep 17 00:00:00 2001 From: Eric Warehime Date: Fri, 10 Mar 2023 08:05:19 -0800 Subject: [PATCH 75/81] devmode: Fix devmode networking (#5182) --- node/node.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index be3b8156db..abff864d4e 100644 --- a/node/node.go +++ b/node/node.go @@ -535,7 +535,10 @@ func (node *AlgorandFullNode) broadcastSignedTxGroup(txgroup []transactions.Sign if err != nil { logging.Base().Infof("unable to pin transaction: %v", err) } - + // DevMode nodes do not broadcast txns to the network + if node.devMode { + return nil + } var enc []byte var txids []transactions.Txid for _, tx := range txgroup { From c73ef66d8c60f27b9c927a75f59e1e3131eee201 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 10 Mar 2023 12:05:03 -0500 Subject: [PATCH 76/81] ledger: fix error shadowing in onlineAccountsNewRoundImpl (#5188) --- data/transactions/logic/eval.go | 2 +- data/transactions/logic/evalAppTxn_test.go | 1 + data/transactions/logic/evalStateful_test.go | 6 ++ data/transactions/logic/eval_test.go | 1 + ledger/acctdeltas.go | 9 +- ledger/acctdeltas_test.go | 101 +++++++++++++++++++ ledger/acctonline.go | 1 + ledger/internal/appcow.go | 8 +- 8 files changed, 124 insertions(+), 5 deletions(-) diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 42f5669c3b..3de0f101d6 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -709,7 +709,7 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam return false, nil, errors.New("no ledger in contract eval") } if params.SigLedger == nil { - params.SigLedger = params.Ledger + return false, nil, errors.New("no sig ledger in contract eval") } if aid == 0 { return false, nil, errors.New("0 appId in contract eval") diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go index b33a97d742..2886b57a52 100644 --- a/data/transactions/logic/evalAppTxn_test.go +++ b/data/transactions/logic/evalAppTxn_test.go @@ -2634,6 +2634,7 @@ itxn_submit txg := []transactions.SignedTxnWithAD{tx} ep := NewEvalParams(txg, MakeTestProto(), &transactions.SpecialAddresses{}) ep.Ledger = ledger + ep.SigLedger = ledger TestApp(t, callpay3+"int 1", ep, "insufficient balance") // inner contract needs money ledger.NewAccount(appAddr(222), 1_000_000) diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index a2f33cc909..f6ab14a078 100644 --- a/data/transactions/logic/evalStateful_test.go +++ b/data/transactions/logic/evalStateful_test.go @@ -362,6 +362,7 @@ func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn, } ledger.Reset() ep.Ledger = ledger + ep.SigLedger = ledger testAppsBytes(t, codes, ep, expected...) 
} @@ -1301,6 +1302,7 @@ intc_1 }, ) ep.Ledger = ledger + ep.SigLedger = ledger saved := ops.Program[firstCmdOffset] require.Equal(t, OpsByName[0]["intc_0"].Opcode, saved) @@ -1353,6 +1355,7 @@ func TestAppLocalStateReadWrite(t *testing.T) { }, ) ep.Ledger = ledger + ep.SigLedger = ledger ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{}) ledger.NewLocals(txn.Txn.Sender, 100) @@ -1733,6 +1736,7 @@ int 0x77 }, ) ep.Ledger = ledger + ep.SigLedger = ledger ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{}) delta := testApp(t, source, ep) @@ -1908,6 +1912,7 @@ int 7 ledger := NewLedger(nil) ledger.NewAccount(txn.Txn.Sender, 1) ep.Ledger = ledger + ep.SigLedger = ledger ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{}) delta := testApp(t, source, ep) @@ -2111,6 +2116,7 @@ int 1 }, ) ep.Ledger = ledger + ep.SigLedger = ledger ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{}) ledger.NewLocals(txn.Txn.Sender, 100) ledger.NewAccount(txn.Txn.Receiver, 1) diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 016940605e..5d5f321280 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -5404,6 +5404,7 @@ func TestOpJSONRef(t *testing.T) { ledger.NewApp(txn.Txn.Receiver, 0, basics.AppParams{}) ep := defaultEvalParams(txn) ep.Ledger = ledger + ep.SigLedger = ledger testCases := []struct { source string previousVersErrors []Expect diff --git a/ledger/acctdeltas.go b/ledger/acctdeltas.go index f1e10d21ce..fcc937e01f 100644 --- a/ledger/acctdeltas.go +++ b/ledger/acctdeltas.go @@ -1000,8 +1000,9 @@ func onlineAccountsNewRoundImpl( err = fmt.Errorf("empty voting data for online account %s: %v", data.address.String(), newAcct) } else { // create a new entry. + var ref trackerdb.OnlineAccountRef normBalance := newAcct.NormalizedOnlineBalance(proto) - ref, err := writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) + ref, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) if err == nil { updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, @@ -1025,7 +1026,8 @@ func onlineAccountsNewRoundImpl( if newStatus == basics.Online { err = fmt.Errorf("empty voting data but online account %s: %v", data.address.String(), newAcct) } else { - ref, err := writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0) + var ref trackerdb.OnlineAccountRef + ref, err = writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0) if err == nil { updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, @@ -1041,8 +1043,9 @@ func onlineAccountsNewRoundImpl( } } else { if prevAcct.AccountData != newAcct { + var ref trackerdb.OnlineAccountRef normBalance := newAcct.NormalizedOnlineBalance(proto) - ref, err := writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) + ref, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid)) if err == nil { updated := trackerdb.PersistedOnlineAccountData{ Addr: data.address, diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go index 78de0b7fa1..146a8f1ac8 100644 --- a/ledger/acctdeltas_test.go +++ b/ledger/acctdeltas_test.go @@ -2797,6 +2797,107 @@ func TestOnlineAccountsDeletion(t *testing.T) { } } +type mockOnlineAccountsErrorWriter struct { +} + +var errMockOnlineAccountsErrorWriter = 
errors.New("synthetic err") + +func (w *mockOnlineAccountsErrorWriter) InsertOnlineAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseOnlineAccountData, updRound uint64, voteLastValid uint64) (ref trackerdb.OnlineAccountRef, err error) { + return nil, errMockOnlineAccountsErrorWriter +} + +func (w *mockOnlineAccountsErrorWriter) Close() {} + +// TestOnlineAccountsNewRoundError checks onlineAccountsNewRoundImpl propagates errors to the caller +func TestOnlineAccountsNewRoundError(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + writer := &mockOnlineAccountsErrorWriter{} + proto := config.Consensus[protocol.ConsensusCurrentVersion] + + addrA := ledgertesting.RandomAddress() + + // acct A is new, offline and then online => exercise new entry for account + deltaA := onlineAccountDelta{ + address: addrA, + newAcct: []trackerdb.BaseOnlineAccountData{ + { + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + }, + { + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 100}, + }, + }, + updRound: []uint64{1, 2}, + newStatus: []basics.Status{basics.Offline, basics.Online}, + } + updates := compactOnlineAccountDeltas{} + updates.deltas = append(updates.deltas, deltaA) + lastUpdateRound := basics.Round(1) + updated, err := onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound) + require.Error(t, err) + require.Equal(t, errMockOnlineAccountsErrorWriter, err) + require.Empty(t, updated) + + // update acct A => exercise "update" + deltaA2 := onlineAccountDelta{ + address: addrA, + newAcct: []trackerdb.BaseOnlineAccountData{ + { + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 200}, + }, + }, + updRound: []uint64{3}, + newStatus: []basics.Status{basics.Online}, + oldAcct: trackerdb.PersistedOnlineAccountData{ + Addr: addrA, + Ref: &mockEntryRef{}, + AccountData: trackerdb.BaseOnlineAccountData{ + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 100}, + }, + }, + } + updates = compactOnlineAccountDeltas{} + updates.deltas = append(updates.deltas, deltaA2) + lastUpdateRound = basics.Round(3) + updated, err = onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound) + require.Error(t, err) + require.Equal(t, errMockOnlineAccountsErrorWriter, err) + require.Empty(t, updated) + + // make acct A offline => exercise "deletion" + deltaA3 := onlineAccountDelta{ + address: addrA, + newAcct: []trackerdb.BaseOnlineAccountData{ + { + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + BaseVotingData: trackerdb.BaseVotingData{}, // empty + }, + }, + updRound: []uint64{4}, + newStatus: []basics.Status{basics.Offline}, + oldAcct: trackerdb.PersistedOnlineAccountData{ + Addr: addrA, + Ref: &mockEntryRef{}, + AccountData: trackerdb.BaseOnlineAccountData{ + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + BaseVotingData: trackerdb.BaseVotingData{VoteFirstValid: 200}, + }, + }, + } + updates = compactOnlineAccountDeltas{} + updates.deltas = append(updates.deltas, deltaA3) + lastUpdateRound = basics.Round(4) + updated, err = onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound) + require.Error(t, err) + require.Equal(t, errMockOnlineAccountsErrorWriter, err) + require.Empty(t, updated) +} + func randomBaseAccountData() trackerdb.BaseAccountData { vd := trackerdb.BaseVotingData{ VoteFirstValid: basics.Round(crypto.RandUint64()), diff --git a/ledger/acctonline.go 
diff --git a/ledger/acctonline.go b/ledger/acctonline.go index 6bc8125f95..2eb962f866 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -478,6 +478,7 @@ func (ao *onlineAccounts) postCommit(ctx context.Context, dcc *deferredCommitCon for _, persistedAcct := range dcc.updatedPersistedOnlineAccounts { ao.baseOnlineAccounts.write(persistedAcct) + // add account into onlineAccountsCache only if prior history exists ao.onlineAccountsCache.writeFrontIfExist( persistedAcct.Addr, cachedOnlineAccount{ diff --git a/ledger/internal/appcow.go b/ledger/internal/appcow.go index c43eb94f16..f843e17064 100644 --- a/ledger/internal/appcow.go +++ b/ledger/internal/appcow.go @@ -456,9 +456,15 @@ func MakeDebugBalances(l LedgerForCowBase, round basics.Round, proto protocol.Co func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx basics.AppIndex, program []byte) (pass bool, evalDelta transactions.EvalDelta, err error) { // Make a child cow to eval our program in calf := cb.child(1) - defer calf.recycle() + defer func() { + // get rid of references to the object that is about to be recycled + params.Ledger = nil + params.SigLedger = nil + calf.recycle() + }() params.Ledger = calf + params.SigLedger = calf // Eval the program pass, cx, err := logic.EvalContract(program, gi, aidx, params) From 7954f8ec7d9a8ce5dbc15c7854e4cc519bb1385a Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 10 Mar 2023 12:57:49 -0500 Subject: [PATCH 77/81] tests: fix single-payer-swap e2e subs test (#5189) --- test/scripts/e2e_subs/single-payer-swap.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/scripts/e2e_subs/single-payer-swap.sh b/test/scripts/e2e_subs/single-payer-swap.sh index e31ffc660f..a9949e225d 100755 --- a/test/scripts/e2e_subs/single-payer-swap.sh +++ b/test/scripts/e2e_subs/single-payer-swap.sh @@ -40,10 +40,10 @@ ${gcmd} clerk send -a 100 -f "${MOOCHER}" -t "${PAYER}" --fee 0 -o cheap.txn # Since goal was modified to allow zero when this feature was added, let's confirm # that it's not encoded (should be "omitempty") set +e -FOUND=$(msgpacktool -d < cheap.txn | grep fee) +FOUND=$(msgpacktool -d < cheap.txn | grep '"fee"') set -e if [[ $FOUND != "" ]]; then - date "+{scriptname} FAIL fee was improperly encoded $FOUND %Y%m%d_%H%M%S" + date "+${scriptname} FAIL fee was improperly encoded $FOUND %Y%m%d_%H%M%S" false fi From dba5a0820af0b4e721e821a3bba4ebc7d46baddd Mon Sep 17 00:00:00 2001 From: Hang Su <87964331+ahangsu@users.noreply.github.com> Date: Fri, 10 Mar 2023 12:59:24 -0500 Subject: [PATCH 78/81] Enhancement: minor change in `agreement/types.go` comment, numbers are not matching (#5186) --- agreement/types.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/agreement/types.go b/agreement/types.go index 450570c620..bc2decb7d0 100644 --- a/agreement/types.go +++ b/agreement/types.go @@ -68,9 +68,9 @@ const ( ) func (s step) nextVoteRanges() (lower, upper time.Duration) { - extra := recoveryExtraTimeout // eg 2500 ms - lower = deadlineTimeout // eg 17500 ms (15000 + 2500) - upper = lower + extra // eg 20000 ms + extra := recoveryExtraTimeout // eg 2000 ms + lower = deadlineTimeout // eg 17000 ms (15000 + 2000) + upper = lower + extra // eg 19000 ms for i := next; i < s; i++ { extra *= 2 @@ -78,8 +78,8 @@ upper = lower + extra } - // e.g. if s == 14 - // extra = 2 ^ 8 * 2500ms = 256 * 2.5 = 512 + 128 = 640s + // e.g. if s == 11 + // extra = 2 ^ 8 * 2000ms = 256 * 2.0 = 512s return lower, upper }
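The corrected comment is easy to verify mechanically: with next == 3, reaching s == 11 doubles extra eight times, i.e. 2^8 * 2000ms = 512s. A standalone sketch of the same loop, assuming deadlineTimeout of 17s, recoveryExtraTimeout of 2s, and next == 3 (values taken from the comment above, not from the real agreement configuration):

package main

import (
	"fmt"
	"time"
)

// Assumed constants mirroring the comment in agreement/types.go.
const (
	next                 = 3 // assumed first recovery step
	deadlineTimeout      = 17 * time.Second
	recoveryExtraTimeout = 2 * time.Second
)

// nextVoteRanges re-derives the doubling schedule described above.
func nextVoteRanges(s int) (lower, upper time.Duration) {
	extra := recoveryExtraTimeout // 2000 ms
	lower = deadlineTimeout       // 17000 ms
	upper = lower + extra         // 19000 ms
	for i := next; i < s; i++ {
		extra *= 2
		lower = upper
		upper = lower + extra
	}
	return lower, upper
}

func main() {
	lower, upper := nextVoteRanges(11)
	// 8 doublings: extra = 2^8 * 2s = 512s, so the final window is 8m32s wide.
	fmt.Println(lower, upper, upper-lower) // 8m47s 17m19s 8m32s
}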
From ae331194876e94e930c5990062c78700aca397de Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Fri, 10 Mar 2023 14:47:32 -0500 Subject: [PATCH 79/81] telemetry: fine-grained breakdown of AssembleBlockStats.StopReason (#5191) --- data/pools/transactionPool.go | 6 +++--- logging/telemetryspec/metric.go | 17 +++++++++++++---- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index c99391ccc4..bc0da751fd 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -888,7 +888,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim // if the transaction pool is more than two rounds behind, we don't want to wait. if pool.assemblyResults.roundStartedEvaluating <= round.SubSaturate(2) { pool.log.Infof("AssembleBlock: requested round is more than a single round ahead of the transaction pool %d <= %d-2", pool.assemblyResults.roundStartedEvaluating, round) - stats.StopReason = telemetryspec.AssembleBlockEmpty + stats.StopReason = telemetryspec.AssembleBlockPoolBehind pool.assemblyMu.Unlock() return pool.assembleEmptyBlock(round) } @@ -935,7 +935,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim if !pool.assemblyResults.ok { // it didn't. Lucky us - we already prepared an empty block, so we can return this right now. pool.log.Warnf("AssembleBlock: ran out of time for round %d", round) - stats.StopReason = telemetryspec.AssembleBlockTimeout + stats.StopReason = telemetryspec.AssembleBlockTimeoutEmpty if emptyBlockErr != nil { emptyBlockErr = fmt.Errorf("AssembleBlock: failed to construct empty block : %w", emptyBlockErr) } @@ -955,7 +955,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim return nil, ErrStaleBlockAssemblyRequest } else if pool.assemblyResults.roundStartedEvaluating == round.SubSaturate(1) { pool.log.Warnf("AssembleBlock: assembled block round did not catch up to requested round: %d != %d", pool.assemblyResults.roundStartedEvaluating, round) - stats.StopReason = telemetryspec.AssembleBlockTimeout + stats.StopReason = telemetryspec.AssembleBlockEvalOld return pool.assembleEmptyBlock(round) } else if pool.assemblyResults.roundStartedEvaluating < round { return nil, fmt.Errorf("AssembleBlock: assembled block round much behind requested round: %d != %d",
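The metric.go half of this patch, just below, splits what used to be a single generic stop reason into distinct constants matching the early-exit paths rewritten above. Roughly how the decision points line up, condensed into one hypothetical helper (the real logic is inlined across AssembleBlock, and these names are assumptions):

package main

import "fmt"

// Assumed mirrors of the telemetryspec constants introduced below.
const (
	assembleBlockTimeout      = "timeout"
	assembleBlockTimeoutEmpty = "timeout-empty"
	assembleBlockPoolBehind   = "pool-behind"
	assembleBlockEvalOld      = "eval-old"
)

// stopReason condenses the branches above: each early exit now records
// its own reason instead of collapsing into a generic "timeout".
func stopReason(poolBehind, assemblyOK, evalRoundOld bool) string {
	switch {
	case poolBehind: // pool is two or more rounds behind the request
		return assembleBlockPoolBehind
	case !assemblyOK: // deadline hit; an empty block is returned
		return assembleBlockTimeoutEmpty
	case evalRoundOld: // assembled block is one round too old
		return assembleBlockEvalOld
	default:
		return assembleBlockTimeout
	}
}

func main() {
	fmt.Println(stopReason(true, false, false)) // pool-behind
}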
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go index fcb6c1d45d..da5f4a7536 100644 --- a/logging/telemetryspec/metric.go +++ b/logging/telemetryspec/metric.go @@ -77,21 +77,30 @@ type StateProofStats struct { TxnSize int } -// AssembleBlockTimeout represents AssemblePayset exiting due to timeout +// AssembleBlockTimeout represents AssembleBlock exiting due to timeout const AssembleBlockTimeout = "timeout" +// AssembleBlockTimeoutEmpty represents AssembleBlock giving up after a timeout and returning an empty block +const AssembleBlockTimeoutEmpty = "timeout-empty" -// AssembleBlockFull represents AssemblePayset exiting due to block being full +// AssembleBlockFull represents AssembleBlock exiting due to block being full const AssembleBlockFull = "block-full" -// AssembleBlockEmpty represents AssemblePayset exiting due to no more txns +// AssembleBlockEmpty represents AssembleBlock exiting due to no more txns const AssembleBlockEmpty = "pool-empty" +// AssembleBlockPoolBehind represents the transaction pool being more than two rounds behind +const AssembleBlockPoolBehind = "pool-behind" + +// AssembleBlockEvalOld represents the assembled block that was returned being a round too old +const AssembleBlockEvalOld = "eval-old" + // AssembleBlockAbandon represents the block generation being abandoned since it won't be needed. const AssembleBlockAbandon = "block-abandon" const assembleBlockMetricsIdentifier Metric = "AssembleBlock" -// AssembleBlockMetrics is the set of metrics captured when we compute AssemblePayset +// AssembleBlockMetrics is the set of metrics captured when we compute AssembleBlock type AssembleBlockMetrics struct { AssembleBlockStats } From bba9be3532f7ae9198099c4a9a26befb95dd30d0 Mon Sep 17 00:00:00 2001 From: John Lee Date: Fri, 10 Mar 2023 15:15:47 -0500 Subject: [PATCH 80/81] algocfg: Bug fix - Add colon to indicate port specification (#5193) --- cmd/algocfg/profileCommand.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go index 2cf467d060..bbddfa26c7 100644 --- a/cmd/algocfg/profileCommand.go +++ b/cmd/algocfg/profileCommand.go @@ -70,7 +70,7 @@ var ( cfg.Archival = true cfg.EnableLedgerService = true cfg.EnableBlockService = true - cfg.NetAddress = "4160" + cfg.NetAddress = ":4160" return cfg }, } From 55e10f1fc457c2f6ee6aa6f17d0f1bd29547a940 Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Fri, 10 Mar 2023 20:17:51 +0000 Subject: [PATCH 81/81] Update the Version, BuildNumber, genesistimestamp.dat --- buildnumber.dat | 1 + genesistimestamp.dat | 1 + 2 files changed, 2 insertions(+) create mode 100644 buildnumber.dat create mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/buildnumber.dat @@ -0,0 +1 @@ +0 diff --git a/genesistimestamp.dat b/genesistimestamp.dat new file mode 100644 index 0000000000..c72c6a7795 --- /dev/null +++ b/genesistimestamp.dat @@ -0,0 +1 @@ +1558657885
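One note on the algocfg fix above: Go network listeners take host:port strings, so a bare "4160" is not a valid address, while ":4160" leaves the host empty and means port 4160 on all interfaces. A quick standard-library illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// ":4160" parses as an empty host plus port 4160, i.e. listen on
	// all interfaces. This is what the corrected profile intends.
	host, port, err := net.SplitHostPort(":4160")
	fmt.Printf("host=%q port=%q err=%v\n", host, port, err) // host="" port="4160" err=<nil>

	// A bare "4160" is rejected outright.
	_, _, err = net.SplitHostPort("4160")
	fmt.Println(err) // address 4160: missing port in address
}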