diff --git a/Taskfile.yml b/Taskfile.yml
index f166b406f..17308b13d 100644
--- a/Taskfile.yml
+++ b/Taskfile.yml
@@ -144,42 +144,42 @@ tasks:
         docker volume rm -f {{ .NAME }}
 
   # ************ dev ************
-  # dev:up:
-  #   desc: Start the dev environment
-  #   deps:
-  #     - task: build:docker
-  #   cmds:
-  #     - task: dev:up:nb
-
-  # dev:up:debug:
-  #   desc: Start the dev environment
-  #   deps:
-  #     - task: build:docker
-  #       vars: { VARIANT: 'debug' }
-  #   cmds:
-  #     - task: dev:up:nb
+  dev:up:
+    desc: Start the dev environment
+    deps:
+      - task: build:docker
+    cmds:
+      - task: dev:up:nb
 
-  # dev:up:nb:
-  #   desc: Start the dev environment without rebuilding docker image
-  #   env:
-  #     # NOTE: this timeout should be long enough to attach to debugger
-  #     KACT_WAIT_TIMEOUT: 20s
-  #   dir: test # different module
-  #   cmds:
-  #     - go test ./acceptance -run ^TestLocalDevSetup -timeout 12h -dev -v {{.CLI_ARGS}}
+  dev:up:debug:
+    desc: Start the dev environment (debug build)
+    deps:
+      - task: build:docker
+        vars: { VARIANT: 'debug' }
+    cmds:
+      - task: dev:up:nb
+
+  dev:up:nb:
+    desc: Start the dev environment without rebuilding docker image
+    env:
+      # NOTE: this timeout should be long enough to attach to debugger
+      KACT_WAIT_TIMEOUT: 20s
+    dir: test # different module
+    cmds:
+      - go test ./acceptance -run ^TestLocalDevSetup -timeout 12h -dev -v {{.CLI_ARGS}}
 
-  # dev:testnet:up:
-  #   desc: Start the dev environment(with testnet)
-  #   deps:
-  #     - task: build:docker
-  #   cmds:
-  #     - task: dev:testnet:up:nb
+  dev:testnet:up:
+    desc: Start the dev environment (with testnet)
+    deps:
+      - task: build:docker
+    cmds:
+      - task: dev:testnet:up:nb
 
-  # dev:testnet:up:nb:
-  #   desc: Start the dev environment(with testnet) without rebuilding docker image
-  #   dir: test # different module
-  #   cmds:
-  #     - go test ./integration -run ^TestLocalDevSetup$ -timeout 12h -dev -v {{.CLI_ARGS}}
+  dev:testnet:up:nb:
+    desc: Start the dev environment (with testnet) without rebuilding docker image
+    dir: test # different module
+    cmds:
+      - go test ./integration -run ^TestLocalDevSetup$ -timeout 12h -dev -v {{.CLI_ARGS}}
 
   # ************ test ************
   # test with build:docker task support passing CLI_ARGS to go test, e.g.
  #   task test:act -- -debug

diff --git a/app/setup/testnet.go b/app/setup/testnet.go
index c98cf8d64..de3b845f5 100644
--- a/app/setup/testnet.go
+++ b/app/setup/testnet.go
@@ -25,7 +25,7 @@ import (
 func TestnetCmd() *cobra.Command {
 	var numVals, numNVals int
-	var noPex bool
+	var noPex, uniquePorts bool
 	var startingPort uint64
 	var outDir string
 
@@ -33,7 +33,16 @@ func TestnetCmd() *cobra.Command {
 		Use:   "testnet",
 		Short: "Generate configuration for multiple nodes",
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return GenerateTestnetConfigs(outDir, numVals, numNVals, noPex, startingPort)
+			return GenerateTestnetConfigs(&TestnetConfig{
+				RootDir:      outDir,
+				NumVals:      numVals,
+				NumNVals:     numNVals,
+				NoPex:        noPex,
+				StartingPort: startingPort,
+			}, &ConfigOpts{
+				UniquePorts: uniquePorts,
+				DnsHost:     false,
+			})
 		},
 	}
 
@@ -50,14 +59,42 @@ func TestnetCmd() *cobra.Command {
 	cmd.Flags().BoolVar(&noPex, "no-pex", false, "disable peer exchange")
 	cmd.Flags().Uint64VarP(&startingPort, "port", "p", 6600, "starting P2P port for the nodes")
 	cmd.Flags().StringVarP(&outDir, "out-dir", "o", ".testnet", "output directory for generated node root directories")
-
+	cmd.Flags().BoolVarP(&uniquePorts, "unique-ports", "u", false, "use unique ports for each node")
 	return cmd
 }
 
-func GenerateTestnetConfigs(outDir string, numVals, numNVals int, noPex bool, startingPort uint64) error {
+type TestnetConfig struct {
+	RootDir        string
+	ChainID        string
+	NumVals        int
+	NumNVals       int
+	NoPex          bool
+	StartingPort   uint64
+	StartingIP     string
+	HostnamePrefix string
+	DnsNamePrefix  string // optional and only used if DnsHost is true (default: node)
+
+	Owner string
+}
+
+type ConfigOpts struct {
+	// UniquePorts is a flag to generate unique listening addresses
+	// (JSON-RPC, HTTP, Admin, P2P, node RPC) for each node.
+	// This is useful for testing multiple nodes on the same machine.
+	// If it is used for generating a single config, it has no effect.
+	UniquePorts bool
+
+	// DnsHost is a flag to use DNS hostname as host in the config
+	// instead of ip. It will be used together with DnsNamePrefix to generate
+	// hostnames.
+	// This is useful for testing nodes inside docker containers.
+	DnsHost bool
+}
+
+func GenerateTestnetConfigs(cfg *TestnetConfig, opts *ConfigOpts) error {
 	// ensure that the directory exists
 	// expand the directory path
-	outDir, err := node.ExpandPath(outDir)
+	outDir, err := node.ExpandPath(cfg.RootDir)
 	if err != nil {
 		return err
 	}
@@ -68,10 +105,10 @@ func GenerateTestnetConfigs(outDir string, numVals, numNVals int, noPex bool, st
 	var keys []crypto.PrivateKey
 	// generate the configuration for the nodes
-	for i := range numVals + numNVals {
+	for i := range cfg.NumVals + cfg.NumNVals {
 		// generate Keys, so that the connection strings and the validator set can be generated before the node config files are generated
 		var seed [32]byte
-		binary.LittleEndian.PutUint64(seed[:], startingPort+uint64(i))
+		binary.LittleEndian.PutUint64(seed[:], cfg.StartingPort+uint64(i))
 		seed = sha256.Sum256(seed[:])
 		rr := rand.NewChaCha8(seed)
 		priv := node.NewKey(&deterministicPRNG{ChaCha8: rr})
@@ -81,18 +118,44 @@ func GenerateTestnetConfigs(outDir string, numVals, numNVals int, noPex bool, st
 	// key 0 is leader
 	leaderPub := keys[0].Public()
 
+	var bootNodes []string
+	for i := range cfg.NumVals {
+		pubKey := keys[i].Public()
+
+		hostname := cfg.StartingIP
+		if cfg.StartingIP == "" {
+			hostname = "127.0.0.1"
+		}
+
+		if opts.DnsHost {
+			hostname = fmt.Sprintf("%s%d", cfg.DnsNamePrefix, i)
+		}
+
+		port := 6600
+		if opts.UniquePorts {
+			port = 6600 + i
+		}
+
+		bootNodes = append(bootNodes, node.FormatPeerString(pubKey.Bytes(), pubKey.Type(), hostname, port))
+	}
+
+	chainID := cfg.ChainID
+	if chainID == "" {
+		chainID = "kwil-testnet"
+	}
 	genConfig := &config.GenesisConfig{
-		ChainID:          "kwil-testnet",
+		ChainID:          chainID,
 		Leader:           leaderPub.Bytes(), // rethink this so it can be different key types?
-		Validators:       make([]*ktypes.Validator, numVals),
+		Validators:       make([]*ktypes.Validator, cfg.NumVals),
 		DisabledGasCosts: true,
 		JoinExpiry:       14400,
 		VoteExpiry:       108000,
 		MaxBlockSize:     6 * 1024 * 1024,
 		MaxVotesPerTx:    200,
+		DBOwner:          cfg.Owner,
 	}
-	for i := range numVals {
+	for i := range cfg.NumVals {
 		genConfig.Validators[i] = &ktypes.Validator{
 			PubKey: keys[i].Public().Bytes(),
 			Power:  1,
@@ -100,14 +163,19 @@ func GenerateTestnetConfigs(outDir string, numVals, numNVals int, noPex bool, st
 	}
 
 	// generate the configuration for the nodes
-	for i := range numVals + numNVals {
+	portOffset := 0
+	for i := range cfg.NumVals + cfg.NumNVals {
+		if opts.UniquePorts {
+			portOffset = i
+		}
 		err = GenerateNodeRoot(&NodeGenConfig{
-			PortOffset: i,
-			IP:         "127.0.0.1",
-			NoPEX:      noPex,
+			PortOffset: portOffset,
+			IP:         cfg.StartingIP,
+			NoPEX:      cfg.NoPex,
 			RootDir:    filepath.Join(outDir, fmt.Sprintf("node%d", i)),
 			NodeKey:    keys[i],
 			Genesis:    genConfig,
+			BootNodes:  bootNodes,
 		})
 		if err != nil {
 			return err
@@ -127,6 +195,7 @@ type NodeGenConfig struct {
 	Genesis *config.GenesisConfig
 
 	// TODO: gasEnabled, private p2p, auth RPC, join expiry, allocs, etc.
+	BootNodes []string
 }
 
 func GenerateNodeRoot(ncfg *NodeGenConfig) error {
@@ -144,16 +213,7 @@ func GenerateNodeRoot(ncfg *NodeGenConfig) error {
 	}
 	cfg.P2P.Pex = !ncfg.NoPEX
 
-	leaderPub, err := crypto.UnmarshalPublicKey(ncfg.Genesis.Leader, crypto.KeyTypeSecp256k1)
-	if err != nil {
-		return err
-	}
-
-	if !ncfg.NodeKey.Public().Equals(leaderPub) {
-		// make everyone connect to leader
-		cfg.P2P.BootNodes = []string{node.FormatPeerString(
-			leaderPub.Bytes(), leaderPub.Type(), cfg.P2P.IP, 6600)}
-	}
+	cfg.P2P.BootNodes = ncfg.BootNodes
 
 	// DB
 	dbPort := ncfg.DBPort

diff --git a/node/consensus/leader.go b/node/consensus/leader.go
index 723f309b5..6905d32b0 100644
--- a/node/consensus/leader.go
+++ b/node/consensus/leader.go
@@ -177,17 +177,20 @@ func (ce *ConsensusEngine) addVote(ctx context.Context, vote *vote, sender strin
 	defer ce.state.mtx.Unlock()
 
 	if ce.state.blkProp == nil {
-		return errors.New("not processing any block proposal at the moment")
+		ce.log.Warn("Error adding vote: not processing any block proposal at the moment")
+		return nil
 	}
 
 	// check if the vote is for the current height
 	if ce.state.blkProp.height != vote.height {
-		return errors.New("vote received for a different block height, ignore it")
+		ce.log.Warn("Error adding vote: vote received for a different block height, ignoring it", "height", vote.height)
+		return nil
 	}
 
 	// check if the vote is for the current block and from a validator
 	if ce.state.blkProp.blkHash != vote.blkHash {
-		return fmt.Errorf("vote received for an incorrect block %s", vote.blkHash.String())
+		ce.log.Warn("Error adding vote: vote received for a different block", "height", vote.height, "blkHash", vote.blkHash)
+		return nil
 	}
 
 	// Check if the vote is from a validator

diff --git a/test/driver/cli_driver.go b/test/driver/cli_driver.go
index ace73ee89..11cfe09f4 100644
--- a/test/driver/cli_driver.go
+++ b/test/driver/cli_driver.go
@@ -231,6 +231,18 @@ func (d *KwilCliDriver) Execute(ctx context.Context, dbid string, action string,
 	return out.TxHash, nil
 }
 
+func (d *KwilCliDriver) ExecuteSQL(ctx context.Context, sql string, params map[string]any) (types.Hash, error) {
+	// NOTE: params is currently unused; only the raw SQL is passed to the CLI
+	args := []string{"database", "execute", "--sql", sql}
+
+	cmd := d.newKwilCliCmd(args...)
+	out, err := mustRun[respTxHash](cmd, d.logger)
+	if err != nil {
+		return types.Hash{}, fmt.Errorf("failed to execute sql statement: %w", err)
+	}
+	return out.TxHash, nil
+}
+
 func (d *KwilCliDriver) QueryDatabase(_ context.Context, query string) (*types.QueryResult, error) {
 	args := []string{"database", "query", query}

diff --git a/test/driver/client_driver.go b/test/driver/client_driver.go
index 854296cd9..855ed1673 100644
--- a/test/driver/client_driver.go
+++ b/test/driver/client_driver.go
@@ -108,6 +108,14 @@ func (d *KwildClientDriver) Execute(ctx context.Context, dbid string, actionName
 	return rec, nil
 }
 
+func (d *KwildClientDriver) ExecuteSQL(ctx context.Context, sql string, params map[string]any) (types.Hash, error) {
+	rec, err := d.clt.ExecuteSQL(ctx, sql, params)
+	if err != nil {
+		return types.Hash{}, fmt.Errorf("error executing sql statement %s: %w", sql, err)
+	}
+	return rec, nil
+}
+
 func (d *KwildClientDriver) QueryDatabase(ctx context.Context, query string) (*types.QueryResult, error) {
 	return d.clt.Query(ctx, query, nil)
 }

diff --git a/test/integration/docker-compose-dev.yml b/test/integration/docker-compose-dev.yml
index 2b5ed3a4b..32ead25de 100644
--- a/test/integration/docker-compose-dev.yml
+++ b/test/integration/docker-compose-dev.yml
@@ -1,7 +1,7 @@
 version: "3"
 
 services:
-  # TODO: generate correspond number of nodes and exts by configuration
+  # TODO: generate corresponding number of nodes and exts by configuration
   node0:
     image: kwild:latest
    ports:
@@ -24,14 +24,13 @@ services:
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=ext1:50051
-      --app.admin-listen-addr=/tmp/admin.socket
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg0
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg0
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s

diff --git a/test/integration/docker-compose-migration.yml.template b/test/integration/docker-compose-migration.yml.template
index 4dc868951..05478ee05 100644
--- a/test/integration/docker-compose-migration.yml.template
+++ b/test/integration/docker-compose-migration.yml.template
@@ -1,12 +1,12 @@
 version: "3"
 
 services:
-  # TODO: generate correspond number of nodes and exts by configuration
+  # TODO: generate corresponding number of nodes by configuration
   new-node0:
     image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 0}}:{{end}}8484"
-      - "{{with .ExposedRPCPorts}}{{index . 0}}:{{end}}8584"
+      - "{{with .ExposedRPCPorts}}{{index . 0 | plus 100}}:{{end}}8584"
      - "6600"
    #env_file:
    # NOTE: docker compose by default will use `.env` file if presented
@@ -19,21 +19,19 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      new-ext1:
-        condition: service_started
      new-pg0:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=new-ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=new-pg0
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=new-pg0
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -61,7 +59,7 @@ services:
    image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 1}}:{{end}}8484"
-      - "{{with .ExposedRPCPorts}}{{index . 1}}:{{end}}8584"
+      - "{{with .ExposedRPCPorts}}{{index . 1 | plus 100}}:{{end}}8584"
      - "6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -72,21 +70,19 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      new-ext1:
-        condition: service_started
      new-pg1:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=new-ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=new-pg1
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=new-pg1
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -114,7 +110,7 @@ services:
    image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 2}}:{{end}}8484"
-      - "{{with .ExposedRPCPorts}}{{index . 2}}:{{end}}8584"
+      - "{{with .ExposedRPCPorts}}{{index . 2 | plus 100}}:{{end}}8584"
      - "6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -125,21 +121,19 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      new-ext1:
-        condition: service_started
      new-pg2:
        condition: service_healthy
    command: |
      --root=/app/kwil
-      --log-level=${LOG_LEVEL:-info}
      --log-format=plain
-      --app.extension-endpoints=new-ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=new-pg2
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --log-level=${LOG_LEVEL:-info}
+      --admin.listen=/tmp/admin.socket
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=new-pg2
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -164,13 +158,11 @@ services:
      retries: 10
 
  # This node is used to test the scenario where new node join the network & sync the blocks
-  # Removing the ext dependency as test-container docker compose creates a new project everytime we run
-  # docker compose, and the ext is defined in a seperate project, so service lookup is hard.
  new-node3:
    image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 3}}:{{end}}8484"
-      - "1{{with .ExposedRPCPorts}}{{index . 3}}:{{end}}8584"
+      - "{{with .ExposedRPCPorts}}{{index . 3 | plus 100}}:{{end}}8584"
      - "6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -181,21 +173,19 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      new-ext3:
-        condition: service_started
      new-pg3:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=new-ext3:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=new-pg3
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=new-pg3
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD-SHELL", "/app/kwil-cli utils chain-info"]
      interval: 2s

diff --git a/test/integration/docker-compose.yml.template b/test/integration/docker-compose.yml.template
index 6e7912fd2..debf151d3 100644
--- a/test/integration/docker-compose.yml.template
+++ b/test/integration/docker-compose.yml.template
@@ -1,13 +1,13 @@
 version: "3"
 
 services:
-  # TODO: generate correspond number of nodes and exts by configuration
+  # TODO: generate corresponding number of nodes and exts by configuration
  node0:
    image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 0}}:{{end}}8484"
      - "{{with .ExposedRPCPorts}}{{index . 0 | plus 100}}:{{end}}8584"
-      - "6600"
+      - "6600:6600"
    #env_file:
    # NOTE: docker compose by default will use `.env` file if presented
    environment:
@@ -19,21 +19,20 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      ext1:
-        condition: service_started
      pg0:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg0
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --rpc.listen=0.0.0.0:8484
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg0
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -61,8 +60,8 @@ services:
    image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 1}}:{{end}}8484"
-      - "{{with .ExposedRPCPorts}}{{index . 1}}:{{end}}8584"
-      - "6600"
+      - "{{with .ExposedRPCPorts}}{{index . 1 | plus 100}}:{{end}}8584"
+      - "6601:6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -72,21 +71,20 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      ext1:
-        condition: service_started
      pg1:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg1
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --rpc.listen=0.0.0.0:8484
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg1
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -115,7 +113,7 @@ services:
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 2}}:{{end}}8484"
      - "{{with .ExposedRPCPorts}}{{index . 2 | plus 100}}:{{end}}8584"
-      - "6600"
+      - "6602:6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -125,21 +123,20 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      ext1:
-        condition: service_started
      pg2:
        condition: service_healthy
    command: |
      --root=/app/kwil
-      --log-level=${LOG_LEVEL:-info}
      --log-format=plain
-      --app.extension-endpoints=ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg2
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --log-level=${LOG_LEVEL:-info}
+      --admin.listen=/tmp/admin.socket
+      --rpc.listen=0.0.0.0:8484
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg2
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -165,13 +162,14 @@ services:
 
  # This node is used to test the scenario where new node join the network & sync the blocks
  # Removing the ext dependency as test-container docker compose creates a new project everytime we run
+  # docker compose, and the ext is defined in a separate project, so service lookup is hard.
  node3:
    image: {{ .DockerImage }}
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 3}}:{{end}}8484"
      - "{{with .ExposedRPCPorts}}{{index . 3 | plus 100}}:{{end}}8584"
-      - "6600"
+      - "6603:6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -181,21 +179,20 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      ext3:
-        condition: service_started
      pg3:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=ext3:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg3
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --rpc.listen=0.0.0.0:8484
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg3
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -224,7 +221,7 @@ services:
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 4}}:{{end}}8484"
      - "{{with .ExposedRPCPorts}}{{index . 4 | plus 100}}:{{end}}8584"
-      - "6600"
+      - "6604:6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -234,21 +231,20 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      ext1:
-        condition: service_started
      pg4:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg4
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --rpc.listen=0.0.0.0:8484
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg4
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s
@@ -277,7 +273,7 @@ services:
    ports:
      - "{{with .ExposedRPCPorts}}{{index . 5}}:{{end}}8484"
      - "{{with .ExposedRPCPorts}}{{index . 5 | plus 100}}:{{end}}8584"
-      - "6600"
+      - "6605:6600"
    environment:
      GORACE: "halt_on_error=1 log_path=/app/kwil/datarace"
    volumes:
@@ -287,21 +283,20 @@ services:
    networks:
      - {{ .Network }}
    depends_on:
-      ext1:
-        condition: service_started
      pg5:
        condition: service_healthy
    command: |
      --root=/app/kwil
      --log-format=plain
      --log-level=${LOG_LEVEL:-info}
-      --app.extension-endpoints=ext1:50051
-      --chain.p2p.listen-addr=tcp://0.0.0.0:26656
-      --chain.rpc.listen-addr=tcp://0.0.0.0:26657
-      --app.pg-db-host=pg5
-      --app.pg-db-port=5432
-      --app.pg-db-user=kwild
-      --app.pg-db-pass=kwild
+      --admin.listen=/tmp/admin.socket
+      --rpc.listen=0.0.0.0:8484
+      --p2p.ip=0.0.0.0
+      --p2p.port=6600
+      --db.host=pg5
+      --db.port=5432
+      --db.user=kwild
+      --db.pass=kwild
    healthcheck:
      test: ["CMD", "curl", "--fail-with-body", "-s", "http://127.0.0.1:8484/api/v1/health/user"]
      interval: 2s

diff --git a/test/integration/helper.go b/test/integration/helper.go
index 65bf97cf4..d65b6dba2 100644
--- a/test/integration/helper.go
+++ b/test/integration/helper.go
@@ -17,6 +17,7 @@ import (
 	"path"
 	"path/filepath"
 	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 
@@ -61,12 +62,12 @@ var (
 var logWaitStrategies = map[string]string{
 	ExtContainer:  "listening on",
 	Ext3Container: "listening on",
-	"node0":       "finalized block",
-	"node1":       "finalized block",
-	"node2":       "finalized block",
-	"node3":       "finalized block",
-	"node4":       "finalized block",
-	"node5":       "finalized block",
+	"node0":       "Committed Block",
+	"node1":       "Committed Block",
+	"node2":       "Committed Block",
+	"node3":       "Committed Block",
+	"node4":       "Committed Block",
+	"node5":       "Committed Block",
 	"kgw":         "KGW Server started",
 	"hardhat":     "Started HTTP and WebSocket JSON-RPC server",
 	"pg0":         `listening on IPv4 address "0.0.0.0", port 5432`,
@@ -82,6 +83,9 @@ const (
 	Ext3Container    = "ext3"
 	testChainID      = "kwil-test-chain"
 	MigrationChainID = "kwil-migration-chain"
+
+	OwnerPrivKey = "f1aa5a7966c3863ccde3047f6a1e266cdc0c76b399e256b8fede92b1c69e4f4e"
+	OwnerAddress = "0xc89d42189f0450c2b2c3c61f58ec5d628176a1e7"
 )
 
 // IntTestConfig is the config for integration test
@@ -449,7 +453,20 @@ func (r *IntHelper) GenerateTestnetConfigs(homeDir string) {
 		}
 	}*/
 
-	err := setup.GenerateTestnetConfigs(homeDir, r.cfg.NValidator, r.cfg.NNonValidator, false, 6600)
+	testnetCfg := &setup.TestnetConfig{
+		RootDir:        homeDir,
+		NumVals:        r.cfg.NValidator,
+		NumNVals:       r.cfg.NNonValidator,
+		ChainID:        testChainID,
+		NoPex:          false,
+		StartingPort:   6600,
+		HostnamePrefix: "kwil-",
+		DnsNamePrefix:  "node",
+		Owner:          OwnerAddress,
+		// StartingIP: "172.10.100.2",
+	}
+
+	err := setup.GenerateTestnetConfigs(testnetCfg, &setup.ConfigOpts{UniquePorts: false, DnsHost: true})
 
 	/*err := nodecfg.GenerateTestnetConfig(&nodecfg.TestnetGenerateConfig{
 		ChainID: testChainID,

diff --git a/test/integration/kwild_test.go b/test/integration/kwild_test.go
index e54c5bfe7..045da23a8 100644
--- a/test/integration/kwild_test.go
+++ b/test/integration/kwild_test.go
@@ -4,11 +4,10 @@ import (
 	"context"
 	"flag"
 	"testing"
-	"time"
 
 	"github.com/kwilteam/kwil-db/core/types"
-
 	"github.com/kwilteam/kwil-db/test/integration"
+	"github.com/kwilteam/kwil-db/test/specifications"
 )
 
 var dev = flag.Bool("dev", false, "run for development purpose (no tests)")
 
 var spamTest = flag.Bool("spam", false, "run the spam test that requires a special docker image to be built")
 
 var forkTest = flag.Bool("fork", false, "run the fork test that requires a special docker image to be built")
 
-var drivers = flag.String("drivers", "jsonrpc,cli", "comma separated list of drivers to run")
+// TODO: test cli driver later
+var drivers = flag.String("drivers", "jsonrpc", "comma separated list of drivers to run")
 
 // NOTE: `-parallel` is a flag that is already used by `go test`
 var parallelMode = flag.Bool("parallel-mode", false, "run tests in parallel mode")
 
 // Here we make clear the services will be used in each stage
-var basicServices = []string{integration.ExtContainer, "pg0", "pg1", "pg2", "node0", "node1", "node2"}
-var newServices = []string{integration.Ext3Container, "pg3", "node3"}
+var basicServices = []string{"pg0", "pg1", "pg2", "node0", "node1", "node2"}
+var newServices = []string{"pg3", "node3"}
 
 // NOTE: allServices will be sorted by docker-compose(in setup), so the order is not reliable
-var allServices = []string{integration.ExtContainer, integration.Ext3Container,
-	"pg0", "pg1", "pg2", "pg3", "node0", "node1", "node2", "node3",
-}
+var allServices = []string{"pg0", "pg1", "pg2", "pg3", "node0", "node1", "node2", "node3"}
 
-var migrationServices = []string{"new-ext1", "new-pg0", "new-pg1", "new-pg2", "new-node0", "new-node1", "new-node2"}
+var migrationServices = []string{"new-pg0", "new-pg1", "new-pg2", "new-node0", "new-node1", "new-node2"}
 
-var migrationServices2 = []string{"new-ext3", "new-pg3", "new-node3"}
+var migrationServices2 = []string{"new-pg3", "new-node3"}
 
-var singleNodeServices = []string{integration.ExtContainer, "pg0", "node0"}
+var singleNodeServices = []string{"pg0", "node0"}
 
-var byzAllServices = []string{integration.ExtContainer, integration.Ext3Container, "pg0", "pg1", "pg2", "pg3", "pg4", "pg5", "node0", "node1", "node2", "node3", "node4", "node5"}
+var byzAllServices = []string{"pg0", "pg1", "pg2", "pg3", "pg4", "pg5", "node0", "node1", "node2", "node3", "node4", "node5"}
 
 func TestLocalDevSetup(t *testing.T) {
 	if !*dev {
@@ -49,7 +47,6 @@ func TestLocalDevSetup(t *testing.T) {
 	ctx := context.Background()
 
 	opts := []integration.HelperOpt{
-		integration.WithBlockInterval(time.Second),
 		integration.WithValidators(4),
 		integration.WithNonValidators(0),
 		integration.WithExposedRPCPorts(),
@@ -61,52 +58,38 @@ func TestLocalDevSetup(t *testing.T) {
 	helper.WaitForSignals(t)
 }
 
-// func TestKwildDatabaseIntegration(t *testing.T) {
-// 	if *parallelMode {
-// 		t.Parallel()
-// 	}
-
-// 	ctx := context.Background()
-
-// 	opts := []integration.HelperOpt{
-// 		integration.WithBlockInterval(time.Second),
-// 		integration.WithValidators(4),
-// 		integration.WithNonValidators(0),
-// 	}
-
-// 	testDrivers := strings.Split(*drivers, ",")
-// 	for _, driverType := range testDrivers {
-// 		t.Run(driverType+"_driver", func(t *testing.T) {
-// 			helper := integration.NewIntHelper(t, opts...)
-// 			helper.Setup(ctx, basicServices)
+func TestKwildDatabaseIntegration(t *testing.T) {
+	ctx := context.Background()
 
-// 			node0Driver := helper.GetUserDriver(ctx, "node0", driverType, nil)
-// 			node1Driver := helper.GetUserDriver(ctx, "node1", driverType, nil)
-// 			node2Driver := helper.GetUserDriver(ctx, "node2", driverType, nil)
+	opts := []integration.HelperOpt{
+		integration.WithValidators(4),
+		integration.WithNonValidators(0),
+		integration.WithExposedRPCPorts(),
+	}
 
-// 			// Create a new database and verify that the database exists on other nodes
-// 			specifications.DatabaseDeploySpecification(ctx, t, node0Driver)
-// 			// TODO: wait for node 1 and 2 to hit whatever height 0 is at
-// 			time.Sleep(2 * time.Second)
-// 			specifications.DatabaseVerifySpecification(ctx, t, node1Driver, true)
-// 			specifications.DatabaseVerifySpecification(ctx, t, node2Driver, true)
+	helper := integration.NewIntHelper(t, opts...)
+	helper.Setup(ctx, allServices)
 
-// 			specifications.ExecuteDBInsertSpecification(ctx, t, node0Driver)
+	driverType := "jsonrpc"
+	node0Driver := helper.GetUserDriver(ctx, "node0", driverType, nil)
+	// node1Driver := helper.GetUserDriver(ctx, "node1", driverType, nil)
+	// node2Driver := helper.GetUserDriver(ctx, "node2", driverType, nil)
 
-// 			// restart node1 and ensure that the app state is synced
-// 			helper.RestartNode(ctx, "node1", 15*time.Second)
-// 			node1Driver = helper.GetUserDriver(ctx, "node1", driverType, nil)
+	// create a new namespace
+	specifications.CreateNamespaceSpecification(ctx, t, node0Driver)
 
-// 			specifications.ExecuteDBUpdateSpecification(ctx, t, node1Driver)
-// 			specifications.ExecuteDBDeleteSpecification(ctx, t, node2Driver)
+	// create tables
+	specifications.CreateTablesSpecification(ctx, t, node0Driver)
 
-// 			// specifications.ExecutePermissionedActionSpecification(ctx, t, invalidUserDriver)
+	// create user using sql
+	specifications.CreateUserSQLSpecification(ctx, t, node0Driver)
 
-// 			specifications.DatabaseDropSpecification(ctx, t, node1Driver)
+	// create user using action
+	// specifications.CreateUserActionSpecification(ctx, t, node0Driver)
 
-// 		})
-// 	}
-// }
+	// list users
+	// specifications.ListUsersActionSpecification(ctx, t, node1Driver)
+}
 
 // func TestKwildValidatorRemoval(t *testing.T) {
 // 	if *parallelMode {

diff --git a/test/specifications/dsl.go b/test/specifications/dsl.go
index 26ca6622a..ff0d99cc6 100644
--- a/test/specifications/dsl.go
+++ b/test/specifications/dsl.go
@@ -49,6 +49,7 @@ type ExecuteQueryDsl interface {
 	TxQueryDsl
 	// ExecuteAction executes QUERY to a database
 	Execute(ctx context.Context, dbid string, actionName string, actionInputs ...[]any) (types.Hash, error)
+	ExecuteSQL(ctx context.Context, sql string, params map[string]any) (types.Hash, error)
 	QueryDatabase(ctx context.Context, query string) (*types.QueryResult, error)
 	SupportBatch() bool
 }

diff --git a/test/specifications/namespace.go b/test/specifications/namespace.go
new file mode 100644
index 000000000..3fce6c302
--- /dev/null
+++ b/test/specifications/namespace.go
@@ -0,0 +1,79 @@
+package specifications
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	namespace        = "kwil_test"
+	invalidNamespace = "dummy_test"
+
+	createNamespace = `CREATE NAMESPACE IF NOT EXISTS ` + namespace + `;`
+
+	createUsersTable = `CREATE TABLE IF NOT EXISTS users (
+		id INT PRIMARY KEY,
+		name TEXT NOT NULL,
+		age INT NOT NULL
+	);`
+
+	createPostsTable = `CREATE TABLE IF NOT EXISTS posts (
+		id INT PRIMARY KEY,
+		owner_id INT NOT NULL REFERENCES users(id),
+		content TEXT,
+		created_at INT
+	);`
+
+	createUserAction = `CREATE ACTION create_user($id INT, $name TEXT, $age INT) public {INSERT INTO users (id, name, age) VALUES ($id, $name, $age);}`
+
+	listUsersAction = `CREATE ACTION list_users() public {
+		SELECT *
+		FROM users;
+	}`
+)
+
+func CreateNamespaceSpecification(ctx context.Context, t *testing.T, execute ExecuteQueryDsl) {
+	txHash, err := execute.ExecuteSQL(ctx, createNamespace, nil)
+	require.NoError(t, err)
+
+	expectTxSuccess(t, execute, ctx, txHash, defaultTxQueryTimeout)()
+}
+
+func CreateTablesSpecification(ctx context.Context, t *testing.T, execute ExecuteQueryDsl) {
+	invalidCreateCmd := fmt.Sprintf("{%s}%s", invalidNamespace, createUsersTable)
+	txHash, err := execute.ExecuteSQL(ctx, invalidCreateCmd, nil)
+	require.NoError(t, err)
+
+	expectTxFail(t, execute, ctx, txHash, defaultTxQueryTimeout)()
fmt.Sprintf("{%s}%s", namespace, createUsersTable) + txHash, err = execute.ExecuteSQL(ctx, createCmd, nil) + require.NoError(t, err) + + expectTxSuccess(t, execute, ctx, txHash, defaultTxQueryTimeout)() +} + +func CreateUserSQLSpecification(ctx context.Context, t *testing.T, execute ExecuteQueryDsl) { + createCmd := fmt.Sprintf("{%s}INSERT INTO users (id, name, age) VALUES (1, 'satoshi', 42);", namespace) + txHash, err := execute.ExecuteSQL(ctx, createCmd, nil) + require.NoError(t, err) + + expectTxSuccess(t, execute, ctx, txHash, defaultTxQueryTimeout)() +} + +func CreateUserActionSpecification(ctx context.Context, t *testing.T, execute ExecuteQueryDsl) { + txHash, err := execute.Execute(ctx, namespace, createUserAction, nil) + require.NoError(t, err) + + expectTxSuccess(t, execute, ctx, txHash, defaultTxQueryTimeout)() +} + +func ListUsersActionSpecification(ctx context.Context, t *testing.T, execute ExecuteQueryDsl) { + txHash, err := execute.Execute(ctx, namespace, listUsersAction, nil) + require.NoError(t, err) + + expectTxSuccess(t, execute, ctx, txHash, defaultTxQueryTimeout)() +} diff --git a/test/specifications/utils.go b/test/specifications/utils.go index e00752659..a58d6c6f5 100644 --- a/test/specifications/utils.go +++ b/test/specifications/utils.go @@ -1,92 +1,59 @@ package specifications -// type DatabaseSchemaLoader interface { -// Load(t *testing.T, targetSchema *testSchema) *types.Schema -// LoadWithoutValidation(t *testing.T, targetSchema *testSchema) *types.Schema -// } - -// type FileDatabaseSchemaLoader struct { -// Modifier func(db *types.Schema) -// } - -// func (l *FileDatabaseSchemaLoader) Load(t *testing.T, targetSchema *testSchema) *types.Schema { -// t.Helper() - -// d, err := os.ReadFile(targetSchema.GetFilePath()) -// if err != nil { -// t.Fatal("cannot open database schema file", err) -// } - -// parseResult, err := parse.Parse(d) -// if err != nil { -// t.Fatal("cannot parse database schema", err) -// } - -// l.Modifier(parseResult) -// return parseResult -// } - -// func (l *FileDatabaseSchemaLoader) LoadWithoutValidation(t *testing.T, targetSchema *testSchema) *types.Schema { -// t.Helper() - -// d, err := os.ReadFile(targetSchema.GetFilePath()) -// if err != nil { -// t.Fatal("cannot open database schema file", err) -// } - -// db, err := parse.ParseSchemaWithoutValidation(d) -// if err != nil { -// t.Fatal("cannot parse database schema", err) -// } -// // ignore parser validation error - -// l.Modifier(db.Schema) - -// return db.Schema -// } - -// func ExpectTxSuccess(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash) { -// expectTxSuccess(t, spec, ctx, txHash, defaultTxQueryTimeout)() -// } - -// func expectTxSuccess(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash, waitFor time.Duration) func() { -// return func() { -// var status strings.Builder -// require.Eventually(t, func() bool { -// // prevent appending to the prior invocation(s) -// status.Reset() -// if err := spec.TxSuccess(ctx, txHash); err == nil { -// return true -// // Consider failing faster for unexpected errors: -// // } else if !errors.Is(err, driver.ErrTxNotConfirmed) { -// // t.Fatal(err) -// // return false -// } else { -// status.WriteString(err.Error()) -// return false -// } -// }, waitFor, time.Millisecond*300, "tx failed: %s", status.String()) -// } -// } - -// func ExpectTxfail(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash) { -// expectTxFail(t, spec, ctx, txHash, defaultTxQueryTimeout)() -// } - -// func 
-// 	return func() {
-// 		var status strings.Builder
-// 		require.Eventually(t, func() bool {
-// 			// prevent appending to the prior invocation(s)
-// 			status.Reset()
-// 			if err := spec.TxSuccess(ctx, txHash); err == nil {
-// 				status.WriteString("success")
-// 				return false
-// 			} else {
-// 				status.WriteString(err.Error())
-// 				// NOTE: ErrTxNotConfirmed is not considered a failure, should retry
-// 				return !errors.Is(err, driver.ErrTxNotConfirmed)
-// 			}
-// 		}, waitFor, time.Second*1, "tx should fail - status: %v, hash %x", status.String(), txHash)
-// 	}
-// }
+import (
+	"context"
+	"errors"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/kwilteam/kwil-db/core/types"
+	"github.com/kwilteam/kwil-db/test/driver"
+	"github.com/stretchr/testify/require"
+)
+
+func ExpectTxSuccess(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash) {
+	expectTxSuccess(t, spec, ctx, txHash, defaultTxQueryTimeout)()
+}
+
+func expectTxSuccess(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash, waitFor time.Duration) func() {
+	return func() {
+		var status strings.Builder
+		require.Eventually(t, func() bool {
+			// prevent appending to the prior invocation(s)
+			status.Reset()
+			if err := spec.TxSuccess(ctx, txHash); err == nil {
+				return true
+				// Consider failing faster for unexpected errors:
+				// } else if !errors.Is(err, driver.ErrTxNotConfirmed) {
+				// 	t.Fatal(err)
+				// 	return false
+			} else {
+				status.WriteString(err.Error())
+				return false
+			}
+		}, waitFor, time.Millisecond*300, "tx failed: %s", status.String())
+	}
+}
+
+func ExpectTxFail(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash) {
+	expectTxFail(t, spec, ctx, txHash, defaultTxQueryTimeout)()
+}
+
+func expectTxFail(t *testing.T, spec TxQueryDsl, ctx context.Context, txHash types.Hash, waitFor time.Duration) func() {
+	return func() {
+		var status strings.Builder
+		require.Eventually(t, func() bool {
+			// prevent appending to the prior invocation(s)
+			status.Reset()
+			if err := spec.TxSuccess(ctx, txHash); err == nil {
+				status.WriteString("success")
+				return false
+			} else {
+				status.WriteString(err.Error())
+				// NOTE: ErrTxNotConfirmed is not considered a failure, should retry
+				return !errors.Is(err, driver.ErrTxNotConfirmed)
+			}
+		}, waitFor, time.Second*1, "tx should fail - status: %v, hash %x", status.String(), txHash)
+	}
+}
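
For reference, a minimal sketch of driving the reworked testnet generator from Go. The TestnetConfig/ConfigOpts fields and the GenerateTestnetConfigs signature are taken from this patch; the import path and the standalone wrapper program are assumptions, not part of the patch.

package main

import (
	"log"

	// import path assumed from this repo's layout (app/setup)
	"github.com/kwilteam/kwil-db/app/setup"
)

func main() {
	// Mirrors the call in test/integration/helper.go: a 3-validator,
	// 1-non-validator network rooted at .testnet, with deterministic node
	// keys seeded from StartingPort+i.
	cfg := &setup.TestnetConfig{
		RootDir:       ".testnet",
		ChainID:       "kwil-testnet", // an empty ChainID falls back to this default anyway
		NumVals:       3,
		NumNVals:      1,
		NoPex:         false,
		StartingPort:  6600,
		DnsNamePrefix: "node", // only consulted when ConfigOpts.DnsHost is true
	}
	opts := &setup.ConfigOpts{
		UniquePorts: true,  // offset each node's ports: node0 -> 6600, node1 -> 6601, ...
		DnsHost:     false, // bootnode hosts stay 127.0.0.1 (the StartingIP default)
	}
	if err := setup.GenerateTestnetConfigs(cfg, opts); err != nil {
		log.Fatal(err)
	}
}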
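
And a condensed sketch of the spec flow the new ExecuteSQL DSL method enables, using the same {namespace} statement prefix exercised in test/specifications/namespace.go. The runSmoke helper is hypothetical; ExecuteQueryDsl, ExecuteSQL, and ExpectTxSuccess are the interface and helpers added in this patch.

package specifications_test

import (
	"context"
	"testing"

	"github.com/kwilteam/kwil-db/test/specifications"
	"github.com/stretchr/testify/require"
)

// runSmoke exercises the same path as the new namespace specs: create a
// namespace, create a table inside it via the {namespace} prefix, then
// insert a row with plain SQL. Each statement is submitted as its own tx
// and awaited via the TxQueryDsl helpers.
func runSmoke(ctx context.Context, t *testing.T, exec specifications.ExecuteQueryDsl) {
	for _, stmt := range []string{
		`CREATE NAMESPACE IF NOT EXISTS kwil_test;`,
		`{kwil_test}CREATE TABLE IF NOT EXISTS users (id INT PRIMARY KEY, name TEXT NOT NULL, age INT NOT NULL);`,
		`{kwil_test}INSERT INTO users (id, name, age) VALUES (1, 'satoshi', 42);`,
	} {
		txHash, err := exec.ExecuteSQL(ctx, stmt, nil) // params unused here, as in the specs
		require.NoError(t, err)
		specifications.ExpectTxSuccess(t, exec, ctx, txHash)
	}
}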