diff --git a/balancer/provider/balancer.go b/balancer/provider/balancer.go
index e8b1d7fa6..9e5dba2f7 100644
--- a/balancer/provider/balancer.go
+++ b/balancer/provider/balancer.go
@@ -3,6 +3,9 @@ package provider
 import (
 	"context"
 	"fmt"
+	"sort"
+	"strings"
+
 	"github.com/google/uuid"
 	"github.com/jackc/pgx/v5"
 	"github.com/pg-sharding/spqr/balancer"
@@ -14,8 +17,6 @@ import (
 	"github.com/pg-sharding/spqr/pkg/spqrlog"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	"sort"
-	"strings"
 )
 
 type BalancerImpl struct {
diff --git a/cmd/router/main.go b/cmd/router/main.go
index 5b7dc33ad..ee752d03f 100644
--- a/cmd/router/main.go
+++ b/cmd/router/main.go
@@ -317,6 +317,26 @@ var runCmd = &cobra.Command{
 			}
 		}()
 
+		/* initialize metadata */
+		if rcfg.UseInitSQL {
+			i := instance.NewInitSQLMetadataBootstraper(rcfg.InitSQL)
+			if err := i.InitializeMetadata(ctx, router); err != nil {
+				return err
+			}
+		} else if rcfg.UseCoordinatorInit {
+			/* load config if not yet */
+			if err := config.LoadCoordinatorCfg(ccfgPath); err != nil {
+				return err
+			}
+			e := instance.NewEtcdMetadataBootstraper(config.CoordinatorConfig().QdbAddr)
+			if err := e.InitializeMetadata(ctx, router); err != nil {
+				return err
+			}
+		} else {
+			/* TODO: maybe error-out? */
+			router.Initialize()
+		}
+
 		wg := &sync.WaitGroup{}
 
 		wg.Add(1)
diff --git a/coordinator/provider/coordinator.go b/coordinator/provider/coordinator.go
index f526cb9d9..ae0429c40 100644
--- a/coordinator/provider/coordinator.go
+++ b/coordinator/provider/coordinator.go
@@ -3,7 +3,6 @@ package provider
 import (
 	"context"
 	"crypto/tls"
-	"fmt"
 	"net"
 	"time"
 
@@ -73,6 +72,9 @@ func (ci grpcConnectionIterator) IterRouter(cb func(cc *grpc.ClientConn, addr st
 		if err := cb(cc, r.Address); err != nil {
 			return err
 		}
+		if err := cc.Close(); err != nil {
+			return err
+		}
 	}
 	return nil
 }
@@ -235,22 +237,23 @@ func (qc *qdbCoordinator) watchRouters(ctx context.Context) {
 			switch resp.Status {
 			case routerproto.RouterStatus_CLOSED:
 				spqrlog.Zero.Debug().Msg("router is closed")
-				if err := qc.SyncRouterMetadata(ctx, internalR); err != nil {
-					return err
-				}
-				if _, err := rrClient.OpenRouter(ctx, &routerproto.OpenRouterRequest{}); err != nil {
+				if err := qc.SyncRouterCoordinatorAddress(ctx, internalR); err != nil {
 					return err
 				}
 
 				/* Mark router as opened in qdb */
-				err := qc.db.OpenRouter(ctx, internalR.ID)
+				err := qc.db.CloseRouter(ctx, internalR.ID)
 				if err != nil {
 					return err
 				}
+
 			case routerproto.RouterStatus_OPENED:
 				spqrlog.Zero.Debug().Msg("router is opened")
 
 				/* TODO: check router metadata consistency */
+				if err := qc.SyncRouterCoordinatorAddress(ctx, internalR); err != nil {
+					return err
+				}
 
 				/* Mark router as opened in qdb */
 				err := qc.db.OpenRouter(ctx, internalR.ID)
@@ -287,7 +290,7 @@ func (qc *qdbCoordinator) lockCoordinator(ctx context.Context, initialRouter boo
 		}
 		router := &topology.Router{
 			ID:      uuid.NewString(),
-			Address: fmt.Sprintf("%s:%s", config.RouterConfig().Host, config.RouterConfig().GrpcApiPort),
+			Address: net.JoinHostPort(config.RouterConfig().Host, config.RouterConfig().GrpcApiPort),
 			State:   qdb.OPENED,
 		}
 		if err := qc.RegisterRouter(ctx, router); err != nil {
@@ -296,7 +299,7 @@ func (qc *qdbCoordinator) lockCoordinator(ctx context.Context, initialRouter boo
 		if err := qc.SyncRouterMetadata(ctx, router); err != nil {
 			spqrlog.Zero.Error().Err(err).Msg("sync router metadata when locking coordinator")
 		}
-		if err := qc.UpdateCoordinator(ctx, fmt.Sprintf("%s:%s", config.CoordinatorConfig().Host, config.CoordinatorConfig().GrpcApiPort)); err != nil {
+		if err := qc.UpdateCoordinator(ctx, net.JoinHostPort(config.CoordinatorConfig().Host, config.CoordinatorConfig().GrpcApiPort)); err != nil {
 			return false
 		}
 		return true
@@ -391,6 +394,8 @@ func (qc *qdbCoordinator) traverseRouters(ctx context.Context, cb func(cc *grpc.
 		}
 		defer cc.Close()
 
+		defer cc.Close()
+
 		if err := cb(cc); err != nil {
 			spqrlog.Zero.Debug().Err(err).Str("router id", rtr.ID).Msg("traverse routers")
 			return err
@@ -948,6 +953,45 @@ func (qc *qdbCoordinator) SyncRouterMetadata(ctx context.Context, qRouter *topol
 	spqrlog.Zero.Debug().Msg("successfully add all key ranges")
 
 	rCl := routerproto.NewTopologyServiceClient(cc)
+	if _, err := rCl.UpdateCoordinator(ctx, &routerproto.UpdateCoordinatorRequest{
+		Address: net.JoinHostPort(config.CoordinatorConfig().Host, config.CoordinatorConfig().GrpcApiPort),
+	}); err != nil {
+		return err
+	}
+
+	if resp, err := rCl.OpenRouter(ctx, &routerproto.OpenRouterRequest{}); err != nil {
+		return err
+	} else {
+		spqrlog.Zero.Debug().
+			Interface("response", resp).
+			Msg("open router response")
+	}
+
+	return nil
+}
+
+// TODO : unit tests
+func (qc *qdbCoordinator) SyncRouterCoordinatorAddress(ctx context.Context, qRouter *topology.Router) error {
+	spqrlog.Zero.Debug().
+		Str("address", qRouter.Address).
+		Msg("qdb coordinator: sync router metadata")
+
+	cc, err := DialRouter(qRouter)
+	if err != nil {
+		return err
+	}
+	defer cc.Close()
+
+	/* Update current coordinator address. */
+	/* Todo: check that router metadata is in sync. */
+
+	rCl := routerproto.NewTopologyServiceClient(cc)
+	if _, err := rCl.UpdateCoordinator(ctx, &routerproto.UpdateCoordinatorRequest{
+		Address: net.JoinHostPort(config.CoordinatorConfig().Host, config.CoordinatorConfig().GrpcApiPort),
+	}); err != nil {
+		return err
+	}
+
 	if resp, err := rCl.OpenRouter(ctx, &routerproto.OpenRouterRequest{}); err != nil {
 		return err
 	} else {
diff --git a/docker/router/Dockerfile b/docker/router/Dockerfile
index 5a5b4f076..7654f8a4c 100644
--- a/docker/router/Dockerfile
+++ b/docker/router/Dockerfile
@@ -3,4 +3,4 @@ FROM spqr-base-image
 RUN apt-get update && apt-get install -y postgresql-client
 COPY ./docker/router/ssl/localhost.crt /etc/spqr/ssl/server.crt
 COPY ./docker/router/ssl/localhost.key /etc/spqr/ssl/server.key
-ENTRYPOINT CONFIG_PATH=${ROUTER_CONFIG=/spqr/docker/router/cfg.yaml} COORD_CONFIG_PATH=${COORDINATOR_CONFIG=/spqr/docker/coordinator/cfg.yaml} && CUR_HOST=$(cat ${CONFIG_PATH} | grep "host:") && sed -i "s/${CUR_HOST}/${ROUTER_HOST=${CUR_HOST}}/g" ${CONFIG_PATH} && /spqr/spqr-router run --config ${CONFIG_PATH} --coordinator-config ${COORD_CONFIG_PATH} --proto-debug
+ENTRYPOINT CONFIG_PATH=${ROUTER_CONFIG=/spqr/docker/router/cfg.yaml} COORD_CONFIG_PATH=${COORDINATOR_CONFIG=/spqr/docker/coordinator/cfg.yaml} && CUR_HOST=$(cat ${CONFIG_PATH} | grep "host:") && sed -i "s/${CUR_HOST}/${ROUTER_HOST=${CUR_HOST}}/g" ${CONFIG_PATH} && /spqr/spqr-router run --config ${CONFIG_PATH} --coordinator-config ${COORD_CONFIG_PATH} >> ${ROUTER_LOG}
diff --git a/examples/router-etcd-init.yaml b/examples/router-etcd-init.yaml
new file mode 100644
index 000000000..0172e2556
--- /dev/null
+++ b/examples/router-etcd-init.yaml
@@ -0,0 +1,66 @@
+log_level: debug
+
+host: '::1'
+router_port: '6432'
+admin_console_port: '7432'
+grpc_api_port: '7010'
+
+world_shard_fallback: true
+router_mode: PROXY
+
+use_coordinator_init: true
+
+with_coordinator: true
+
+frontend_tls:
+  key_file: /etc/odyssey/ssl/server.key
+  cert_file: /etc/odyssey/ssl/server.crt
+  sslmode: disable
+
+frontend_rules:
+  - usr: user1
+    db: db1
+    pool_mode: TRANSACTION
+    pool_prepared_statement: true
+    auth_rule:
+      auth_method: ok
+      password: strong
+  - pool_mode: TRANSACTION
+    pool_default: true
+    pool_prepared_statement: false
+    auth_rule:
+      auth_method: ok
+
+backend_rules:
+  - usr: user1
+    db: db1
+    pool_discard: false
+    pool_rollback: true
+  - pool_default: true
+    pool_discard: false
+    pool_rollback: true
+
+shards:
+  sh1:
+    tls:
+      key_file: /etc/odyssey/ssl/server.key
+      sslmode: disable
+      cert_file: /etc/odyssey/ssl/server.crt
+    db: db1
+    usr: user1
+    pwd: 12345678
+    type: DATA
+    hosts:
+      - 'localhost:5550'
+  sh2:
+    tls:
+      key_file: /etc/odyssey/ssl/server.key
+      sslmode: disable
+      cert_file: /etc/odyssey/ssl/server.crt
+    db: db1
+    usr: user1
+    pwd: 12345678
+    type: DATA
+    hosts:
+      - 'localhost:5551'
+
diff --git a/pkg/coord/adapter.go b/pkg/coord/adapter.go
index 0264a8712..7dacc7ede 100644
--- a/pkg/coord/adapter.go
+++ b/pkg/coord/adapter.go
@@ -282,6 +282,15 @@ func (a *Adapter) SyncRouterMetadata(ctx context.Context, router *topology.Route
 	return err
 }
 
+// SyncRouterCoordinatorAddress implements meta.EntityMgr.
+func (a *Adapter) SyncRouterCoordinatorAddress(ctx context.Context, router *topology.Router) error {
+	c := proto.NewRouterServiceClient(a.conn)
+	_, err := c.SyncMetadata(ctx, &proto.SyncMetadataRequest{
+		Router: topology.RouterToProto(router),
+	})
+	return err
+}
+
 // TODO : unit tests
 // TODO : implement
 func (a *Adapter) AddDataShard(ctx context.Context, shard *datashards.DataShard) error {
diff --git a/pkg/coord/local/clocal.go b/pkg/coord/local/clocal.go
index 693906f78..f41a144ec 100644
--- a/pkg/coord/local/clocal.go
+++ b/pkg/coord/local/clocal.go
@@ -470,6 +470,10 @@ func (qr *LocalCoordinator) SyncRouterMetadata(ctx context.Context, router *topo
 	return ErrNotCoordinator
 }
 
+func (qr *LocalCoordinator) SyncRouterCoordinatorAddress(ctx context.Context, router *topology.Router) error {
+	return ErrNotCoordinator
+}
+
 func (qr *LocalCoordinator) UpdateCoordinator(ctx context.Context, addr string) error {
 	return qr.qdb.UpdateCoordinator(ctx, addr)
 }
diff --git a/pkg/models/topology/routers.go b/pkg/models/topology/routers.go
index 66659c170..4f2a254f1 100644
--- a/pkg/models/topology/routers.go
+++ b/pkg/models/topology/routers.go
@@ -18,6 +18,7 @@ type RouterMgr interface {
 	ListRouters(ctx context.Context) ([]*Router, error)
 	UnregisterRouter(ctx context.Context, id string) error
 	SyncRouterMetadata(ctx context.Context, router *Router) error
+	SyncRouterCoordinatorAddress(ctx context.Context, router *Router) error
 	UpdateCoordinator(ctx context.Context, address string) error
 	GetCoordinator(ctx context.Context) (string, error)
 }
diff --git a/qdb/etcdqdb.go b/qdb/etcdqdb.go
index cd45ebb57..4fa39dde9 100644
--- a/qdb/etcdqdb.go
+++ b/qdb/etcdqdb.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"net"
 	"path"
 	"sort"
 	"sync"
@@ -512,7 +513,7 @@ func (q *EtcdQDB) TryCoordinatorLock(ctx context.Context) error {
 		return err
 	}
 
-	op := clientv3.OpPut(coordLockKey, config.CoordinatorConfig().Host, clientv3.WithLease(clientv3.LeaseID(leaseGrantResp.ID)))
+	op := clientv3.OpPut(coordLockKey, net.JoinHostPort(config.CoordinatorConfig().Host, config.CoordinatorConfig().GrpcApiPort), clientv3.WithLease(clientv3.LeaseID(leaseGrantResp.ID)))
 	tx := q.cli.Txn(ctx).If(clientv3util.KeyMissing(coordLockKey)).Then(op)
 	stat, err := tx.Commit()
 	if err != nil {
diff --git a/router/instance/etcd.go b/router/instance/etcd.go
index a726047c4..9cac3f4e7 100644
--- a/router/instance/etcd.go
+++ b/router/instance/etcd.go
@@ -1,5 +1,80 @@
 package instance
 
+import (
+	"context"
+	"time"
+
+	"github.com/pg-sharding/spqr/pkg/models/distributions"
+	"github.com/pg-sharding/spqr/pkg/models/kr"
+	"github.com/pg-sharding/spqr/pkg/spqrlog"
+	"github.com/pg-sharding/spqr/qdb"
+)
+
 type EtcdMetadataBootstraper struct {
-	RouterMetadataBootstraper
+	QdbAddr string
+}
+
+// InitializeMetadata implements RouterMetadataBootstraper.
+func (e *EtcdMetadataBootstraper) InitializeMetadata(ctx context.Context, r RouterInstance) error {
+	etcdConn, err := qdb.NewEtcdQDB(e.QdbAddr)
+	if err != nil {
+		return err
+	}
+	defer etcdConn.Client().Close()
+
+	/* Initialize distributions */
+	ds, err := etcdConn.ListDistributions(ctx)
+	if err != nil {
+		return err
+	}
+
+	for _, d := range ds {
+		if err := r.Console().Mgr().CreateDistribution(ctx, distributions.DistributionFromDB(d)); err != nil {
+			spqrlog.Zero.Error().Err(err).Msg("failed to initialize instance")
+			return err
+		}
+
+		/* initialize key ranges within distribution */
+		krs, err := etcdConn.ListKeyRanges(ctx, d.ID)
+		if err != nil {
+			return err
+		}
+
+		for _, ckr := range krs {
+			if err := r.Console().Mgr().CreateKeyRange(ctx, kr.KeyRangeFromDB(ckr)); err != nil {
+				spqrlog.Zero.Error().Err(err).Msg("failed to initialize instance")
+				return err
+			}
+		}
+	}
+
+	retryCnt := 50
+
+	for {
+		c, err := etcdConn.GetCoordinator(ctx)
+		if err != nil {
+			if retryCnt > 0 {
+				/* await the coordinator to appear */
+				time.Sleep(time.Second)
+				retryCnt--
+				continue
+			}
+			return err
+		}
+
+		err = r.Console().Mgr().UpdateCoordinator(ctx, c)
+
+		if err == nil {
+			break
+		}
+		return err
+	}
+
+	r.Initialize()
+
+	return nil
+}
+
+func NewEtcdMetadataBootstraper(QdbAddr string) RouterMetadataBootstraper {
+	return &EtcdMetadataBootstraper{QdbAddr: QdbAddr}
 }
diff --git a/router/instance/instance.go b/router/instance/instance.go
index 22475b3fa..466a17d90 100644
--- a/router/instance/instance.go
+++ b/router/instance/instance.go
@@ -29,6 +29,7 @@ type RouterInstance interface {
 
 	Initialize() bool
 	Console() console.Console
+	Config() *config.Router
 }
 
 type InstanceImpl struct {
@@ -38,10 +39,10 @@ type InstanceImpl struct {
 	Mgr      meta.EntityMgr
 	Writer   workloadlog.WorkloadLog
 
-	stchan     chan struct{}
-	addr       string
-	frTLS      *tls.Config
-	WithJaeger bool
+	stchan chan struct{}
+	addr   string
+	frTLS  *tls.Config
+	cfg    *config.Router
 
 	notifier *sdnotifier.Notifier
 }
@@ -51,6 +52,10 @@ func (r *InstanceImpl) Console() console.Console {
 	return r.AdmConsole
 }
 
+func (r *InstanceImpl) Config() *config.Router {
+	return r.cfg
+}
+
 func (r *InstanceImpl) ID() string {
 	return "noid"
 }
@@ -143,24 +148,11 @@ func NewRouter(ctx context.Context, rcfg *config.Router, ns string, persist bool
 		Mgr:        lc,
 		stchan:     stchan,
 		frTLS:      frTLS,
-		WithJaeger: rcfg.WithJaeger,
+		cfg:        rcfg,
 		Writer:     writ,
 		notifier:   notifier,
 	}
 
-	/* initialize metadata */
-	if rcfg.UseInitSQL {
-		i := NewInitSQLMetadataBootstraper(rcfg.InitSQL)
-		if err := i.InitializeMetadata(ctx, r); err != nil {
-			return nil, err
-		}
-	} else if rcfg.UseCoordinatorInit {
-		panic("implement me")
-	} else {
-		/* TODO: maybe error-out? */
-		r.Initialize()
-	}
-
 	return r, nil
 }
 
@@ -201,7 +193,7 @@ func (r *InstanceImpl) serv(netconn net.Conn, pt port.RouterPortType) error {
 }
 
 func (r *InstanceImpl) Run(ctx context.Context, listener net.Listener, pt port.RouterPortType) error {
-	if r.WithJaeger {
+	if r.cfg.WithJaeger {
 		closer, err := r.initJaegerTracer(r.RuleRouter.Config())
 		if err != nil {
 			return fmt.Errorf("could not initialize jaeger tracer: %s", err)
diff --git a/test/drivers/gorm-regress/docker-compose.yaml b/test/drivers/gorm-regress/docker-compose.yaml
index 5508d1dbb..473cf82ae 100644
--- a/test/drivers/gorm-regress/docker-compose.yaml
+++ b/test/drivers/gorm-regress/docker-compose.yaml
@@ -36,6 +36,7 @@ services:
       - "6432:6432"
     environment:
       - ROUTER_CONFIG=/spqr/test/regress/conf/router.yaml
+      - ROUTER_LOG='router1.log'
     hostname: regress_router
     container_name: regress_router
     depends_on:
@@ -83,4 +84,4 @@ services:
       EXTRA_PARAMS: client_encoding=UTF8
     depends_on:
       - router
-      - qdb01
\ No newline at end of file
+      - qdb01
diff --git a/test/drivers/hibernate-regress/docker-compose.yaml b/test/drivers/hibernate-regress/docker-compose.yaml
index 394dffc57..e70ab6ae8 100644
--- a/test/drivers/hibernate-regress/docker-compose.yaml
+++ b/test/drivers/hibernate-regress/docker-compose.yaml
@@ -36,6 +36,7 @@ services:
       - "6432:6432"
     environment:
       - ROUTER_CONFIG=/spqr/test/regress/conf/router.yaml
+      - ROUTER_LOG='router1.log'
     hostname: regress_router
     container_name: regress_router
     depends_on:
@@ -82,4 +83,4 @@ services:
     container_name: regress_tests
     depends_on:
      - router
-      - qdb01
\ No newline at end of file
+      - qdb01
diff --git a/test/drivers/jdbc-regress/docker-compose.yaml b/test/drivers/jdbc-regress/docker-compose.yaml
index 1aac4498b..be82ba40e 100644
--- a/test/drivers/jdbc-regress/docker-compose.yaml
+++ b/test/drivers/jdbc-regress/docker-compose.yaml
@@ -36,6 +36,7 @@ services:
       - "6432:6432"
     environment:
       - ROUTER_CONFIG=/spqr/test/regress/conf/router.yaml
+      - ROUTER_LOG='router1.log'
     hostname: regress_router
     container_name: regress_router
     depends_on:
@@ -76,4 +77,4 @@ services:
     hostname: regress_tests
     container_name: regress_tests
     depends_on:
-      - coordinator
\ No newline at end of file
+      - coordinator
diff --git a/test/feature/conf/router_cluster.yaml b/test/feature/conf/router_cluster.yaml
new file mode 100644
index 000000000..ab5fab0fd
--- /dev/null
+++ b/test/feature/conf/router_cluster.yaml
@@ -0,0 +1,46 @@
+host: 'regress_router'
+router_port: '6432'
+admin_console_port: '7432'
+grpc_api_port: '7000'
+router_mode: PROXY
+log_level: debug
+time_quantiles:
+  - 0.75
+world_shard_fallback: true
+show_notice_messages: true
+use_coordinator_init: true
+frontend_rules:
+  - db: regress
+    usr: regress
+    pool_default: true
+    pool_mode: TRANSACTION
+    auth_rule:
+      auth_method: ok
+shards:
+  sh1:
+    db: regress
+    usr: regress
+    pwd: 12345678
+    type: DATA
+    hosts:
+      - 'spqr_shard_1:6432'
+  sh2:
+    db: regress
+    usr: regress
+    pwd: 12345678
+    type: DATA
+    hosts:
+      - 'spqr_shard_2:6432'
+
+backend_rules:
+  - db: regress
+    usr: regress
+    pool_discard: true
+    pool_rollback: true
+    auth_rules:
+      sh1:
+        auth_method: md5
+        password: 12345678
+      sh2:
+        auth_method: md5
+        password: 12345678
diff --git a/test/feature/conf/router_with_coordinator.yaml b/test/feature/conf/router_with_coordinator.yaml
index 98365631b..02773a2d4 100644
--- a/test/feature/conf/router_with_coordinator.yaml
+++ b/test/feature/conf/router_with_coordinator.yaml
@@ -9,6 +9,7 @@ time_quantiles:
 world_shard_fallback: true
 show_notice_messages: true
 with_coordinator: true
+use_coordinator_init: true
 frontend_rules:
   - db: regress
     usr: regress
diff --git a/test/feature/docker-compose.yaml b/test/feature/docker-compose.yaml
index 0729988a2..0bf800f4b 100644
--- a/test/feature/docker-compose.yaml
+++ b/test/feature/docker-compose.yaml
@@ -40,6 +40,7 @@ services:
       - "7012:7002"
     environment:
       - ROUTER_CONFIG=${ROUTER_CONFIG}
+      - ROUTER_LOG='router1.log'
       - 'ROUTER_HOST=host: ''regress_router'''
       - COORDINATOR_CONFIG=${COORDINATOR_CONFIG}
     hostname: regress_router
@@ -65,6 +66,7 @@ services:
       - "7022:7002"
     environment:
       - ROUTER_CONFIG=${ROUTER_CONFIG}
+      - ROUTER_LOG='router2.log'
       - 'ROUTER_HOST=host: ''regress_router_2'''
       - COORDINATOR_CONFIG=${COORDINATOR_CONFIG_2}
     hostname: regress_router_2
diff --git a/test/feature/features/coordinator.feature b/test/feature/features/coordinator.feature
index 3a546ce95..a40eaca20 100644
--- a/test/feature/features/coordinator.feature
+++ b/test/feature/features/coordinator.feature
@@ -3,6 +3,10 @@ Feature: Coordinator test
     #
     # Make host "coordinator" take control
     #
+    Given cluster environment is
+    """
+    ROUTER_CONFIG=/spqr/test/feature/conf/router_cluster.yaml
+    """
     Given cluster is up and running
     And host "coordinator2" is stopped
     And host "coordinator2" is started
diff --git a/test/feature/features/init_etcd.feature b/test/feature/features/init_etcd.feature
new file mode 100644
index 000000000..eff948510
--- /dev/null
+++ b/test/feature/features/init_etcd.feature
@@ -0,0 +1,46 @@
+Feature: Initialize router metadata from Etcd
+  Background:
+    #
+    # Run routers with coordinators
+    # Stop all coordinators
+    #
+    Given cluster environment is
+    """
+    ROUTER_CONFIG=/spqr/test/feature/conf/router_with_coordinator.yaml
+    COORDINATOR_CONFIG=/spqr/test/feature/conf/router_coordinator.yaml
+    COORDINATOR_CONFIG_2=/spqr/test/feature/conf/router_coordinator_2.yaml
+    """
+    Given cluster is up and running
+    And host "coordinator2" is stopped
+    When I run SQL on host "router-admin"
+    """
+    UNREGISTER ROUTER ALL;
+    """
+    Then command return code should be "0"
+    And host "router" is stopped
+    And host "router2" is stopped
+
+  Scenario: Router initializes its metadata from Etcd when no coordinator is alive
+    When I run SQL on host "coordinator"
+    """
+    CREATE DISTRIBUTION ds1 COLUMN TYPES integer;
+    CREATE KEY RANGE krid1 FROM 19 ROUTE TO sh1 FOR DISTRIBUTION ds1;
+    """
+    Then command return code should be "0"
+
+    When host "coordinator" is stopped
+    And host "router" is started
+
+    When I run SQL on host "router-admin"
+    """
+    SHOW key_ranges
+    """
+    Then SQL result should match json_exactly
+    """
+    [{
+        "Key range ID":"krid1",
+        "Distribution ID":"ds1",
+        "Lower bound":"19",
+        "Shard ID":"sh1"
+    }]
+    """
diff --git a/test/feature/spqr_test.go b/test/feature/spqr_test.go
index 2288438f9..c8c106a59 100644
--- a/test/feature/spqr_test.go
+++ b/test/feature/spqr_test.go
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/pg-sharding/spqr/pkg/models/spqrerror"
 	"io"
 	"log"
 	"os"
@@ -17,6 +16,8 @@ import (
 	"text/template"
 	"time"
 
+	"github.com/pg-sharding/spqr/pkg/models/spqrerror"
+
 	"github.com/cucumber/godog"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/stdlib"
@@ -45,8 +46,8 @@ const (
 	coordinatorPassword             = "password"
 	dbName                          = "regress"
 	consoleName                     = "spqr-console"
-	postgresqlConnectTimeout        = 30 * time.Second
-	postgresqlInitialConnectTimeout = 10 * time.Second
+	postgresqlConnectTimeout        = 60 * time.Second
+	postgresqlInitialConnectTimeout = 30 * time.Second
 	postgresqlQueryTimeout          = 10 * time.Second
 )
 
@@ -206,7 +207,7 @@ func (tctx *testContext) connectPostgresql(addr string, timeout time.Duration) (
 
 func (tctx *testContext) connectPostgresqlWithCredentials(username string, password string, addr string, timeout time.Duration) (*sqlx.DB, error) {
 	ping := func(db *sqlx.DB) bool {
-		ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 		defer cancel()
 		err := db.PingContext(ctx)
 		if err != nil {
@@ -868,6 +869,7 @@ func InitializeScenario(s *godog.ScenarioContext, t *testing.T, debug bool) {
 	tctx.composerEnv = []string{
 		"ROUTER_CONFIG=/spqr/test/feature/conf/router.yaml",
 		"COORDINATOR_CONFIG=/spqr/test/feature/conf/coordinator.yaml",
+		"COORDINATOR_CONFIG_2=/spqr/test/feature/conf/coordinator.yaml",
 	}
 	tctx.variables = make(map[string]interface{})
 	return ctx, nil
diff --git a/test/regress/docker-compose.yaml b/test/regress/docker-compose.yaml
index 78ba849bb..a2199f690 100644
--- a/test/regress/docker-compose.yaml
+++ b/test/regress/docker-compose.yaml
@@ -36,6 +36,7 @@ services:
       - "6432:6432"
     environment:
       - ROUTER_CONFIG=/spqr/test/regress/conf/router.yaml
+      - ROUTER_LOG='router1.log'
     hostname: regress_router
     container_name: regress_router
     depends_on:
diff --git a/test/stress/docker-compose.yaml b/test/stress/docker-compose.yaml
index 2eca37a70..aefbfd6cf 100644
--- a/test/stress/docker-compose.yaml
+++ b/test/stress/docker-compose.yaml
@@ -28,6 +28,7 @@ services:
       - "6432:6432"
     environment:
       - ROUTER_CONFIG=/spqr/test/stress/router.yaml
+      - ROUTER_LOG='router1.log'
     hostname: stress_router
     container_name: stress_router
     depends_on: