From f18ad9a476a2cc3abaa612fc75cd8268065ebf80 Mon Sep 17 00:00:00 2001 From: Michael Demmer Date: Thu, 16 Jan 2025 21:49:01 -0500 Subject: [PATCH 1/6] gist of the idea of a sticky_random balancer --- go/vt/vtgateproxy/mysql_server.go | 1 + go/vt/vtgateproxy/sticky_random_balancer.go | 82 +++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 go/vt/vtgateproxy/sticky_random_balancer.go diff --git a/go/vt/vtgateproxy/mysql_server.go b/go/vt/vtgateproxy/mysql_server.go index 26c4422f28b..7ac74ae20ce 100644 --- a/go/vt/vtgateproxy/mysql_server.go +++ b/go/vt/vtgateproxy/mysql_server.go @@ -201,6 +201,7 @@ func (ph *proxyHandler) ComQuery(c *mysql.Conn, query string, callback func(*sql return mysql.NewSQLErrorFromError(err) } + ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) result, err := ph.proxy.Execute(ctx, session, query, make(map[string]*querypb.BindVariable)) if err := mysql.NewSQLErrorFromError(err); err != nil { diff --git a/go/vt/vtgateproxy/sticky_random_balancer.go b/go/vt/vtgateproxy/sticky_random_balancer.go new file mode 100644 index 00000000000..4301e3889f6 --- /dev/null +++ b/go/vt/vtgateproxy/sticky_random_balancer.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Sticky random is a derivative based on the round_robin balancer which uses a Context +// variable to maintain client-side affinity to a given connection. + +package vtgateproxy + +import ( + "math/rand/v2" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" +) + +type ConnIdKey string + +const CONN_ID_KEY = ConnIdKey("ConnId") + +const Name = "sticky_random" + +var logger = grpclog.Component("sticky_random") + +// newBuilder creates a new roundrobin balancer builder. +func newStickyRandomBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &stickyPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newStickyRandomBuilder()) +} + +type stickyPickerBuilder struct{} + +func (*stickyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("stickyRandomPicker: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &stickyPicker{ + subConns: scs, + } +} + +type stickyPicker struct { + // subConns is the snapshot of the balancer when this picker was + // created. The slice is immutable. 
+ subConns []balancer.SubConn +} + +func (p *stickyPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + + subConnsLen := len(p.subConns) + + connId := info.Ctx.Value(CONN_ID_KEY).(int) + if connId == 0 { + connId = rand.IntN(subConnsLen) // shouldn't happen + } + + sc := p.subConns[connId%subConnsLen] + return balancer.PickResult{SubConn: sc}, nil +} From eae6b55d4dd91959d06ee7b1a8bc5c5372ef55be Mon Sep 17 00:00:00 2001 From: Michael Demmer Date: Fri, 17 Jan 2025 11:09:39 -0800 Subject: [PATCH 2/6] make sticky_random actually work --- go/vt/vtgateproxy/firstready_balancer.go | 5 +- go/vt/vtgateproxy/mysql_server.go | 12 ++- go/vt/vtgateproxy/sim/vtgateproxysim.go | 101 ++++++++++++++++++++ go/vt/vtgateproxy/sticky_random_balancer.go | 22 +++-- go/vt/vtgateproxy/vtgateproxy.go | 1 + 5 files changed, 129 insertions(+), 12 deletions(-) create mode 100644 go/vt/vtgateproxy/sim/vtgateproxysim.go diff --git a/go/vt/vtgateproxy/firstready_balancer.go b/go/vt/vtgateproxy/firstready_balancer.go index d9dd380861b..b8d07e370b7 100644 --- a/go/vt/vtgateproxy/firstready_balancer.go +++ b/go/vt/vtgateproxy/firstready_balancer.go @@ -41,12 +41,13 @@ import ( ) // newBuilder creates a new first_ready balancer builder. -func newBuilder() balancer.Builder { +func newFirstReadyBuilder() balancer.Builder { return base.NewBalancerBuilder("first_ready", &frPickerBuilder{currentConns: map[string]balancer.SubConn{}}, base.Config{HealthCheck: true}) } func init() { - balancer.Register(newBuilder()) + log.V(1).Infof("registering first_ready balancer") + balancer.Register(newFirstReadyBuilder()) } // frPickerBuilder implements both the Builder and the Picker interfaces. diff --git a/go/vt/vtgateproxy/mysql_server.go b/go/vt/vtgateproxy/mysql_server.go index 7ac74ae20ce..4c72de77378 100644 --- a/go/vt/vtgateproxy/mysql_server.go +++ b/go/vt/vtgateproxy/mysql_server.go @@ -196,12 +196,13 @@ func (ph *proxyHandler) ComQuery(c *mysql.Conn, query string, callback func(*sql } }() + ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) + if session.SessionPb().Options.Workload == querypb.ExecuteOptions_OLAP { err := ph.proxy.StreamExecute(ctx, session, query, make(map[string]*querypb.BindVariable), callback) return mysql.NewSQLErrorFromError(err) } - ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) result, err := ph.proxy.Execute(ctx, session, query, make(map[string]*querypb.BindVariable)) if err := mysql.NewSQLErrorFromError(err); err != nil { @@ -262,6 +263,8 @@ func (ph *proxyHandler) ComPrepare(c *mysql.Conn, query string, bindVars map[str } }(session) + ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) + _, fld, err := ph.proxy.Prepare(ctx, session, query, bindVars) err = mysql.NewSQLErrorFromError(err) if err != nil { @@ -307,6 +310,8 @@ func (ph *proxyHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData } }() + ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) + if session.SessionPb().Options.Workload == querypb.ExecuteOptions_OLAP { err := ph.proxy.StreamExecute(ctx, session, prepare.PrepareStmt, prepare.BindVars, callback) return mysql.NewSQLErrorFromError(err) @@ -347,6 +352,8 @@ func (ph *proxyHandler) getSession(ctx context.Context, c *mysql.Conn) (*vtgatec options.ClientFoundRows = true } + ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) + var err error session, err = ph.proxy.NewSession(ctx, options, c.Attributes) if err != nil { @@ -371,6 +378,9 @@ func (ph *proxyHandler) closeSession(ctx context.Context, c 
*mysql.Conn) { if session.SessionPb().InTransaction { defer atomic.AddInt32(&busyConnections, -1) } + + ctx = context.WithValue(ctx, CONN_ID_KEY, int(c.ConnectionID)) + err := ph.proxy.CloseSession(ctx, session) if err != nil { log.Errorf("Error happened in transaction rollback: %v", err) diff --git a/go/vt/vtgateproxy/sim/vtgateproxysim.go b/go/vt/vtgateproxy/sim/vtgateproxysim.go new file mode 100644 index 00000000000..e8c6faa934d --- /dev/null +++ b/go/vt/vtgateproxy/sim/vtgateproxysim.go @@ -0,0 +1,101 @@ +package main + +import ( + "flag" + "fmt" + "math/rand" + "sort" + "time" + + "github.com/guptarohit/asciigraph" +) + +var ( + numClients = flag.Int("c", 9761, "Number of clients") + numVtgates = flag.Int("v", 1068, "Number of vtgates") + numConnections = flag.Int("n", 4, "number of connections per client host") + numZones = flag.Int("z", 4, "number of zones") +) + +func main() { + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + + flag.Parse() + + fmt.Printf("Simulating %d clients => %d vtgates with %d zones %d conns per client\n\n", + *numClients, *numVtgates, *numZones, *numConnections) + + var clients []string + for i := 0; i < *numClients; i++ { + clients = append(clients, fmt.Sprintf("client-%03d", i)) + } + + var vtgates []string + for i := 0; i < *numVtgates; i++ { + vtgates = append(vtgates, fmt.Sprintf("vtgate-%03d", i)) + } + + // for now just consider 1/N of the s "local" + localClients := clients[:*numClients / *numZones] + localVtgates := vtgates[:*numVtgates / *numZones] + + conns := map[string][]string{} + + // Simulate "discovery" + for _, client := range localClients { + var clientConns []string + + for i := 0; i < *numConnections; i++ { + vtgate := localVtgates[rnd.Intn(len(localVtgates))] + clientConns = append(clientConns, vtgate) + } + + conns[client] = clientConns + } + + counts := map[string]int{} + for _, conns := range conns { + for _, vtgate := range conns { + counts[vtgate]++ + } + } + + histogram := map[int]int{} + max := 0 + min := -1 + for _, count := range counts { + histogram[count]++ + if count > max { + max = count + } + if min == -1 || count < min { + min = count + } + } + + fmt.Printf("Conns per vtgate\n%v\n\n", counts) + fmt.Printf("Histogram of conn counts\n%v\n\n", histogram) + + plot := []float64{} + for i := 0; i < len(localVtgates); i++ { + plot = append(plot, float64(counts[localVtgates[i]])) + } + sort.Float64s(plot) + graph := asciigraph.Plot(plot) + fmt.Println("Number of conns per vtgate host") + fmt.Println(graph) + fmt.Println("") + fmt.Println("") + + fmt.Printf("Conn count per vtgate distribution [%d - %d] (%d clients => %d vtgates with %d zones %d conns\n\n", + min, max, *numClients, *numVtgates, *numZones, *numConnections) + plot = []float64{} + for i := min; i < max; i++ { + plot = append(plot, float64(histogram[i])) + } + graph = asciigraph.Plot(plot) + fmt.Println(graph) + + fmt.Printf("\nConn stats: min %d max %d spread %d spread/min %f spread/avg %f\n", + min, max, max-min, float64(max-min)/float64(min), float64(max-min)/float64((max+min)/2)) +} diff --git a/go/vt/vtgateproxy/sticky_random_balancer.go b/go/vt/vtgateproxy/sticky_random_balancer.go index 4301e3889f6..b70860f803b 100644 --- a/go/vt/vtgateproxy/sticky_random_balancer.go +++ b/go/vt/vtgateproxy/sticky_random_balancer.go @@ -26,30 +26,27 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/grpclog" + "vitess.io/vitess/go/vt/log" ) type ConnIdKey string const CONN_ID_KEY = ConnIdKey("ConnId") -const 
Name = "sticky_random" - -var logger = grpclog.Component("sticky_random") - // newBuilder creates a new roundrobin balancer builder. func newStickyRandomBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &stickyPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder("sticky_random", &stickyPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { + log.V(1).Infof("registering sticky_random balancer") balancer.Register(newStickyRandomBuilder()) } type stickyPickerBuilder struct{} func (*stickyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("stickyRandomPicker: Build called with info: %v", info) + // log.V(100).Infof("stickyRandomPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } @@ -72,11 +69,18 @@ func (p *stickyPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) subConnsLen := len(p.subConns) - connId := info.Ctx.Value(CONN_ID_KEY).(int) - if connId == 0 { + var connId int + connIdVal := info.Ctx.Value(CONN_ID_KEY) + if connIdVal != nil { + connId = connIdVal.(int) + log.V(100).Infof("stickyRandomPicker: using connId %d", connId) + } else { + log.V(100).Infof("stickyRandomPicker: nonexistent connId -- using random") connId = rand.IntN(subConnsLen) // shouldn't happen } + // XXX/demmer might want to hash the connId rather than just mod sc := p.subConns[connId%subConnsLen] + return balancer.PickResult{SubConn: sc}, nil } diff --git a/go/vt/vtgateproxy/vtgateproxy.go b/go/vt/vtgateproxy/vtgateproxy.go index 8de456781f3..c1d67b459b9 100644 --- a/go/vt/vtgateproxy/vtgateproxy.go +++ b/go/vt/vtgateproxy/vtgateproxy.go @@ -215,6 +215,7 @@ func Init() { case "round_robin": case "first_ready": case "pick_first": + case "sticky_random": break default: log.Fatalf("invalid balancer type %s", *balancerType) From 072efbaa080c3682b2a6e0aa01bdceab2912f165 Mon Sep 17 00:00:00 2001 From: Michael Demmer Date: Thu, 23 Jan 2025 08:04:59 -0800 Subject: [PATCH 3/6] include the zone locality in the address attributes Make it available to the picker layer along with the pool type. Also rework how the locality is computed to do it as part of building the target host rather than deferring the comparisons to the sorter. --- go/vt/vtgateproxy/discovery.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/go/vt/vtgateproxy/discovery.go b/go/vt/vtgateproxy/discovery.go index 8bef891b485..9786b409e1c 100644 --- a/go/vt/vtgateproxy/discovery.go +++ b/go/vt/vtgateproxy/discovery.go @@ -59,6 +59,7 @@ import ( // const PoolTypeAttr = "PoolType" +const ZoneLocalAttr = "ZoneLocal" // Resolver(https://godoc.org/google.golang.org/grpc/resolver#Resolver). 
type JSONGateResolver struct { @@ -98,6 +99,7 @@ type targetHost struct { Addr string PoolType string Affinity string + IsLocal bool } var ( @@ -309,7 +311,7 @@ func (b *JSONGateResolverBuilder) parse() (bool, error) { return false, fmt.Errorf("error parsing JSON discovery file %s: port field %s has invalid value %v", b.jsonPath, b.portField, port) } - target := targetHost{hostname.(string), fmt.Sprintf("%s:%s", address, port), poolType.(string), affinity.(string)} + target := targetHost{hostname.(string), fmt.Sprintf("%s:%s", address, port), poolType.(string), affinity.(string), affinity == b.affinityValue} targets[target.PoolType] = append(targets[target.PoolType], target) } @@ -321,7 +323,7 @@ func (b *JSONGateResolverBuilder) parse() (bool, error) { targetCount.ResetAll() for poolType := range targets { - b.sorter.shuffleSort(targets[poolType], b.affinityField, b.affinityValue) + b.sorter.shuffleSort(targets[poolType]) if len(targets[poolType]) > *numConnections { targets[poolType] = targets[poolType][:b.numConnections] } @@ -353,7 +355,7 @@ func (b *JSONGateResolverBuilder) getTargets(poolType string) []targetHost { targets = append(targets, b.targets[poolType]...) b.mu.RUnlock() - b.sorter.shuffleSort(targets, b.affinityField, b.affinityValue) + b.sorter.shuffleSort(targets) return targets } @@ -373,7 +375,7 @@ func newShuffleSorter() *shuffleSorter { // shuffleSort shuffles a slice of targetHost to ensure every host has a // different order to iterate through, putting the affinity matching (e.g. same // az) hosts at the front and the non-matching ones at the end. -func (s *shuffleSorter) shuffleSort(targets []targetHost, affinityField, affinityValue string) { +func (s *shuffleSorter) shuffleSort(targets []targetHost) { n := len(targets) head := 0 // Only need to do n-1 swaps since the last host is always in the right place. @@ -383,7 +385,7 @@ func (s *shuffleSorter) shuffleSort(targets []targetHost, affinityField, affinit j := head + s.rand.Intn(tail-head+1) s.mu.Unlock() - if affinityField != "" && affinityValue == targets[j].Affinity { + if targets[j].IsLocal { targets[head], targets[j] = targets[j], targets[head] head++ } else { @@ -406,7 +408,8 @@ func (b *JSONGateResolverBuilder) update(r *JSONGateResolver) error { var addrs []resolver.Address for _, target := range targets { - addrs = append(addrs, resolver.Address{Addr: target.Addr, Attributes: attributes.New(PoolTypeAttr, r.poolType)}) + attrs := attributes.New(PoolTypeAttr, r.poolType).WithValue(ZoneLocalAttr, target.IsLocal) + addrs = append(addrs, resolver.Address{Addr: target.Addr, Attributes: attrs}) } // If we've already selected some targets, give the new addresses some time to warm up before removing From 952fc33e69b6aec5f4c7f56e3a7315890cd536ff Mon Sep 17 00:00:00 2001 From: Michael Demmer Date: Thu, 23 Jan 2025 08:21:43 -0800 Subject: [PATCH 4/6] include IsLocal in debug output --- go/vt/vtgateproxy/discovery.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/go/vt/vtgateproxy/discovery.go b/go/vt/vtgateproxy/discovery.go index 9786b409e1c..34bafa5cbd7 100644 --- a/go/vt/vtgateproxy/discovery.go +++ b/go/vt/vtgateproxy/discovery.go @@ -491,12 +491,13 @@ const ( {{range $i, $p := .Pools}} - + {{range index $.Targets $p}} + {{end}} {{end}}
-		<td colspan="…">{{$p}}</td>
+		<td colspan="…">{{$p}}</td>
-		<tr> <td>{{.Hostname}}</td> <td>{{.Addr}}</td> <td>{{.Affinity}}</td> </tr>
+		<tr> <td>{{.Hostname}}</td> <td>{{.Addr}}</td> <td>{{.Affinity}}</td> <td>{{.IsLocal}}</td> </tr>
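At this point in the series the two halves of the mechanism are in place: the MySQL handler stores each connection's ID in the per-RPC context (patches 1-2), and discovery tags every resolved address with a zone-local attribute and surfaces it on the debug page (patches 3-4). The sketch below shows how a gRPC client outside the proxy could exercise the same balancer. It is illustrative only: the grpc.Dial wiring, the empty service-config entry, and the dialStickyRandom name are assumptions, not code from this series.

    package stickyexample

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"

        // Importing the package runs init(), which registers "sticky_random".
        "vitess.io/vitess/go/vt/vtgateproxy"
    )

    // dialStickyRandom (hypothetical helper) opens a connection that load-balances
    // with sticky_random and returns a context that pins RPCs to one sub-connection.
    func dialStickyRandom(ctx context.Context, target string, connID int) (*grpc.ClientConn, context.Context, error) {
        conn, err := grpc.Dial(target,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            // Select the registered balancer by name; assumes the builder needs no config.
            grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"sticky_random":{}}]}`),
        )
        if err != nil {
            return nil, nil, err
        }
        // The picker reads this value from balancer.PickInfo.Ctx and chooses
        // subConns[connID % len(subConns)], so RPCs carrying the same connection
        // ID keep landing on the same vtgate while it stays ready.
        return conn, context.WithValue(ctx, vtgateproxy.CONN_ID_KEY, connID), nil
    }

Inside the proxy the equivalent wiring is the context.WithValue calls added to mysql_server.go and the balancer selection in vtgateproxy.go, so nothing in this sketch is required by the patches themselves.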
From 1b6038a553f4b894f4116dd784fb02d4cf2e5000 Mon Sep 17 00:00:00 2001 From: Michael Demmer Date: Thu, 23 Jan 2025 08:30:29 -0800 Subject: [PATCH 5/6] add support for local zone affinity to sticky_random Using the zone local attribute inhjected by discovery (if it exists), update sticky_random so that it biases to only use the local zone connections if there are any available, otherwise fall back to remote. --- go/vt/vtgateproxy/sticky_random_balancer.go | 28 +++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/go/vt/vtgateproxy/sticky_random_balancer.go b/go/vt/vtgateproxy/sticky_random_balancer.go index b70860f803b..ce6232df568 100644 --- a/go/vt/vtgateproxy/sticky_random_balancer.go +++ b/go/vt/vtgateproxy/sticky_random_balancer.go @@ -45,15 +45,39 @@ func init() { type stickyPickerBuilder struct{} +// Would be nice if this were easier in golang +func boolValue(val interface{}) bool { + switch val := val.(type) { + case bool: + return val + } + return false +} + func (*stickyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // log.V(100).Infof("stickyRandomPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) - for sc := range info.ReadySCs { - scs = append(scs, sc) + + // Where possible filter to only ready conns in the local zone, using the remote + // zone only if there are no local conns available. + for sc, scInfo := range info.ReadySCs { + local := boolValue(scInfo.Address.Attributes.Value(ZoneLocalAttr)) + if local { + scs = append(scs, sc) + } + } + + // Otherwise use all the ready conns regardless of locality + if len(scs) == 0 { + for sc := range info.ReadySCs { + scs = append(scs, sc) + } } + return &stickyPicker{ subConns: scs, } From def45d7dfce52a7075b6e4cfe2bd946d14c817c0 Mon Sep 17 00:00:00 2001 From: Michael Demmer Date: Thu, 23 Jan 2025 08:58:38 -0800 Subject: [PATCH 6/6] add num_backup_conns option to force discovery of non-zone-local vtgates --- go/vt/vtgateproxy/discovery.go | 28 ++++++++++++++++++++-------- go/vt/vtgateproxy/vtgateproxy.go | 2 ++ 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/go/vt/vtgateproxy/discovery.go b/go/vt/vtgateproxy/discovery.go index 34bafa5cbd7..78ea8d179b4 100644 --- a/go/vt/vtgateproxy/discovery.go +++ b/go/vt/vtgateproxy/discovery.go @@ -84,6 +84,7 @@ type JSONGateResolverBuilder struct { affinityField string affinityValue string numConnections int + numBackupConns int mu sync.RWMutex targets map[string][]targetHost @@ -115,6 +116,7 @@ func RegisterJSONGateResolver( affinityField string, affinityValue string, numConnections int, + numBackupConns int, ) (*JSONGateResolverBuilder, error) { jsonDiscovery := &JSONGateResolverBuilder{ targets: map[string][]targetHost{}, @@ -125,6 +127,7 @@ func RegisterJSONGateResolver( affinityField: affinityField, affinityValue: affinityValue, numConnections: numConnections, + numBackupConns: numBackupConns, sorter: newShuffleSorter(), } @@ -265,7 +268,7 @@ func (b *JSONGateResolverBuilder) parse() (bool, error) { return false, fmt.Errorf("error parsing JSON discovery file %s: %v", b.jsonPath, err) } - var targets = map[string][]targetHost{} + var allTargets = map[string][]targetHost{} for _, host := range hosts { hostname, hasHostname := host["host"] address, hasAddress := host[b.addressField] @@ -312,7 +315,7 @@ func (b *JSONGateResolverBuilder) parse() (bool, error) { } target := 
targetHost{hostname.(string), fmt.Sprintf("%s:%s", address, port), poolType.(string), affinity.(string), affinity == b.affinityValue} - targets[target.PoolType] = append(targets[target.PoolType], target) + allTargets[target.PoolType] = append(allTargets[target.PoolType], target) } // If a pool disappears, the metric will not record this unless all counts @@ -322,16 +325,25 @@ func (b *JSONGateResolverBuilder) parse() (bool, error) { // targets and only resetting pools which disappear. targetCount.ResetAll() - for poolType := range targets { - b.sorter.shuffleSort(targets[poolType]) - if len(targets[poolType]) > *numConnections { - targets[poolType] = targets[poolType][:b.numConnections] + var selected = map[string][]targetHost{} + + for poolType := range allTargets { + b.sorter.shuffleSort(allTargets[poolType]) + + // try to pick numConnections from the front of the list (local zone) and numBackupConnections + // from the tail (remote zone). if that's not possible, just take the whole set + if len(allTargets[poolType]) >= b.numConnections+b.numBackupConns { + remoteOffset := len(allTargets[poolType]) - b.numBackupConns + selected[poolType] = append(allTargets[poolType][:b.numConnections], allTargets[poolType][remoteOffset:]...) + } else { + selected[poolType] = allTargets[poolType] } - targetCount.Set(poolType, int64(len(targets[poolType]))) + + targetCount.Set(poolType, int64(len(selected[poolType]))) } b.mu.Lock() - b.targets = targets + b.targets = selected b.mu.Unlock() return true, nil diff --git a/go/vt/vtgateproxy/vtgateproxy.go b/go/vt/vtgateproxy/vtgateproxy.go index c1d67b459b9..c7ff8f2c78e 100644 --- a/go/vt/vtgateproxy/vtgateproxy.go +++ b/go/vt/vtgateproxy/vtgateproxy.go @@ -53,6 +53,7 @@ const ( var ( vtgateHostsFile = flag.String("vtgate_hosts_file", "", "json file describing the host list to use for vtgate:// resolution") numConnections = flag.Int("num_connections", 4, "number of outbound GPRC connections to maintain") + numBackupConns = flag.Int("num_backup_conns", 1, "number of backup remote-zone GPRC connections to maintain") poolTypeField = flag.String("pool_type_field", "", "Field name used to specify the target vtgate type and filter the hosts") affinityField = flag.String("affinity_field", "", "Attribute (JSON file) used to specify the routing affinity , e.g. 'az_id'") affinityValue = flag.String("affinity_value", "", "Value to match for routing affinity , e.g. 'use-az1'") @@ -233,6 +234,7 @@ func Init() { *affinityField, *affinityValue, *numConnections, + *numBackupConns, ) if err != nil {
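To make the effect of this last patch concrete, the selection added to parse() boils down to the helper sketched below. selectTargets is an invented name and the explicit copy is a stylistic choice, but the arithmetic matches the hunk above: after shuffleSort has moved zone-local hosts to the front, keep num_connections hosts from the head and num_backup_conns from the tail, or the whole list when there are not enough hosts to split.

    // selectTargets mirrors the selection in parse(): the head of the shuffled
    // slice is zone-local, the tail is remote.
    func selectTargets(sorted []targetHost, numConnections, numBackupConns int) []targetHost {
        if len(sorted) < numConnections+numBackupConns {
            // Not enough hosts to split into local + backup: keep everything.
            return sorted
        }
        selected := make([]targetHost, 0, numConnections+numBackupConns)
        selected = append(selected, sorted[:numConnections]...)             // local-zone primaries
        selected = append(selected, sorted[len(sorted)-numBackupConns:]...) // remote-zone backups
        return selected
    }

With the defaults (num_connections=4, num_backup_conns=1) each proxy therefore keeps five targets per pool: four local primaries plus one backup that, under sticky_random, is only picked while no local sub-connection is ready — assuming the pool actually has hosts outside the local zone to land in the tail.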