diff --git a/server/etcdmain/config.go b/server/etcdmain/config.go
index e31d1c7cf8a..fa5d6d161a1 100644
--- a/server/etcdmain/config.go
+++ b/server/etcdmain/config.go
@@ -276,7 +276,7 @@ func newConfig() *config {
 	fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
 	fs.DurationVar(&cfg.ec.ExperimentalCompactionSleepInterval, "experimental-compaction-sleep-interval", cfg.ec.ExperimentalCompactionSleepInterval, "Sets the sleep interval between each compaction batch.")
 	fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
-	fs.DurationVar(&cfg.ec.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ec.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status check.")
+	fs.DurationVar(&cfg.ec.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ec.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status checks.")
 	fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.")
 	fs.DurationVar(&cfg.ec.WarningUnaryRequestDuration, "warning-unary-request-duration", cfg.ec.WarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time.")
 	fs.DurationVar(&cfg.ec.ExperimentalWarningUnaryRequestDuration, "experimental-warning-unary-request-duration", cfg.ec.ExperimentalWarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead.")
diff --git a/server/etcdmain/config_test.go b/server/etcdmain/config_test.go
index 30239374eda..8332a892a97 100644
--- a/server/etcdmain/config_test.go
+++ b/server/etcdmain/config_test.go
@@ -15,6 +15,7 @@
 package etcdmain
 
 import (
+	"flag"
 	"fmt"
 	"net/url"
 	"os"
@@ -24,6 +25,7 @@ import (
 
 	"sigs.k8s.io/yaml"
 
+	"go.etcd.io/etcd/pkg/v3/flags"
 	"go.etcd.io/etcd/server/v3/embed"
 )
 
@@ -378,6 +380,21 @@ func TestConfigFileElectionTimeout(t *testing.T) {
 	}
 }
 
+func TestFlagsPresentInHelp(t *testing.T) {
+	cfg := newConfig()
+	cfg.cf.flagSet.VisitAll(func(f *flag.Flag) {
+		if _, ok := f.Value.(*flags.IgnoredFlag); ok {
+			// Ignored flags do not need to be in the help
+			return
+		}
+
+		flagText := fmt.Sprintf("--%s", f.Name)
+		if !strings.Contains(flagsline, flagText) && !strings.Contains(usageline, flagText) {
+			t.Errorf("Neither flagsline nor usageline in help.go contains flag named %s", flagText)
+		}
+	})
+}
+
 func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
 	tmpfile, err := os.CreateTemp("", "servercfg")
 	if err != nil {
diff --git a/server/etcdmain/help.go b/server/etcdmain/help.go
index 49b7114e6cd..1f139680d3d 100644
--- a/server/etcdmain/help.go
+++ b/server/etcdmain/help.go
@@ -23,6 +23,7 @@ import (
 
 	cconfig "go.etcd.io/etcd/server/v3/config"
 	"go.etcd.io/etcd/server/v3/embed"
+	"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
 )
 
 var (
@@ -95,7 +96,13 @@ Member:
   --socket-reuse-port 'false'
     Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.
   --socket-reuse-address 'false'
-    Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in TIME_WAIT state. 
+    Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in TIME_WAIT state.
+  --enable-grpc-gateway
+    Enable GRPC gateway.
+  --raft-read-timeout '` + rafthttp.DefaultConnReadTimeout.String() + `'
+    Read timeout set on each rafthttp connection
+  --raft-write-timeout '` + rafthttp.DefaultConnWriteTimeout.String() + `'
+    Write timeout set on each rafthttp connection
 
 Clustering:
   --initial-advertise-peer-urls 'http://localhost:2380'
@@ -170,6 +177,10 @@ Security:
     Path to the client server TLS key file.
   --client-cert-auth 'false'
     Enable client cert authentication.
+  --client-cert-file ''
+    Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.
+  --client-key-file ''
+    Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.
   --client-crl-file ''
     Path to the client certificate revocation list file.
   --client-cert-allowed-hostname ''
@@ -184,6 +195,10 @@ Security:
     Path to the peer server TLS key file.
   --peer-client-cert-auth 'false'
     Enable peer client cert authentication.
+  --peer-client-cert-file ''
+    Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.
+  --peer-client-key-file ''
+    Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.
   --peer-trusted-ca-file ''
     Path to the peer server TLS trusted CA file.
   --peer-cert-allowed-cn ''
@@ -282,6 +297,16 @@ Experimental feature:
     Set the maximum time duration to wait for the cluster to be ready.
   --experimental-snapshot-catch-up-entries '5000'
     Number of entries for a slow follower to catch up after compacting the raft storage entries.
+  --experimental-compaction-sleep-interval
+    Sets the sleep interval between each compaction batch.
+  --experimental-downgrade-check-time
+    Duration of time between two downgrade status checks.
+  --experimental-enable-lease-checkpoint-persist 'false'
+    Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.
+  --experimental-memory-mlock
+    Enable to enforce etcd pages (in particular bbolt) to stay in RAM.
+  --experimental-snapshot-catchup-entries
+    Number of entries for a slow follower to catch up after compacting the raft storage entries.
 
 Unsafe feature:
   --force-new-cluster 'false'
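
The new TestFlagsPresentInHelp guards against future drift by walking every registered flag with flag.FlagSet.VisitAll and substring-matching "--<name>" against the flagsline and usageline help constants. A minimal, self-contained sketch of that same pattern follows; the flag set and help string are illustrative stand-ins, not etcd's real definitions.

// A toy version of the VisitAll check added in config_test.go.
// Only --name is documented in the help text below, so --data-dir
// should be reported as missing.
package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	fs := flag.NewFlagSet("etcd-demo", flag.ContinueOnError)
	fs.String("name", "default", "Human-readable name for this member.")
	fs.String("data-dir", "${name}.etcd", "Path to the data directory.")

	help := `
Member:
  --name 'default'
    Human-readable name for this member.
`

	// Visit every registered flag and check the help text mentions it,
	// the same substring test the real guard applies to flagsline and usageline.
	fs.VisitAll(func(f *flag.Flag) {
		flagText := fmt.Sprintf("--%s", f.Name)
		if !strings.Contains(help, flagText) {
			fmt.Printf("help text is missing %s\n", flagText)
		}
	})
}

Running this prints "help text is missing --data-dir", which mirrors how the real test fails for each etcd flag that neither help constant documents.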