From 9198439ef6d091119d304038e35e44d22bf7b20f Mon Sep 17 00:00:00 2001
From: Steve Jenson
Date: Mon, 18 Jul 2016 14:09:07 -0700
Subject: [PATCH] Connection reuse is now the default. Log min/max latency

---
 CHANGELOG.md |  3 +++
 README.md    |  7 ++++---
 main.go      | 40 ++++++++++++++++++++++++++++++++++------
 3 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bfec3ea..361a471 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,9 +4,12 @@ All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](http://semver.org/).
 
 ## Unreleased
+### Added
+- We now output min and max latency numbers bracketing the latency percentiles. (Fixes #13)
 ### Changed
 - Output now shows good/bad/failed requests
 - Improved qps calculation when fractional milliseconds are involved. (Fixed #5)
+- -reuse is now on by default. If you want to turn reuse off, use -noreuse (Fixes #11)
 ### Added
 - You can now pass a CSV of Host headers and it will fairly split traffic with
   each Host header.
diff --git a/README.md b/README.md
index f303cc4..ad0c102 100644
--- a/README.md
+++ b/README.md
@@ -31,14 +31,15 @@ How often to report to stdout.
 
 The url to send backend traffic to.
 
-`-reuse`
+`-noreuse`
 
-Reuse connections and reuse a single thread-safe http client.
+Do not reuse connections. (Connection reuse is the default.)
 
 `-compress`
 
 Ask for compressed responses.
 
+
 # Using multiple Host headers
 
 If you want to send multiple Host headers to a backend, pass a comma separated
@@ -97,7 +98,7 @@ slowdowns. If you're running multi-hour tests, bumping up the reporting
 interval to 60 seconds (`60s` or `1m`) is recommended.
 
 ```
-$timestamp $good/$bad/$failed requests $size kilobytes $interval [$p50 $p95 $p99 $p999]
+$timestamp $good/$bad/$failed requests $size kilobytes $interval $min [$p50 $p95 $p99 $p999] $max
 ```
 
 ## Tips and tricks
diff --git a/main.go b/main.go
index 745cdc8..5b2127a 100644
--- a/main.go
+++ b/main.go
@@ -22,6 +22,8 @@ import (
 	"github.com/codahale/hdrhistogram"
 )
 
+const MAX_INT64 = 9223372036854775807
+
 type MeasuredResponse struct {
 	sz      uint64
 	code    int
@@ -33,12 +35,12 @@ type MeasuredResponse struct {
 func newClient(
 	compress bool,
 	https bool,
-	reuse bool,
+	noreuse bool,
 	maxConn uint,
 ) *http.Client {
 	tr := http.Transport{
 		DisableCompression:  !compress,
-		DisableKeepAlives:   !reuse,
+		DisableKeepAlives:   noreuse,
 		MaxIdleConnsPerHost: int(maxConn),
 	}
 	if https {
@@ -111,7 +113,9 @@ func main() {
 	host := flag.String("host", "web", "value of Host header to set")
 	urldest := flag.String("url", "http://localhost:4140/", "Destination url")
 	interval := flag.Duration("interval", 10*time.Second, "reporting interval")
-	reuse := flag.Bool("reuse", false, "reuse connections")
+	// FIX: remove this flag before open source release.
+	reuse := flag.Bool("reuse", true, "reuse connections. (deprecated: no need to set)")
+	noreuse := flag.Bool("noreuse", false, "don't reuse connections")
 	compress := flag.Bool("compress", false, "use compression")
 
 	flag.Usage = func() {
@@ -129,6 +133,10 @@ func main() {
 		exUsage("concurrency must be at least 1")
 	}
 
+	if *reuse {
+		fmt.Printf("-reuse has been deprecated. Connection reuse is now the default\n")
+	}
+
 	hosts := strings.Split(*host, ",")
 
 	dstURL, err := url.Parse(*urldest)
@@ -142,6 +150,8 @@ func main() {
 	good := uint64(0)
 	bad := uint64(0)
 	failed := uint64(0)
+	min := int64(MAX_INT64)
+	max := int64(0)
 	// from 0 to 1 minute in nanoseconds
 	// FIX: verify that these buckets work correctly for our use case.
 	hist := hdrhistogram.New(0, 60000000000, 5)
@@ -150,7 +160,7 @@ func main() {
 	timeToWait := CalcTimeToWait(qps)
 
 	doTLS := dstURL.Scheme == "https"
-	client := newClient(*compress, doTLS, *reuse, *concurrency)
+	client := newClient(*compress, doTLS, *noreuse, *concurrency)
 	var sendTraffic sync.WaitGroup
 
 	for i := uint(0); i < *concurrency; i++ {
@@ -187,22 +197,31 @@ func main() {
 				os.Exit(1)
 			}()
 		case t := <-timeout:
+			// When all requests are failures, ensure we don't accidentally
+			// print out a monstrously huge number.
+			if min == MAX_INT64 {
+				min = 0
+			}
 			// Periodically print stats about the request load.
-			fmt.Printf("%s %6d/%1d/%1d requests %6d kilobytes %s [%3d %3d %3d %4d ]\n",
+			fmt.Printf("%s %6d/%1d/%1d requests %6d kilobytes %s %3d [%3d %3d %3d %4d ] %4d\n",
 				t.Format(time.RFC3339),
 				good,
 				bad,
 				failed,
 				(size / 1024),
 				interval,
+				min/1000000,
 				hist.ValueAtQuantile(50)/1000000,
 				hist.ValueAtQuantile(95)/1000000,
 				hist.ValueAtQuantile(99)/1000000,
-				hist.ValueAtQuantile(999)/1000000)
+				hist.ValueAtQuantile(999)/1000000,
+				max/1000000)
 			count = 0
 			size = 0
 			good = 0
 			bad = 0
+			min = MAX_INT64
+			max = 0
 			failed = 0
 			hist.Reset()
 			timeout = time.After(*interval)
@@ -218,6 +237,15 @@ func main() {
 			} else {
 				bad++
 			}
+
+			if managedResp.latency < min {
+				min = managedResp.latency
+			}
+
+			if managedResp.latency > max {
+				max = managedResp.latency
+			}
+
 			hist.RecordValue(managedResp.latency)
 		}
 	}
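
A note on the new MAX_INT64 sentinel: the hand-written constant is numerically the same as the standard library's math.MaxInt64. The sketch below shows the per-interval min/max bookkeeping the patch adds (initialize min to the sentinel, clamp it back to 0 when nothing completed in the interval, report in milliseconds). The report helper and the sample latencies are illustrative only, not code from the patch:

```go
package main

import (
	"fmt"
	"math"
)

// report prints interval min/max latency in milliseconds.
// Latencies are nanoseconds, matching the units used in main.go.
func report(latencies []int64) {
	min, max := int64(math.MaxInt64), int64(0)
	for _, l := range latencies {
		if l < min {
			min = l
		}
		if l > max {
			max = l
		}
	}
	// With no completed requests, avoid printing the sentinel itself.
	if min == math.MaxInt64 {
		min = 0
	}
	fmt.Printf("min %d ms, max %d ms\n", min/1000000, max/1000000)
}

func main() {
	report([]int64{12000000, 4000000, 90000000}) // min 4 ms, max 90 ms
	report(nil)                                  // min 0 ms, max 0 ms
}
```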
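On the deprecation notice: because -reuse now defaults to true, the `if *reuse` check above fires on every run, not only when a caller actually passes -reuse. If warning only on explicit use of the deprecated flag is preferred, the standard library's flag.Visit (which walks only flags set on the command line) can make that distinction. A minimal sketch under that assumption; the flag names mirror the patch, but none of this is part of the change itself:

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	// Same flag definitions as the patch; the -reuse value itself is unused here.
	flag.Bool("reuse", true, "reuse connections. (deprecated: no need to set)")
	noreuse := flag.Bool("noreuse", false, "don't reuse connections")
	flag.Parse()

	// flag.Visit only walks flags that were explicitly set on the command line,
	// so the notice is skipped when -reuse is merely left at its default.
	flag.Visit(func(f *flag.Flag) {
		if f.Name == "reuse" {
			fmt.Println("-reuse has been deprecated. Connection reuse is now the default")
		}
	})

	fmt.Println("DisableKeepAlives:", *noreuse)
}
```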