diff --git a/Makefile b/Makefile index a3f95ff56..f43f14629 100644 --- a/Makefile +++ b/Makefile @@ -19,8 +19,8 @@ GO111MODULE_VALUE=auto PREFIX ?= out/ CMD=containerd-stargz-grpc ctr-remote - CMD_BINARIES=$(addprefix $(PREFIX),$(CMD)) +BENCHMARK_OUTPUT=out-bench/ .PHONY: all build check install-check-tools install uninstall clean test test-root test-all integration test-optimize benchmark test-pullsecrets test-cri @@ -61,6 +61,7 @@ uninstall: clean: @echo "$@" @rm -f $(CMD_BINARIES) + @rm -rf $(BENCHMARK_OUTPUT) test: @echo "$@" @@ -81,6 +82,9 @@ test-optimize: benchmark: @./script/benchmark/test.sh +fs-bench: + @./script/fs-bench/test.sh + test-pullsecrets: @./script/pullsecrets/test.sh diff --git a/script/fs-bench/fio/Dockerfile b/script/fs-bench/fio/Dockerfile new file mode 100644 index 000000000..b8fcaff0b --- /dev/null +++ b/script/fs-bench/fio/Dockerfile @@ -0,0 +1,24 @@ +FROM alpine:3.9 AS stage-build + +WORKDIR /work +RUN apk add git build-base linux-headers && \ + git clone git://git.kernel.dk/fio.git /work/fio && \ + cd /work/fio && \ + git checkout c96b385b6e0c78478697713e6da9174fba2432d3 && \ + ./configure --prefix=/work/root && \ + make && \ + make install + +FROM alpine:3.9 AS stage-final + +# How many bytes fio is going to read/write, 512MiB by default. +ARG size=256M + +# How many concurrent reading threads. +ARG thread=4 + +WORKDIR /work +COPY --from=stage-build /work/root/ / + +# Lay out test files here, so that they can be compressed into the image layer. +RUN fio -directory=/work -direct=1 -numjobs=$thread -size=$size -name=test -rw=read \ No newline at end of file diff --git a/script/fs-bench/test.sh b/script/fs-bench/test.sh new file mode 100755 index 000000000..6d3369412 --- /dev/null +++ b/script/fs-bench/test.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cleanup() { + local ORG_EXIT_CODE="${1}" + rm "${DOCKER_COMPOSE_YAML}" || true + exit "${ORG_EXIT_CODE}" +} +trap 'cleanup "$?"' EXIT SIGHUP SIGINT SIGQUIT SIGTERM + +CONTEXT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/" +REPO="${CONTEXT}../../" + +echo "Preparing docker-compose.yml..." +DOCKER_COMPOSE_YAML=$(mktemp) +BENCHMARKING_NODE=fs-bench +BENCHMARKING_CONTAINER=fs-bench +cat < "${DOCKER_COMPOSE_YAML}" +version: "3" +services: + ${BENCHMARKING_NODE}: + build: + context: "${CONTEXT}/work" + dockerfile: Dockerfile + container_name: ${BENCHMARKING_CONTAINER} + privileged: true + working_dir: /go/src/github.com/containerd/stargz-snapshotter + command: tail -f /dev/null + environment: + - NO_PROXY=127.0.0.1,localhost + - HTTP_PROXY=${HTTP_PROXY:-} + - HTTPS_PROXY=${HTTPS_PROXY:-} + - http_proxy=${http_proxy:-} + - https_proxy=${https_proxy:-} + tmpfs: + - /tmp:exec,mode=777 + volumes: + - "${REPO}:/go/src/github.com/containerd/stargz-snapshotter:ro" + - "/dev/fuse:/dev/fuse" + - "containerd-data:/var/lib/containerd:delegated" + - "containerd-stargz-grpc-data:/var/lib/containerd-stargz-grpc:delegated" + - "containerd-stargz-grpc-status:/run/containerd-stargz-grpc:delegated" +volumes: + containerd-data: + containerd-stargz-grpc-data: + containerd-stargz-grpc-status: +EOF + +echo "Setup benchmark environment..." +docker-compose -f "${DOCKER_COMPOSE_YAML}" build ${DOCKER_BUILD_ARGS:-} "${BENCHMARKING_NODE}" +docker-compose -f "${DOCKER_COMPOSE_YAML}" up -d --force-recreate + +echo "Benchmarking..." 
+docker exec -i "${BENCHMARKING_CONTAINER}" script/fs-bench/work/run.sh + +echo "Harvesting output..." +docker cp "${BENCHMARKING_CONTAINER}:/output" "${REPO}/out-bench" + +echo "Cleaning up benchmark environment..." +docker-compose -f "${DOCKER_COMPOSE_YAML}" down -v \ No newline at end of file diff --git a/script/fs-bench/work/Dockerfile b/script/fs-bench/work/Dockerfile new file mode 100644 index 000000000..4b2237ca6 --- /dev/null +++ b/script/fs-bench/work/Dockerfile @@ -0,0 +1,43 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM golang:1.13 + +# basic tools +COPY ./tools /tmp/tools + +RUN apt-get update -y && \ + apt-get install --no-install-recommends -y libbtrfs-dev libseccomp-dev fuse python \ + apt-transport-https software-properties-common && \ + curl -Lo gnuplot-5.2.8.tar.gz \ + https://sourceforge.net/projects/gnuplot/files/gnuplot/5.2.8/gnuplot-5.2.8.tar.gz/download && \ + tar xf gnuplot-5.2.8.tar.gz && \ + cd gnuplot-5.2.8 && \ + ./configure && make && make install && \ + cd /tmp/tools && \ + GO111MODULE=on go build -o "/usr/local/bin/process" "./process/main.go" && \ + GO111MODULE=on go build -o "/usr/local/bin/scrape" "./scrape/main.go" + +# runtime dependencies +RUN git clone https://github.com/opencontainers/runc \ + $GOPATH/src/github.com/opencontainers/runc && \ + cd $GOPATH/src/github.com/opencontainers/runc && \ + git checkout d736ef14f0288d6993a1845745d6756cfc9ddd5a && \ + GO111MODULE=off make -j2 BUILDTAGS='seccomp apparmor' && \ + GO111MODULE=off make install && \ + git clone https://github.com/containerd/containerd \ + $GOPATH/src/github.com/containerd/containerd && \ + cd $GOPATH/src/github.com/containerd/containerd && \ + git checkout 990076b731ec9446437972b41176a6b0f3b7bcbf && \ + GO111MODULE=off make -j2 && GO111MODULE=off make install \ No newline at end of file diff --git a/script/fs-bench/work/config/config.containerd.toml b/script/fs-bench/work/config/config.containerd.toml new file mode 100644 index 000000000..32f99b120 --- /dev/null +++ b/script/fs-bench/work/config/config.containerd.toml @@ -0,0 +1,4 @@ +[proxy_plugins] + [proxy_plugins.stargz] + type = "snapshot" + address = "/run/containerd-stargz-grpc/containerd-stargz-grpc.sock" diff --git a/script/fs-bench/work/config/config.stargz.toml b/script/fs-bench/work/config/config.stargz.toml new file mode 100644 index 000000000..3defb02db --- /dev/null +++ b/script/fs-bench/work/config/config.stargz.toml @@ -0,0 +1 @@ +noprefetch = true diff --git a/script/fs-bench/work/config/fio.conf 
b/script/fs-bench/work/config/fio.conf
new file mode 100644
index 000000000..cfed665c1
--- /dev/null
+++ b/script/fs-bench/work/config/fio.conf
@@ -0,0 +1,10 @@
+[test]
+# TODO randomize the size of each read (pread, to be precise).
+bs=4k
+rw=randread
+size=256M
+numjobs=4
+directory=/work
+write_bw_log=/output/test
+write_iops_log=/output/test
+write_lat_log=/output/test
\ No newline at end of file
diff --git a/script/fs-bench/work/reset.sh b/script/fs-bench/work/reset.sh
new file mode 100755
index 000000000..a3c35c477
--- /dev/null
+++ b/script/fs-bench/work/reset.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+# Copyright The containerd Authors.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+CONTAINERD_ROOT=/var/lib/containerd/
+CONTAINERD_CONFIG_DIR=/etc/containerd/
+REMOTE_SNAPSHOTTER_SOCKET=/run/containerd-stargz-grpc/containerd-stargz-grpc.sock
+REMOTE_SNAPSHOTTER_ROOT=/var/lib/containerd-stargz-grpc/
+REMOTE_SNAPSHOTTER_CONFIG_DIR=/etc/containerd-stargz-grpc/
+
+# Default the option variables; without these, `set -u` aborts the script
+# whenever a given flag/argument is not passed by the caller.
+NOSNAPSHOTTER=
+NOCLEANUP=
+SCRAPE_OUTPUT_FILENAME=
+
+# Parse arguments: flags select behavior, any other argument is the
+# output filename for the `scrape` metrics collector.
+for arg; do
+  case x"$arg" in
+  x-nosnapshotter)
+    NOSNAPSHOTTER="-nosnapshotter"
+    ;;
+  x-nocleanup)
+    NOCLEANUP="-nocleanup"
+    ;;
+  x*)
+    SCRAPE_OUTPUT_FILENAME="$arg"
+    ;;
+  esac
+done
+
+RETRYNUM=30
+RETRYINTERVAL=1
+TIMEOUTSEC=180
+# retry CMD...: run CMD under `timeout` up to RETRYNUM times, sleeping
+# RETRYINTERVAL seconds between attempts.
+function retry {
+  local SUCCESS=false
+  for i in $(seq ${RETRYNUM}) ; do
+    if eval "timeout ${TIMEOUTSEC} ${@}" ; then
+      SUCCESS=true
+      break
+    fi
+    echo "Fail(${i}). Retrying..."
+    sleep ${RETRYINTERVAL}
+  done
+  if [ "${SUCCESS}" == "true" ] ; then
+    return 0
+  else
+    return 1
+  fi
+}
+
+# kill_all NAME: SIGKILL every process whose command line matches NAME
+# (excluding the grep itself and this script).
+function kill_all {
+  if [ "${1}" != "" ] ; then
+    ps aux | grep "${1}" | grep -v grep | grep -v $(basename ${0}) | sed -E 's/ +/ /g' | cut -f 2 -d ' ' | xargs -I{} kill -9 {} || true
+  fi
+}
+
+# cleanup: wipe containerd and snapshotter state, unmounting any
+# still-mounted snapshot fs directories first.
+function cleanup {
+  rm -rf "${CONTAINERD_ROOT}"*
+  if [ -f "${REMOTE_SNAPSHOTTER_SOCKET}" ] ; then
+    rm "${REMOTE_SNAPSHOTTER_SOCKET}"
+  fi
+  if [ -d "${REMOTE_SNAPSHOTTER_ROOT}snapshotter/snapshots/" ] ; then
+    find "${REMOTE_SNAPSHOTTER_ROOT}snapshotter/snapshots/" \
+         -maxdepth 1 -mindepth 1 -type d -exec umount "{}/fs" \;
+  fi
+  rm -rf "${REMOTE_SNAPSHOTTER_ROOT}"*
+}
+
+echo "cleaning up the environment..."
+kill_all "containerd"
+kill_all "containerd-stargz-grpc"
+kill_all "scrape"
+if [ "$NOCLEANUP" == "-nocleanup" ]; then
+  echo "DO NOT CLEANUP containerd & stargz-snapshotter layers"
+else
+  cleanup
+fi
+if [ "${NOSNAPSHOTTER}" == "-nosnapshotter" ] ; then
+  echo "DO NOT RUN remote snapshotter"
+else
+  echo "running remote snapshotter..."
+  containerd-stargz-grpc --log-level=debug \
+                         --address="${REMOTE_SNAPSHOTTER_SOCKET}" \
+                         --config="${REMOTE_SNAPSHOTTER_CONFIG_DIR}config.stargz.toml" \
+                         &>/var/log/containerd-stargz-grpc.log &
+  retry ls "${REMOTE_SNAPSHOTTER_SOCKET}"
+  mkdir -p "$(dirname "${SCRAPE_OUTPUT_FILENAME}")"
+  scrape -output "${SCRAPE_OUTPUT_FILENAME}" -netinf eth0 -interval 1s \
+         -pid "$(ps aux | grep -v grep | grep containerd-stargz-grpc | awk '{print $2}')" &
+fi
+echo "running containerd..."
+containerd --log-level debug \
+           --config="${CONTAINERD_CONFIG_DIR}config.containerd.toml" &>/var/log/containerd.log &
retry ctr version
diff --git a/script/fs-bench/work/run.sh b/script/fs-bench/work/run.sh
new file mode 100755
index 000000000..da5b11307
--- /dev/null
+++ b/script/fs-bench/work/run.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+
+# Copyright The containerd Authors.
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +# Set environemnt variable if you want use a customize fio image, +# whose entrypoint must be the start of a fio test, and output all its logs +# (stdio (in file `stdio`), bw_log, iops_log, and lat_log) to /output. +# 256m_4t stands for 4 threads and each read 256MiB (1024MiB in total). +IMAGE_LEGACY="${IMAGE_LEGACY:-docker.io/stargz/fio:legacy_256m_4t}" +IMAGE_STARGZ="${IMAGE_STARGZ:-docker.io/stargz/fio:stargz_256m_4t}" +IMAGE_ESTARGZ="${IMAGE_ESTARGZ:-docker.io/stargz/fio:estargz_256m_4t}" + +CONTEXT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +FS_BENCH_TOOLS_DIR=$CONTEXT/tools +REBOOT_CONTAINERD_SCRIPT=$CONTEXT/reset.sh +REMOTE_SNAPSHOTTER_CONFIG_DIR=/etc/containerd-stargz-grpc/ +FIO_CONF=$CONTEXT/config/fio.conf + +specList() { + uname -r + cat /etc/os-release + cat /proc/cpuinfo + cat /proc/meminfo + mount + df -T +} +echo "Machine spec list:" +specList + +installRemoteSnapshotter() { + which containerd-stargz-grpc && return + local REPO_CONFIG_DIR=$CONTEXT/config/ + local CONTAINERD_CONFIG_DIR=/etc/containerd/ + + mkdir -p /tmp/out + PREFIX=/tmp/out/ make clean + PREFIX=/tmp/out/ make -j2 + PREFIX=/tmp/out/ make install + mkdir -p "${CONTAINERD_CONFIG_DIR}" && \ + cp "${REPO_CONFIG_DIR}"config.containerd.toml "${CONTAINERD_CONFIG_DIR}" + mkdir -p "${REMOTE_SNAPSHOTTER_CONFIG_DIR}" && \ + cp "${REPO_CONFIG_DIR}"config.stargz.toml "${REMOTE_SNAPSHOTTER_CONFIG_DIR}" +} 
+echo "Installing remote snapshotter..." +installRemoteSnapshotter + +set_noprefetch() { + local NOPREFETCH="${1}" + sed -i 's/noprefetch = .*/noprefetch = '"${NOPREFETCH}"'/g' "${REMOTE_SNAPSHOTTER_CONFIG_DIR}config.stargz.toml" +} + +testLegacy() { + local output="/output/legacy" + local containerID="fs-bench_legacy_$(basename $(mktemp))" + + mkdir -p "$output" + set_noprefetch "true" # disalbe prefetch + + "${REBOOT_CONTAINERD_SCRIPT}" -nosnapshotter -nocleanup + ctr-remote image pull "$IMAGE_LEGACY" + ctr-remote run --rm --snapshotter=overlayfs \ + --mount type=bind,src=$output,dst=/output,options=rbind:rw \ + --mount type=bind,src=$FIO_CONF,dst=/fio.conf,options=rbind:ro \ + "$IMAGE_LEGACY" "$containerID" \ + fio /fio.conf --output /output/summary.txt +} +echo "Benchmarking legacy image..." +testLegacy + +testStargz() { + local output="/output/stargz" + local tmpdir=$(mktemp -d) + local tmpMetrics=$(mktemp) + local containerID="fs-bench_stargz_$(basename $(mktemp))" + + mkdir -p "$output" + set_noprefetch "true" # disable prefetch + + "${REBOOT_CONTAINERD_SCRIPT}" "$tmpMetrics" + ctr-remote image rpull "$IMAGE_STARGZ" + ctr-remote run --rm --snapshotter=stargz \ + --mount type=bind,src=$output,dst=/output,options=rbind:rw \ + --mount type=bind,src=$FIO_CONF,dst=/fio.conf,options=rbind:ro \ + "$IMAGE_STARGZ" "$containerID" \ + fio /fio.conf --output /output/summary.txt + mv "$tmpMetrics" "$output/metrics" +} +echo "Benchmarking stargz image..." 
+testStargz + +testEstargz() { + local output="/output/estargz" + local tmpdir=$(mktemp -d) + local tmpMetrics=$(mktemp) + local containerID="fs-bench_estargz_$(basename $(mktemp))" + + mkdir -p "$output" + set_noprefetch "false" # enable prefetch + + "${REBOOT_CONTAINERD_SCRIPT}" "$tmpMetrics" + ctr-remote image rpull "$IMAGE_ESTARGZ" + ctr-remote run --rm --snapshotter=stargz \ + --mount type=bind,src=$output,dst=/output,options=rbind:rw \ + --mount type=bind,src=$FIO_CONF,dst=/fio.conf,options=rbind:ro \ + "$IMAGE_ESTARGZ" "$containerID" \ + fio /fio.conf --output /output/summary.txt + mv "$tmpMetrics" "$output/metrics" +} +echo "Benchmarking estargz image..." +testEstargz + +drawProcessMetrics() { + for c in stargz estargz ; do + tmpdir=$(mktemp -d) + mkdir -p $tmpdir/snapshotter + process -input /output/$c/metrics -output $tmpdir/snapshotter \ + -config $FS_BENCH_TOOLS_DIR/process/process.conf + pushd $tmpdir + gnuplot <$FS_BENCH_TOOLS_DIR/plot/snapshotter.gpm >/output/$c/snapshotter.svg + popd + rm -rf $tmpdir + done +} +echo "Drawing stargz-snapshotter process metrics..." +drawProcessMetrics + +drawFIOMetrics() { + for c in legacy stargz estargz ; do + plotOutputDir=$(mktemp -d) + $FS_BENCH_TOOLS_DIR/plot/fio.sh /output/$c $plotOutputDir + pushd $plotOutputDir + gnuplot /output/$c/fio.svg + popd + rm -rf plotOutputDir + done +} +echo "Drawing fio metrics..." 
+drawFIOMetrics \ No newline at end of file diff --git a/script/fs-bench/work/tools/go.mod b/script/fs-bench/work/tools/go.mod new file mode 100644 index 000000000..10192b76a --- /dev/null +++ b/script/fs-bench/work/tools/go.mod @@ -0,0 +1,11 @@ +module github.com/containerd/stargz-snapshotter/script/fs-bench/tools + +go 1.13 + +// DO NOT use `go mod tidy` here, it will probably introduce a problem like: +// https://github.com/prometheus/prometheus/issues/5590 + +require ( + github.com/c9s/goprocinfo v0.0.0-20200311234719-5750cbd54a3b // indirect + github.com/prometheus/prometheus v1.8.2-0.20191017095924-6f92ce560538 // indirect +) diff --git a/script/fs-bench/work/tools/plot/fio.sh b/script/fs-bench/work/tools/plot/fio.sh new file mode 100755 index 000000000..60913db4d --- /dev/null +++ b/script/fs-bench/work/tools/plot/fio.sh @@ -0,0 +1,143 @@ +#!/bin/bash + +set -euo pipefail + +DATA_DIR="${1}" +OUTPUT_DIR="${2}" + +processBW() { + local LOGFILE="$1" + awk -F ',' ' + BEGIN { + sum = 0.0 + cnt = 1 + lastMs = 0 + } + { + if ($1 != lastMs) { + printf("%d %g\n", lastMs, sum / 1024.0 / cnt) + sum = 0.0 + cnt = 1 + lastMs = $1 + next + } + sum += $2 + lastMs = $1 + cnt += 1 + } +' "$LOGFILE" +} + +processIOPS() { + local LOGFILE="$1" + awk -F ',' ' + BEGIN { + sum = 0.0 + cnt = 1 + lastMs = 0 + } + { + if ($1 != lastMs) { + printf("%d %g\n", lastMs, sum / cnt) + sum = 0.0 + cnt = 1 + lastMs = $1 + next + } + sum += $2 + lastMs = $1 + cnt += 1 + } +' "$LOGFILE" +} + +processLAT() { + local LOGFILE="$1" + awk -F ',' ' + BEGIN { + sum = 0.0 + cnt = 1 + lastMs = 0 + } + { + if ($1 != lastMs) { + printf("%d %g\n", lastMs, sum / 1000.0 / cnt) + sum = 0.0 + cnt = 1 + lastMs = $1 + next + } + sum += $2 + lastMs = $1 + cnt += 1 + } +' "$LOGFILE" +} + +cd "$DATA_DIR" +LOGS_BW=$(ls *_bw.*.log) +LOGS_IOPS=$(ls *_iops.*.log) +LOGS_LAT=$(ls *_lat.*.log) +LOGS_SLAT=$(ls *_slat.*.log) +LOGS_CLAT=$(ls *_clat.*.log) + +for log in $LOGS_BW; do + processBW "$log" >"$OUTPUT_DIR/$log" 
+done + +for log in $LOGS_IOPS; do + processIOPS "$log" >"$OUTPUT_DIR/$log" +done + +for log in $LOGS_LAT $LOGS_SLAT $LOGS_CLAT; do + processLAT "$log" >"$OUTPUT_DIR/$log" +done + +plotDerivatives() { + local LOGS="$(echo "$1" | tr '\n' ' ')" + if echo -n "$LOGS" | grep -q '[^ ][ ][^ ]'; then + firstLog=$(echo -n "$LOGS" | tr '\n' ' ' | sed 's/ .*$//') + restLog=$(echo -n "$LOGS" | tr '\n' ' ' | sed 's/^[^ ]* //') + printf "set key bottom right\n" + printf 'plot "%s" w l lw 2 title "%s"' "$firstLog" "${firstLog%.log}" + for log in $restLog; do + printf ', "%s" w l lw 2 title "%s"' "$log" "${log%.log}" + done + printf "\n" + else + printf "unset key\n" + printf 'plot "%s" w l lw 2\n' "$LOGS_BW" + fi +} + +cat >"$OUTPUT_DIR/plot.gpm" <:", q) + continue + } + name := strings.TrimSpace(fields[0]) + pq := strings.TrimSpace(fields[1]) + _, err := promql.ParseExpr(pq) + if err != nil { + log.Fatalf("invalid promql %q: %s", q, err) + } + queries[name] = pq + } + if err := sin.Err(); err != nil { + log.Fatalf("read config file %q: %s", *config, err) + } +} + +func buildMemRemoteStorage() error { + defer log.Printf("built memory storage") + + sin := bufio.NewScanner(inputFile) + for sin.Scan() { + ln := sin.Text() + fields := strings.Split(ln, " ") + if len(fields) != 3 { + log.Printf("invalid input line %q, want format ' '", ln) + continue + } + metricName := fields[0] + + timestamp, err := strconv.ParseInt(fields[1], 10, 64) + if err != nil { + log.Printf("invalid timestamp %q: %s", ln, err) + continue + } + if timestamp < oldestTs { + oldestTs = timestamp + } + if timestamp > newesTs { + newesTs = timestamp + } + + value, err := strconv.ParseFloat(fields[2], 64) + if err != nil { + log.Printf("invalid value %q: %s", ln, err) + continue + } + + storage[metricName] = append(storage[metricName], prompb.Sample{ + Value: value, + Timestamp: timestamp, + }) + } + err := sin.Err() + if err != nil { + return fmt.Errorf("read input file %q: %w", *input, err) + } + + sortedStorage 
:= map[string][]prompb.Sample{} + for metricName, samples := range storage { + sort.Slice(samples, func(i, j int) bool { + return samples[i].Timestamp < samples[j].Timestamp + }) + sortedStorage[metricName] = samples + } + storage = sortedStorage + return err +} + +func setupStorage() error { + server = httptest.NewServer(http.HandlerFunc(remoteReadHandler)) + log.Printf("setup mock server") + + remoteStorage = remote.NewStorage( + &promLogger{log.New(os.Stdout, "remote", log.LstdFlags)}, + prometheus.DefaultRegisterer, + func() (int64, error) { return time.Now().Unix() * 1000, nil }, + //func() (int64, error) { return oldestTs, nil }, + "data", + 10*time.Hour, + ) + u, _ := url.Parse(server.URL) + remoteCfg := &promConfig.Config{RemoteReadConfigs: []*promConfig.RemoteReadConfig{{ + URL: &commonConfig.URL{u}, + ReadRecent: true, + RemoteTimeout: model.Duration(10 * time.Hour), + }}} + + if err := remoteStorage.ApplyConfig(remoteCfg); err != nil { + log.Fatalf("apply config %v: %s", remoteCfg) + } + log.Printf("setup storage client") + + engine = promql.NewEngine(promql.EngineOpts{ + MaxConcurrent: 1, + MaxSamples: 1e6, + Timeout: 10 * time.Hour, + }) + log.Printf("setup promql engine") + return nil +} + +func remoteReadHandler(w http.ResponseWriter, r *http.Request) { + compressed, err := ioutil.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("read req body: %s", err))) + } + + reqBuf, err := snappy.Decode(nil, compressed) + if err != nil { + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("decompress req body: %s", err))) + } + + var req prompb.ReadRequest + if err := proto.Unmarshal(reqBuf, &req); err != nil { + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("unmarshal req body: %s", err))) + } + + rsts := []*prompb.QueryResult{} + + for _, q := range req.Queries { + var name string + for _, m := range q.Matchers { + if m.Name == "__name__" { + name = m.Value + break + } + } + if len(name) == 0 { + log.Printf("got prom remote 
read with out __name__ label: %+v", q) + continue + } + rsts = append(rsts, queryStorage(name, q.StartTimestampMs, q.EndTimestampMs)) + } + + data, err := proto.Marshal(&prompb.ReadResponse{Results: rsts}) + if err != nil { + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("marshal rsp body: %s", err))) + } + compressed = snappy.Encode(nil, data) + + w.Header().Set("Content-Type", "application/x-protobuf") + w.Header().Set("Content-Encoding", "snappy") + w.WriteHeader(200) + w.Write(compressed) +} + +func queryStorage(metricName string, start, end int64) *prompb.QueryResult { + samples := storage[metricName] + startI := sort.Search(len(samples), func(i int) bool { + return samples[i].Timestamp >= start + }) + endI := sort.Search(len(samples), func(i int) bool { + return samples[i].Timestamp > end + }) + return &prompb.QueryResult{Timeseries: []*prompb.TimeSeries{ + { + Labels: []prompb.Label{ + { + Name: "__name__", + Value: metricName, + }, + }, + Samples: samples[startI:endI], + }, + }} +} + +func executePromQL() error { + var ( + i = 0 + start = time.Unix(oldestTs/1000, oldestTs%1000*1e6) + end = time.Unix(newesTs/1000, newesTs%1000*1e6) + ) + + for name, query := range queries { + q, err := engine.NewRangeQuery(remoteStorage, query, start, end, time.Second) + if err != nil { + log.Printf("new range query %s %q: %s", name, query, err) + continue + } + i += 1 + log.Printf("(%d/%d) executing %s %q", i, len(queries), name, query) + rst := q.Exec(context.Background()) + if rst.Err != nil { + log.Printf("execute %s %q: %s", name, query, rst.Err) + continue + } + mst, err := rst.Matrix() + if err != nil { + log.Printf("not a matrix %s %+v: %s", name, rst, err) + continue + } + if len(mst) != 1 { + log.Printf("want exact 1 vector, got %d vectors", len(mst)) + continue + } + results[name] = mst[0].Points + } + return nil +} + +func outputResults() error { + sb := &strings.Builder{} + for name, pts := range results { + filename := filepath.Join(*output, name) + f, err := 
os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Printf("open %q: %s", filename, err) + continue + } + sb.Reset() + for i := range pts { + p := &pts[i] + sb.WriteString(properTimeStr(p.T) + " " + strconv.FormatFloat(p.V, 'g', -1, 64) + "\n") + } + if n, err := f.WriteString(sb.String()); err != nil { + log.Printf("output result to %q, written %d bytes, err %s", f.Name(), n, err) + continue + } + } + return nil +} + +func properTimeStr(t int64) string { + if *timefmt == "incsec" { + return strconv.FormatInt((t - oldestTs)/1000, 10) + } + return strconv.FormatInt(t, 10) +} + +type promLogger struct { + *log.Logger +} + +func (pl *promLogger) Log(kvs ...interface{}) error { + if len(kvs)%2 != 0 { + return errors.New("odd number of parameter to Log(kvs...)") + } + sb := &strings.Builder{} + for i := 0; i < len(kvs); i += 2 { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%s=%s", kvs[i], kvs[i+1])) + } + log.Println(sb.String()) + return nil +} diff --git a/script/fs-bench/work/tools/process/process.conf b/script/fs-bench/work/tools/process/process.conf new file mode 100644 index 000000000..b4137c136 --- /dev/null +++ b/script/fs-bench/work/tools/process/process.conf @@ -0,0 +1,16 @@ +iops-read : rate(syscr[30s]) +iops-write : rate(syscw[30s]) +bw-read : rate(readbytes[30s]) / 1024 / 1024 +bw-write : rate(writebytes[30s]) / 1024 / 1024 +cpu : rate(time[30s]) +cpu-user : rate(utime[30s]) +cpu-sys : rate(ctime[30s]) +mem-rss-mib : resident / 1024 / 1024 +mem-vir-mib : virtual / 1024 / 1024 +major-pagefault : rate(majflt[30s]) +minor-pagefault : rate(minflt[30s]) +fd : fd +rxbytes : rate(rxbytes[30s]) / 1024 / 1024 +txbytes : rate(txbytes[30s]) / 1024 / 1024 +rxpkts : rate(rxpkts[30s]) +txpkts : rate(txpkts[30s]) diff --git a/script/fs-bench/work/tools/scrape/main.go b/script/fs-bench/work/tools/scrape/main.go new file mode 100644 index 000000000..895231765 --- /dev/null +++ 
b/script/fs-bench/work/tools/scrape/main.go
@@ -0,0 +1,175 @@
+// Command scrape periodically samples /proc metrics of one process (plus one
+// network interface) and emits them as "<name> <unix-ms> <value>" lines.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	proc "github.com/c9s/goprocinfo/linux"
+)
+
+var (
+	pid      = flag.Int("pid", 1, "process to watch")
+	interval = flag.Duration("interval", time.Second, "how often scrape process metrics once")
+	output   = flag.String("output", "-", "output file, specify - for /dev/stdout")
+	netinf   = flag.String("netinf", "eth0", "network interface name")
+
+	outputFile = os.Stdout
+)
+
+func main() {
+	flag.Parse()
+
+	if *output != "-" && *output != "/dev/stdout" {
+		var err error
+		outputFile, err = os.OpenFile(*output, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+		if err != nil {
+			log.Fatalf("open %q: %s", *output, err)
+		}
+	}
+
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	go worker(wg, SetupSignalHandler())
+	wg.Wait()
+}
+
+// worker scrapes once immediately, then on every tick until stop is closed.
+func worker(wg *sync.WaitGroup, stop <-chan struct{}) {
+	defer wg.Done()
+
+	ticker := time.NewTicker(*interval)
+	defer ticker.Stop()
+
+	scrape(time.Now())
+
+	for {
+		select {
+		case <-stop:
+			return
+		case now := <-ticker.C:
+			scrape(now)
+		}
+	}
+}
+
+// See https://github.com/prometheus/procfs/blob/master/proc_stat.go for details on userHZ.
+const userHz = 100.0
+
+var pagesize = uint64(os.Getpagesize())
+
+func formatUint(u uint64) string {
+	return strconv.FormatUint(u, 10)
+}
+
+func formatFloat(f float64) string {
+	return strconv.FormatFloat(f, 'g', -1, 64)
+}
+
+// scrape reads the watched process's /proc entries and the configured network
+// interface counters, and appends one "<name> <ts-ms> <value>" line per metric
+// to outputFile. Errors are logged and the sample is skipped.
+func scrape(now time.Time) {
+	pp, err := proc.ReadProcess(uint64(*pid), "/proc")
+	if err != nil {
+		log.Printf("read /proc/%d: %s", *pid, err)
+		return
+	}
+
+	nets, err := proc.ReadNetworkStat("/proc/net/dev")
+	if err != nil {
+		log.Printf("read /proc/net/dev: %s", err)
+		return
+	}
+
+	var net *proc.NetworkStat
+	for i := range nets {
+		if nets[i].Iface == *netinf {
+			net = &nets[i]
+			break
+		}
+	}
+	if net == nil {
+		log.Printf("%s not found in /proc/net/dev", *netinf)
+		return
+	}
+
+	// Count open fds via /proc/<pid>/fd; leave nfd at zero if unreadable.
+	nfd := 0
+	fddir := filepath.Join("/proc", strconv.Itoa(*pid), "fd")
+	if fd, err := os.Open(fddir); err != nil {
+		log.Printf("read %s: %s", fddir, err)
+	} else {
+		fds, err := fd.Readdirnames(-1)
+		fd.Close() // must close every interval; leaving it open leaks one fd per scrape
+		if err != nil {
+			log.Printf("read %s dirnames: %s", fddir, err)
+		}
+		nfd = len(fds)
+	}
+
+	ts := " " + strconv.FormatInt(now.Unix()*1000+int64(now.Nanosecond())/1e6, 10) + " "
+	sb := &strings.Builder{}
+
+	// iops
+	sb.WriteString("syscr" + ts + formatUint(pp.IO.Syscr) + "\n")
+	sb.WriteString("syscw" + ts + formatUint(pp.IO.Syscw) + "\n")
+
+	// bw
+	sb.WriteString("readbytes" + ts + formatUint(pp.IO.ReadBytes) + "\n")
+	sb.WriteString("writebytes" + ts + formatUint(pp.IO.WriteBytes) + "\n")
+
+	// cpu time in seconds
+	sb.WriteString("utime" + ts + formatFloat(float64(pp.Stat.Utime)/userHz) + "\n")
+	sb.WriteString("ctime" + ts + formatFloat(float64(pp.Stat.Stime)/userHz) + "\n")
+	sb.WriteString("time" + ts + formatFloat(float64(pp.Stat.Utime+pp.Stat.Stime)/userHz) + "\n")
+
+	// mem in bytes
+	sb.WriteString("resident" + ts + formatUint(pp.Statm.Resident*pagesize) + "\n")
+	sb.WriteString("virtual" + ts + formatUint(pp.Stat.Vsize) + "\n")
+
+	// page faults
+	sb.WriteString("majflt" + ts + formatUint(pp.Stat.Majflt) + "\n")
+	sb.WriteString("minflt" + ts + formatUint(pp.Stat.Minflt) + "\n")
+
+	// fd
+	sb.WriteString("fd" + ts + strconv.Itoa(nfd) + "\n")
+
+	// net
+	sb.WriteString("rxbytes" + ts + formatUint(net.RxBytes) + "\n")
+	sb.WriteString("txbytes" + ts + formatUint(net.TxBytes) + "\n")
+	sb.WriteString("rxpkts" + ts + formatUint(net.RxPackets) + "\n")
+	sb.WriteString("txpkts" + ts + formatUint(net.TxPackets) + "\n")
+
+	if n, err := fmt.Fprint(outputFile, sb.String()); err != nil {
+		log.Printf("%s: write %s, written %d bytes, err %s", now, *output, n, err)
+	}
+}
+
+var onlyOneSignalHandler = make(chan struct{})
+var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
+
+// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned
+// which is closed on one of these signals. If a second signal is caught, the program
+// is terminated with exit code 1.
+func SetupSignalHandler() <-chan struct{} {
+	close(onlyOneSignalHandler) // panics when called twice
+
+	stop := make(chan struct{})
+	c := make(chan os.Signal, 2)
+	signal.Notify(c, shutdownSignals...)
+	go func() {
+		<-c
+		close(stop)
+		<-c
+		os.Exit(1) // second signal. Exit directly.
+	}()
+
+	return stop
+}
diff --git a/stargz/fs.go b/stargz/fs.go
index a1685b3c7..af611ae25 100644
--- a/stargz/fs.go
+++ b/stargz/fs.go
@@ -300,7 +300,7 @@ func (fs *filesystem) Mount(ctx context.Context, mountpoint string, labels map[s
 		Options: []string{"suid"}, // allow setuid inside container
 	})
 	if err != nil {
-		logCtx.WithError(err).Debug("failed to make filesstem server")
+		logCtx.WithError(err).Debug("failed to make filesystem server")
 		return err
 	}